fix bug for pd step signal (#3230)

Author: chenjian
Date: 2025-08-06 10:41:52 +08:00
Committed by: GitHub
Parent: a9d231c900
Commit: a4572a5e5d
2 changed files with 6 additions and 5 deletions


@@ -61,6 +61,7 @@ class InternalAdapter:
             "max_batch_size": int(available_batch_size),
             "max_input_token_num": self.cfg.max_num_batched_tokens,
             "unhandled_request_num": self.engine.scheduler.get_unhandled_request_num(),
+            "available_batch": int(self.engine.resource_manager.available_batch()),
         }
         return server_info

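For context on this first hunk: server_info is the status payload the engine reports upward, and the fix adds the resource manager's actual free slot count alongside the configured maximum. Below is a minimal sketch of how a prefill/decode (PD) router might consume the new field; only the "available_batch" key comes from this commit, and the gating policy shown is an illustrative assumption.

    # Hypothetical consumer of the extended server_info payload. The
    # can_dispatch_prefill helper and its policy are assumptions for
    # illustration, not part of this commit.
    def can_dispatch_prefill(server_info: dict) -> bool:
        # Gate new prefill work on a real free slot in the decode
        # instance, not just on the configured max_batch_size.
        return server_info["available_batch"] > 0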

@@ -1268,11 +1268,6 @@ class GPUModelRunner(ModelRunnerBase):
                 We plan to replace it with 'ModelForwardBatch'.
             intermediate_tensors:
         """
-        # 1. Prepare inputs of model and sampler.
-        skip_idx_list = self._get_skip_idx(model_forward_batch)
-        self._prepare_inputs()
-        self.sampler.pre_process(skip_idx_list)
-
         # NOTE(wufeisheng): If `not_need_stop` is False, it means the current worker is in an idle state.
         # This logic is not used in TP (Tensor Parallelism) mode. However, in EP (Expert Parallelism) mode,
         # when there is data on other runners, the current runner is required to execute part of the model.
@@ -1280,6 +1275,11 @@ class GPUModelRunner(ModelRunnerBase):
             self._execute_empty_input()
             return None

+        # 1. Prepare inputs of model and sampler.
+        skip_idx_list = self._get_skip_idx(model_forward_batch)
+        self._prepare_inputs()
+        self.sampler.pre_process(skip_idx_list)
+
         # 2. Padding inputs for cuda graph
         self.padding_cudagraph_inputs()
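
The second file's change is a pure reordering: input preparation moves below the idle check. Previously an idle worker (possible in EP or PD-disaggregated serving) ran _get_skip_idx, _prepare_inputs, and sampler.pre_process before discovering it had nothing to do, mutating per-step state (the PD step signal) for a step that never executed. A minimal sketch of the corrected ordering follows; the method signature and the not_need_stop() guard are assumed from context rather than shown in this commit.

    # Sketch of the corrected control flow in GPUModelRunner.execute_model.
    # Names follow the diff; the enclosing guard is an assumption.
    def execute_model(self, model_forward_batch=None):
        # Idle check first: an idle worker must bail out before touching
        # sampler or scheduler state, so the step signal only advances
        # for steps that actually run.
        if not self.not_need_stop():
            self._execute_empty_input()
            return None

        # 1. Only live workers prepare model and sampler inputs.
        skip_idx_list = self._get_skip_idx(model_forward_batch)
        self._prepare_inputs()
        self.sampler.pre_process(skip_idx_list)

        # 2. Pad inputs so the batch matches a captured CUDA graph shape.
        self.padding_cudagraph_inputs()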