[BugFix] Fix PP/async scheduling with pooling models (#28899)

Signed-off-by: Nick Hill <nhill@redhat.com>
Co-authored-by: Cyrus Leung <tlleungac@connect.ust.hk>
Nick Hill authored 2025-11-18 00:20:45 -08:00, committed by GitHub
parent 896e41ae04
commit 439368496d
2 changed files with 6 additions and 5 deletions


@@ -184,6 +184,7 @@ class EngineCore:
             vllm_config.ec_transfer_config is not None
             and vllm_config.ec_transfer_config.is_ec_producer
         )
+        self.is_pooling_model = vllm_config.model_config.runner_type == "pooling"

         self.request_block_hasher: Callable[[Request], list[BlockHash]] | None = None
         if vllm_config.cache_config.enable_prefix_caching or kv_connector is not None:
@@ -392,7 +393,7 @@ class EngineCore:
         if not self.ec_producer:
             model_executed = scheduler_output.total_num_scheduled_tokens > 0
-            if not model_executed:
+            if self.is_pooling_model or not model_executed:
                 # No sampling required (no requests scheduled).
                 future = cast(Future[ModelRunnerOutput], exec_future)
             else:
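
For readers skimming the hunks above: with async scheduling, EngineCore decides whether the executor's future already holds the final ModelRunnerOutput or whether a separate sampling stage still has to complete. Pooling models never sample, so the new is_pooling_model flag forces the "no sampling" branch even when tokens were scheduled. Below is a self-contained sketch of that decision, not vLLM's actual code; SimpleConfig and pick_output_future are made-up names, and the callback chaining at the end only stands in for whatever the else: branch does in the real file.

    # Condensed, self-contained sketch (not vLLM's code) of the decision the
    # EngineCore hunk changes: a sampler-output future is only awaited when the
    # model actually samples tokens. Pooling models never sample, so they take
    # the "no sampling" branch even when tokens were scheduled.
    from concurrent.futures import Future
    from dataclasses import dataclass


    @dataclass
    class SimpleConfig:
        runner_type: str               # "generate" or "pooling"
        total_num_scheduled_tokens: int


    def pick_output_future(cfg: SimpleConfig, exec_future: Future) -> Future:
        is_pooling_model = cfg.runner_type == "pooling"
        model_executed = cfg.total_num_scheduled_tokens > 0
        if is_pooling_model or not model_executed:
            # Pooling models (and empty steps) produce no sampled tokens, so the
            # executor's future already holds the final output.
            return exec_future
        # Generative models with scheduled tokens still need a sampling stage;
        # this chained future is only a stand-in for that extra stage.
        sampled: Future = Future()
        exec_future.add_done_callback(lambda f: sampled.set_result(f.result()))
        return sampled


    if __name__ == "__main__":
        f: Future = Future()
        f.set_result({"pooled": [0.1, 0.2]})
        out = pick_output_future(
            SimpleConfig(runner_type="pooling", total_num_scheduled_tokens=8), f
        )
        print(out.result())  # pooling model: executor future returned directly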


@@ -99,9 +99,9 @@ class RayDistributedExecutor(Executor):
         # KV connector setup
         self.has_connector = self.vllm_config.kv_transfer_config is not None
-        self.ec_producer = (
-            self.vllm_config.ec_transfer_config is not None
-            and self.vllm_config.ec_transfer_config.is_ec_producer
+        self.uses_sampler = self.vllm_config.model_config.runner_type != "pooling" and (
+            self.vllm_config.ec_transfer_config is None
+            or not self.vllm_config.ec_transfer_config.is_ec_producer
         )

         self.scheduler_output: SchedulerOutput | None = None
@@ -401,7 +401,7 @@ class RayDistributedExecutor(Executor):
                 "after execute_model() returns None."
             )
-        if self.ec_producer or not scheduler_output.total_num_scheduled_tokens:
+        if not self.uses_sampler or not scheduler_output.total_num_scheduled_tokens:
             # Model will not execute, call model runner immediately.
             return self._execute_dag(scheduler_output, None, non_block)
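
The executor-side change replaces the narrow ec_producer flag with a broader uses_sampler predicate: a sampling stage is dispatched only for generative (non-pooling) models that are not EC producers. The following self-contained sketch restates that predicate and the guard from the second hunk; ECTransferConfig, ModelConfig, uses_sampler, and should_skip_sampling_stage here are simplified stand-ins for illustration, not the actual vLLM classes or methods.

    # Simplified stand-ins for the corresponding vLLM config objects; only the
    # fields referenced by the diff are modeled.
    from dataclasses import dataclass


    @dataclass
    class ECTransferConfig:
        is_ec_producer: bool = False


    @dataclass
    class ModelConfig:
        runner_type: str = "generate"   # or "pooling"


    def uses_sampler(model_config: ModelConfig,
                     ec_transfer_config: ECTransferConfig | None) -> bool:
        # Sampling happens only for generative models that are not EC producers.
        return model_config.runner_type != "pooling" and (
            ec_transfer_config is None or not ec_transfer_config.is_ec_producer
        )


    def should_skip_sampling_stage(model_config: ModelConfig,
                                   ec_transfer_config: ECTransferConfig | None,
                                   total_num_scheduled_tokens: int) -> bool:
        # Mirrors the rewritten guard: dispatch without a sampling stage when no
        # sampler is used or nothing was scheduled.
        return (not uses_sampler(model_config, ec_transfer_config)
                or not total_num_scheduled_tokens)


    if __name__ == "__main__":
        print(should_skip_sampling_stage(ModelConfig("pooling"), None, 16))   # True
        print(should_skip_sampling_stage(ModelConfig("generate"), None, 16))  # False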