[Misc] Remove unused executor.apply_model (#26215)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Cyrus Leung 2025-10-04 16:45:53 +08:00 committed by GitHub
parent 736fbf4c89
commit 7c2e91c4e0

@@ -7,8 +7,7 @@ from abc import ABC, abstractmethod
 from functools import cached_property
 from typing import Any, Awaitable, Callable, List, Optional, Set, Union
 
-import torch.nn as nn
-from typing_extensions import TypeVar, deprecated
+from typing_extensions import TypeVar
 
 import vllm.platforms
 from vllm.config import VllmConfig
@@ -127,16 +126,6 @@ class ExecutorBase(ABC):
         self.collective_rpc("initialize_cache",
                             args=(num_gpu_blocks, num_cpu_blocks))
 
-    @deprecated("`llm_engine.model_executor.apply_model` will no longer work "
-                "in V1 Engine. Please replace with `llm_engine.apply_model` "
-                "and set `VLLM_ALLOW_INSECURE_SERIALIZATION=1`.")
-    def apply_model(self, func: Callable[[nn.Module], _R]) -> list[_R]:
-        """
-        Run a function directly on the model inside each worker,
-        returning the result for each of them.
-        """
-        return self.collective_rpc("apply_model", args=(func, ))
-
     @cached_property  # Avoid unnecessary RPC calls
     def supported_tasks(self) -> tuple[SupportedTask, ...]:
         output = self.collective_rpc("get_supported_tasks")
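For downstream code that still called the removed method, the following is a minimal migration sketch based only on the deprecation message deleted above. The model name is a placeholder, and the exact call path (`llm.llm_engine.apply_model`) is an assumption taken from that message; verify it against the V1 engine API of your vLLM version.

import os

# Per the removed deprecation notice, this must be enabled so the user
# function can be serialized to the worker processes (assumption: set it
# before the engine is constructed).
os.environ["VLLM_ALLOW_INSECURE_SERIALIZATION"] = "1"

from vllm import LLM

llm = LLM(model="facebook/opt-125m")  # placeholder model for illustration

def count_parameters(model) -> int:
    # Runs inside each worker and receives that worker's torch.nn.Module.
    return sum(p.numel() for p in model.parameters())

# Old (removed by this commit):
#   llm.llm_engine.model_executor.apply_model(count_parameters)
# New, as suggested by the deprecation message:
results = llm.llm_engine.apply_model(count_parameters)
print(results)  # one result per worker

As before, the function is executed on every worker and the results are collected into a list, one entry per worker.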