Mirror of https://git.datalinker.icu/vllm-project/vllm.git (synced 2025-12-14 06:25:02 +08:00)
[V1] Fix Detokenizer loading in AsyncLLM (#10997)
Signed-off-by: Roger Wang <ywang@roblox.com>
This commit is contained in:
  parent d1c2e15eb3
  commit c690357928
@@ -65,7 +65,12 @@ class AsyncLLM(EngineClient):
                                          input_registry)

         # Detokenizer (converts EngineCoreOutputs --> RequestOutput).
-        self.detokenizer = Detokenizer(vllm_config.model_config.tokenizer)
+        self.detokenizer = Detokenizer(
+            tokenizer_name=vllm_config.model_config.tokenizer,
+            tokenizer_mode=vllm_config.model_config.tokenizer_mode,
+            trust_remote_code=vllm_config.model_config.trust_remote_code,
+            revision=vllm_config.model_config.tokenizer_revision,
+        )

         # EngineCore (starts the engine in background process).
         self.engine_core = EngineCoreClient.make_client(
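For context, a minimal sketch of what the patched call does. Before the fix, only the tokenizer name was forwarded, so the tokenizer_mode, trust_remote_code, and tokenizer_revision settings from the model config were dropped when AsyncLLM built its Detokenizer; the fix forwards the full tokenizer configuration. The keyword names below come straight from the diff; the import path and the helper function are assumptions for illustration, not part of the patch.

    # Sketch only: the Detokenizer import path is assumed, and
    # build_detokenizer is a hypothetical helper for illustration.
    from vllm.v1.engine.detokenizer import Detokenizer  # assumed module path

    def build_detokenizer(vllm_config):
        model_config = vllm_config.model_config

        # Before the fix: only the tokenizer name was passed, so
        # tokenizer_mode / trust_remote_code / tokenizer_revision were
        # ignored when the tokenizer was loaded.
        # detokenizer = Detokenizer(model_config.tokenizer)

        # After the fix: the full tokenizer configuration is forwarded,
        # matching the keyword arguments shown in the diff above.
        return Detokenizer(
            tokenizer_name=model_config.tokenizer,
            tokenizer_mode=model_config.tokenizer_mode,
            trust_remote_code=model_config.trust_remote_code,
            revision=model_config.tokenizer_revision,
        )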