From 04a797cd0e94567ee2b158a0246d6112a197fdc5 Mon Sep 17 00:00:00 2001
From: Didier Durand <2927957+didier-durand@users.noreply.github.com>
Date: Sat, 29 Nov 2025 10:15:39 +0100
Subject: [PATCH] [Doc]: fixing typos in various files. (#29717)

Signed-off-by: Didier Durand
---
 vllm/distributed/device_communicators/pynccl_allocator.py | 2 +-
 vllm/distributed/parallel_state.py | 2 +-
 vllm/entrypoints/openai/serving_models.py | 2 +-
 .../layers/quantization/quark/schemes/quark_ocp_mx.py | 2 +-
 vllm/transformers_utils/repo_utils.py | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/vllm/distributed/device_communicators/pynccl_allocator.py b/vllm/distributed/device_communicators/pynccl_allocator.py
index 401b80046f60..2e5d94de9d01 100644
--- a/vllm/distributed/device_communicators/pynccl_allocator.py
+++ b/vllm/distributed/device_communicators/pynccl_allocator.py
@@ -157,7 +157,7 @@ class nccl_symm_mem_context:
         if self.disabled:
             return self
         assert self.pynccl_comm is not None, (
-            "Symmetric memory requires pynccl to be initalized"
+            "Symmetric memory requires pynccl to be initialized"
         )
         assert self.pynccl_comm.nccl_version >= 22703, (
             "NCCL version 2.27.3 or higher is required for NCCL symmetric memory"
diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py
index 52b433cfaf1b..c82a77c216af 100644
--- a/vllm/distributed/parallel_state.py
+++ b/vllm/distributed/parallel_state.py
@@ -1583,7 +1583,7 @@ def destroy_distributed_environment():
 
 
 def cleanup_dist_env_and_memory(shutdown_ray: bool = False):
-    # Ensure all objects are not freezed before cleanup
+    # Ensure all objects are not frozen before cleanup
     gc.unfreeze()
 
     destroy_model_parallel()
diff --git a/vllm/entrypoints/openai/serving_models.py b/vllm/entrypoints/openai/serving_models.py
index 165de5b618c4..953398a9a72a 100644
--- a/vllm/entrypoints/openai/serving_models.py
+++ b/vllm/entrypoints/openai/serving_models.py
@@ -150,7 +150,7 @@ class OpenAIServingModels:
             lora_request.base_model_name = base_model_name
 
         # Validate that the adapter can be loaded into the engine
-        # This will also pre-load it for incoming requests
+        # This will also preload it for incoming requests
         try:
             await self.engine_client.add_lora(lora_request)
         except Exception as e:
diff --git a/vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py b/vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py
index 33e9f9806b27..eeb60023dc0e 100644
--- a/vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py
+++ b/vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py
@@ -37,7 +37,7 @@ logger = init_logger(__name__)
 # use `rocm_aiter_ops.is_asm_fp4_gemm_dynamic_quant_enabled()`
 # for envs checks which does not require @cache anymore.
 # triton kernel is torch compile compatible.
-# does not require direct registeration.
+# does not require direct registration.
 # use `rocm_aiter_ops.triton_fp4_gemm_dynamic_qaunt`.
 @cache
 def is_rocm_aiter_fp4_asm_gemm_enabled() -> bool:
diff --git a/vllm/transformers_utils/repo_utils.py b/vllm/transformers_utils/repo_utils.py
index 3ccec04fc487..b63288914cf8 100644
--- a/vllm/transformers_utils/repo_utils.py
+++ b/vllm/transformers_utils/repo_utils.py
@@ -171,7 +171,7 @@ def file_or_path_exists(
        repo_id=model, filename=config_name, revision=revision
    )
    if isinstance(cached_filepath, str):
-        # The config file exists in cache- we can continue trying to load
+        # The config file exists in cache - we can continue trying to load
        return True
    # NB: file_exists will only check for the existence of the config file on