From 42b42824ae8223fa84e7545709572c1fd231436b Mon Sep 17 00:00:00 2001
From: Kevin McKay
Date: Sun, 21 Dec 2025 23:14:02 -0600
Subject: [PATCH] [Misc] Fix grammar errors in comments and messages (#31115)

Signed-off-by: c0de128
---
 tests/quantization/test_compressed_tensors.py | 6 +++---
 vllm/attention/ops/merge_attn_states.py       | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/quantization/test_compressed_tensors.py b/tests/quantization/test_compressed_tensors.py
index 412b21328a325..535f028202275 100644
--- a/tests/quantization/test_compressed_tensors.py
+++ b/tests/quantization/test_compressed_tensors.py
@@ -83,7 +83,7 @@ def test_compressed_tensors_w8a8_static_setup(vllm_runner, model_args):
         current_platform.is_rocm()
         and model_path not in ROCM_TRITON_SCALED_MM_SUPPORTED_INT8_MODEL
     ):
-        pytest.skip(f"Skip model {model_path} as it is not support on ROCm.")
+        pytest.skip(f"Skip model {model_path} as it is not supported on ROCm.")

     with vllm_runner(model_path, enforce_eager=True) as llm:
@@ -161,7 +161,7 @@ def test_compressed_tensors_w8a8_logprobs(
         current_platform.is_rocm()
         and model_path not in ROCM_TRITON_SCALED_MM_SUPPORTED_INT8_MODEL
     ):
-        pytest.skip(f"Skip model {model_path} as it is not support on ROCm.")
+        pytest.skip(f"Skip model {model_path} as it is not supported on ROCm.")

     if use_aiter:
         if model_path not in ROCM_AITER_SUPPORTED_INT8_MODEL:
@@ -231,7 +231,7 @@ def test_compressed_tensors_w8a8_dynamic_per_token(
         current_platform.is_rocm()
         and model_path not in ROCM_TRITON_SCALED_MM_SUPPORTED_INT8_MODEL
     ):
-        pytest.skip(f"Skip model {model_path} as it is not support on ROCm.")
+        pytest.skip(f"Skip model {model_path} as it is not supported on ROCm.")

     if use_aiter:
         if model_path not in ROCM_AITER_SUPPORTED_INT8_MODEL:
diff --git a/vllm/attention/ops/merge_attn_states.py b/vllm/attention/ops/merge_attn_states.py
index 16106f3c93a6a..f347fb3fbba51 100644
--- a/vllm/attention/ops/merge_attn_states.py
+++ b/vllm/attention/ops/merge_attn_states.py
@@ -15,7 +15,7 @@ def merge_attn_states(
     output_lse: torch.Tensor | None = None,
 ) -> None:
     # NOTE(DefTruth): Currently, custom merge_attn_states CUDA kernel
-    # is not support for FP8 dtype, fallback to use Triton kernel.
+    # does not support the FP8 dtype, so we fall back to the Triton kernel.
     def supported_dtypes(o: torch.Tensor) -> bool:
         return o.dtype in [torch.float32, torch.half, torch.bfloat16]
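
A note for reviewers on the merge_attn_states.py hunk: the comment it fixes
describes a dtype-gated dispatch in which tensors the custom CUDA kernel can
handle take the fast path, while anything else (notably FP8) falls back to the
Triton kernel. The sketch below is a minimal illustration of that pattern
only; cuda_merge, triton_merge, and merge are hypothetical stand-ins, not
vLLM's actual API, and it assumes a PyTorch build that defines
torch.float8_e4m3fn (PyTorch >= 2.1).

import torch

# Dtypes the custom kernel accepts, mirroring supported_dtypes() in the hunk.
CUDA_KERNEL_DTYPES = (torch.float32, torch.half, torch.bfloat16)

def cuda_merge(output: torch.Tensor) -> None:
    # Hypothetical stand-in for the custom CUDA merge kernel.
    print("custom CUDA kernel path")

def triton_merge(output: torch.Tensor) -> None:
    # Hypothetical stand-in for the Triton merge kernel.
    print("Triton fallback path")

def merge(output: torch.Tensor) -> None:
    # Dispatch on dtype, as the fixed NOTE describes: supported dtypes use
    # the custom CUDA kernel; everything else falls back to Triton.
    if output.dtype in CUDA_KERNEL_DTYPES:
        cuda_merge(output)
    else:
        triton_merge(output)

merge(torch.zeros(4, dtype=torch.bfloat16))       # custom CUDA kernel path
merge(torch.zeros(4, dtype=torch.float8_e4m3fn))  # Triton fallback path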