diff --git a/tests/v1/e2e/test_async_scheduling.py b/tests/v1/e2e/test_async_scheduling.py
index 13b36c54123ce..5cef9b33c9984 100644
--- a/tests/v1/e2e/test_async_scheduling.py
+++ b/tests/v1/e2e/test_async_scheduling.py
@@ -152,8 +152,8 @@ def run_tests(
             m.setenv("VLLM_ATTENTION_BACKEND", "ROCM_AITER_FA")
         else:
             m.setenv("VLLM_ATTENTION_BACKEND", "FLEX_ATTENTION")
-        # lock matmul precision to full FP32
-        m.setenv("VLLM_FLOAT32_MATMUL_PRECISION", "highest")
+        # lock matmul precision to full FP32 (IEEE)
+        m.setenv("VLLM_FLOAT32_MATMUL_PRECISION", "ieee")
         # m.setenv("VLLM_BATCH_INVARIANT", "1")
         outputs: list[tuple[str, list, list]] = []
         for n, (
diff --git a/vllm/envs.py b/vllm/envs.py
index 0cf0408054063..cb75ba1a62de9 100755
--- a/vllm/envs.py
+++ b/vllm/envs.py
@@ -74,7 +74,7 @@ if TYPE_CHECKING:
     VLLM_MEDIA_CONNECTOR: str = "http"
     VLLM_TARGET_DEVICE: str = "cuda"
     VLLM_MAIN_CUDA_VERSION: str = "12.9"
-    VLLM_FLOAT32_MATMUL_PRECISION: Literal["highest", "high", "medium"] = "highest"
+    VLLM_FLOAT32_MATMUL_PRECISION: Literal["ieee", "tf32"] = "ieee"
     MAX_JOBS: str | None = None
     NVCC_THREADS: str | None = None
     VLLM_USE_PRECOMPILED: bool = False
@@ -456,11 +456,13 @@ environment_variables: dict[str, Callable[[], Any]] = {
     "VLLM_MAIN_CUDA_VERSION": lambda: os.getenv("VLLM_MAIN_CUDA_VERSION", "").lower()
     or "12.9",
     # Controls PyTorch float32 matmul precision mode within vLLM workers.
-    # Valid options mirror torch.set_float32_matmul_precision
+    # Accepted values:
+    # - "ieee" (default): force full IEEE FP32 matmul precision.
+    # - "tf32": enable TensorFloat32-based fast matmul.
     "VLLM_FLOAT32_MATMUL_PRECISION": env_with_choices(
         "VLLM_FLOAT32_MATMUL_PRECISION",
-        "highest",
-        ["highest", "high", "medium"],
+        "ieee",
+        ["ieee", "tf32"],
         case_sensitive=False,
     ),
     # Maximum number of compilation jobs to run in parallel.
diff --git a/vllm/v1/worker/gpu_worker.py b/vllm/v1/worker/gpu_worker.py
index f2b6a1f76b0b9..25ac5aaf99818 100644
--- a/vllm/v1/worker/gpu_worker.py
+++ b/vllm/v1/worker/gpu_worker.py
@@ -81,7 +81,7 @@ class Worker(WorkerBase):

         # configure float32 matmul precision according to vLLM env.
         precision = envs.VLLM_FLOAT32_MATMUL_PRECISION
-        torch.set_float32_matmul_precision(precision)
+        torch.backends.cuda.matmul.fp32_precision = precision

         if self.model_config.trust_remote_code:
             # note: lazy import to avoid importing torch before initializing
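
For context, a minimal standalone sketch (not part of the patch) of what the new setting does end to end. It assumes a PyTorch version that exposes `torch.backends.cuda.matmul.fp32_precision` with the values `"ieee"` and `"tf32"`, as the `gpu_worker.py` hunk above relies on; the bare `os.getenv` read here is illustrative only, since vLLM itself goes through `envs.VLLM_FLOAT32_MATMUL_PRECISION` via `env_with_choices`.

```python
import os

import torch

# Illustrative mirror of the worker-side logic: read the env var and map it
# onto PyTorch's per-backend FP32 matmul precision knob.
precision = os.getenv("VLLM_FLOAT32_MATMUL_PRECISION", "ieee").lower()
assert precision in ("ieee", "tf32"), precision

# "ieee" forces full IEEE-754 FP32 matmuls; "tf32" permits TensorFloat32
# (reduced-mantissa) matmuls for speed on Ampere and newer GPUs.
torch.backends.cuda.matmul.fp32_precision = precision

if torch.cuda.is_available():
    a = torch.randn(1024, 1024, device="cuda", dtype=torch.float32)
    b = torch.randn(1024, 1024, device="cuda", dtype=torch.float32)
    # Under "tf32" this result may differ slightly from the "ieee" result,
    # which is why the e2e test above pins the mode explicitly.
    print(precision, (a @ b).sum().item())
```

Note the design choice the diff encodes: the old `torch.set_float32_matmul_precision` values ("highest"/"high"/"medium") are replaced by the backend-specific flag with its own vocabulary ("ieee"/"tf32"), so the env var's accepted values change in lockstep.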