[Misc] refactor code by import as for torch._inductor.config (#23677)

Signed-off-by: Andy Xie <andy.xning@gmail.com>
Ning Xie 2025-09-01 14:05:42 +08:00 committed by GitHub
parent ff0e59d83a
commit 499b074bfd


@@ -138,14 +138,14 @@ def _torch_cuda_wrapper():
 @contextmanager
 def _set_global_compilation_settings(config: VllmConfig):
-    import torch._inductor.config
+    import torch._inductor.config as torch_inductor_config
     inductor_config = config.compilation_config.inductor_compile_config
     # Note: The MKLDNN and CPPGEMM backend requires freezing parameters.
-    freezing_value = torch._inductor.config.freezing
+    freezing_value = torch_inductor_config.freezing
     try:
         if inductor_config.get("max_autotune", False):
-            torch._inductor.config.freezing = True
+            torch_inductor_config.freezing = True
         yield
     finally:
-        torch._inductor.config.freezing = freezing_value
+        torch_inductor_config.freezing = freezing_value
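
For reference, below is a minimal standalone sketch of the pattern this refactor applies: alias the long module path once with "import ... as", record the global flag, optionally flip it, and restore it in a finally block. The plain dict argument and the usage at the bottom are simplifications standing in for vLLM's config.compilation_config.inductor_compile_config and the surrounding compile path, not the actual vLLM wiring.

# Sketch only: a plain dict replaces vLLM's inductor_compile_config.
from contextlib import contextmanager

import torch._inductor.config as torch_inductor_config


@contextmanager
def _set_global_compilation_settings(inductor_compile_config: dict):
    # Remember the current global Inductor flag so it can be restored.
    freezing_value = torch_inductor_config.freezing
    try:
        if inductor_compile_config.get("max_autotune", False):
            torch_inductor_config.freezing = True
        yield
    finally:
        # Restore the flag even if compilation inside the block raises.
        torch_inductor_config.freezing = freezing_value


# Usage: temporarily enable freezing while max_autotune is requested.
with _set_global_compilation_settings({"max_autotune": True}):
    pass  # torch.compile(...) would run here with freezing enabled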