Add data_parallel_size to VllmConfig string representation (#24298)

Co-authored-by: Cong Chen <congc@meta.com>
This commit is contained in:
cong-meta 2025-09-08 21:35:18 -07:00 committed by GitHub
parent 82dfb12e52
commit b2f7745774
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -3790,6 +3790,7 @@ class VllmConfig:
f"load_format={self.load_config.load_format}, "
f"tensor_parallel_size={self.parallel_config.tensor_parallel_size}, " # noqa
f"pipeline_parallel_size={self.parallel_config.pipeline_parallel_size}, " # noqa
f"data_parallel_size={self.parallel_config.data_parallel_size}, " # noqa
f"disable_custom_all_reduce={self.parallel_config.disable_custom_all_reduce}, " # noqa
f"quantization={self.model_config.quantization}, "
f"enforce_eager={self.model_config.enforce_eager}, "