Skip device and quant Pydantic validation to make plugin device work (#18843)

Signed-off-by: Yikun Jiang <yikunkero@gmail.com>
This commit is contained in:
Yikun Jiang 2025-05-29 11:12:30 +08:00 committed by GitHub
parent 1661a9c28f
commit 3c49dbdd03
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -304,7 +304,7 @@ class ModelConfig:
- 25.6k -> 25,600"""
spec_target_max_model_len: Optional[int] = None
"""Specify the maximum length for spec decoding draft models."""
-    quantization: Optional[QuantizationMethods] = None
+    quantization: SkipValidation[Optional[QuantizationMethods]] = None
"""Method used to quantize the weights. If `None`, we first check the
`quantization_config` attribute in the model config file. If that is
`None`, we assume the model weights are not quantized and use `dtype` to
@@ -2231,7 +2231,7 @@ Device = Literal["auto", "cuda", "neuron", "cpu", "tpu", "xpu", "hpu"]
class DeviceConfig:
"""Configuration for the device to use for vLLM execution."""
-    device: Union[Device, torch.device] = "auto"
+    device: SkipValidation[Union[Device, torch.device]] = "auto"
"""Device type for vLLM execution.
This parameter is deprecated and will be
removed in a future release.