diff --git a/vllm/config.py b/vllm/config.py
index 085060535e2b..6cec97a5f11b 100644
--- a/vllm/config.py
+++ b/vllm/config.py
@@ -304,7 +304,7 @@ class ModelConfig:
     - 25.6k -> 25,600"""
     spec_target_max_model_len: Optional[int] = None
     """Specify the maximum length for spec decoding draft models."""
-    quantization: Optional[QuantizationMethods] = None
+    quantization: SkipValidation[Optional[QuantizationMethods]] = None
     """Method used to quantize the weights. If `None`, we first check the
     `quantization_config` attribute in the model config file. If that is
     `None`, we assume the model weights are not quantized and use `dtype` to
@@ -2231,7 +2231,7 @@ Device = Literal["auto", "cuda", "neuron", "cpu", "tpu", "xpu", "hpu"]
 class DeviceConfig:
     """Configuration for the device to use for vLLM execution."""
-    device: Union[Device, torch.device] = "auto"
+    device: SkipValidation[Union[Device, torch.device]] = "auto"
    """Device type for vLLM execution. This parameter is deprecated and will
     be removed in a future release.
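
Note on the change: wrapping these annotations in pydantic's `SkipValidation` keeps the declared type for documentation and tooling, but tells pydantic not to validate the field's value when the config dataclass is constructed (useful when a value such as a `torch.device` cannot be checked against the declared literal/union, or is normalized elsewhere). Below is a minimal, hypothetical sketch of that behaviour; `ExampleConfig` and its fields are made-up names, not vLLM code.

```python
# Hypothetical illustration of pydantic's SkipValidation; ExampleConfig and its
# fields are invented for this sketch and are not part of vLLM.
from typing import Optional, Union

from pydantic import SkipValidation, ValidationError
from pydantic.dataclasses import dataclass


@dataclass
class ExampleConfig:
    # Validated normally: a value that does not match Optional[str] is rejected.
    checked: Optional[str] = None
    # SkipValidation keeps the annotation but disables validation, so any
    # value is accepted as-is at construction time.
    unchecked: SkipValidation[Union[str, int]] = "auto"


cfg = ExampleConfig(checked="abc", unchecked=["not", "a", "str"])
print(cfg.unchecked)  # ['not', 'a', 'str'] -- passed through unvalidated

try:
    ExampleConfig(checked=["not", "a", "str"])
except ValidationError as err:
    print(err)  # the non-skipped field is still validated
```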