Remove checks for None for fields which should never be None (#17985)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Harry Mellor 2025-05-28 22:32:19 +01:00 committed by GitHub
parent 643622ba46
commit 6dbe5b5c93


@@ -4282,25 +4282,22 @@ class VllmConfig:
             self.model_config.verify_dual_chunk_attention_config(
                 self.load_config)
 
-        if self.cache_config is not None:
-            self.cache_config.verify_with_parallel_config(self.parallel_config)
+        self.cache_config.verify_with_parallel_config(self.parallel_config)
 
-        if self.lora_config:
+        if self.lora_config is not None:
             self.lora_config.verify_with_cache_config(self.cache_config)
             self.lora_config.verify_with_model_config(self.model_config)
             self.lora_config.verify_lora_support()
-        if self.prompt_adapter_config:
+        if self.prompt_adapter_config is not None:
             self.prompt_adapter_config.verify_with_model_config(
                 self.model_config)
 
-        if self.quant_config is None and \
-            self.model_config is not None and self.load_config is not None:
+        if self.quant_config is None and self.model_config is not None:
             self.quant_config = VllmConfig._get_quantization_config(
                 self.model_config, self.load_config)
 
         from vllm.platforms import current_platform
-        if self.scheduler_config is not None and \
-            self.model_config is not None and \
+        if self.model_config is not None and \
             self.scheduler_config.chunked_prefill_enabled and \
             self.model_config.dtype == torch.float32 and \
             current_platform.get_device_capability() == (7, 5):
@@ -4335,8 +4332,7 @@ class VllmConfig:
 
         self._set_cudagraph_sizes()
 
-        if self.cache_config is not None and \
-            self.cache_config.cpu_offload_gb > 0 and \
+        if self.cache_config.cpu_offload_gb > 0 and \
             self.compilation_config.level != CompilationLevel.NO_COMPILATION \
                 and not envs.VLLM_USE_V1:
             logger.warning(
@@ -4358,16 +4354,16 @@ class VllmConfig:
                 "full_cuda_graph is not supported with "
                 "cascade attention. Disabling cascade attention.")
             self.model_config.disable_cascade_attn = True
 
-        if self.cache_config is not None:
-            self.cache_config.enable_prefix_caching = False
+        self.cache_config.enable_prefix_caching = False
-        if (self.kv_events_config
+        if (self.kv_events_config is not None
                 and self.kv_events_config.enable_kv_cache_events
                 and not self.cache_config.enable_prefix_caching):
             logger.warning(
                 "KV cache events are on, but prefix caching is not enabled."
                 "Use --enable-prefix-caching to enable.")
-        if (self.kv_events_config and self.kv_events_config.publisher != "null"
+        if (self.kv_events_config is not None
+                and self.kv_events_config.publisher != "null"
                 and not self.kv_events_config.enable_kv_cache_events):
             logger.warning("KV cache events are disabled,"
                            "but the scheduler is configured to publish them."