Various cosmetic/comment fixes (#12089)

Signed-off-by: mgoin <michael@neuralmagic.com>
Author: Michael Goin
Date: 2025-01-16 04:59:06 -05:00 (committed by GitHub)
parent f8ef146f03
commit 9aa1519f08
14 changed files with 16 additions and 29 deletions

@@ -42,7 +42,7 @@ class CompressedTensors24(CompressedTensorsScheme):
         if not sparse_cutlass_supported():
             raise ValueError(
-                "Sparse CUTLASS not supported. vLLM must be built with"
+                "Sparse CUTLASS not supported. vLLM must be built with "
                 "CUDA 12.2 or later to use this feature")
         self.output_dtype = params_dtype
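
Note: the one-character fix above matters because Python joins adjacent string literals with no separator, so the old message rendered as "...must be built withCUDA 12.2...". A minimal standalone sketch of that behavior (illustrative only, not part of the diff):

    # Adjacent string literals are concatenated at compile time; the separating
    # space has to be written explicitly inside one of the literals.
    broken = ("Sparse CUTLASS not supported. vLLM must be built with"
              "CUDA 12.2 or later to use this feature")
    fixed = ("Sparse CUTLASS not supported. vLLM must be built with "
             "CUDA 12.2 or later to use this feature")
    assert "withCUDA" in broken
    assert "with CUDA" in fixed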

@@ -390,8 +390,7 @@ class AriaMoELMModel(LlamaModel):
                 continue
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)
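
Note: the same two-line comment is collapsed to "# Loading kv cache quantization scales" in every load_weights loop below; the new wording simply drops the list of specific quantization backends. For context, a hedged, torch-free sketch of the branch being annotated (FakeParam and FakeQuantConfig are illustrative stand-ins, not vLLM classes; get_cache_scale is assumed to map a checkpoint tensor name to a registered kv-cache scale parameter name, or return None otherwise):

    class FakeParam:
        def __init__(self):
            self.value = None

    def default_weight_loader(param, loaded_weight):
        # Simplified copy; the real loader also handles sharding and dtypes.
        param.value = loaded_weight

    class FakeQuantConfig:
        def get_cache_scale(self, name):
            # Illustrative mapping from checkpoint name to scale param name.
            return name + ".registered" if name.endswith("k_scale") else None

    params_dict = {"layers.0.attn.k_scale.registered": FakeParam()}
    quant_config = FakeQuantConfig()

    for name, loaded_weight in [("layers.0.attn.k_scale", 0.125)]:
        if (quant_config is not None and
                (scale_name := quant_config.get_cache_scale(name))):
            # Loading kv cache quantization scales
            param = params_dict[scale_name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)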

@@ -440,8 +440,7 @@ class CohereForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

@@ -452,8 +452,7 @@ class DbrxForCausalLM(nn.Module, SupportsPP):
         for name, loaded_weight in weights:
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

@@ -533,8 +533,7 @@ class ExaoneForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
                 continue
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

@@ -316,8 +316,7 @@ class GPTJForCausalLM(nn.Module, SupportsPP):
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

@@ -475,8 +475,7 @@ class GraniteForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
                 continue
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

@@ -105,9 +105,9 @@ class LlamaAttention(nn.Module):
         max_position_embeddings: int = 8192,
         quant_config: Optional[QuantizationConfig] = None,
         bias: bool = False,
+        bias_o_proj: bool = False,
         cache_config: Optional[CacheConfig] = None,
-        prefix: str = "",
-        bias_o_proj: bool = False) -> None:
+        prefix: str = "") -> None:
         super().__init__()
         layer_idx = extract_layer_index(prefix)
         self.hidden_size = hidden_size
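
Note: the hunk above moves bias_o_proj next to bias instead of leaving it after prefix. Since all of these parameters have defaults, the reorder is only transparent to call sites that pass them by keyword; a minimal standalone sketch of that assumption (not vLLM code):

    # Keyword call sites are unaffected by reordering defaulted parameters;
    # a positional call site could silently bind values to the wrong names.
    def attention_old(bias=False, cache_config=None, prefix="", bias_o_proj=False):
        return (bias, bias_o_proj, cache_config, prefix)

    def attention_new(bias=False, bias_o_proj=False, cache_config=None, prefix=""):
        return (bias, bias_o_proj, cache_config, prefix)

    kwargs = dict(bias=True, bias_o_proj=True, prefix="model.layers.0.self_attn")
    assert attention_old(**kwargs) == attention_new(**kwargs)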
@@ -397,8 +397,7 @@ class LlamaModel(nn.Module):
                 continue
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

@@ -431,8 +431,7 @@ class MixtralForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

@@ -1432,8 +1432,7 @@ class MllamaForConditionalGeneration(nn.Module, SupportsMultiModal):
                 loaded_weight = loaded_weight.view(loaded_weight.shape[0], -1)
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

@@ -492,8 +492,7 @@ class NemotronForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
                 continue
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

@@ -626,8 +626,7 @@ class PhiMoEForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

@@ -367,8 +367,7 @@ class Qwen2Model(nn.Module):
                 continue
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

@@ -492,8 +492,7 @@ class SolarForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
                 continue
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)