[Quantization] Enable compressed-tensors AWQ for Turing GPU (#29732)
Signed-off-by: Isotr0py <mozf@mail2.sysu.edu.cn>
This commit is contained in:
Parent: a491b0911b
Commit: e1464c3a08
@@ -79,8 +79,8 @@ class CompressedTensorsWNA16(CompressedTensorsScheme):

     @classmethod
     def get_min_capability(cls) -> int:
-        # ampere and up
-        return 80
+        # Turing and up
+        return 75

     def create_weights(
         self,
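For context, here is a minimal sketch of how a minimum-capability gate like this is typically evaluated. It is illustrative only, not vLLM's actual scheme-selection code: it assumes the device's compute capability is encoded as major * 10 + minor and compared against the scheme's get_min_capability() value, so lowering the threshold from 80 to 75 admits Turing (SM 7.5) GPUs in addition to Ampere and newer.

# Illustrative sketch of a compute-capability gate (not vLLM's real code path).
import torch


def device_capability_as_int() -> int:
    """Encode the CUDA compute capability as major * 10 + minor (e.g. 7.5 -> 75)."""
    major, minor = torch.cuda.get_device_capability()
    return major * 10 + minor


def scheme_is_supported(min_capability: int) -> bool:
    """Return True if the current GPU meets the scheme's minimum capability."""
    return device_capability_as_int() >= min_capability


if torch.cuda.is_available():
    # With this patch, CompressedTensorsWNA16.get_min_capability() returns 75,
    # so a Turing GPU (SM 7.5) now passes a check that previously required
    # Ampere (SM 8.0, encoded as 80).
    print("WNA16 supported on this GPU:", scheme_is_supported(75))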