[Kernel] Add punica dimension for Baichuan-13B (#4053)

Jee Li 2024-04-13 22:55:05 +08:00 committed by GitHub
parent 0a430b4ae2
commit 989ae2538d
3 changed files with 3 additions and 1 deletion


@@ -47,6 +47,7 @@ void bgmv_kernel(out_T *__restrict__ Y, const in_T *__restrict__ X,
 f(in_T, out_T, W_T, narrow, 13696) \
 f(in_T, out_T, W_T, narrow, 13824) \
 f(in_T, out_T, W_T, narrow, 14336) \
+f(in_T, out_T, W_T, narrow, 15360) \
 f(in_T, out_T, W_T, narrow, 16384) \
 f(in_T, out_T, W_T, narrow, 20480) \
 f(in_T, out_T, W_T, narrow, 22016) \

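The list above is the punica BGMV kernel's width table: only dimensions enumerated there get a kernel instantiation, so LoRA cannot be served for a layer whose width is missing. The new 15360 entry is Baichuan-13B's fused QKV width; a minimal sketch of the arithmetic, assuming Baichuan-13B's hidden size of 5120 and a W_pack projection that stacks Q, K and V:

```python
# Illustrative sketch (not part of the commit): where 15360 comes from,
# assuming Baichuan-13B's hidden_size of 5120 and a fused W_pack that
# stacks the Q, K and V projections along the output dimension.
hidden_size = 5120                 # Baichuan-13B hidden size (assumed)
qkv_packed_dim = 3 * hidden_size   # Q + K + V stacked -> 15360
assert qkv_packed_dim == 15360
print(f"punica BGMV needs a kernel instantiation for width {qkv_packed_dim}")
```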

@@ -62,7 +62,7 @@ def test_baichuan_lora(baichuan_lora_files):
 @pytest.mark.skip("Requires multiple GPUs")
-def test_llama_tensor_parallel_equality(baichuan_lora_files):
+def test_baichuan_tensor_parallel_equality(baichuan_lora_files):
     # Cannot use as it will initialize torch.cuda too early...
     # if torch.cuda.device_count() < 4:
     #     pytest.skip(f"Not enough GPUs for tensor parallelism {4}")

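The test is renamed so it matches the Baichuan LoRA fixture it actually exercises; the body is unchanged. The guard stays commented out because calling torch.cuda.device_count() initializes CUDA in the collecting process, which conflicts with the multi-process tensor-parallel setup the test needs. As a purely illustrative alternative (not part of this change, and assuming the optional pynvml package), GPUs could be counted via NVML without touching torch.cuda:

```python
# Hypothetical sketch (not in this commit): count GPUs via NVML so that
# torch.cuda is never initialized in the parent process.
def gpu_count_without_cuda_init() -> int:
    try:
        import pynvml  # optional dependency, assumed available for this sketch
        pynvml.nvmlInit()
        try:
            return pynvml.nvmlDeviceGetCount()
        finally:
            pynvml.nvmlShutdown()
    except Exception:
        return 0  # no NVML / no NVIDIA driver -> treat as zero GPUs

# Hypothetical usage inside the test:
# if gpu_count_without_cuda_init() < 4:
#     pytest.skip("Not enough GPUs for tensor parallelism 4")
```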

@@ -72,6 +72,7 @@ H1 = H2 = [
     11008,
     13824,
     14336,
+    15360,
     22016,
     24576,
     27392,
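The punica test sweeps H1 and H2 over the same widths the kernel config enumerates, so 15360 is added here as well; otherwise the new instantiation would ship untested. For orientation, the sketch below restates the BGMV operation (batched, index-gathered matrix-vector product) in plain PyTorch; the names, the [num_loras, H2, H1] weight layout and the scale argument are assumptions for illustration, not the test's actual reference implementation.

```python
# Rough reference semantics of BGMV, paraphrased for illustration only.
import torch

def bgmv_reference(y: torch.Tensor, x: torch.Tensor, w_all: torch.Tensor,
                   indices: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
    # y: [B, H2] accumulator, x: [B, H1] inputs,
    # w_all: [num_loras, H2, H1] stacked LoRA weights,
    # indices: [B] LoRA id selected for each token.
    for b in range(x.shape[0]):
        y[b] += scale * (w_all[indices[b]] @ x[b])
    return y

# Tiny shapes for the demo; the real test sweeps H1/H2 over the widths above,
# including the new 15360.
B, H1, H2, num_loras = 4, 16, 48, 3
out = bgmv_reference(torch.zeros(B, H2), torch.randn(B, H1),
                     torch.randn(num_loras, H2, H1),
                     torch.randint(num_loras, (B,)))
```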