Fix LoRA test failures in TPU CI due to the removal of LoRA bias (#26723)
Signed-off-by: Xiongfei Wei <isaacwxf23@gmail.com>
Parent: 2e36cdbe2b
Commit: 4497c8f821
@@ -2128,12 +2128,11 @@ def replace_set_lora(model):
         lora_a: torch.Tensor,
         lora_b: torch.Tensor,
         embeddings_tensor: torch.Tensor | None,
-        bias: torch.Tensor | None = None,
     ):
         # TODO: The integer index leads to a recompilation, but converting it
         # to a tensor doesn't seem to work anymore. This might be fixed with a
         # later release of torch_xla.
-        self._original_set_lora(index, lora_a, lora_b, embeddings_tensor, bias)
+        self._original_set_lora(index, lora_a, lora_b, embeddings_tensor)
         torch_xla.sync(wait=False)
 
     def _tpu_reset_lora(self, index: int):
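For context, here is a minimal sketch of the monkey-patching pattern this hunk belongs to, reconstructed only from the names visible in the diff (replace_set_lora, _original_set_lora, _tpu_reset_lora, torch_xla.sync). The iteration over model.modules(), the hasattr guards, and _original_reset_lora are illustrative assumptions, not the actual vLLM implementation:

import types

import torch
import torch_xla


def replace_set_lora(model):
    def _tpu_set_lora(
        self,
        index: int,
        lora_a: torch.Tensor,
        lora_b: torch.Tensor,
        embeddings_tensor: torch.Tensor | None,
    ):
        # Forward to the saved original method without the removed `bias`
        # argument, then schedule an asynchronous XLA graph execution.
        self._original_set_lora(index, lora_a, lora_b, embeddings_tensor)
        torch_xla.sync(wait=False)

    def _tpu_reset_lora(self, index: int):
        # Hypothetical counterpart: the diff only shows this function's
        # signature, so the body here is an assumption.
        self._original_reset_lora(index)
        torch_xla.sync(wait=False)

    # Assumed wiring: save each LoRA-capable module's original methods and
    # rebind the TPU-aware wrappers onto the instance.
    for module in model.modules():
        if hasattr(module, "set_lora"):
            module._original_set_lora = module.set_lora
            module.set_lora = types.MethodType(_tpu_set_lora, module)
        if hasattr(module, "reset_lora"):
            module._original_reset_lora = module.reset_lora
            module.reset_lora = types.MethodType(_tpu_reset_lora, module)

The wait=False sync dispatches the LoRA weight update to the TPU without blocking the host thread. The fix itself simply drops the bias argument: after LoRA bias support was removed from vLLM, set_lora no longer accepts it, and the TPU wrapper's old call with a fifth argument is what broke the TPU CI.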