[Misc] Minor fix in KVCache type (#3652)

Author: Woosuk Kwon (committed by GitHub)
Date:   2024-03-26 23:14:06 -07:00
Commit: e66b629c04
Parent: 76879342a3
3 changed files with 4 additions and 8 deletions


@@ -56,8 +56,8 @@ Next, you need to rewrite the :code:`forward` methods of your model by following
 -        return_dict: Optional[bool] = None,
 -    ) -> Union[Tuple, CausalLMOutputWithPast]:
 +        positions: torch.Tensor,
-+        kv_caches: List[KVCache],
-+        input_metadata: InputMetadata,
++        kv_caches: List[torch.Tensor],
++        attn_metadata: AttentionMetadata,
 +    ) -> Optional[SamplerOutput]:

 1. Update the code by considering that :code:`input_ids` and :code:`positions` are now flattened tensors.

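For context, a minimal sketch of the forward signature that the updated doc example describes. The class name and body are hypothetical, and the AttentionMetadata import path is assumed for this vLLM revision; only the parameter list and return type come from the doc diff above.

    from typing import List, Optional

    import torch
    from torch import nn

    # Assumed import path for this vLLM revision; it may differ in other versions.
    from vllm.attention import AttentionMetadata
    from vllm.sequence import SamplerOutput

    class MyModelForCausalLM(nn.Module):  # hypothetical model, for illustration only

        def forward(
            self,
            input_ids: torch.Tensor,        # flattened 1-D token ids
            positions: torch.Tensor,        # flattened 1-D position ids
            kv_caches: List[torch.Tensor],  # one cache tensor per attention layer
            attn_metadata: AttentionMetadata,
        ) -> Optional[SamplerOutput]:
            ...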

@@ -1,4 +1,4 @@
-from typing import List, Optional, Tuple
+from typing import List, Optional

 import torch
 from torch import nn
@@ -19,8 +19,6 @@ from vllm.model_executor.weight_utils import (default_weight_loader,
                                               hf_model_weights_iterator)
 from vllm.sequence import SamplerOutput

-KVCache = Tuple[torch.Tensor, torch.Tensor]
-
 _KEYS_TO_MODIFY_MAPPING = {
     "language_model.lm_head": "lm_head",
     "language_model.model": "language_model",
@@ -102,7 +100,7 @@ class LlavaForConditionalGeneration(nn.Module):
         self,
         input_ids: torch.Tensor,
         positions: torch.Tensor,
-        kv_caches: List[KVCache],
+        kv_caches: List[torch.Tensor],
         attn_metadata: AttentionMetadata,
         image_input: Optional[torch.Tensor] = None
     ) -> SamplerOutput:  # noqa: E501

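The removed alias annotated each layer's cache as a (key_cache, value_cache) pair, while the signatures above now take a single torch.Tensor per layer. A small sketch of that typing difference, assuming the internal key/value layout is left to the attention backend rather than exposed in the type:

    from typing import List, Tuple

    import torch

    # Old annotation: each layer's cache was typed as a (key_cache, value_cache) pair.
    KVCache = Tuple[torch.Tensor, torch.Tensor]
    kv_caches_old: List[KVCache]

    # New annotation: one cache tensor per layer; how keys and values are packed
    # inside it is assumed to be an attention-backend detail, not part of the type.
    kv_caches_new: List[torch.Tensor]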

@@ -14,8 +14,6 @@ from vllm.utils import (async_tensor_h2d, is_pin_memory_available,
 logger = init_logger(__name__)

-KVCache = Tuple[torch.Tensor, torch.Tensor]
-
 class NeuronModelRunner: