[torch.compile] Adding torch compile annotations to some models (#9614)

Yongzao 2024-10-24 01:07:48 +08:00 committed by GitHub
parent fd0e2cfdb2
commit 9013e24f7b
6 changed files with 12 additions and 0 deletions
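All six files make the same two-line change: import support_torch_compile from vllm.compilation.decorators and apply it as a class decorator to the model's core nn.Module, which opts that module into vllm's torch.compile path. Below is a minimal sketch of the pattern; ToyModel and its forward signature are hypothetical stand-ins rather than code from this diff, and whether such a toy module compiles end-to-end depends on vllm's compilation settings.

import torch
from torch import nn

from vllm.compilation.decorators import support_torch_compile


# Decorating the class is the whole annotation this commit adds per model;
# ToyModel is a placeholder for classes like BaiChuanModel or GPT2Model below.
@support_torch_compile
class ToyModel(nn.Module):

    def __init__(self) -> None:
        super().__init__()
        self.embed_tokens = nn.Embedding(1024, 64)
        self.embed_positions = nn.Embedding(2048, 64)
        self.proj = nn.Linear(64, 64)

    def forward(self, input_ids: torch.Tensor,
                positions: torch.Tensor) -> torch.Tensor:
        # Argument names mirror the ones vllm model classes use.
        hidden_states = self.embed_tokens(input_ids) + self.embed_positions(positions)
        return self.proj(hidden_states)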


@@ -26,6 +26,7 @@ from torch import nn
 from transformers import PretrainedConfig
 
 from vllm.attention import Attention, AttentionMetadata
+from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, LoRAConfig
 from vllm.distributed import (get_pp_group, get_tensor_model_parallel_rank,
                               get_tensor_model_parallel_world_size)
@@ -250,6 +251,7 @@ class BaiChuanDecoderLayer(nn.Module):
         return hidden_states, residual
 
 
+@support_torch_compile
 class BaiChuanModel(nn.Module):
 
     def __init__(self,


@@ -24,6 +24,7 @@ from torch import nn
 from transformers import BloomConfig
 
 from vllm.attention import Attention, AttentionMetadata
+from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig
 from vllm.distributed import (get_pp_group, get_tensor_model_parallel_rank,
                               get_tensor_model_parallel_world_size)
@@ -218,6 +219,7 @@ class BloomBlock(nn.Module):
         return output
 
 
+@support_torch_compile
 class BloomModel(nn.Module):
 
     def __init__(


@@ -28,6 +28,7 @@ from torch import nn
 from transformers import CohereConfig
 
 from vllm.attention import Attention, AttentionMetadata
+from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, LoRAConfig
 from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size
 from vllm.model_executor.layers.activation import SiluAndMul
@@ -250,6 +251,7 @@ class CohereDecoderLayer(nn.Module):
         return hidden_states, residual
 
 
+@support_torch_compile
 class CohereModel(nn.Module):
 
     def __init__(


@@ -29,6 +29,7 @@ import torch
 from torch import nn
 
 from vllm.attention import Attention, AttentionMetadata
+from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, LoRAConfig
 from vllm.distributed import (get_pp_group, get_tensor_model_parallel_rank,
                               get_tensor_model_parallel_world_size)
@@ -311,6 +312,7 @@ class ExaoneDecoderLayer(nn.Module):
         return hidden_states, residual
 
 
+@support_torch_compile
 class ExaoneModel(nn.Module):
 
     def __init__(


@@ -22,6 +22,7 @@ from torch import nn
 from transformers import GemmaConfig
 
 from vllm.attention import Attention, AttentionMetadata
+from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, LoRAConfig
 from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size
 from vllm.logger import init_logger
@@ -239,6 +240,7 @@ class GemmaDecoderLayer(nn.Module):
         return hidden_states, residual
 
 
+@support_torch_compile
 class GemmaModel(nn.Module):
 
     def __init__(


@@ -24,6 +24,7 @@ from torch import nn
 from transformers import GPT2Config
 
 from vllm.attention import Attention, AttentionMetadata
+from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig
 from vllm.distributed.parallel_state import (
     get_pp_group, get_tensor_model_parallel_world_size)
@@ -182,6 +183,7 @@ class GPT2Block(nn.Module):
         return hidden_states
 
 
+@support_torch_compile
 class GPT2Model(nn.Module):
 
     def __init__(