# Mirror of https://git.datalinker.icu/vllm-project/vllm.git
# synced 2025-12-10 02:25:01 +08:00
# Registry of all decoder-only model implementations supported by vLLM.
# Each architecture class is re-exported here so callers can do
# `from vllm.model_executor.models import <Arch>`; `__all__` is the
# single source of truth for the public surface of this package.
#
# NOTE: keep both the import list and `__all__` sorted alphabetically
# (previously `chatglm` and "MistralForCausalLM" were appended out of
# order as they were added).
from vllm.model_executor.models.aquila import AquilaForCausalLM
from vllm.model_executor.models.baichuan import (BaiChuanForCausalLM,
                                                 BaichuanForCausalLM)
from vllm.model_executor.models.bloom import BloomForCausalLM
from vllm.model_executor.models.chatglm import ChatGLMForCausalLM
from vllm.model_executor.models.falcon import FalconForCausalLM
from vllm.model_executor.models.gpt2 import GPT2LMHeadModel
from vllm.model_executor.models.gpt_bigcode import GPTBigCodeForCausalLM
from vllm.model_executor.models.gpt_j import GPTJForCausalLM
from vllm.model_executor.models.gpt_neox import GPTNeoXForCausalLM
from vllm.model_executor.models.internlm import InternLMForCausalLM
from vllm.model_executor.models.llama import LlamaForCausalLM
from vllm.model_executor.models.mistral import MistralForCausalLM
from vllm.model_executor.models.mpt import MPTForCausalLM
from vllm.model_executor.models.opt import OPTForCausalLM
from vllm.model_executor.models.phi_1_5 import PhiForCausalLM
from vllm.model_executor.models.qwen import QWenLMHeadModel
from vllm.model_executor.models.yi import YiForCausalLM

__all__ = [
    "AquilaForCausalLM",
    "BaiChuanForCausalLM",
    "BaichuanForCausalLM",
    "BloomForCausalLM",
    "ChatGLMForCausalLM",
    "FalconForCausalLM",
    "GPT2LMHeadModel",
    "GPTBigCodeForCausalLM",
    "GPTJForCausalLM",
    "GPTNeoXForCausalLM",
    "InternLMForCausalLM",
    "LlamaForCausalLM",
    "MPTForCausalLM",
    "MistralForCausalLM",
    "OPTForCausalLM",
    "PhiForCausalLM",
    "QWenLMHeadModel",
    "YiForCausalLM",
]