From f7389f4763c37579d249d0f9d80917e2ecfc4ead Mon Sep 17 00:00:00 2001
From: Zhuohan Li
Date: Wed, 2 Aug 2023 16:45:12 -0700
Subject: [PATCH] [Doc] Add Baichuan 13B to supported models (#656)

---
 README.md                               | 2 +-
 docs/source/models/supported_models.rst | 4 ++--
 vllm/model_executor/models/__init__.py  | 3 ++-
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 30746f0c7dad..7c4892a8f788 100644
--- a/README.md
+++ b/README.md
@@ -42,7 +42,7 @@ vLLM is flexible and easy to use with:
 
 vLLM seamlessly supports many Huggingface models, including the following architectures:
 
-- Baichuan-7B (`baichuan-inc/Baichuan-7B`)
+- Baichuan (`baichuan-inc/Baichuan-7B`, `baichuan-inc/Baichuan-13B-Chat`, etc.)
 - BLOOM (`bigscience/bloom`, `bigscience/bloomz`, etc.)
 - Falcon (`tiiuae/falcon-7b`, `tiiuae/falcon-40b`, `tiiuae/falcon-rw-7b`, etc.)
 - GPT-2 (`gpt2`, `gpt2-xl`, etc.)
diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst
index 46f7c1198cf0..aca7cbc85cd3 100644
--- a/docs/source/models/supported_models.rst
+++ b/docs/source/models/supported_models.rst
@@ -15,8 +15,8 @@ Alongside each architecture, we include some popular models that use it.
     - Models
     - Example HuggingFace Models
   * - :code:`BaiChuanForCausalLM`
-    - Baichuan-7B
-    - :code:`baichuan-inc/Baichuan-7B`.
+    - Baichuan
+    - :code:`baichuan-inc/Baichuan-7B`, :code:`baichuan-inc/Baichuan-13B-Chat`, etc.
   * - :code:`BloomForCausalLM`
     - BLOOM, BLOOMZ, BLOOMChat
     - :code:`bigscience/bloom`, :code:`bigscience/bloomz`, etc.
diff --git a/vllm/model_executor/models/__init__.py b/vllm/model_executor/models/__init__.py
index 787cb4789e13..6d61f95452c1 100644
--- a/vllm/model_executor/models/__init__.py
+++ b/vllm/model_executor/models/__init__.py
@@ -1,4 +1,5 @@
-from vllm.model_executor.models.baichuan import BaiChuanForCausalLM, BaichuanForCausalLM
+from vllm.model_executor.models.baichuan import (BaiChuanForCausalLM,
+                                                 BaichuanForCausalLM)
 from vllm.model_executor.models.bloom import BloomForCausalLM
 from vllm.model_executor.models.falcon import FalconForCausalLM
 from vllm.model_executor.models.gpt2 import GPT2LMHeadModel
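
For reference, a minimal usage sketch (not part of the patch) of the newly listed 13B checkpoint through vLLM's offline `LLM` API. The `trust_remote_code=True` flag and the sampling values are assumptions for illustration; Baichuan checkpoints on the Hub ship custom tokenizer/config code, so loading them is assumed to require that flag.

```python
from vllm import LLM, SamplingParams

# Load the newly supported Baichuan 13B chat checkpoint.
# trust_remote_code=True is assumed to be needed for Baichuan's custom Hub code.
llm = LLM(model="baichuan-inc/Baichuan-13B-Chat", trust_remote_code=True)

# Illustrative sampling settings; tune for your use case.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=128)

outputs = llm.generate(["Hello, my name is"], sampling_params)
for output in outputs:
    print(output.outputs[0].text)
```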