diff --git a/requirements-rocm.txt b/requirements-rocm.txt
index 42ab9d14450d..c2e0dc3f464f 100644
--- a/requirements-rocm.txt
+++ b/requirements-rocm.txt
@@ -10,7 +10,7 @@ numpy
 tokenizers>=0.15.0
 huggingface_hub<0.18,>=0.16.4
 einops # Required for phi-1_5
-transformers >= 4.34.0 # Required for Mistral.
+transformers >= 4.36.0 # Required for Mixtral.
 fastapi
 uvicorn[standard]
 pydantic == 1.10.13 # Required for OpenAI server.
diff --git a/requirements.txt b/requirements.txt
index 5ec101a080e2..04b19b97babf 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,7 +7,7 @@ sentencepiece # Required for LLaMA tokenizer.
 numpy
 einops # Required for phi-1_5
 torch >= 2.1.1
-transformers >= 4.34.0 # Required for Mistral.
+transformers >= 4.36.0 # Required for Mixtral.
 xformers >= 0.0.23 # Required for CUDA 12.1.
 fastapi
 uvicorn[standard]
diff --git a/vllm/model_executor/models/mixtral.py b/vllm/model_executor/models/mixtral.py
index c1fd26e9b759..3021ced88d07 100644
--- a/vllm/model_executor/models/mixtral.py
+++ b/vllm/model_executor/models/mixtral.py
@@ -29,7 +29,7 @@
 import torch
 import torch.nn.functional as F
 from torch import nn
-from transformers import MistralConfig
+from transformers import MixtralConfig
 
 try:
     import megablocks.ops as ops
@@ -395,7 +395,7 @@ class MixtralDecoderLayer(nn.Module):
 
     def __init__(
         self,
-        config: MistralConfig,
+        config: MixtralConfig,
     ) -> None:
         super().__init__()
         self.hidden_size = config.hidden_size
@@ -443,7 +443,7 @@ class MixtralForCausalLM(nn.Module):
 
     def __init__(
        self,
-        config: MistralConfig,
+        config: MixtralConfig,
         linear_method: Optional[LinearMethodBase] = None,
     ) -> None:
         super().__init__()