Upgrade transformers version to 4.36.0 (#2046)

Woosuk Kwon, 2023-12-11 18:39:14 -08:00, committed by GitHub
parent f3e024bece
commit cb3f30c600
3 changed files with 5 additions and 5 deletions


@@ -10,7 +10,7 @@ numpy
 tokenizers>=0.15.0
 huggingface_hub<0.18,>=0.16.4
 einops # Required for phi-1_5
-transformers >= 4.34.0 # Required for Mistral.
+transformers >= 4.36.0 # Required for Mixtral.
 fastapi
 uvicorn[standard]
 pydantic == 1.10.13 # Required for OpenAI server.


@@ -7,7 +7,7 @@ sentencepiece # Required for LLaMA tokenizer.
 numpy
 einops # Required for phi-1_5
 torch >= 2.1.1
-transformers >= 4.34.0 # Required for Mistral.
+transformers >= 4.36.0 # Required for Mixtral.
 xformers >= 0.0.23 # Required for CUDA 12.1.
 fastapi
 uvicorn[standard]
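
Both requirements files move the minimum transformers version from 4.34.0 (required for Mistral) to 4.36.0, the release that added Mixtral support. A minimal runtime guard along these lines (a sketch, not part of this commit) would fail fast when an older transformers is installed:

# Sketch: verify the installed transformers is new enough for Mixtral.
from packaging.version import Version

import transformers

if Version(transformers.__version__) < Version("4.36.0"):
    raise ImportError(
        "Mixtral requires transformers >= 4.36.0, found "
        f"{transformers.__version__}.")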


@@ -29,7 +29,7 @@ import torch
 import torch.nn.functional as F
 from torch import nn
-from transformers import MistralConfig
+from transformers import MixtralConfig
 
 try:
     import megablocks.ops as ops
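
MixtralConfig only exists in transformers >= 4.36.0, so on older installs this import raises ImportError before any model code runs; that is why the requirements bump and this rename land in the same commit. A hedged sketch of how the dependency could be surfaced with an explicit message (not part of this commit):

try:
    from transformers import MixtralConfig
except ImportError as e:  # transformers < 4.36.0
    raise ImportError(
        "Mixtral support requires transformers >= 4.36.0.") from e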
@@ -395,7 +395,7 @@ class MixtralDecoderLayer(nn.Module):
     def __init__(
         self,
-        config: MistralConfig,
+        config: MixtralConfig,
     ) -> None:
         super().__init__()
        self.hidden_size = config.hidden_size
@@ -443,7 +443,7 @@ class MixtralForCausalLM(nn.Module):
     def __init__(
         self,
-        config: MistralConfig,
+        config: MixtralConfig,
         linear_method: Optional[LinearMethodBase] = None,
     ) -> None:
         super().__init__()
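
For reference, the renamed config exposes the fields read above (e.g. config.hidden_size) plus Mixtral's MoE routing parameters. A minimal usage sketch; the values shown are the transformers 4.36.0 defaults, which correspond to Mixtral-8x7B:

from transformers import MixtralConfig

config = MixtralConfig()           # defaults correspond to Mixtral-8x7B
print(config.hidden_size)          # 4096
print(config.num_local_experts)    # 8 experts per MoE layer
print(config.num_experts_per_tok)  # 2 experts routed per token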