[Bugfix] Migrate to REGEX Library to prevent catastrophic backtracking (#18454)

Signed-off-by: Crucifixion-Fxl <xmufxl@gmail.com>
Co-authored-by: Crucifixion-Fxl <xmufxl@gmail.com>
Feng XiaoLong 2025-05-24 07:16:26 +08:00 committed by GitHub
parent f2036734fb
commit 4fc1bf813a
52 changed files with 62 additions and 58 deletions
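
Nearly every hunk below is the same mechanical swap: drop `import re` and import the third-party `regex` module as `re` instead, so existing call sites keep working against an `re`-compatible API. As a motivating illustration only (not part of this diff; the pattern, input, and timeout value are made-up examples), the sketch below shows the kind of catastrophic backtracking the stdlib `re` can hit and how `regex` lets matching time be bounded:

```python
# Illustrative sketch, not taken from this commit.
# The `regex` package is a drop-in replacement for the stdlib `re`; unlike
# `re`, its matching functions accept a timeout (in seconds), so a
# pathological pattern aborts quickly instead of hanging the process.
import regex as re

pattern = r"(a+)+$"        # nested quantifiers: prone to catastrophic backtracking
text = "a" * 40 + "b"      # almost-matching input triggers the blowup

try:
    re.match(pattern, text, timeout=1.0)
except TimeoutError:
    print("match aborted after 1 second")
```

The only non-mechanical edits are in a pytest file further down, where `re.compile(...)` results are passed to `pytest.raises(match=...)` as `.pattern` strings (see the note after that hunk).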

.github/scripts/cleanup_pr_body.sh (vendored; Executable file → Normal file)

@@ -26,7 +26,7 @@ sed -i '/\*\*BEFORE SUBMITTING, PLEASE READ.*\*\*/,$d' "${NEW}"
 # Remove HTML <details> section that includes <summary> text of "PR Checklist (Click to Expand)"
 python3 - <<EOF
-import re
+import regex as re
 with open("${NEW}", "r") as file:
     content = file.read()

@@ -672,7 +672,7 @@ async def benchmark(
 def evaluate(ret, args):

     def _eval_correctness_json(expected, actual):
         # extract json string from string using regex
-        import re
+        import regex as re
         actual = actual.replace("\n", "").replace(" ", "").strip()
         try:
@@ -687,7 +687,7 @@ def evaluate(ret, args):
         return actual in args.choice

     def _eval_correctness_regex(expected, actual):
-        import re
+        import regex as re
         return re.match(args.regex, actual) is not None

@@ -2,11 +2,11 @@
 import math
 import pickle
-import re
 from collections import defaultdict

 import matplotlib.pyplot as plt
 import pandas as pd
+import regex as re
 import seaborn as sns
 from torch.utils.benchmark import Measurement as TMeasurement

@@ -20,12 +20,12 @@ python prithvi_geospatial_mae.py
 import argparse
 import datetime
 import os
-import re
 from typing import Union

 import albumentations
 import numpy as np
 import rasterio
+import regex as re
 import torch
 from einops import rearrange
 from terratorch.datamodules import Sen1Floods11NonGeoDataModule

@@ -8,6 +8,7 @@ requires = [
     "setuptools-scm>=8.0",
     "torch == 2.7.0",
     "wheel",
+    "regex",
     "jinja2",
 ]
 build-backend = "setuptools.build_meta"

@@ -7,3 +7,4 @@ setuptools-scm>=8
 torch==2.7.0
 wheel
 jinja2>=3.1.6
+regex

@@ -1,3 +1,4 @@
+regex # Replace re for higher-performance regex matching
 cachetools
 psutil
 sentencepiece  # Required for LLaMA tokenizer.

setup.py (Executable file → Normal file)

@@ -5,12 +5,12 @@ import importlib.util
 import json
 import logging
 import os
-import re
 import subprocess
 import sys
 from pathlib import Path
 from shutil import which

+import regex as re
 import torch
 from packaging.version import Version, parse
 from setuptools import Extension, setup
@@ -389,7 +389,6 @@ class repackage_wheel(build_ext):
             # vllm_flash_attn python code:
             # Regex from
             # `glob.translate('vllm/vllm_flash_attn/**/*.py', recursive=True)`
-            import re
             compiled_regex = re.compile(
                 r"vllm/vllm_flash_attn/(?:[^/.][^/]*/)*(?!\.)[^/]*\.py")
             file_members += list(

@@ -1,12 +1,12 @@
 # SPDX-License-Identifier: Apache-2.0
 import json
-import re
 import weakref
 from enum import Enum

 import jsonschema
 import pytest
+import regex as re
 from pydantic import BaseModel

 from vllm.distributed import cleanup_dist_env_and_memory

@@ -2,13 +2,13 @@
 # imports for guided decoding tests
 import json
-import re
 from typing import Optional

 import jsonschema
 import openai  # use the official client for correctness check
 import pytest
 import pytest_asyncio
+import regex as re
 import requests
 import torch
 from openai import BadRequestError, OpenAI

@@ -1,8 +1,6 @@
 # SPDX-License-Identifier: Apache-2.0
 # imports for guided decoding tests
 import json
-import re
 import shutil
 from tempfile import TemporaryDirectory
 from typing import Optional
@@ -11,6 +9,7 @@ import jsonschema
 import openai  # use the official client for correctness check
 import pytest
 import pytest_asyncio
+import regex as re
 # downloading lora to test lora requests
 from huggingface_hub import snapshot_download
 from openai import BadRequestError

@@ -1,10 +1,9 @@
 # SPDX-License-Identifier: Apache-2.0
 # imports for guided decoding tests
-import re
-
 import openai
 import pytest
+import regex as re

 from ...utils import RemoteOpenAIServer
@@ -32,7 +31,7 @@ async def test_out_of_vocab_token_ids():
         client = remote_server.get_async_client()
         with pytest.raises(openai.BadRequestError,
-                           match=re.compile('.*out of vocabulary.*')):
+                           match=re.compile('.*out of vocabulary.*').pattern):
             await client.completions.create(model=model_name,
                                             prompt=[999999],
                                             max_tokens=5,
@@ -46,9 +45,10 @@ async def test_reject_multistep_with_guided_decoding():
     with RemoteOpenAIServer(model_name, server_args) as remote_server:
         client = remote_server.get_async_client()
-        with pytest.raises(openai.BadRequestError,
-                           match=re.compile(
-                               '.*Guided decoding .* multi-step decoding.*')):
+        with pytest.raises(
+                openai.BadRequestError,
+                match=re.compile(
+                    '.*Guided decoding .* multi-step decoding.*').pattern):
             await client.completions.create(
                 model=model_name,
                 prompt="Hello",
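
The `.pattern` additions in this hunk are the one place where the migration is not a pure import swap. A plausible reason (an assumption, not stated in the commit): `pytest.raises(match=...)` hands its argument to the stdlib `re.search`, which accepts plain strings or its own compiled patterns but not pattern objects from the `regex` package, so the tests pass the raw pattern string instead. A minimal sketch under that assumption:

```python
# Sketch only: assumes pytest feeds `match=` to the stdlib `re`, which rejects
# compiled patterns from the third-party `regex` package.
import re as stdlib_re

import regex

compiled = regex.compile('.*out of vocabulary.*')

stdlib_re.search(compiled.pattern, "token 999999 is out of vocabulary")  # OK: plain string
try:
    stdlib_re.search(compiled, "token 999999 is out of vocabulary")  # regex.Pattern, not re.Pattern
except TypeError as exc:
    print(f"stdlib re rejects it: {exc}")
```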

@@ -1,12 +1,12 @@
 # SPDX-License-Identifier: Apache-2.0
 import os
-import re
 from collections.abc import Sequence
 from typing import Optional

 import librosa
 import pytest
+import regex as re
 from huggingface_hub import snapshot_download
 from transformers import AutoTokenizer

@@ -3,11 +3,11 @@
 for manipulating the input / output of HF & vLLM test runners, which are
 typically specific to a small subset of models.
 """
-import re
 import types
 from pathlib import PosixPath
 from typing import Optional, Union

+import regex as re
 import torch
 from PIL.Image import Image
 from transformers import (AutoConfig, AutoTokenizer, BatchFeature,

@@ -1,10 +1,10 @@
 # SPDX-License-Identifier: Apache-2.0
 import json
-import re
 from copy import deepcopy
 from unittest.mock import MagicMock

 import pytest
+import regex as re
 from pydantic import TypeAdapter

 from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,

@@ -4,12 +4,12 @@
 from __future__ import annotations

 import json
-import re
 from enum import Enum
 from typing import TYPE_CHECKING, Any

 import jsonschema
 import pytest
+import regex as re
 from pydantic import BaseModel

 from tests.reasoning.utils import run_reasoning_extraction

@@ -1,11 +1,11 @@
 # SPDX-License-Identifier: Apache-2.0
-import re
 from typing import Optional

 import openai  # use the official client for correctness check
 import pytest
 import pytest_asyncio
+import regex as re
 from openai import BadRequestError

 from tests.utils import RemoteOpenAIServer

@@ -1,9 +1,10 @@
 # SPDX-License-Identifier: Apache-2.0
-import re
 from enum import Enum
 from typing import Optional

+import regex as re
+
 from vllm import CompletionOutput

@@ -6,7 +6,6 @@ import enum
 import hashlib
 import inspect
 import json
-import re
 import textwrap
 import uuid
 import warnings
@@ -20,6 +19,7 @@ from pathlib import Path
 from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Literal, Optional,
                     Protocol, TypeVar, Union, cast, get_args, get_origin)

+import regex as re
 import torch
 from torch.distributed import ProcessGroup, ReduceOp
 from transformers import PretrainedConfig

@@ -4,7 +4,6 @@
 import argparse
 import dataclasses
 import json
-import re
 import sys
 import threading
 import warnings
@@ -13,6 +12,7 @@ from itertools import permutations
 from typing import (Annotated, Any, Callable, Dict, List, Literal, Optional,
                     Type, TypeVar, Union, cast, get_args, get_origin)

+import regex as re
 import torch
 from typing_extensions import TypeIs, deprecated

@@ -7,7 +7,6 @@ import importlib
 import inspect
 import multiprocessing
 import os
-import re
 import signal
 import socket
 import tempfile
@@ -21,6 +20,7 @@ from json import JSONDecodeError
 from typing import Annotated, Optional, Union

 import prometheus_client
+import regex as re
 import uvloop
 from fastapi import APIRouter, Depends, FastAPI, Form, HTTPException, Request
 from fastapi.exceptions import RequestValidationError

@@ -3,11 +3,11 @@
 # Adapted from
 # https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/protocol/openai_api_protocol.py
 import json
-import re
 import time
 from http import HTTPStatus
 from typing import Annotated, Any, ClassVar, Literal, Optional, Union

+import regex as re
 import torch
 from fastapi import HTTPException, UploadFile
 from pydantic import (BaseModel, ConfigDict, Field, TypeAdapter,

@@ -2,7 +2,6 @@
 import asyncio
 import json
-import re
 import time
 from collections.abc import AsyncGenerator, AsyncIterator
 from collections.abc import Sequence as GenericSequence
@@ -10,6 +9,7 @@ from typing import Callable, Final, Optional, Union
 import jinja2
 import partial_json_parser
+import regex as re
 from fastapi import Request
 from pydantic import TypeAdapter

@@ -1,9 +1,10 @@
 # SPDX-License-Identifier: Apache-2.0
-import re
 from collections.abc import Sequence
 from typing import Union

+import regex as re
+
 from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,
                                               DeltaFunctionCall, DeltaMessage,
                                               DeltaToolCall,

@@ -1,12 +1,12 @@
 # SPDX-License-Identifier: Apache-2.0
 import json
-import re
 from collections.abc import Sequence
 from json import JSONDecoder
 from typing import Union

 import partial_json_parser
+import regex as re
 from partial_json_parser.core.options import Allow

 from vllm.entrypoints.chat_utils import random_tool_call_id

@@ -1,11 +1,11 @@
 # SPDX-License-Identifier: Apache-2.0
 import json
-import re
 from collections.abc import Sequence
 from typing import Union

 import partial_json_parser
+import regex as re
 from partial_json_parser.core.options import Allow

 from vllm.entrypoints.chat_utils import random_tool_call_id

@@ -1,11 +1,11 @@
 # SPDX-License-Identifier: Apache-2.0
 import json
-import re
 from collections.abc import Sequence
 from typing import Union

 import partial_json_parser
+import regex as re
 from partial_json_parser.core.options import Allow

 from vllm.entrypoints.chat_utils import random_tool_call_id

@@ -1,12 +1,12 @@
 # SPDX-License-Identifier: Apache-2.0
 import json
-import re
 from collections.abc import Sequence
 from json import JSONDecoder
 from typing import Union

 import partial_json_parser
+import regex as re
 from partial_json_parser.core.options import Allow
 from transformers import PreTrainedTokenizerBase

@@ -1,13 +1,13 @@
 # SPDX-License-Identifier: Apache-2.0
 import json
-import re
 from collections.abc import Sequence
 from random import choices
 from string import ascii_letters, digits
 from typing import Union

 import partial_json_parser
+import regex as re
 from partial_json_parser.core.options import Allow
 from pydantic import Field

@@ -1,10 +1,10 @@
 # SPDX-License-Identifier: Apache-2.0
 import json
-import re
 from collections.abc import Sequence
 from typing import Any, Optional

+import regex as re
 from transformers import PreTrainedTokenizerBase

 from vllm.entrypoints.chat_utils import random_tool_call_id

@@ -2,10 +2,10 @@
 import ast
 import json
-import re
 from collections.abc import Sequence
 from typing import Any, Union

+import regex as re
 from transformers import PreTrainedTokenizerBase

 from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,

@@ -3,11 +3,11 @@
 import copy
 import math
 import os
-import re
 from collections.abc import Sequence
 from dataclasses import dataclass, field
 from typing import Any, Callable, Optional, Union

+import regex as re
 import safetensors.torch
 import torch
 from torch import nn

@@ -1,10 +1,10 @@
 # SPDX-License-Identifier: Apache-2.0
 import os
-import re
 from typing import Optional, Union

 import huggingface_hub
+import regex as re
 from huggingface_hub.utils import (EntryNotFoundError, HfHubHTTPError,
                                    HFValidationError, RepositoryNotFoundError)
 from torch import nn

@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: Apache-2.0
-import re
+import regex as re


 def has_xgrammar_unsupported_json_features(schema: dict) -> bool:

@@ -4,10 +4,10 @@
 from __future__ import annotations

 import json
-import re
 from dataclasses import dataclass, field
 from typing import TYPE_CHECKING, Any

+import regex as re
 import torch

 import vllm.envs

@@ -1,10 +1,10 @@
 # SPDX-License-Identifier: Apache-2.0
-import re
 from collections.abc import Iterable, Mapping
 from types import MappingProxyType
 from typing import Optional

+import regex as re
 from compressed_tensors import CompressionFormat
 from torch.nn import Module

@@ -228,7 +228,7 @@ class ModelOptNvFp4Config(QuantizationConfig):
                    exclude_modules, group_size)

     def is_layer_excluded(self, prefix: str, exclude_modules: list):
-        import re
+        import regex as re
         for pattern in exclude_modules:
             regex_str = pattern.replace('.', r'\.').replace('*', r'.*')
             if re.fullmatch(regex_str, prefix):

@@ -1,10 +1,11 @@
 # SPDX-License-Identifier: Apache-2.0
-import re
 from collections.abc import Iterable, Mapping
 from types import MappingProxyType
 from typing import Any, Optional

+import regex as re
+

 def deep_compare(dict1: Any, dict2: Any) -> bool:
     if type(dict1) is not type(dict2):

@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: Apache-2.0
-import re
 from copy import deepcopy
 from typing import Optional, Union

+import regex as re
 import torch

 from vllm.config import QuantizationConfig

@@ -7,7 +7,6 @@ import dataclasses
 import io
 import json
 import os
-import re
 import threading
 import time
 from collections.abc import Generator
@@ -15,6 +14,7 @@ from dataclasses import dataclass
 from functools import partial
 from typing import Any, BinaryIO, Optional, Union

+import regex as re
 import torch
 from torch import nn
 from torch.utils._python_dispatch import TorchDispatchMode

@@ -250,7 +250,7 @@ class MiMoMTP(nn.Module):
         return loaded_params

     def map_model_name_to_mtp_param_name(self, name: str) -> str:
-        import re
+        import regex as re
         name_without_prefix = [
             "token_layernorm", "hidden_layernorm", "input_proj",
             "final_layernorm"

@@ -2,10 +2,10 @@
 """Inference-only MiniMaxText01 model."""
 import copy
 import math
-import re
 from collections.abc import Iterable
 from typing import Optional, Union

+import regex as re
 import torch
 import torch.distributed
 import torch.nn.functional as F

@@ -14,10 +14,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import re
 from collections.abc import Iterable, Mapping, Sequence
 from typing import Any, Literal, Optional, TypedDict, Union

+import regex as re
 import torch
 import torch.nn as nn
 from transformers import (BatchFeature, CLIPVisionConfig, PretrainedConfig,

@@ -7,12 +7,12 @@
 import copy
 import math
-import re
 import unicodedata
 from collections.abc import Collection, Mapping, Sequence, Set
 from functools import lru_cache, partial
 from typing import Callable, Literal, Optional, TypedDict, Union

+import regex as re
 import torch
 from torch import nn
 from torchvision import transforms

@@ -14,11 +14,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Wrapper around `transformers` models"""
-import re
 from collections.abc import Iterable
 from contextlib import nullcontext
 from typing import Literal, Optional, Union

+import regex as re
 import torch
 from torch import nn
 from transformers import AutoModel, PretrainedConfig, PreTrainedModel

@@ -1,6 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
 import json
-import re
 import sys
 from abc import ABC, abstractmethod
 from collections import defaultdict
@@ -12,6 +11,7 @@ from functools import lru_cache
 from typing import (TYPE_CHECKING, Generic, NamedTuple, Optional, Protocol,
                     TypeVar, Union, cast)

+import regex as re
 import torch
 from typing_extensions import assert_never

@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: Apache-2.0
-import re
 from collections.abc import Sequence
 from typing import Optional, Union

+import regex as re
 from transformers import PreTrainedTokenizerBase

 from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,

@@ -1,12 +1,12 @@
 # SPDX-License-Identifier: Apache-2.0
 import os
-import re
 from dataclasses import dataclass
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Optional, Union, cast

 import huggingface_hub
+import regex as re
 from huggingface_hub import HfApi, hf_hub_download

 from vllm.logger import init_logger

@@ -19,7 +19,6 @@ import json
 import multiprocessing
 import os
 import pickle
-import re
 import signal
 import socket
 import subprocess
@@ -54,6 +53,7 @@ import cloudpickle
 import numpy as np
 import numpy.typing as npt
 import psutil
+import regex as re
 import torch
 import torch.types
 import yaml

@@ -2,7 +2,7 @@
 from __future__ import annotations

-import re
+import regex as re


 def grammar_is_likely_lark(grammar_str: str) -> bool: