Remove unused imports (#26972)

Signed-off-by: Lukas Geiger <lukas.geiger94@gmail.com>
This commit is contained in:
Lukas Geiger 2025-10-17 03:51:17 +01:00 committed by GitHub
parent 17c540a993
commit 4d055ef465
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 0 additions and 13 deletions

View File

@@ -10,7 +10,6 @@ if TYPE_CHECKING:
from typing import TYPE_CHECKING
import torch
import torch.distributed
import torch.nn.functional as F
from einops import rearrange
from torch import nn
@@ -41,9 +40,6 @@ from vllm.v1.attention.backends.linear_attn import LinearAttentionMetadata
if TYPE_CHECKING:
from vllm.attention.backends.abstract import AttentionBackend
import torch
import torch.distributed
class MiniMaxText01RMSNormTP(CustomOp):
name = "MiniMaxText01RMSNormTP"

View File

@@ -848,7 +848,6 @@ class CompressedTensorsW8A8Fp8MoEMethod(CompressedTensorsMoEMethod):
# Property to determine if AITER is used
if self.rocm_aiter_moe_enabled:
from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import ( # noqa E501
rocm_aiter_fused_experts,
shuffle_weights,
)

View File

@@ -58,7 +58,6 @@ from .interfaces import MultiModalEmbeddings, SupportsMultiModal, SupportsTransc
from .utils import (
AutoWeightsLoader,
WeightsMapper,
flatten_bn,
init_vllm_registered_model,
maybe_prefix,
)

View File

@@ -11,7 +11,6 @@ if TYPE_CHECKING:
import regex as re
import torch
import torch.distributed
from torch import nn
from transformers import MiniMaxConfig

View File

@@ -31,13 +31,11 @@ if TYPE_CHECKING:
from .inputs import (
BatchedTensorInputs,
MultiModalKwargsItem,
MultiModalKwargsItems,
MultiModalPlaceholderDict,
)
else:
BatchedTensorInputs = Any
MultiModalKwargsItem = Any
MultiModalKwargsItems = Any
MultiModalPlaceholderDict = Any
global_thread_pool = ThreadPoolExecutor(

View File

@@ -21,11 +21,9 @@ from vllm.transformers_utils.utils import check_gguf_file
if TYPE_CHECKING:
from vllm.config import ModelConfig
from vllm.lora.request import LoRARequest
from vllm.transformers_utils.tokenizer_base import TokenizerBase
else:
ModelConfig = Any
LoRARequest = Any
TokenizerBase = Any
logger = init_logger(__name__)

View File

@@ -900,7 +900,6 @@ def _cuda_device_count_stateless(cuda_visible_devices: str | None = None) -> int
# c1cd946818442aca8c7f812b16d187ce1586c3bc/
# torch/cuda/__init__.py#L831C1-L831C17
import torch.cuda
import torch.version
from vllm.platforms import current_platform

View File

@@ -7,7 +7,6 @@ from collections.abc import Callable
from typing import Any, TypeVar
import torch
import torch.distributed
import torch.nn as nn
import vllm.envs as envs