[Refactor] Change the way of importing triton (#20774)

Signed-off-by: yewentao256 <zhyanwentao@126.com>
This commit is contained in:
Wentao Ye 2025-07-12 22:39:55 -04:00 committed by GitHub
parent 3b3b778d4a
commit c1acd6d7d4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 5 additions and 9 deletions

View File

@ -6,7 +6,6 @@ from typing import Optional
import pytest
import torch
import triton.language as tl
from tests.kernels.moe.utils import (batched_moe,
make_quantized_test_activations,
@ -18,6 +17,7 @@ from vllm.model_executor.layers.fused_moe.fused_batched_moe import (
invoke_moe_batched_triton_kernel)
from vllm.model_executor.layers.fused_moe.fused_moe import fused_topk
from vllm.platforms import current_platform
from vllm.triton_utils import tl
MNK_FACTORS = [
(1, 128, 128),

View File

@ -8,10 +8,9 @@
# - Thomas Parnell <tpa@zurich.ibm.com>
import torch
import triton
import triton.language as tl
from vllm.logger import init_logger
from vllm.triton_utils import tl, triton
logger = init_logger(__name__)

View File

@ -8,12 +8,11 @@ https://arxiv.org/abs/2310.18547
"""
import torch
import triton
import triton.language as tl
from vllm.lora.ops.triton_ops.kernel_utils import do_expand_kernel
from vllm.lora.ops.triton_ops.utils import _get_lora_b_ptr
from vllm.platforms import current_platform
from vllm.triton_utils import tl, triton
from vllm.utils import direct_register_custom_op

View File

@ -8,12 +8,11 @@ https://arxiv.org/abs/2310.18547
"""
import torch
import triton
import triton.language as tl
from vllm.lora.ops.triton_ops.kernel_utils import do_shrink_kernel
from vllm.lora.ops.triton_ops.utils import _get_lora_a_ptr
from vllm.platforms import current_platform
from vllm.triton_utils import tl, triton
from vllm.utils import direct_register_custom_op

View File

@ -4,8 +4,6 @@
from typing import Optional
import torch
import triton
import triton.language as tl
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from vllm.model_executor.layers.fused_moe.config import FusedMoEQuantConfig
@ -18,6 +16,7 @@ from vllm.model_executor.layers.fused_moe.utils import (
normalize_scales_shape)
from vllm.model_executor.layers.quantization.utils.quant_utils import (
group_broadcast)
from vllm.triton_utils import tl, triton
@triton.jit