diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py
index 472e8b061a9e1..14ea03444484d 100644
--- a/vllm/model_executor/models/qwen2_vl.py
+++ b/vllm/model_executor/models/qwen2_vl.py
@@ -82,7 +82,7 @@ from .vision import get_vit_attn_backend, run_dp_sharded_mrope_vision_model
 logger = init_logger(__name__)
 
 # For profile run
-_MAX_FRAMES_PER_VIDEO = 600
+_MAX_FRAMES_PER_VIDEO = 32
 
 # === Vision Inputs === #
 
diff --git a/vllm/model_executor/models/qwen3_vl.py b/vllm/model_executor/models/qwen3_vl.py
index 4ea93d3fdf417..ede477cde1a22 100644
--- a/vllm/model_executor/models/qwen3_vl.py
+++ b/vllm/model_executor/models/qwen3_vl.py
@@ -715,6 +715,18 @@ class Qwen3VLDummyInputsBuilder(BaseDummyInputsBuilder[Qwen3VLProcessingInfo]):
             video_items.append(video_item)
         return video_items
 
+    def get_dummy_processor_inputs(self, seq_len, mm_counts):
+        processor_inputs = super().get_dummy_processor_inputs(
+            seq_len, mm_counts)
+        # HACK(Isotr0py): We set do_resize to False here to reuse Qwen2-VL's
+        # profiling logic, which will be problematic for configurable mm
+        # profiling.
+        # TODO(Isotr0py): Switch to the implementation in
+        # https://github.com/vllm-project/vllm/pull/25557
+        # after supporting configurable mm profiling.
+        processor_inputs.hf_processor_mm_kwargs = {"do_resize": False}
+        return processor_inputs
+
 
 class Qwen3VLMultiModalProcessor(BaseMultiModalProcessor[Qwen3VLProcessingInfo]
                                  ):
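
For reference, the new override follows the usual dummy-inputs pattern: build the
parent's profiling inputs, then force do_resize=False so the HF processor keeps
the dummy frames at their constructed size, letting the reused Qwen2-VL profiling
logic (now capped at _MAX_FRAMES_PER_VIDEO = 32) account for them directly. A
minimal self-contained sketch of that pattern follows; the ProcessorInputs fields,
builder classes, and placeholder token below are simplified stand-ins for
illustration, not vLLM's exact API.

    from dataclasses import dataclass, field
    from typing import Any


    @dataclass
    class ProcessorInputs:
        # Simplified stand-in for vLLM's ProcessorInputs container.
        prompt_text: str
        hf_processor_mm_kwargs: dict[str, Any] = field(default_factory=dict)


    class BaseDummyInputsBuilder:
        def get_dummy_processor_inputs(
                self, seq_len: int, mm_counts: dict[str, int]) -> ProcessorInputs:
            # Build placeholder inputs sized for the profile run.
            return ProcessorInputs(
                prompt_text="<|video_pad|>" * mm_counts.get("video", 0))


    class Qwen3VLDummyInputsBuilder(BaseDummyInputsBuilder):
        def get_dummy_processor_inputs(self, seq_len, mm_counts):
            processor_inputs = super().get_dummy_processor_inputs(
                seq_len, mm_counts)
            # Skip HF-side resizing so the dummy video keeps its original
            # size and the reused Qwen2-VL profiling math stays consistent.
            processor_inputs.hf_processor_mm_kwargs = {"do_resize": False}
            return processor_inputs


    builder = Qwen3VLDummyInputsBuilder()
    inputs = builder.get_dummy_processor_inputs(seq_len=4096,
                                                mm_counts={"video": 1})
    print(inputs.hf_processor_mm_kwargs)  # -> {'do_resize': False}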