Mirror of https://git.datalinker.icu/vllm-project/vllm.git, synced 2025-12-14 20:35:40 +08:00
[Misc] improve docs (#18734)

Signed-off-by: reidliu41 <reid201711@gmail.com>
Co-authored-by: reidliu41 <reid201711@gmail.com>

parent 753944fa9b
commit fc6d0c290f
```diff
@@ -15,6 +15,8 @@ prompts = [
     "What is annapurna labs?",
 ]
 
-# Create a sampling params object.
-sampling_params = SamplingParams(top_k=1, max_tokens=500, ignore_eos=True)
+
+def main():
+    # Create a sampling params object.
+    sampling_params = SamplingParams(top_k=1, max_tokens=500, ignore_eos=True)
 
@@ -52,3 +54,7 @@ for output in outputs:
-    prompt = output.prompt
-    generated_text = output.outputs[0].text
-    print(f"Prompt: {prompt!r}, \n\n\n\ Generated text: {generated_text!r}")
+        prompt = output.prompt
+        generated_text = output.outputs[0].text
+        print(f"Prompt: {prompt!r}, \n\n\n\ Generated text: {generated_text!r}")
+
+
+if __name__ == "__main__":
+    main()
```
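Taken together, the two hunks above wrap the example's top-level code in a `main()` function behind an `if __name__ == "__main__":` guard. The sketch below shows the resulting shape of the script; it is an illustration, not the verbatim file: the `LLM` construction and model name are assumptions (the diff never shows them), the prompt list is abbreviated, and the print formatting is simplified.

```python
# Minimal sketch of the script's shape after this commit. The model name
# and LLM construction are placeholders, not shown in the hunks.
from vllm import LLM, SamplingParams

prompts = [
    "What is annapurna labs?",
    # ...other prompts elided in the diff...
]


def main():
    # Create a sampling params object.
    sampling_params = SamplingParams(top_k=1, max_tokens=500, ignore_eos=True)

    llm = LLM(model="some/model")  # placeholder; the real model is not shown
    outputs = llm.generate(prompts, sampling_params)
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")


if __name__ == "__main__":
    main()
```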
````diff
@@ -6,14 +6,19 @@ This folder provides several example scripts on how to inference Qwen2.5-Omni of
 
 ```bash
 # Audio + image + video
-python examples/offline_inference/qwen2_5_omni/only_thinker.py -q mixed_modalities
+python examples/offline_inference/qwen2_5_omni/only_thinker.py \
+    -q mixed_modalities
 
 # Read vision and audio inputs from a single video file
 # NOTE: V1 engine does not support interleaved modalities yet.
-VLLM_USE_V1=0 python examples/offline_inference/qwen2_5_omni/only_thinker.py -q use_audio_in_video
+VLLM_USE_V1=0 \
+python examples/offline_inference/qwen2_5_omni/only_thinker.py \
+    -q use_audio_in_video
 
 # Multiple audios
-VLLM_USE_V1=0 python examples/offline_inference/qwen2_5_omni/only_thinker.py -q multi_audios
+VLLM_USE_V1=0 \
+python examples/offline_inference/qwen2_5_omni/only_thinker.py \
+    -q multi_audios
 ```
 
 This script will run the thinker part of Qwen2.5-Omni, and generate text response.
@@ -22,11 +27,16 @@ You can also test Qwen2.5-Omni on a single modality:
 
 ```bash
 # Process audio inputs
-python examples/offline_inference/audio_language.py --model-type qwen2_5_omni
+python examples/offline_inference/audio_language.py \
+    --model-type qwen2_5_omni
 
 # Process image inputs
-python examples/offline_inference/vision_language.py --modality image --model-type qwen2_5_omni
+python examples/offline_inference/vision_language.py \
+    --modality image \
+    --model-type qwen2_5_omni
 
 # Process video inputs
-python examples/offline_inference/vision_language.py --modality video --model-type qwen2_5_omni
+python examples/offline_inference/vision_language.py \
+    --modality video \
+    --model-type qwen2_5_omni
 ```
````
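The `VLLM_USE_V1=0` prefix in the commands above pins these runs to vLLM's V0 engine, per the note about interleaved modalities. If a launcher script is preferred over a shell prefix, the flag can also be set from Python; the sketch below is an assumption for illustration, not part of the commit.

```python
# Sketch: apply the VLLM_USE_V1=0 shell prefix from Python instead.
# Setting it before vllm is imported is the conservative choice, so every
# engine-selection check sees the flag.
import os

os.environ["VLLM_USE_V1"] = "0"

from vllm import LLM  # noqa: E402  # imported after the flag on purpose
```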