diff --git a/vllm/benchmarks/datasets.py b/vllm/benchmarks/datasets.py
index 49ee0faf049d1..067e31f4303b6 100644
--- a/vllm/benchmarks/datasets.py
+++ b/vllm/benchmarks/datasets.py
@@ -1847,7 +1847,6 @@ def get_samples(args, tokenizer: TokenizerLike) -> list[SampleRequest]:
             random_seed=args.seed,
             dataset_path=args.dataset_path,
             disable_shuffle=args.disable_shuffle,
-            prefix_len=args.common_prefix_len,
         ).sample(
             tokenizer=tokenizer,
             num_requests=args.num_prompts,
diff --git a/vllm/benchmarks/serve.py b/vllm/benchmarks/serve.py
index 12756d1700c9f..f10f50834e4c9 100644
--- a/vllm/benchmarks/serve.py
+++ b/vllm/benchmarks/serve.py
@@ -1281,12 +1281,6 @@ def add_cli_args(parser: argparse.ArgumentParser):
         help="Repetition penalty sampling parameter. Only has effect on "
         "openai-compatible backends.",
     )
-    sampling_group.add_argument(
-        "--common-prefix-len",
-        type=int,
-        default=None,
-        help="Common prefix length shared by all prompts (used by random dataset)",
-    )
     parser.add_argument(
         "--served-model-name",