[Benchmark][Bug] Fix multiple bugs in bench and add args to spec_decode offline (#20083)
commit 9502c38138
parent 2582683566
@@ -349,8 +349,9 @@ class RandomDataset(BenchmarkDataset):
             # [1650, 939, 486] -> ['Ġcall', 'sh', 'ere']
             # To avoid uncontrolled change of the prompt length,
             # the encoded sequence is truncated before being decode again.
+            total_input_len = prefix_len + int(input_lens[i])
             re_encoded_sequence = tokenizer.encode(prompt, add_special_tokens=False)[
-                : input_lens[i]
+                :total_input_len
             ]
             prompt = tokenizer.decode(re_encoded_sequence)
-            total_input_len = prefix_len + int(input_lens[i])
+            total_input_len = len(re_encoded_sequence)

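Note on the hunk above: the old slice truncated the re-encoded prompt to input_lens[i] alone, ignoring the prefix_len tokens already present in the prompt, and total_input_len was taken from the requested length rather than from what actually survived re-encoding. A minimal, tokenizer-free sketch of the corrected bookkeeping (fake_encode/fake_decode and the token values are illustrative only, not part of the diff):

# Illustrative sketch only: mimics the truncation bookkeeping with plain lists
# instead of a real tokenizer.
def fake_encode(text: str) -> list[str]:
    return text.split()

def fake_decode(tokens: list[str]) -> str:
    return " ".join(tokens)

prefix_len, input_len = 3, 4
prompt = "p1 p2 p3 a b c d extra"            # prefix tokens + sampled tokens + drift

# Old behaviour: truncate to input_len only, so prefix tokens push real ones out.
old = fake_encode(prompt)[:input_len]          # ['p1', 'p2', 'p3', 'a']

# Fixed behaviour: truncate to prefix_len + input_len, then record the real length.
total_input_len = prefix_len + input_len
new = fake_encode(prompt)[:total_input_len]    # ['p1', 'p2', 'p3', 'a', 'b', 'c', 'd']
prompt = fake_decode(new)
total_input_len = len(new)                     # 7: the length actually kept
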
@@ -39,6 +39,9 @@ def parse_args():
     parser.add_argument("--top-k", type=int, default=-1)
     parser.add_argument("--print-output", action="store_true")
     parser.add_argument("--output-len", type=int, default=256)
+    parser.add_argument("--model-dir", type=str, default=None)
+    parser.add_argument("--eagle-dir", type=str, default=None)
+    parser.add_argument("--max-model-len", type=int, default=2048)
     return parser.parse_args()

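The three new flags are optional: --model-dir and --eagle-dir fall back to defaults resolved later in main(), and --max-model-len keeps the previously hard-coded value of 2048. A self-contained argparse check (flag names and defaults copied from the diff; the override values are made up):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--model-dir", type=str, default=None)
parser.add_argument("--eagle-dir", type=str, default=None)
parser.add_argument("--max-model-len", type=int, default=2048)

# No flags given: everything keeps its default.
assert vars(parser.parse_args([])) == {
    "model_dir": None, "eagle_dir": None, "max_model_len": 2048
}

# Overriding only the context length.
args = parser.parse_args(["--max-model-len", "4096"])
print(args.model_dir, args.eagle_dir, args.max_model_len)  # None None 4096
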
@@ -46,9 +49,10 @@ def main():
     args = parse_args()
     args.endpoint_type = "openai-chat"
 
-    model_dir = "meta-llama/Llama-3.1-8B-Instruct"
+    model_dir = args.model_dir
+    if args.model_dir is None:
+        model_dir = "meta-llama/Llama-3.1-8B-Instruct"
     tokenizer = AutoTokenizer.from_pretrained(model_dir)
-    max_model_len = 2048
 
     prompts = get_samples(args, tokenizer)
     # add_special_tokens is False to avoid adding bos twice when using chat templates

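Aside: because the new --model-dir default is None, the fallback above is equivalent to a one-line "or" expression; the explicit if just reads like the surrounding code. Sketch below (the helper name is hypothetical; the default checkpoint string is the one in the diff). The only behavioural difference is that "or" would also replace an empty string, which is irrelevant for a CLI flag:

def resolve_model_dir(model_dir: str | None) -> str:
    # Same effect as the diff: use --model-dir if given, else the Llama default.
    return model_dir or "meta-llama/Llama-3.1-8B-Instruct"

assert resolve_model_dir(None) == "meta-llama/Llama-3.1-8B-Instruct"
assert resolve_model_dir("my/local/model") == "my/local/model"
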
@@ -57,16 +61,18 @@ def main():
     ]
 
     if args.method == "eagle" or args.method == "eagle3":
-        if args.method == "eagle":
+        eagle_dir = args.eagle_dir
+        if args.method == "eagle" and eagle_dir is None:
             eagle_dir = "yuhuili/EAGLE-LLaMA3.1-Instruct-8B"
-        elif args.method == "eagle3":
+
+        elif args.method == "eagle3" and eagle_dir is None:
             eagle_dir = "yuhuili/EAGLE3-LLaMA3.1-Instruct-8B"
         speculative_config = {
             "method": args.method,
             "model": eagle_dir,
             "num_speculative_tokens": args.num_spec_tokens,
             "draft_tensor_parallel_size": args.draft_tp,
-            "max_model_len": max_model_len,
+            "max_model_len": args.max_model_len,
         }
     elif args.method == "ngram":
         speculative_config = {

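Net effect of the rewritten branch: an explicit --eagle-dir always wins, otherwise a method-specific default draft model is used. A standalone sketch of that resolution (the helper name is hypothetical; the default checkpoints are the ones in the diff):

def resolve_eagle_dir(method: str, eagle_dir: str | None) -> str | None:
    # Explicit --eagle-dir takes precedence over the per-method defaults.
    if eagle_dir is not None:
        return eagle_dir
    if method == "eagle":
        return "yuhuili/EAGLE-LLaMA3.1-Instruct-8B"
    if method == "eagle3":
        return "yuhuili/EAGLE3-LLaMA3.1-Instruct-8B"
    return None

assert resolve_eagle_dir("eagle", None) == "yuhuili/EAGLE-LLaMA3.1-Instruct-8B"
assert resolve_eagle_dir("eagle3", "my/draft") == "my/draft"
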
@@ -74,7 +80,7 @@ def main():
             "num_speculative_tokens": args.num_spec_tokens,
             "prompt_lookup_max": args.prompt_lookup_max,
             "prompt_lookup_min": args.prompt_lookup_min,
-            "max_model_len": max_model_len,
+            "max_model_len": args.max_model_len,
         }
     else:
         raise ValueError(f"unknown method: {args.method}")

@@ -86,7 +92,7 @@ def main():
         enable_chunked_prefill=args.enable_chunked_prefill,
         max_num_batched_tokens=args.max_num_batched_tokens,
         enforce_eager=args.enforce_eager,
-        max_model_len=max_model_len,
+        max_model_len=args.max_model_len,
         max_num_seqs=args.max_num_seqs,
         gpu_memory_utilization=0.8,
         speculative_config=speculative_config,

@@ -320,6 +320,8 @@ class RandomDataset(BenchmarkDataset):
         **kwargs,
     ) -> None:
         super().__init__(**kwargs)
+        random.seed(self.random_seed)
+        np.random.seed(self.random_seed)
 
     def sample(
         self,

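Seeding both random and numpy in __init__ ties the randomly generated prompts to self.random_seed, so the same --seed reproduces the same requests. A numpy-only illustration of the property being relied on (the ranges and values here are arbitrary):

import numpy as np

def sampled_lens(seed: int, n: int = 5) -> list[int]:
    np.random.seed(seed)                        # same seeding style as the diff
    return np.random.randint(100, 200, size=n).tolist()

assert sampled_lens(42) == sampled_lens(42)     # identical across runs
assert sampled_lens(42) != sampled_lens(43)     # different seed, different prompts
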
@@ -376,10 +378,11 @@ class RandomDataset(BenchmarkDataset):
             # [1650, 939, 486] -> ['Ġcall', 'sh', 'ere']
             # To avoid uncontrolled change of the prompt length,
             # the encoded sequence is truncated before being decode again.
-            re_encoded_sequence = tokenizer.encode(
-                prompt, add_special_tokens=False)[:input_lens[i]]
-            prompt = tokenizer.decode(re_encoded_sequence)
             total_input_len = prefix_len + int(input_lens[i])
+            re_encoded_sequence = tokenizer.encode(
+                prompt, add_special_tokens=False)[:total_input_len]
+            prompt = tokenizer.decode(re_encoded_sequence)
+            total_input_len = len(re_encoded_sequence)
             requests.append(
                 SampleRequest(
                     prompt=prompt,

@@ -692,7 +695,8 @@ def get_samples(args, tokenizer) -> list[SampleRequest]:
                                 dataset_path=args.dataset_path).
         sample(tokenizer=tokenizer, num_requests=args.num_prompts),
         "random":
-        lambda: RandomDataset(dataset_path=args.dataset_path).sample(
+        lambda: RandomDataset(random_seed=args.seed,
+                              dataset_path=args.dataset_path).sample(
             tokenizer=tokenizer,
             num_requests=args.num_prompts,
             prefix_len=args.random_prefix_len,

@@ -631,6 +631,12 @@ def add_cli_args(parser: argparse.ArgumentParser):
         help="The label (prefix) of the benchmark results. If not specified, "
         "the endpoint type will be used as the label.",
     )
+    parser.add_argument(
+        "--backend",
+        type=str,
+        default="vllm",
+        choices=list(ASYNC_REQUEST_FUNCS.keys()),
+    )
     parser.add_argument(
         "--base-url",
         type=str,

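The restored --backend flag selects the async request function by key, and its valid choices are whatever ASYNC_REQUEST_FUNCS registers. A minimal sketch of that registry-driven pattern (the registry contents below are placeholders, not the real table):

import argparse

# Placeholder registry: the real ASYNC_REQUEST_FUNCS maps backend names to
# request coroutines; only the lookup pattern is shown here.
ASYNC_REQUEST_FUNCS = {"vllm": object(), "openai-chat": object()}

parser = argparse.ArgumentParser()
parser.add_argument(
    "--backend",
    type=str,
    default="vllm",
    choices=list(ASYNC_REQUEST_FUNCS.keys()),
)
args = parser.parse_args([])
request_func = ASYNC_REQUEST_FUNCS[args.backend]   # selected by name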