[v1] fix use compile sizes (#11000)
Signed-off-by: youkaichao <youkaichao@gmail.com>
commit 1a2f8fb828
parent cbcbdb1ceb
@@ -2522,6 +2522,7 @@ class VllmConfig:
             self.compilation_config.custom_ops = ["none"]
             self.compilation_config.use_cudagraph = True
             self.compilation_config.use_inductor = True
+            self.compilation_config.cudagraph_num_of_warmups = 1
             self.compilation_config.pass_config.enable_fusion = False
             self.compilation_config.pass_config.enable_reshape = False
             self.compilation_config.level = CompilationLevel.PIECEWISE
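For context, the field this hunk introduces sits alongside the other compilation defaults that VllmConfig forces for the V1 engine. The stub below is a hypothetical, simplified mirror of those fields for illustration only; the names come from the diff, but the standalone dataclass layout, types, and defaults are assumptions, not vLLM's actual CompilationConfig definition.

# Hypothetical stub, not vLLM's real CompilationConfig. The values shown are
# the ones the VllmConfig block above forces for the V1 engine.
from dataclasses import dataclass, field

@dataclass
class PassConfig:
    enable_fusion: bool = False
    enable_reshape: bool = False

@dataclass
class CompilationConfig:
    custom_ops: list[str] = field(default_factory=lambda: ["none"])
    use_cudagraph: bool = True
    use_inductor: bool = True
    # Added by this commit: eager warmup passes per batch size before its
    # CUDA graph is captured.
    cudagraph_num_of_warmups: int = 1
    pass_config: PassConfig = field(default_factory=PassConfig)
    level: int = 3  # stands in for CompilationLevel.PIECEWISE here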
@@ -582,6 +582,9 @@ class GPUModelRunner:
         # can reuse the memory pool allocated for the large shapes.
         with graph_capture():
             for num_tokens in reversed(self.cudagraph_batch_sizes):
+                for _ in range(self.vllm_config.compilation_config.
+                               cudagraph_num_of_warmups):
+                    self._dummy_run(self.model, num_tokens, self.kv_caches)
                 self._dummy_run(self.model, num_tokens, self.kv_caches)
 
         end_time = time.perf_counter()
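The second hunk consumes that field: before each batch size's CUDA graph is captured, the model is run cudagraph_num_of_warmups times eagerly. Below is a minimal, self-contained sketch of this warmup-then-capture pattern in plain PyTorch; it illustrates the general technique, not vLLM's actual capture path, and the helper name capture_with_warmup is made up.

# Minimal sketch (assumed helper, not vLLM code) of warmup-then-capture.
import torch

def capture_with_warmup(model, static_input, num_warmups=1):
    # Warmup runs execute eagerly so one-time costs (memory-pool growth,
    # kernel autotuning, lazy initialization) are paid before capture and
    # do not get baked into the replayed graph.
    for _ in range(num_warmups):
        model(static_input)
    torch.cuda.synchronize()

    # Kernels launched inside this context are recorded into the graph
    # rather than executed; replay them later with graph.replay().
    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph):
        static_output = model(static_input)
    return graph, static_output

In the diff itself, the warmup count comes from self.vllm_config.compilation_config.cudagraph_num_of_warmups, the field added in the first hunk, and both the warmup passes and the capturing run go through the same _dummy_run helper.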