[Misc][Gaudi] Avoid torch.compile and enable lazy collectives (#10897)
Signed-off-by: Konrad Zawora <kzawora@habana.ai>
This commit is contained in:
parent 998eeafe58
commit a43065272f
@@ -29,6 +29,20 @@ def load_general_plugins():
     if current_platform.is_xpu():
         # see https://github.com/pytorch/pytorch/blob/8cada5cbe5450e17c26fb8b358116785324537b2/torch/_dynamo/config.py#L158 # noqa
         os.environ['TORCH_COMPILE_DISABLE'] = 'True'
+    if current_platform.is_hpu():
+        # NOTE(kzawora): PT HPU lazy backend (PT_HPU_LAZY_MODE = 1)
+        # does not support torch.compile
+        # Eager backend (PT_HPU_LAZY_MODE = 0) must be selected for
+        # torch.compile support
+        is_lazy = os.environ.get('PT_HPU_LAZY_MODE', '1') == '1'
+        if is_lazy:
+            # see https://github.com/pytorch/pytorch/blob/43c5f59/torch/_dynamo/config.py#L158
+            torch._dynamo.config.disable = True
+            # NOTE(kzawora) multi-HPU inference with HPUGraphs (lazy-only)
+            # requires enabling lazy collectives
+            # see https://docs.habana.ai/en/latest/PyTorch/Inference_on_PyTorch/Inference_Using_HPU_Graphs.html # noqa: E501
+            os.environ['PT_HPU_ENABLE_LAZY_COLLECTIVES'] = 'true'
+
     global plugins_loaded
     if plugins_loaded:
         return
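For context, the gating this diff introduces can be sanity-checked in isolation. The sketch below is not vLLM code; plan_hpu_settings is a hypothetical helper that mirrors the PT_HPU_LAZY_MODE check above and reports which of the two effects (disabling Dynamo, enabling lazy collectives) would apply for a given environment.

def plan_hpu_settings(env):
    """Hypothetical helper (illustration only, not part of the commit):
    given an environment mapping, report whether torch.compile/Dynamo
    should be disabled and whether lazy collectives should be enabled."""
    # PT_HPU_LAZY_MODE defaults to '1' (lazy backend) when unset,
    # matching the os.environ.get('PT_HPU_LAZY_MODE', '1') check above.
    is_lazy = env.get('PT_HPU_LAZY_MODE', '1') == '1'
    return {
        # The lazy backend does not support torch.compile, so the commit
        # sets torch._dynamo.config.disable = True in this case.
        'disable_dynamo': is_lazy,
        # Multi-HPU inference with HPUGraphs (lazy-only) requires
        # PT_HPU_ENABLE_LAZY_COLLECTIVES='true'.
        'enable_lazy_collectives': is_lazy,
    }

# Lazy mode (the default): torch.compile off, lazy collectives on.
assert plan_hpu_settings({}) == {'disable_dynamo': True,
                                 'enable_lazy_collectives': True}
# Eager mode: torch.compile stays available, no lazy collectives needed.
assert plan_hpu_settings({'PT_HPU_LAZY_MODE': '0'}) == {
    'disable_dynamo': False, 'enable_lazy_collectives': False}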