# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Demonstrates how to co-locate a vLLM inference worker and training
actors on the same set of GPUs for reinforcement learning from human feedback
(RLHF) workloads.

Ray serves as the distributed execution framework in this example. Ray
placement groups allocate both training actors and vLLM workers to the
same GPU bundles, enabling fast, in-GPU communication between the two
components.

The script shows how to do the following:

* Configure environment variables (`VLLM_RAY_PER_WORKER_GPUS` and
  `VLLM_RAY_BUNDLE_INDICES`) so that vLLM workers land on the desired
  devices.
* Exchange tensors between processes by means of CUDA inter-process
  communication (IPC). CUDA IPC sidesteps NCCL limitations that occur
  when multiple processes share a single GPU.

Note that this example assumes a single-node cluster with four GPUs, but Ray
supports multi-node clusters. vLLM expects exclusive use of the GPUs during
its initialization for memory profiling; residual GPU activity interferes
with the profiling and causes unexpected behavior.

Learn more about Ray placement groups:
https://docs.ray.io/en/latest/placement-groups.html
"""

import gc
import os

import ray
import torch
import zmq
from ray.util.placement_group import placement_group
from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy
from torch.multiprocessing.reductions import reduce_tensor

from vllm import LLM


class MyLLM(LLM):
    """Configure the vLLM worker for Ray placement group execution.

    The constructor sets environment variables that allow multiple vLLM
    workers to share a single physical GPU and that encode the bundle
    indices assigned by the placement group.

    Args:
        *args: Positional arguments forwarded to `vllm.LLM`.
        bundle_indices (list[int]): Placement-group bundle indices
            assigned to this worker.
        **kwargs: Keyword arguments forwarded to `vllm.LLM`.
    """

    def __init__(self, *args, bundle_indices: list[int], **kwargs):
        # Prevent Ray from manipulating the top-level CUDA_VISIBLE_DEVICES
        # variable so that vLLM can manage its own device placement inside
        # the worker.
        os.environ.pop("CUDA_VISIBLE_DEVICES", None)
        # Each worker uses 0.4 GPU so that a training actor and a vLLM
        # worker fit on the same GPU.
        os.environ["VLLM_RAY_PER_WORKER_GPUS"] = "0.4"
        os.environ["VLLM_RAY_BUNDLE_INDICES"] = ",".join(map(str, bundle_indices))
        print(f"creating LLM with bundle_indices={bundle_indices}")
        super().__init__(*args, **kwargs)
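

# With `bundle_indices=[0, 1]`, for example, the instance sets
# VLLM_RAY_BUNDLE_INDICES="0,1", and its two tensor-parallel workers are
# scheduled onto placement-group bundles 0 and 1 (GPUs 0 and 1 below).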


class RayTrainingActor:
    """Training actor that hosts a Facebook OPT-125M model from Hugging Face.

    The model is loaded onto the first GPU assigned to this actor, which then
    exposes CUDA IPC handles so that colocated vLLM workers can map its
    tensors directly.
    """

    def __init__(self):
        # Ray sets CUDA_VISIBLE_DEVICES to the GPUs assigned to this actor.
        from transformers import AutoModelForCausalLM

        self.model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
        self.model.to("cuda:0")
        # Zero out all the parameters so that the weight update performed
        # later is observable.
        for name, p in self.model.named_parameters():
            p.data.zero_()
        torch.cuda.synchronize()
        # The argument for `get_device_uuid` is the index of the GPU in the
        # list of visible devices.
        from vllm.platforms import current_platform

        self.device_uuid = current_platform.get_device_uuid(0)
        self.zmq_context = zmq.Context()
        self.zmq_address_counter = 0
        self.zmq_handle = None

    def report_device_id(self) -> str:
        return self.device_uuid

    def get_zmq_handles(self) -> dict[str, str]:
        suffix = f"{self.device_uuid}-{self.zmq_address_counter}"
        self.zmq_handle = f"ipc:///tmp/rl-colocate-zmq-{suffix}.sock"
        self.zmq_address_counter += 1
        return {self.device_uuid: self.zmq_handle}
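
    # `update_weights` below implements the producer side of a simple
    # REQ/REP handshake with a colocated vLLM worker: it sends a CUDA IPC
    # handle for a staging buffer, then per-bucket metadata while the
    # bucket's tensors sit in that buffer, and finally a `None`
    # end-of-stream sentinel, waiting for an acknowledgement after each
    # message.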

    def update_weights(self):
        # Align tensor sizes to 256 bytes to avoid misaligned addresses.
        align_size = 256

        def get_size(p: torch.Tensor) -> int:
            return (p.nbytes + align_size - 1) // align_size * align_size
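
        # For example, a 50172-byte parameter is padded to
        # (50172 + 255) // 256 * 256 == 50176 bytes, so every tensor staged
        # in the shared buffer starts at a 256-byte-aligned offset.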

        named_parameters: dict[str, torch.nn.Parameter] = dict(
            self.model.named_parameters()
        )
        max_tensor_size = max(get_size(p) for p in named_parameters.values())
        # Use max_tensor_size * 2 as the staging buffer size.
        buffer = torch.empty(max_tensor_size * 2, dtype=torch.uint8, device="cuda:0")
        s = self.zmq_context.socket(zmq.REQ)
        s.bind(self.zmq_handle)
        handle = reduce_tensor(buffer)
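
        # `reduce_tensor` serializes the staging buffer into a CUDA IPC
        # handle that the consumer process can rebuild into a view of the
        # same device memory. Parameters are then packed into buckets: each
        # bucket holds as many aligned tensors as fit in the buffer, and
        # buckets are streamed through the buffer one at a time.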
        offset = 0
        buckets: list[tuple[list[dict], list[torch.Tensor]]] = []
        named_tensors: list[dict] = []
        real_tensors: list[torch.Tensor] = []
        for name, p in named_parameters.items():
            size = get_size(p)
            if offset + size > buffer.numel():
                # The current bucket is full; start a new one.
                buckets.append((named_tensors, real_tensors))
                named_tensors, real_tensors = [], []
                offset = 0
            # Assume tensors are contiguous.
            named_tensors.append(
                {"name": name, "dtype": p.dtype, "shape": p.shape, "offset": offset}
            )
            real_tensors.append(p)
            offset += size
        if named_tensors:
            buckets.append((named_tensors, real_tensors))
        s.send_pyobj(handle)
        s.recv()
        for named_tensors, real_tensors in buckets:
            offset = 0
            for p in real_tensors:
                buffer[offset : offset + p.nbytes].data.copy_(
                    p.data.view(-1).view(dtype=torch.uint8), non_blocking=True
                )
                offset += get_size(p)
            # Make sure the async copies have landed in the buffer before
            # telling the consumer to read it.
            torch.cuda.synchronize()
            s.send_pyobj(named_tensors)
            s.recv()
        s.send_pyobj(None)
        s.recv()
        s.close()
        del buffer
        gc.collect()
        torch.cuda.empty_cache()
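

# The consumer side of the handshake lives in
# `rlhf_utils.ColocateWorkerExtension.update_weights_from_ipc` (not shown
# here); it is expected to rebuild the staging buffer from the IPC handle
# and copy each bucket's tensors into the vLLM model before acknowledging.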


# Ray manages four GPUs.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
ray.init()

# Co-locate vLLM instances and training actors on the same set of GPUs:
# * GPU 0 and 1: training actor 0, training actor 1, and vLLM instance 0
#   (tensor parallelism = 2).
# * GPU 2 and 3: training actor 2, training actor 3, and vLLM instance 1
#   (tensor parallelism = 2).
pg = placement_group([{"GPU": 1, "CPU": 0}] * 4)
ray.get(pg.ready())
print(f"placement group has bundles {pg.bundle_specs=}")

training_actors = []
training_actor_device_ids = []
inference_engines = []
inference_engine_device_ids = []

for bundle_index in [0, 1, 2, 3]:
    training_actor = ray.remote(
        num_cpus=0,
        num_gpus=0.4,
        scheduling_strategy=PlacementGroupSchedulingStrategy(
            placement_group=pg,
            placement_group_capture_child_tasks=True,
            placement_group_bundle_index=bundle_index,
        ),
    )(RayTrainingActor).remote()
    training_actors.append(training_actor)
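
# Each training actor reserves 0.4 GPU (`num_gpus=0.4` above) and each
# colocated vLLM worker reserves another 0.4 (`VLLM_RAY_PER_WORKER_GPUS`),
# so one training actor and one vLLM worker fit on a single GPU.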

for bundle_index, training_actor in enumerate(training_actors):
    device_id = ray.get(training_actor.report_device_id.remote())
    print(f"training actor {bundle_index} is on {device_id}")
    training_actor_device_ids.append(device_id)

for i, bundle_indices in enumerate([[0, 1], [2, 3]]):
    # Use the following syntax instead of the @ray.remote decorator so that
    # the placement group is customized for each bundle.
    llm = ray.remote(
        num_cpus=0,
        num_gpus=0,
        scheduling_strategy=PlacementGroupSchedulingStrategy(
            placement_group=pg,
            placement_group_capture_child_tasks=True,
        ),
    )(MyLLM).remote(
        model="facebook/opt-125m",
        enforce_eager=True,
        worker_extension_cls="rlhf_utils.ColocateWorkerExtension",
        tensor_parallel_size=2,
        distributed_executor_backend="ray",
        gpu_memory_utilization=0.4,
        bundle_indices=bundle_indices,
    )
    inference_engines.append(llm)
    # Do not call any method on the inference engine at this point; the call
    # blocks until the vLLM instance finishes initialization.
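
# `collective_rpc` invokes the named worker-extension method on every worker
# of an engine and returns one result per tensor-parallel worker, so each
# entry appended below is a list of two device UUIDs.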

for i, llm in enumerate(inference_engines):
    inference_engine_device_ids.append(
        ray.get(llm.collective_rpc.remote("report_device_id", args=tuple()))
    )
    print(f"inference engine {i} is on {inference_engine_device_ids[-1]}")

# Verify placement: the first two training actors share the same GPUs as
# the first inference engine.
assert training_actor_device_ids[:2] == inference_engine_device_ids[0]
# Verify placement: the last two training actors share the same GPUs as
# the second inference engine.
assert training_actor_device_ids[2:] == inference_engine_device_ids[1]
print("Gather all the ZMQ handles from the training actors.")
|
|
zmq_handles = {}
|
|
for actor in training_actors:
|
|
zmq_handles.update(ray.get(actor.get_zmq_handles.remote()))
|
|
|
|
print(f"ZMQ handles: {zmq_handles}")
|
|
|
|
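
# The producer (`update_weights`) and the consumer (`update_weights_from_ipc`)
# block on each other through the REQ/REP handshake, so both sets of tasks
# must be in flight before waiting on any of them; hence the single
# `ray.get` over all futures.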
print("Update the weights of the inference engines.")
|
|
ray.get(
|
|
[actor.update_weights.remote() for actor in training_actors]
|
|
+ [
|
|
llm.collective_rpc.remote("update_weights_from_ipc", args=(zmq_handles,))
|
|
for llm in inference_engines
|
|
]
|
|
)
|
|
|
|
print("Check if the weights are updated.")
|
|
for llm in inference_engines:
|
|
assert ray.get(llm.collective_rpc.remote("check_weights_changed", args=tuple()))
|