# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import json
import os
from dataclasses import dataclass
from typing import Optional, Union

import torch
from safetensors.torch import save_file

from vllm.lora.lora_weights import LoRALayerWeights, PackedLoRALayerWeights

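
# Test helpers for vLLM's LoRA support: DummyLoRAManager builds in-memory
# LoRA weights, assert_close compares tensors with dtype-aware tolerances,
# the generate_data* helpers produce random PunicaTensors for the Punica
# shrink/expand kernels, and create_peft_lora writes a PEFT-format adapter
# (adapter_config.json + adapter_model.safetensors) to disk.
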
class DummyLoRAManager:

    def __init__(self, device: torch.device = "cuda:0"):
        super().__init__()
        self._loras: dict[str, LoRALayerWeights] = {}
        self._device = device

    def set_module_lora(self, module_name: str, lora: LoRALayerWeights):
        self._loras[module_name] = lora

    def get_module_lora(self, module_name: str) -> LoRALayerWeights:
        return self._loras[module_name]

    def init_random_lora(
        self,
        module_name: str,
        weight: torch.Tensor,
        rank: int = 8,
        generate_embeddings_tensor: int = 0,
    ):
        lora = LoRALayerWeights(
            module_name,
            rank=rank,
            lora_alpha=1,
            lora_a=torch.rand([rank, weight.shape[1]],
                              dtype=weight.dtype,
                              device=self._device),
            lora_b=torch.rand([weight.shape[0], rank],
                              dtype=weight.dtype,
                              device=self._device),
        )
        if generate_embeddings_tensor:
            lora.embeddings_tensor = torch.rand(
                5,
                generate_embeddings_tensor,
                dtype=weight.dtype,
                device=self._device,
            )
        self.set_module_lora(module_name, lora)

        return lora

    def init_lora(
        self,
        module_name: str,
        input_dim: int,
        output_dim: int,
        rank=8,
        noop=False,
        embeddings_tensor=None,
    ):
        lora = LoRALayerWeights(
            module_name,
            rank=rank,
            lora_alpha=1,
            lora_a=torch.rand([rank, input_dim], device="cuda"),
            lora_b=torch.rand([output_dim, rank], device="cuda"),
            embeddings_tensor=embeddings_tensor,
        )
        self.set_module_lora(module_name, lora)
        return lora

    def reset_lora(self):
        self._loras = {}

    def init_packed_lora(
        self,
        module_name: str,
        input_dim: int,
        output_dims: list[int],
        noop_lora_index: Optional[list[int]] = None,
        rank: int = 8,
    ):
        base_loras: list[LoRALayerWeights] = []
        noop_lora_index_set = set(noop_lora_index or [])

        for i, out_dim in enumerate(output_dims):
            base_lora = self.init_lora(
                module_name + "_000_" + str(i),
                input_dim,
                out_dim,
                rank=rank,
                noop=i in noop_lora_index_set,
            )
            base_loras.append(base_lora)
        packed_lora = PackedLoRALayerWeights.pack(base_loras)
        self.set_module_lora(module_name, packed_lora)
        return packed_lora

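
# Example (illustrative only; the module name and dimensions below are
# hypothetical, not taken from any real model config):
#
#   manager = DummyLoRAManager(device="cuda:0")
#   qkv_lora = manager.init_packed_lora(
#       "model.layers.0.self_attn.qkv_proj",
#       input_dim=4096,
#       output_dims=[4096, 1024, 1024],
#       rank=8,
#   )
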
def assert_close(a, b):
    rtol, atol = {
        torch.float16: (6e-2, 6e-2),
        torch.bfloat16: (6e-2, 6e-2),
        torch.float32: (1e-2, 1e-2),
    }[a.dtype]
    torch.testing.assert_close(a, b, rtol=rtol, atol=atol)

@dataclass
class PunicaTensors:
    inputs_tensor: torch.Tensor
    lora_weights: Union[torch.Tensor, list[torch.Tensor]]
    our_out_tensor: torch.Tensor
    ref_out_tensor: torch.Tensor
    b_seq_start_loc: torch.Tensor
    prompt_lora_mapping: torch.Tensor
    seq_len_tensor: torch.Tensor
    token_lora_mapping: torch.Tensor

    def meta(self) -> tuple[int, int]:
        """
        Infer max_seq_length and token_nums from the tensors
        and return them.
        """
        max_seq_length = self.seq_len_tensor.max()
        token_nums = self.seq_len_tensor.sum().item()
        if isinstance(max_seq_length, tuple):
            max_seq_length = max_seq_length[0].item()
        else:
            max_seq_length = max_seq_length.item()
        return max_seq_length, token_nums

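
# The generate_data* helpers below build random PunicaTensors for the Punica
# LoRA kernel tests. For "shrink" the kernel maps hidden states down to the
# LoRA rank and accumulates with atomic_add, so outputs start at zero; for
# "expand" the kernel computes y += a @ lora_b, so outputs start from random
# values shared by the reference and kernel-under-test tensors.
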
def generate_data(
    batches,
    hidden_size,
    lora_nums,
    max_rank,
    seq_length,
    dtype,
    op_type,
    device,
) -> PunicaTensors:
    seq_len_tensor = torch.randint(seq_length, seq_length + 1,
                                   (batches, )).to(device)
    b_seq_start_loc = torch.cumsum(
        torch.tensor([0] + seq_len_tensor[:-1].tolist(), dtype=torch.long),
        dim=0,
    ).to(device)
    total_tokens = seq_len_tensor.sum()
    if op_type == "shrink":
        inputs_tensor = torch.rand((total_tokens, hidden_size),
                                   dtype=dtype).to(device)
        lora_weights = torch.rand(
            (lora_nums, max_rank, hidden_size),  # col-major
            dtype=dtype,
        ).to(device)
        # The shrink op needs atomic_add, so the output is initialized to 0.
        ref_out_tensor = torch.zeros((total_tokens, max_rank),
                                     dtype=dtype,
                                     device=inputs_tensor.device)
        # NOTE: the shrink kernel uses torch.float32 as its output dtype.
        our_out_tensor = torch.zeros((total_tokens, max_rank),
                                     dtype=torch.float32).to(device)
    else:
        inputs_tensor = torch.rand(
            (total_tokens, max_rank),
            dtype=dtype,
        ).to(device)
        lora_weights = torch.rand(
            (lora_nums, hidden_size, max_rank),  # col-major
            dtype=dtype,
        ).to(device)
        # The expand op computes y += a @ lora_b, so the output is
        # initialized randomly.
        ref_out_tensor = torch.rand(
            (total_tokens, hidden_size),
            dtype=dtype,
        ).to(device)
        # Ensure the same input.
        our_out_tensor = ref_out_tensor.clone()
    lora_indices_tensor = torch.randint(0,
                                        lora_nums - 1 if lora_nums > 1 else 1,
                                        (batches, )).to(device)
    indices = torch.zeros((total_tokens), dtype=torch.long).to(device)
    current_offset = 0
    for b_id in range(batches):
        lora_index = lora_indices_tensor[b_id]
        indices[current_offset:current_offset +
                seq_len_tensor[b_id]].copy_(lora_index)
        current_offset += seq_len_tensor[b_id].item()

    return PunicaTensors(
        inputs_tensor,
        lora_weights,
        our_out_tensor,
        ref_out_tensor,
        b_seq_start_loc,
        lora_indices_tensor,
        seq_len_tensor,
        indices,
    )

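
# Example (illustrative only; batch/shape values are hypothetical):
#
#   data = generate_data(batches=4, hidden_size=2048, lora_nums=4,
#                        max_rank=16, seq_length=128, dtype=torch.float16,
#                        op_type="shrink", device="cuda:0")
#   max_seq_length, token_nums = data.meta()
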
def generate_data_for_expand_nslices(
    batches,
    hidden_size,
    lora_nums,
    max_rank,
    seq_length,
    dtype,
    nslices,
    device,
) -> PunicaTensors:
    seq_len_tensor = torch.randint(seq_length, seq_length + 1,
                                   (batches, )).to(device)
    b_seq_start_loc = torch.cumsum(
        torch.tensor([0] + seq_len_tensor[:-1].tolist(), dtype=torch.long),
        dim=0,
    ).to(device)
    total_tokens = seq_len_tensor.sum()
    inputs_tensor = torch.rand(
        (total_tokens, max_rank),
        dtype=dtype,
    ).to(device)
    lora_weights_lst = []
    for _ in range(nslices):
        lora_weights_lst.append(
            torch.rand(
                (lora_nums, hidden_size, max_rank),  # col-major
                dtype=dtype,
            ).to(device))
    # The expand op computes y += a @ lora_b, so the output is
    # initialized randomly.
    ref_out_tensor = torch.rand((total_tokens, hidden_size * nslices),
                                dtype=dtype).to(device)
    # Ensure the same input.
    our_out_tensor = ref_out_tensor.clone()
    lora_indices_tensor = torch.randint(0,
                                        lora_nums - 1 if lora_nums > 1 else 1,
                                        (batches, ))
    indices = torch.zeros((total_tokens), dtype=torch.long).to(device)
    current_offset = 0
    for b_id in range(batches):
        lora_index = lora_indices_tensor[b_id]
        indices[current_offset:current_offset +
                seq_len_tensor[b_id]] = (lora_index.item())
        current_offset += seq_len_tensor[b_id].item()

    lora_indices_tensor = lora_indices_tensor.to(device)
    return PunicaTensors(
        inputs_tensor,
        lora_weights_lst,
        our_out_tensor,
        ref_out_tensor,
        b_seq_start_loc,
        lora_indices_tensor,
        seq_len_tensor,
        indices,
    )

def generate_data_for_nslices(
    batches,
    hidden_size,
    lora_nums,
    max_rank,
    seq_length,
    nslices,
    dtype,
    op_type,
    device,
) -> PunicaTensors:
    seq_len_tensor = torch.randint(seq_length, seq_length + 1,
                                   (batches, )).to(device)
    b_seq_start_loc = torch.cumsum(
        torch.tensor([0] + seq_len_tensor[:-1].tolist(), dtype=torch.long),
        dim=0,
    ).to(device)
    total_tokens = seq_len_tensor.sum()

    lora_weights_lst = []
    if op_type == "shrink":
        inputs_tensor = torch.rand((total_tokens, hidden_size),
                                   dtype=dtype).to(device)
        for _ in range(nslices):
            lora_weights_lst.append(
                torch.rand(
                    (lora_nums, max_rank, hidden_size),  # col-major
                    dtype=dtype,
                ).to(device))
        # NOTE: the shrink kernel uses torch.float32 as its output dtype.
        # The shrink op needs atomic_add, so the output is initialized to 0.
        our_out_tensor = torch.zeros(
            (nslices, total_tokens, max_rank),
            dtype=torch.float32,
        ).to(device)
    else:
        inputs_tensor = torch.rand(
            (nslices, total_tokens, max_rank),
            dtype=dtype,
        ).to(device)
        for _ in range(nslices):
            lora_weights_lst.append(
                torch.rand(
                    (lora_nums, hidden_size, max_rank),  # col-major
                    dtype=dtype,
                ).to(device))
        # The expand op computes y += a @ lora_b, so the output is
        # initialized randomly.
        our_out_tensor = torch.rand((total_tokens, hidden_size * nslices),
                                    dtype=dtype).to(device)

    # Ensure the same input.
    ref_out_tensor = our_out_tensor.clone()
    lora_indices_tensor = torch.randint(0,
                                        lora_nums - 1 if lora_nums > 1 else 1,
                                        (batches, ))
    indices = torch.zeros((total_tokens), dtype=torch.long).to(device)
    current_offset = 0
    for b_id in range(batches):
        lora_index = lora_indices_tensor[b_id]
        indices[current_offset:current_offset +
                seq_len_tensor[b_id]] = (lora_index.item())
        current_offset += seq_len_tensor[b_id].item()

    lora_indices_tensor = lora_indices_tensor.to(device)
    return PunicaTensors(
        inputs_tensor,
        lora_weights_lst,
        our_out_tensor,
        ref_out_tensor,
        b_seq_start_loc,
        lora_indices_tensor,
        seq_len_tensor,
        indices,
    )

def create_peft_lora(
    model: torch.nn.Module,
    save_dir: str,
    target_modules: list[str],
    rank: int = 8,
    alpha: int = 16,
    dropout: float = 0.1,
    lora_dtype: torch.dtype = torch.float16,
) -> dict[str, torch.Tensor]:
    lora_weights = {}
    adapter_config = {
        "peft_type": "LORA",
        "auto_mapping": None,
        "base_model_name_or_path": "dummy_model",
        "revision": None,
        "task_type": "CAUSAL_LM",
        "inference_mode": False,
        "r": rank,
        "lora_alpha": alpha,
        "lora_dropout": dropout,
        "fan_in_fan_out": False,
        "bias": "none",
        "modules_to_save": None,
        "init_lora_weights": True,
        "layers_to_transform": None,
        "layers_pattern": None,
        "target_modules": target_modules,
        "exclude_modules": None,
        "use_rslora": False,
        "use_dora": False,
        "loftq_config": None,
    }

    for module_name in target_modules:
        module = model
        for attr in module_name.split("."):
            module = getattr(module, attr)

        if hasattr(module, "input_size") and hasattr(module, "output_size"):
            in_features = module.input_size
            out_features = module.output_size
        elif hasattr(module, "embedding_dim") and hasattr(
                module, "num_embeddings"):
            # ParallelLMHead
            in_features = module.embedding_dim
            out_features = module.num_embeddings
        else:
            raise ValueError(
                f"Unable to determine dimensions for module {module_name}")

        lora_A = torch.randn(rank, in_features, dtype=lora_dtype)
        torch.nn.init.kaiming_uniform_(lora_A, a=5**0.5)

        lora_B = torch.zeros(out_features, rank, dtype=lora_dtype)

        # PEFT style
        lora_weights[f"base_model.model.{module_name}.lora_A.weight"] = lora_A
        lora_weights[f"base_model.model.{module_name}.lora_B.weight"] = lora_B

    config_path = os.path.join(save_dir, "adapter_config.json")
    with open(config_path, "w", encoding="utf-8") as f:
        json.dump(adapter_config, f, indent=2, ensure_ascii=False)

    weights_path = os.path.join(save_dir, "adapter_model.safetensors")
    save_file(lora_weights, weights_path)

    return lora_weights
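
# Example (illustrative only; assumes `model` is an already-built vLLM model
# whose target submodules expose input_size/output_size, and uses a temporary
# directory):
#
#   import tempfile
#
#   with tempfile.TemporaryDirectory() as tmp_dir:
#       create_peft_lora(
#           model,
#           save_dir=tmp_dir,
#           target_modules=["model.layers.0.self_attn.q_proj"],
#           rank=8,
#           alpha=16,
#       )
#       # tmp_dir now contains adapter_config.json and
#       # adapter_model.safetensors.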