Mirror of https://git.datalinker.icu/ltdrdata/ComfyUI-Manager

update DB

commit 8dd801435b (parent 819421ccbf)
@@ -673,8 +673,8 @@
         {
             "name": "CLIPVision model (stabilityai/clip_vision_g)",
             "type": "clip_vision",
-            "base": "SDXL",
-            "save_path": "clip_vision/SDXL",
+            "base": "vit-g",
+            "save_path": "clip_vision",
             "description": "[3.69GB] clip_g vision model",
             "reference": "https://huggingface.co/stabilityai/control-lora",
             "filename": "clip_vision_g.safetensors",
@@ -683,38 +683,18 @@
         {
             "name": "CLIPVision model (openai/clip-vit-large)",
             "type": "clip_vision",
-            "base": "SD1.5",
-            "save_path": "clip_vision/SD1.5",
+            "base": "ViT-L",
+            "save_path": "clip_vision",
             "description": "[1.7GB] CLIPVision model (needed for styles model)",
             "reference": "https://huggingface.co/openai/clip-vit-large-patch14",
-            "filename": "pytorch_model.bin",
-            "url": "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/pytorch_model.bin"
-        },
-        {
-            "name": "CLIPVision model (IP-Adapter) 1.5",
-            "type": "clip_vision",
-            "base": "SD1.5",
-            "save_path": "clip_vision/SD1.5",
-            "description": "[2.5GB] CLIPVision model (needed for IP-Adapter)",
-            "reference": "https://huggingface.co/h94/IP-Adapter",
-            "filename": "pytorch_model.bin",
-            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/pytorch_model.bin"
-        },
-        {
-            "name": "CLIPVision model (IP-Adapter) XL",
-            "type": "clip_vision",
-            "base": "SDXL",
-            "save_path": "clip_vision/SDXL",
-            "description": "[3.69GB] CLIPVision model (needed for IP-Adapter)",
-            "reference": "https://huggingface.co/h94/IP-Adapter",
-            "filename": "pytorch_model.bin",
-            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/pytorch_model.bin"
+            "filename": "clip-vit-large-patch14.bin",
+            "url": "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/model.safetensors"
         },
         {
             "name": "CLIPVision model (IP-Adapter) CLIP-ViT-H-14-laion2B-s32B-b79K",
             "type": "clip_vision",
-            "base": "SD1.5",
-            "save_path": "clip_vision/SD1.5",
+            "base": "ViT-H",
+            "save_path": "clip_vision",
             "description": "[2.5GB] CLIPVision model (needed for IP-Adapter)",
             "reference": "https://huggingface.co/h94/IP-Adapter",
             "filename": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors",
@@ -723,8 +703,8 @@
         {
             "name": "CLIPVision model (IP-Adapter) CLIP-ViT-bigG-14-laion2B-39B-b160k",
             "type": "clip_vision",
-            "base": "SDXL",
-            "save_path": "clip_vision/SDXL",
+            "base": "ViT-G",
+            "save_path": "clip_vision",
             "description": "[3.69GB] CLIPVision model (needed for IP-Adapter)",
             "reference": "https://huggingface.co/h94/IP-Adapter",
             "filename": "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors",
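For reference, here is one complete entry as it reads after this change, reconstructed from the hunks in this commit (the ViT-H encoder used by IP-Adapter): "base" now names the encoder architecture rather than the checkpoint family, "save_path" is flattened from clip_vision/SD1.5 and clip_vision/SDXL into a single clip_vision folder, and the generic pytorch_model.bin downloads are replaced by descriptively named .safetensors files.

        {
            "name": "CLIPVision model (IP-Adapter) CLIP-ViT-H-14-laion2B-s32B-b79K",
            "type": "clip_vision",
            "base": "ViT-H",
            "save_path": "clip_vision",
            "description": "[2.5GB] CLIPVision model (needed for IP-Adapter)",
            "reference": "https://huggingface.co/h94/IP-Adapter",
            "filename": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors",
            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors"
        }

The hunks below apply the same renames to a second copy of the model list in the repository.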
@@ -680,8 +680,8 @@
         {
             "name": "CLIPVision model (stabilityai/clip_vision_g)",
             "type": "clip_vision",
-            "base": "SDXL",
-            "save_path": "clip_vision/SDXL",
+            "base": "vit-g",
+            "save_path": "clip_vision",
             "description": "[3.69GB] clip_g vision model",
             "reference": "https://huggingface.co/stabilityai/control-lora",
             "filename": "clip_vision_g.safetensors",
@@ -689,24 +689,24 @@
         },

         {
-            "name": "CLIPVision model (IP-Adapter) 1.5",
+            "name": "CLIPVision model (IP-Adapter) CLIP-ViT-H-14-laion2B-s32B-b79K",
             "type": "clip_vision",
-            "base": "SD1.5",
-            "save_path": "clip_vision/SD1.5",
+            "base": "ViT-H",
+            "save_path": "clip_vision",
             "description": "[2.5GB] CLIPVision model (needed for IP-Adapter)",
             "reference": "https://huggingface.co/h94/IP-Adapter",
-            "filename": "pytorch_model.bin",
-            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/pytorch_model.bin"
+            "filename": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors",
+            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors"
         },
         {
-            "name": "CLIPVision model (IP-Adapter) XL",
+            "name": "CLIPVision model (IP-Adapter) CLIP-ViT-bigG-14-laion2B-39B-b160k",
             "type": "clip_vision",
-            "base": "SDXL",
-            "save_path": "clip_vision/SDXL",
+            "base": "ViT-G",
+            "save_path": "clip_vision",
             "description": "[3.69GB] CLIPVision model (needed for IP-Adapter)",
             "reference": "https://huggingface.co/h94/IP-Adapter",
-            "filename": "pytorch_model.bin",
-            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/pytorch_model.bin"
+            "filename": "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors",
+            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/model.safetensors"
         }
     ]
 }
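A minimal sketch of how a downloader might consume entries like these — not ComfyUI-Manager's actual installer, just an illustration assuming the file keeps its entries under a top-level "models" key (consistent with the closing ] } above); the models_dir default, the local "model-list.json" path, and the install_model helper are invented for this example:

import json
import os
import urllib.request

def install_model(entry: dict, models_dir: str = "ComfyUI/models") -> str:
    """Download one model-list entry to <models_dir>/<save_path>/<filename>.

    With the flattened save_path in this commit, every CLIP vision encoder
    lands directly in clip_vision/ instead of clip_vision/SD1.5 or
    clip_vision/SDXL subfolders.
    """
    dest_dir = os.path.join(models_dir, entry["save_path"])
    os.makedirs(dest_dir, exist_ok=True)
    dest = os.path.join(dest_dir, entry["filename"])
    if not os.path.exists(dest):  # skip models that were already fetched
        urllib.request.urlretrieve(entry["url"], dest)
    return dest

# Usage sketch: install every clip_vision entry from a local copy of the DB.
with open("model-list.json", encoding="utf-8") as f:
    db = json.load(f)
for entry in db["models"]:
    if entry["type"] == "clip_vision":
        print(install_model(entry))

The practical consequence of the save_path change is visible here: tools that previously looked for these encoders under clip_vision/SD1.5 or clip_vision/SDXL will now find them one level up, distinguished by the new "base" labels (ViT-L, ViT-H, ViT-G, vit-g) rather than by checkpoint family.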