update DB

dr.lt.data 2024-01-25 17:04:11 +09:00
parent 819421ccbf
commit 8dd801435b
2 changed files with 22 additions and 42 deletions

View File

@@ -673,8 +673,8 @@
         {
             "name": "CLIPVision model (stabilityai/clip_vision_g)",
             "type": "clip_vision",
-            "base": "SDXL",
-            "save_path": "clip_vision/SDXL",
+            "base": "vit-g",
+            "save_path": "clip_vision",
             "description": "[3.69GB] clip_g vision model",
             "reference": "https://huggingface.co/stabilityai/control-lora",
             "filename": "clip_vision_g.safetensors",
@@ -683,38 +683,18 @@
         {
             "name": "CLIPVision model (openai/clip-vit-large)",
             "type": "clip_vision",
-            "base": "SD1.5",
-            "save_path": "clip_vision/SD1.5",
+            "base": "ViT-L",
+            "save_path": "clip_vision",
             "description": "[1.7GB] CLIPVision model (needed for styles model)",
             "reference": "https://huggingface.co/openai/clip-vit-large-patch14",
-            "filename": "pytorch_model.bin",
-            "url": "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/pytorch_model.bin"
-        },
-        {
-            "name": "CLIPVision model (IP-Adapter) 1.5",
-            "type": "clip_vision",
-            "base": "SD1.5",
-            "save_path": "clip_vision/SD1.5",
-            "description": "[2.5GB] CLIPVision model (needed for IP-Adapter)",
-            "reference": "https://huggingface.co/h94/IP-Adapter",
-            "filename": "pytorch_model.bin",
-            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/pytorch_model.bin"
-        },
-        {
-            "name": "CLIPVision model (IP-Adapter) XL",
-            "type": "clip_vision",
-            "base": "SDXL",
-            "save_path": "clip_vision/SDXL",
-            "description": "[3.69GB] CLIPVision model (needed for IP-Adapter)",
-            "reference": "https://huggingface.co/h94/IP-Adapter",
-            "filename": "pytorch_model.bin",
-            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/pytorch_model.bin"
+            "filename": "clip-vit-large-patch14.bin",
+            "url": "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/model.safetensors"
         },
         {
             "name": "CLIPVision model (IP-Adapter) CLIP-ViT-H-14-laion2B-s32B-b79K",
             "type": "clip_vision",
-            "base": "SD1.5",
-            "save_path": "clip_vision/SD1.5",
+            "base": "ViT-H",
+            "save_path": "clip_vision",
             "description": "[2.5GB] CLIPVision model (needed for IP-Adapter)",
             "reference": "https://huggingface.co/h94/IP-Adapter",
             "filename": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors",
@@ -723,8 +703,8 @@
         {
             "name": "CLIPVision model (IP-Adapter) CLIP-ViT-bigG-14-laion2B-39B-b160k",
             "type": "clip_vision",
-            "base": "SDXL",
-            "save_path": "clip_vision/SDXL",
+            "base": "ViT-G",
+            "save_path": "clip_vision",
             "description": "[3.69GB] CLIPVision model (needed for IP-Adapter)",
             "reference": "https://huggingface.co/h94/IP-Adapter",
             "filename": "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors",

View File

@@ -680,8 +680,8 @@
         {
             "name": "CLIPVision model (stabilityai/clip_vision_g)",
             "type": "clip_vision",
-            "base": "SDXL",
-            "save_path": "clip_vision/SDXL",
+            "base": "vit-g",
+            "save_path": "clip_vision",
             "description": "[3.69GB] clip_g vision model",
             "reference": "https://huggingface.co/stabilityai/control-lora",
             "filename": "clip_vision_g.safetensors",
@@ -689,24 +689,24 @@
         },
         {
-            "name": "CLIPVision model (IP-Adapter) 1.5",
+            "name": "CLIPVision model (IP-Adapter) CLIP-ViT-H-14-laion2B-s32B-b79K",
             "type": "clip_vision",
-            "base": "SD1.5",
-            "save_path": "clip_vision/SD1.5",
+            "base": "ViT-H",
+            "save_path": "clip_vision",
             "description": "[2.5GB] CLIPVision model (needed for IP-Adapter)",
             "reference": "https://huggingface.co/h94/IP-Adapter",
-            "filename": "pytorch_model.bin",
-            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/pytorch_model.bin"
+            "filename": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors",
+            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors"
         },
         {
-            "name": "CLIPVision model (IP-Adapter) XL",
+            "name": "CLIPVision model (IP-Adapter) CLIP-ViT-bigG-14-laion2B-39B-b160k",
             "type": "clip_vision",
-            "base": "SDXL",
-            "save_path": "clip_vision/SDXL",
+            "base": "ViT-G",
+            "save_path": "clip_vision",
             "description": "[3.69GB] CLIPVision model (needed for IP-Adapter)",
             "reference": "https://huggingface.co/h94/IP-Adapter",
-            "filename": "pytorch_model.bin",
-            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/pytorch_model.bin"
+            "filename": "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors",
+            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/model.safetensors"
         }
     ]
 }
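For context, the sketch below shows how an entry from this model database could be resolved to a local download target using the "save_path", "filename", and "url" fields visible in the diff, and how the flattened "clip_vision" save path would look in practice. This is a minimal illustration, not the project's actual installer: the model-list.json filename, the "models" root directory, and the urllib-based download are assumptions.

import json
import os
import urllib.request

def resolve_target(entry: dict, models_dir: str) -> str:
    # Local path is <models_dir>/<save_path>/<filename>, per the DB fields above.
    return os.path.join(models_dir, entry["save_path"], entry["filename"])

def download_entry(entry: dict, models_dir: str) -> str:
    # Fetch entry["url"] into its resolved target if it is not already present.
    target = resolve_target(entry, models_dir)
    os.makedirs(os.path.dirname(target), exist_ok=True)
    if not os.path.exists(target):
        urllib.request.urlretrieve(entry["url"], target)
    return target

if __name__ == "__main__":
    # "model-list.json" and the "models" directory name are assumed for this sketch.
    with open("model-list.json", encoding="utf-8") as f:
        db = json.load(f)
    # After this commit, the CLIP vision encoders share a single "clip_vision"
    # folder instead of per-base subfolders such as "clip_vision/SD1.5".
    for entry in db["models"]:
        if entry["type"] == "clip_vision":
            print(entry["base"], "->", resolve_target(entry, "models"))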