diff --git a/model-list.json b/model-list.json
index b1bb0448..be8c9576 100644
--- a/model-list.json
+++ b/model-list.json
@@ -673,8 +673,8 @@
         {
             "name": "CLIPVision model (stabilityai/clip_vision_g)",
             "type": "clip_vision",
-            "base": "SDXL",
-            "save_path": "clip_vision/SDXL",
+            "base": "vit-g",
+            "save_path": "clip_vision",
             "description": "[3.69GB] clip_g vision model",
             "reference": "https://huggingface.co/stabilityai/control-lora",
             "filename": "clip_vision_g.safetensors",
@@ -683,38 +683,18 @@
         {
             "name": "CLIPVision model (openai/clip-vit-large)",
             "type": "clip_vision",
-            "base": "SD1.5",
-            "save_path": "clip_vision/SD1.5",
+            "base": "ViT-L",
+            "save_path": "clip_vision",
             "description": "[1.7GB] CLIPVision model (needed for styles model)",
             "reference": "https://huggingface.co/openai/clip-vit-large-patch14",
-            "filename": "pytorch_model.bin",
-            "url": "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/pytorch_model.bin"
-        },
-        {
-            "name": "CLIPVision model (IP-Adapter) 1.5",
-            "type": "clip_vision",
-            "base": "SD1.5",
-            "save_path": "clip_vision/SD1.5",
-            "description": "[2.5GB] CLIPVision model (needed for IP-Adapter)",
-            "reference": "https://huggingface.co/h94/IP-Adapter",
-            "filename": "pytorch_model.bin",
-            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/pytorch_model.bin"
-        },
-        {
-            "name": "CLIPVision model (IP-Adapter) XL",
-            "type": "clip_vision",
-            "base": "SDXL",
-            "save_path": "clip_vision/SDXL",
-            "description": "[3.69GB] CLIPVision model (needed for IP-Adapter)",
-            "reference": "https://huggingface.co/h94/IP-Adapter",
-            "filename": "pytorch_model.bin",
-            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/pytorch_model.bin"
+            "filename": "clip-vit-large-patch14.bin",
+            "url": "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/model.safetensors"
         },
         {
             "name": "CLIPVision model (IP-Adapter) CLIP-ViT-H-14-laion2B-s32B-b79K",
             "type": "clip_vision",
-            "base": "SD1.5",
-            "save_path": "clip_vision/SD1.5",
+            "base": "ViT-H",
+            "save_path": "clip_vision",
             "description": "[2.5GB] CLIPVision model (needed for IP-Adapter)",
             "reference": "https://huggingface.co/h94/IP-Adapter",
             "filename": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors",
@@ -723,8 +703,8 @@
         {
             "name": "CLIPVision model (IP-Adapter) CLIP-ViT-bigG-14-laion2B-39B-b160k",
             "type": "clip_vision",
-            "base": "SDXL",
-            "save_path": "clip_vision/SDXL",
+            "base": "ViT-G",
+            "save_path": "clip_vision",
             "description": "[3.69GB] CLIPVision model (needed for IP-Adapter)",
             "reference": "https://huggingface.co/h94/IP-Adapter",
             "filename": "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors",
diff --git a/node_db/new/model-list.json b/node_db/new/model-list.json
index 0780f781..c99a6ad1 100644
--- a/node_db/new/model-list.json
+++ b/node_db/new/model-list.json
@@ -680,8 +680,8 @@
         {
             "name": "CLIPVision model (stabilityai/clip_vision_g)",
             "type": "clip_vision",
-            "base": "SDXL",
-            "save_path": "clip_vision/SDXL",
+            "base": "vit-g",
+            "save_path": "clip_vision",
             "description": "[3.69GB] clip_g vision model",
             "reference": "https://huggingface.co/stabilityai/control-lora",
             "filename": "clip_vision_g.safetensors",
@@ -689,24 +689,24 @@
         },
         {
-            "name": "CLIPVision model (IP-Adapter) 1.5",
+            "name": "CLIPVision model (IP-Adapter) CLIP-ViT-H-14-laion2B-s32B-b79K",
             "type": "clip_vision",
-            "base": "SD1.5",
-            "save_path": "clip_vision/SD1.5",
+            "base": "ViT-H",
+            "save_path": "clip_vision",
             "description": "[2.5GB] CLIPVision model (needed for IP-Adapter)",
             "reference": "https://huggingface.co/h94/IP-Adapter",
-            "filename": "pytorch_model.bin",
-            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/pytorch_model.bin"
+            "filename": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors",
+            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors"
         },
         {
-            "name": "CLIPVision model (IP-Adapter) XL",
+            "name": "CLIPVision model (IP-Adapter) CLIP-ViT-bigG-14-laion2B-39B-b160k",
             "type": "clip_vision",
-            "base": "SDXL",
-            "save_path": "clip_vision/SDXL",
+            "base": "ViT-G",
+            "save_path": "clip_vision",
             "description": "[3.69GB] CLIPVision model (needed for IP-Adapter)",
             "reference": "https://huggingface.co/h94/IP-Adapter",
-            "filename": "pytorch_model.bin",
-            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/pytorch_model.bin"
+            "filename": "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors",
+            "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/model.safetensors"
         }
     ]
 }