move files

lllyasviel 2024-01-31 10:28:27 -08:00
parent 248231d82b
commit 56b32e53f3
2 changed files with 27 additions and 27 deletions

View File

@@ -1,27 +0,0 @@
from modules_forge.supported_preprocessor import PreprocessorClipVision
from modules_forge.shared import add_supported_preprocessor
from modules_forge.forge_util import numpy_to_pytorch


class PreprocessorClipVisionForIPAdapter(PreprocessorClipVision):
    def __init__(self, name, url, filename):
        super().__init__(name, url, filename)
        self.tags = ['IP-Adapter']
        self.model_filename_filters = ['IP-Adapter', 'IP_Adapter']

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        clipvision = self.load_clipvision()
        return clipvision, numpy_to_pytorch(input_image)


add_supported_preprocessor(PreprocessorClipVisionForIPAdapter(
    name='CLIP-ViT-H (IPAdapter)',
    url='https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors',
    filename='CLIP-ViT-H-14.safetensors'
))

add_supported_preprocessor(PreprocessorClipVisionForIPAdapter(
    name='CLIP-ViT-bigG (IPAdapter)',
    url='https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/model.safetensors',
    filename='CLIP-ViT-bigG.safetensors'
))
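
The `__call__` pattern in the file above is deliberately lightweight: it lazily loads the CLIP Vision encoder via `load_clipvision()` and returns it together with the input image converted to a torch tensor, leaving the actual IP-Adapter application to the control model patcher. A minimal usage sketch, assuming a HWC uint8 numpy image as input and that the base class handles downloading the encoder from `url` to `filename` (both assumptions, not shown in this diff):

# Hypothetical driver code for illustration only; not part of this commit.
import numpy as np

preprocessor = PreprocessorClipVisionForIPAdapter(
    name='CLIP-ViT-H (IPAdapter)',
    url='https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors',
    filename='CLIP-ViT-H-14.safetensors'
)

image = np.zeros((512, 512, 3), dtype=np.uint8)  # placeholder input image
clipvision, image_tensor = preprocessor(image, resolution=512)
# clipvision: the loaded CLIP Vision model
# image_tensor: the input image as a torch tensor, ready for the IP-Adapter patcher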

View File

@@ -1,3 +1,6 @@
from modules_forge.supported_preprocessor import PreprocessorClipVision
from modules_forge.shared import add_supported_preprocessor
from modules_forge.forge_util import numpy_to_pytorch
from modules_forge.shared import add_supported_control_model
from modules_forge.supported_controlnet import ControlModelPatcher
from lib_ipadapter.IPAdapterPlus import IPAdapterApply
@@ -6,6 +9,30 @@ from lib_ipadapter.IPAdapterPlus import IPAdapterApply
opIPAdapterApply = IPAdapterApply().apply_ipadapter


class PreprocessorClipVisionForIPAdapter(PreprocessorClipVision):
    def __init__(self, name, url, filename):
        super().__init__(name, url, filename)
        self.tags = ['IP-Adapter']
        self.model_filename_filters = ['IP-Adapter', 'IP_Adapter']

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        clipvision = self.load_clipvision()
        return clipvision, numpy_to_pytorch(input_image)


add_supported_preprocessor(PreprocessorClipVisionForIPAdapter(
    name='CLIP-ViT-H (IPAdapter)',
    url='https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors',
    filename='CLIP-ViT-H-14.safetensors'
))

add_supported_preprocessor(PreprocessorClipVisionForIPAdapter(
    name='CLIP-ViT-bigG (IPAdapter)',
    url='https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/model.safetensors',
    filename='CLIP-ViT-bigG.safetensors'
))


class IPAdapterPatcher(ControlModelPatcher):
    @staticmethod
    def try_build_from_state_dict(state_dict, ckpt_path):
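
The hunk is truncated here, so the body of `try_build_from_state_dict` and the rest of `IPAdapterPatcher` are not visible. The newly imported `add_supported_control_model` does suggest the patcher is registered the same way as the preprocessors above, though the call itself does not appear in the visible lines; a hedged sketch of that assumed registration:

# Assumed to follow the truncated class, mirroring the add_supported_preprocessor
# calls above; this line is NOT visible in the hunk.
add_supported_control_model(IPAdapterPatcher)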