import cv2
import os
import torch

from modules.paths import models_path
from ldm_patched.modules import model_management
from ldm_patched.modules.model_patcher import ModelPatcher
from modules_forge.forge_util import resize_image_with_pad


# Model directories are created on import so downloads and saves always have a destination.
controlnet_dir = os.path.join(models_path, 'ControlNet')
os.makedirs(controlnet_dir, exist_ok=True)

preprocessor_dir = os.path.join(models_path, 'ControlNetPreprocessor')
os.makedirs(preprocessor_dir, exist_ok=True)


# Global registry of preprocessor instances, keyed by display name.
shared_preprocessors = {}


def add_preprocessor(preprocessor):
    # Instantiate the class and register the instance under its display name.
    global shared_preprocessors
    p = preprocessor()
    shared_preprocessors[p.name] = p
    return


class PreprocessorParameter:
    # A thin container describing one Gradio slider: its range, step, label,
    # default value, and visibility. The kwargs are stored so the UI can apply
    # them to a shared slider component when the active preprocessor changes.
    def __init__(self, minimum=0.0, maximum=1.0, step=0.01, label='Parameter 1', value=0.5, visible=False, **kwargs):
        self.gradio_update_kwargs = dict(
            minimum=minimum, maximum=maximum, step=step, label=label, value=value, visible=visible, **kwargs
        )
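

# Hedged sketch: how a UI layer might consume `gradio_update_kwargs`. Forge's
# real UI wiring lives elsewhere; this helper is illustrative only, and assumes
# Gradio is installed (`gr.update` is standard Gradio API).
def _example_apply_parameter(param):
    import gradio as gr  # local import keeps this sketch free of module-level side effects
    # Returns an update dict that Gradio applies to an existing slider component.
    return gr.update(**param.gradio_update_kwargs)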


class Preprocessor:
    def __init__(self):
        self.name = 'PreprocessorBase'
        self.tag = None  # UI category tag, e.g. 'Canny'
        self.slider_resolution = PreprocessorParameter(label='Resolution', minimum=128, maximum=2048, value=512, step=8, visible=True)
        self.slider_1 = PreprocessorParameter()
        self.slider_2 = PreprocessorParameter()
        self.slider_3 = PreprocessorParameter()
        self.model_patcher: ModelPatcher = None
        self.show_control_mode = True
        self.do_not_need_model = False
        self.sorting_priority = 0.0  # higher values sort to the top of the list

    def setup_model_patcher(self, model, load_device=None, offload_device=None, dtype=torch.float32, **kwargs):
        # Wrap a raw torch module in a ModelPatcher so Forge can move it
        # between the load (GPU) and offload (CPU) devices on demand.
        if load_device is None:
            load_device = model_management.get_torch_device()

        if offload_device is None:
            offload_device = torch.device('cpu')

        if not model_management.should_use_fp16(load_device):
            dtype = torch.float32

        model.eval()
        model = model.to(device=offload_device, dtype=dtype)

        self.model_patcher = ModelPatcher(model=model, load_device=load_device, offload_device=offload_device, **kwargs)
        self.model_patcher.dtype = dtype
        return self.model_patcher

    def move_all_model_patchers_to_gpu(self):
        model_management.load_models_gpu([self.model_patcher])
        return

    def send_tensor_to_model_device(self, x):
        return x.to(device=self.model_patcher.current_device, dtype=self.model_patcher.dtype)

    def lazy_memory_management(self, model):
        # A lazy method that just frees enough memory for `model`, so legacy
        # code can keep managing memory by hand. Ideally all of this would go
        # through ModelPatcher, but that migration is a large workload, so for
        # now this quick-and-dirty estimate frees the required amount up front.
        required_memory = model_management.module_size(model) + model_management.minimum_inference_memory()
        model_management.free_memory(required_memory, device=model_management.get_torch_device())
        return
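
    # Hedged usage sketch for lazy_memory_management (hypothetical raw module
    # `net`, managed by hand as in legacy preprocessor code):
    #
    #     self.lazy_memory_management(net)
    #     net.to(device=model_management.get_torch_device())
    #     result = net(tensor)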

    def process_before_every_sampling(self, process, cnet):
        # Hook for subclasses: called once before each sampling run.
        return

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        # Default behavior: pass the image through unchanged.
        return input_image
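

# Hedged sketch: how a model-backed preprocessor might tie the helpers above
# together. This class is hypothetical and is deliberately not registered; the
# `model` argument stands in for a real torch.nn.Module that maps an image
# batch to an image-shaped tensor. The ModelPatcher calls mirror the helper
# methods defined on Preprocessor.
class _ExampleModelBackedPreprocessor(Preprocessor):
    def __init__(self, model):
        super().__init__()
        self.name = 'example_model_backed'
        # Wrap the raw module once; Forge then handles device placement.
        self.setup_model_patcher(model)

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        input_image, remove_pad = resize_image_with_pad(input_image, resolution)
        self.move_all_model_patchers_to_gpu()
        # HWC uint8 -> NCHW float in [0, 1]
        x = torch.from_numpy(input_image).float().permute(2, 0, 1).unsqueeze(0) / 255.0
        x = self.send_tensor_to_model_device(x)
        with torch.no_grad():
            y = self.model_patcher.model(x)  # assumes an image-shaped output tensor
        y = (y.squeeze(0).permute(1, 2, 0).clamp(0, 1) * 255.0).byte().cpu().numpy()
        return remove_pad(y)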


class PreprocessorNone(Preprocessor):
    def __init__(self):
        super().__init__()
        self.name = 'None'


class PreprocessorCanny(Preprocessor):
    def __init__(self):
        super().__init__()
        self.name = 'canny'
        self.tag = 'Canny'
        self.slider_1 = PreprocessorParameter(minimum=0, maximum=256, step=1, value=100, label='Low Threshold', visible=True)
        self.slider_2 = PreprocessorParameter(minimum=0, maximum=256, step=1, value=200, label='High Threshold', visible=True)

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        input_image, remove_pad = resize_image_with_pad(input_image, resolution)
        canny_image = cv2.cvtColor(cv2.Canny(input_image, int(slider_1), int(slider_2)), cv2.COLOR_GRAY2RGB)
        return remove_pad(canny_image)


add_preprocessor(PreprocessorNone)
add_preprocessor(PreprocessorCanny)
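

# Hedged usage sketch: pulling a registered preprocessor out of the registry
# and running it directly. `image` is a hypothetical HxWx3 uint8 numpy array;
# `resolution` and the two slider values map to the parameters declared above.
def _example_run_canny(image):
    p = shared_preprocessors['canny']
    return p(image, resolution=512, slider_1=100, slider_2=200)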