diff --git a/modules/lowvram.py b/modules/lowvram.py
index eed04fb0..908b5962 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -1,5 +1,5 @@
 import torch
-
+from modules import devices, shared
 
 module_in_gpu = None
 cpu = torch.device("cpu")
diff --git a/modules/paths.py b/modules/paths.py
index 7e18b11a..545b1313 100644
--- a/modules/paths.py
+++ b/modules/paths.py
@@ -3,7 +3,6 @@ import sys
 
 from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, cwd  # noqa: F401
 import modules.safe  # noqa: F401
-import ldm_patched.utils.path_utils as ldm_patched_path_utils
 
 
 def mute_sdxl_imports():
@@ -65,6 +64,8 @@ for d, must_exist, what, options in path_dirs:
 
     paths[what] = d
 
+import ldm_patched.utils.path_utils as ldm_patched_path_utils
+
 ldm_patched_path_utils.base_path = data_path
 ldm_patched_path_utils.models_dir = models_path
 ldm_patched_path_utils.output_directory = os.path.join(data_path, "output")
diff --git a/modules/processing.py b/modules/processing.py
index 94a08ec5..f477c9b1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -16,7 +16,7 @@ from skimage import exposure
 from typing import Any
 
 import modules.sd_hijack
-from modules import devices, prompt_parser, masking, sd_samplers, infotext_utils, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, rng
+from modules import devices, prompt_parser, masking, sd_samplers, lowvram, infotext_utils, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng
 from modules.rng import slerp  # noqa: F401
 from modules.sd_hijack import model_hijack
 from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 8a286237..e55cd217 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -1,8 +1,11 @@
 import torch
+from torch.nn.functional import silu
 from types import MethodType
 
-from modules import devices, sd_hijack_optimizations, shared, script_callbacks, sd_unet, patches
+from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet, patches
+from modules.hypernetworks import hypernetwork
 from modules.shared import cmd_opts
+from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr, xlmr_m18
 
 import ldm.modules.attention
 import ldm.modules.diffusionmodules.model
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 4364857c..83d9cb80 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -6,17 +6,22 @@ import threading
 import torch
 import re
 import safetensors.torch
-from omegaconf import ListConfig
+from omegaconf import OmegaConf, ListConfig
 from os import mkdir
 from urllib import request
 import ldm.modules.midas as midas
 import gc
 
-from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, errors, hashes, cache, extra_networks, processing, patches
+from ldm.util import instantiate_from_config
+
+from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack, patches
 from modules.timer import Timer
 import numpy as np
 
 from modules_forge import forge_loader
+import modules_forge.ops as forge_ops
+from ldm_patched.modules.ops import manual_cast
 from ldm_patched.modules import model_management as model_management
+import ldm_patched.modules.model_patcher
 
 model_dir = "Stable-diffusion"
diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py
index f52080a6..b1dca624 100644
--- a/modules/sd_samplers_cfg_denoiser.py
+++ b/modules/sd_samplers_cfg_denoiser.py
@@ -1,9 +1,10 @@
 import torch
-from modules import prompt_parser, sd_samplers_common
+from modules import prompt_parser, devices, sd_samplers_common
 
-from modules.shared import state
+from modules.shared import opts, state
 import modules.shared as shared
 from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
+from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
 from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback
 
 from modules_forge import forge_sampler
diff --git a/modules/sd_unet.py b/modules/sd_unet.py
index c9801039..151d682b 100644
--- a/modules/sd_unet.py
+++ b/modules/sd_unet.py
@@ -1,6 +1,6 @@
 import torch.nn
 
-from modules import script_callbacks, shared
+from modules import script_callbacks, shared, devices
 
 unet_options = []
 current_unet_option = None
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index 672a8d6d..62fd6524 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -2,7 +2,7 @@ import os
 import collections
 from dataclasses import dataclass
 
-from modules import paths, shared, script_callbacks, sd_models, extra_networks, sd_hijack, hashes
+from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks, lowvram, sd_hijack, hashes
 import glob
 from copy import deepcopy
 
diff --git a/modules/shared_init.py b/modules/shared_init.py
index a7b8d4a2..a1cd09af 100644
--- a/modules/shared_init.py
+++ b/modules/shared_init.py
@@ -1,5 +1,7 @@
 import os
 
+import torch
+
 from modules import shared
 from modules.shared import cmd_opts
 
diff --git a/modules_forge/forge_loader.py b/modules_forge/forge_loader.py
index 58ebf1a0..d662f8dd 100644
--- a/modules_forge/forge_loader.py
+++ b/modules_forge/forge_loader.py
@@ -71,6 +71,7 @@ def no_clip():
 
 
 def load_checkpoint_guess_config(sd, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True):
+    sd_keys = sd.keys()
     clip = None
     clipvision = None
     vae = None
@@ -98,6 +99,7 @@ def load_checkpoint_guess_config(sd, output_vae=True, output_clip=True, output_c
 
     if output_model:
         inital_load_device = model_management.unet_inital_load_device(parameters, unet_dtype)
+        offload_device = model_management.unet_offload_device()
         model = model_config.get_model(sd, "model.diffusion_model.", device=inital_load_device)
         model.load_model_weights(sd, "model.diffusion_model.")
diff --git a/modules_forge/ops.py b/modules_forge/ops.py
index 35c2566d..a0a0d171 100644
--- a/modules_forge/ops.py
+++ b/modules_forge/ops.py
@@ -2,6 +2,7 @@ import time
 import torch
 import contextlib
 from ldm_patched.modules import model_management
+from ldm_patched.modules.ops import use_patched_ops
 
 
 @contextlib.contextmanager
diff --git a/pyproject.toml b/pyproject.toml
index cb723298..d03036e7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,15 +12,15 @@ extend-select = [
 exclude = [
     "extensions",
     "extensions-disabled",
     "extensions-builtin",
     "ldm_patched",
 ]
 
 ignore = [
     "E501",  # Line too long
     "E721",  # Do not compare types, use `isinstance`
     "E731",  # Do not assign a `lambda` expression, use a `def`
-
+    "I001",  # Import block is un-sorted or un-formatted
     "C901",  # Function is too complex
     "C408",  # Rewrite as a literal