From 66767e3876dde8d0ef27ce00254cd6b75332f036 Mon Sep 17 00:00:00 2001 From: "Alessandro de Oliveira Faria (A.K.A. CABELO)" Date: Fri, 10 Nov 2023 03:45:44 -0300 Subject: [PATCH 01/31] - opensuse compatibility --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 4e083440..89e54a61 100644 --- a/README.md +++ b/README.md @@ -121,6 +121,8 @@ Alternatively, use online services (like Google Colab): sudo apt install wget git python3 python3-venv libgl1 libglib2.0-0 # Red Hat-based: sudo dnf install wget git python3 +# openSUSE-based: +sudo zypper install wget git python3 # Arch-based: sudo pacman -S wget git python3 ``` From 6a86b3ad9bc7bb9a58dc4228ecf93a3a511ed122 Mon Sep 17 00:00:00 2001 From: "Alessandro de Oliveira Faria (A.K.A. CABELO)" Date: Fri, 10 Nov 2023 14:15:34 -0300 Subject: [PATCH 02/31] Compatibility with Debian 11, Fedora 34+ and openSUSE 15.4+ --- README.md | 4 ++-- webui.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 89e54a61..d4aa376b 100644 --- a/README.md +++ b/README.md @@ -120,9 +120,9 @@ Alternatively, use online services (like Google Colab): # Debian-based: sudo apt install wget git python3 python3-venv libgl1 libglib2.0-0 # Red Hat-based: -sudo dnf install wget git python3 +sudo dnf install wget git python3 gperftools-libs libglvnd-glx # openSUSE-based: -sudo zypper install wget git python3 +sudo zypper install wget git python3 libtcmalloc4 libglvnd # Arch-based: sudo pacman -S wget git python3 ``` diff --git a/webui.sh b/webui.sh index 3d0f87ee..5c23c1d8 100755 --- a/webui.sh +++ b/webui.sh @@ -87,7 +87,7 @@ delimiter="################################################################" printf "\n%s\n" "${delimiter}" printf "\e[1m\e[32mInstall script for stable-diffusion + Web UI\n" -printf "\e[1m\e[34mTested on Debian 11 (Bullseye)\e[0m" +printf "\e[1m\e[34mTested on Debian 11 (Bullseye), Fedora 34+ and openSUSE Leap 15.4 or newer.\e[0m" printf "\n%s\n" "${delimiter}" # Do not run as root @@ -222,7 +222,7 @@ fi # Try using TCMalloc on Linux prepare_tcmalloc() { if [[ "${OSTYPE}" == "linux"* ]] && [[ -z "${NO_TCMALLOC}" ]] && [[ -z "${LD_PRELOAD}" ]]; then - TCMALLOC="$(PATH=/usr/sbin:$PATH ldconfig -p | grep -Po "libtcmalloc(_minimal|)\.so\.\d" | head -n 1)" + TCMALLOC="$(PATH=/sbin:$PATH ldconfig -p | grep -Po "libtcmalloc(_minimal|)\.so\.\d" | head -n 1)" if [[ ! 
-z "${TCMALLOC}" ]]; then echo "Using TCMalloc: ${TCMALLOC}" export LD_PRELOAD="${TCMALLOC}" From 0fc7dc1c04a046d95588651ffc4e71a7d40378d3 Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sat, 11 Nov 2023 04:01:13 -0600 Subject: [PATCH 03/31] implementing script metadata and DAG sorting mechanism --- modules/extensions.py | 80 +++++++++++++++++++++--- modules/scripts.py | 139 +++++++++++++++++++++++++++++++++++++----- 2 files changed, 196 insertions(+), 23 deletions(-) diff --git a/modules/extensions.py b/modules/extensions.py index bf9a1878..e317a404 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -1,3 +1,5 @@ +import configparser +import functools import os import threading @@ -23,8 +25,9 @@ class Extension: lock = threading.Lock() cached_fields = ['remote', 'commit_date', 'branch', 'commit_hash', 'version'] - def __init__(self, name, path, enabled=True, is_builtin=False): + def __init__(self, name, path, enabled=True, is_builtin=False, canonical_name=None): self.name = name + self.canonical_name = canonical_name or name.lower() self.path = path self.enabled = enabled self.status = '' @@ -37,6 +40,17 @@ class Extension: self.remote = None self.have_info_from_repo = False + @functools.cached_property + def metadata(self): + if os.path.isfile(os.path.join(self.path, "sd_webui_metadata.ini")): + try: + config = configparser.ConfigParser() + config.read(os.path.join(self.path, "sd_webui_metadata.ini")) + return config + except Exception: + errors.report(f"Error reading sd_webui_metadata.ini for extension {self.canonical_name}.", exc_info=True) + return None + def to_dict(self): return {x: getattr(self, x) for x in self.cached_fields} @@ -136,9 +150,6 @@ class Extension: def list_extensions(): extensions.clear() - if not os.path.isdir(extensions_dir): - return - if shared.cmd_opts.disable_all_extensions: print("*** \"--disable-all-extensions\" arg was used, will not load any extensions ***") elif shared.opts.disable_all_extensions == "all": @@ -148,18 +159,69 @@ def list_extensions(): elif shared.opts.disable_all_extensions == "extra": print("*** \"Disable all extensions\" option was set, will only load built-in extensions ***") - extension_paths = [] + extension_dependency_map = {} + + # scan through extensions directory and load metadata for dirname in [extensions_dir, extensions_builtin_dir]: if not os.path.isdir(dirname): - return + continue for extension_dirname in sorted(os.listdir(dirname)): path = os.path.join(dirname, extension_dirname) if not os.path.isdir(path): continue - extension_paths.append((extension_dirname, path, dirname == extensions_builtin_dir)) + canonical_name = extension_dirname + requires = None - for dirname, path, is_builtin in extension_paths: - extension = Extension(name=dirname, path=path, enabled=dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin) + if os.path.isfile(os.path.join(path, "sd_webui_metadata.ini")): + try: + config = configparser.ConfigParser() + config.read(os.path.join(path, "sd_webui_metadata.ini")) + canonical_name = config.get("Extension", "Name", fallback=canonical_name) + requires = config.get("Extension", "Requires", fallback=None) + continue + except Exception: + errors.report(f"Error reading sd_webui_metadata.ini for extension {extension_dirname}. 
" + f"Will load regardless.", exc_info=True) + + canonical_name = canonical_name.lower().strip() + + # check for duplicated canonical names + if canonical_name in extension_dependency_map: + errors.report(f"Duplicate canonical name \"{canonical_name}\" found in extensions " + f"\"{extension_dirname}\" and \"{extension_dependency_map[canonical_name]['dirname']}\". " + f"The current loading extension will be discarded.", exc_info=False) + continue + + # we want to wash the data to lowercase and remove whitespaces just in case + requires = [x.strip() for x in requires.lower().split(',')] if requires else [] + + extension_dependency_map[canonical_name] = { + "dirname": extension_dirname, + "path": path, + "requires": requires, + } + + # check for requirements + for (_, extension_data) in extension_dependency_map.items(): + dirname, path, requires = extension_data['dirname'], extension_data['path'], extension_data['requires'] + requirement_met = True + for req in requires: + if req not in extension_dependency_map: + errors.report(f"Extension \"{dirname}\" requires \"{req}\" which is not installed. " + f"The current loading extension will be discarded.", exc_info=False) + requirement_met = False + break + dep_dirname = extension_dependency_map[req]['dirname'] + if dep_dirname in shared.opts.disabled_extensions: + errors.report(f"Extension \"{dirname}\" requires \"{dep_dirname}\" which is disabled. " + f"The current loading extension will be discarded.", exc_info=False) + requirement_met = False + break + + is_builtin = dirname == extensions_builtin_dir + extension = Extension(name=dirname, path=path, + enabled=dirname not in shared.opts.disabled_extensions and requirement_met, + is_builtin=is_builtin) extensions.append(extension) diff --git a/modules/scripts.py b/modules/scripts.py index 5c6e0226..e92a34a0 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -2,6 +2,7 @@ import os import re import sys import inspect +from graphlib import TopologicalSorter, CycleError from collections import namedtuple from dataclasses import dataclass @@ -314,15 +315,131 @@ ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedi def list_scripts(scriptdirname, extension, *, include_extensions=True): scripts_list = [] + script_dependency_map = {} - basedir = os.path.join(paths.script_path, scriptdirname) - if os.path.exists(basedir): - for filename in sorted(os.listdir(basedir)): - scripts_list.append(ScriptFile(paths.script_path, filename, os.path.join(basedir, filename))) + # build script dependency map + + root_script_basedir = os.path.join(paths.script_path, scriptdirname) + if os.path.exists(root_script_basedir): + for filename in sorted(os.listdir(root_script_basedir)): + script_dependency_map[filename] = { + "extension": None, + "extension_dirname": None, + "script_file": ScriptFile(paths.script_path, filename, os.path.join(root_script_basedir, filename)), + "requires": [], + "load_before": [], + "load_after": [], + } if include_extensions: for ext in extensions.active(): - scripts_list += ext.list_files(scriptdirname, extension) + extension_scripts_list = ext.list_files(scriptdirname, extension) + for extension_script in extension_scripts_list: + # this is built on the assumption that script name is unique. + # I think bad thing is gonna happen if name collide in the current implementation anyway, but we + # will need to refactor here if this assumption is broken later on. 
+ if extension_script.filename in script_dependency_map: + errors.report(f"Duplicate script name \"{extension_script.filename}\" found in extensions " + f"\"{ext.name}\" and \"{script_dependency_map[extension_script.filename]['extension_dirname'] or 'builtin'}\". " + f"The current loading file will be discarded.", exc_info=False) + continue + + relative_path = scriptdirname + "/" + extension_script.filename + + requires = None + load_before = None + load_after = None + + if ext.metadata is not None: + requires = ext.metadata.get(relative_path, "Requires", fallback=None) + load_before = ext.metadata.get(relative_path, "Before", fallback=None) + load_after = ext.metadata.get(relative_path, "After", fallback=None) + + requires = [x.strip() for x in requires.split(',')] if requires else [] + load_after = [x.strip() for x in load_after.split(',')] if load_after else [] + load_before = [x.strip() for x in load_before.split(',')] if load_before else [] + + script_dependency_map[extension_script.filename] = { + "extension": ext.canonical_name, + "extension_dirname": ext.name, + "script_file": extension_script, + "requires": requires, + "load_before": load_before, + "load_after": load_after, + } + + # resolve dependencies + + loaded_extensions = set() + for _, script_data in script_dependency_map.items(): + if script_data['extension'] is not None: + loaded_extensions.add(script_data['extension']) + + for script_filename, script_data in script_dependency_map.items(): + # load before requires inverse dependency + # in this case, append the script name into the load_after list of the specified script + for load_before_script in script_data['load_before']: + if load_before_script.startswith('ext:'): + # if this requires an extension to be loaded before + required_extension = load_before_script[4:] + for _, script_data2 in script_dependency_map.items(): + if script_data2['extension'] == required_extension: + script_data2['load_after'].append(script_filename) + break + else: + # if this requires an individual script to be loaded before + if load_before_script in script_dependency_map: + script_dependency_map[load_before_script]['load_after'].append(script_filename) + + # resolve extension name in load_after lists + for load_after_script in script_data['load_after']: + if load_after_script.startswith('ext:'): + # if this requires an extension to be loaded after + required_extension = load_after_script[4:] + for script_file_name2, script_data2 in script_dependency_map.items(): + if script_data2['extension'] == required_extension: + script_data['load_after'].append(script_file_name2) + + # remove all extension names in load_after lists + script_data['load_after'] = [x for x in script_data['load_after'] if not x.startswith('ext:')] + + # build the DAG + sorter = TopologicalSorter() + for script_filename, script_data in script_dependency_map.items(): + requirement_met = True + for required_script in script_data['requires']: + if required_script.startswith('ext:'): + # if this requires an extension to be installed + required_extension = required_script[4:] + if required_extension not in loaded_extensions: + errors.report(f"Script \"{script_filename}\" requires extension \"{required_extension}\" to " + f"be loaded, but it is not. 
Skipping.", + exc_info=False) + requirement_met = False + break + else: + # if this requires an individual script to be loaded + if required_script not in script_dependency_map: + errors.report(f"Script \"{script_filename}\" requires script \"{required_script}\" to " + f"be loaded, but it is not. Skipping.", + exc_info=False) + requirement_met = False + break + if not requirement_met: + continue + + sorter.add(script_filename, *script_data['load_after']) + + # sort the scripts + try: + ordered_script = sorter.static_order() + except CycleError: + errors.report("Cycle detected in script dependencies. Scripts will load in ascending order.", exc_info=True) + ordered_script = script_dependency_map.keys() + + for script_filename in ordered_script: + script_data = script_dependency_map[script_filename] + scripts_list.append(script_data['script_file']) scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)] @@ -365,15 +482,9 @@ def load_scripts(): elif issubclass(script_class, scripts_postprocessing.ScriptPostprocessing): postprocessing_scripts_data.append(ScriptClassData(script_class, scriptfile.path, scriptfile.basedir, module)) - def orderby(basedir): - # 1st webui, 2nd extensions-builtin, 3rd extensions - priority = {os.path.join(paths.script_path, "extensions-builtin"):1, paths.script_path:0} - for key in priority: - if basedir.startswith(key): - return priority[key] - return 9999 - - for scriptfile in sorted(scripts_list, key=lambda x: [orderby(x.basedir), x]): + # here the scripts_list is already ordered + # processing_script is not considered though + for scriptfile in scripts_list: try: if scriptfile.basedir != paths.script_path: sys.path = [scriptfile.basedir] + sys.path From 0d1924c48be3d02650e87b12a4f53165a8b4a599 Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sat, 11 Nov 2023 04:03:55 -0600 Subject: [PATCH 04/31] populate loaded_extensions from extension list instead --- modules/scripts.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index e92a34a0..7cdf288d 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -371,9 +371,8 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): # resolve dependencies loaded_extensions = set() - for _, script_data in script_dependency_map.items(): - if script_data['extension'] is not None: - loaded_extensions.add(script_data['extension']) + for ext in extensions.active(): + loaded_extensions.add(ext.canonical_name) for script_filename, script_data in script_dependency_map.items(): # load before requires inverse dependency From bc1a450124ab643fc0c3ea7630d875afb4b84b84 Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sat, 11 Nov 2023 04:08:45 -0600 Subject: [PATCH 05/31] reverse the extension load order so builtin extensions load earlier natively --- modules/extensions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/extensions.py b/modules/extensions.py index e317a404..7583a3b0 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -162,7 +162,7 @@ def list_extensions(): extension_dependency_map = {} # scan through extensions directory and load metadata - for dirname in [extensions_dir, extensions_builtin_dir]: + for dirname in [extensions_builtin_dir, extensions_dir]: if not os.path.isdir(dirname): continue From 294f8a514f982248cda1cafda30d35566f3a0321 Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Sat, 11 Nov 2023 23:28:12 
+0900 Subject: [PATCH 06/31] add hyperTile https://github.com/tfernd/HyperTile --- modules/processing.py | 27 ++++++++++++++++++++++++--- modules/shared_options.py | 2 ++ 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index b0e240a4..e2309534 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -799,6 +799,16 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: infotexts = [] output_images = [] + unet_object = p.sd_model.model + vae_model = p.sd_model.first_stage_model + try: + from hyper_tile import split_attention, flush + except (ImportError, ModuleNotFoundError): # pip install git+https://github.com/tfernd/HyperTile@2ef64b2800d007d305755c33550537410310d7df + split_attention = lambda *args, **kwargs: lambda x: x # return a no-op context manager + flush = lambda: None + import random + saved_rng_state = random.getstate() + random.seed(p.seed) # hyper_tile uses random, so we need to seed it with torch.no_grad(), p.sd_model.ema_scope(): with devices.autocast(): @@ -866,15 +876,25 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: shared.state.job = f"Batch {n+1} out of {p.n_iter}" with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast(): - samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) + # get largest tile size available, which is 2^x which is factor of gcd of p.width and p.height + gcd = math.gcd(p.width, p.height) + largest_tile_size_available = 1 + while gcd % (largest_tile_size_available * 2) == 0: + largest_tile_size_available *= 2 + aspect_ratio = p.width / p.height + with split_attention(vae_model, aspect_ratio=aspect_ratio, tile_size=min(largest_tile_size_available, 128), disable=not shared.opts.hypertile_split_vae_attn): + with split_attention(unet_object, aspect_ratio=aspect_ratio, tile_size=min(largest_tile_size_available, 256), swap_size=2, disable=not shared.opts.hypertile_split_unet_attn): + flush() + samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) if getattr(samples_ddim, 'already_decoded', False): x_samples_ddim = samples_ddim else: if opts.sd_vae_decode_method != 'Full': p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method - - x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) + with split_attention(vae_model, aspect_ratio=aspect_ratio, tile_size=min(largest_tile_size_available, 128), disable=not shared.opts.hypertile_split_vae_attn): + flush() + x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) x_samples_ddim = torch.stack(x_samples_ddim).float() x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) @@ -980,6 +1000,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if opts.grid_save: images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(use_main_prompt=True), short_filename=not opts.grid_extended_filename, p=p, grid=True) + random.setstate(saved_rng_state) if not p.disable_extra_networks and p.extra_network_data: extra_networks.deactivate(p, p.extra_network_data) diff --git a/modules/shared_options.py b/modules/shared_options.py index d40db530..d9650265 
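One caveat in the hunk above: the except-branch fallback, split_attention = lambda *args, **kwargs: lambda x: x, returns a plain function rather than a context manager, so the later "with split_attention(...)" blocks would fail at runtime if hyper_tile were actually missing. A working no-op fallback, sketched with stdlib contextlib (later patches sidestep the problem entirely by vendoring the code as modules/hypertile.py):

import contextlib

try:
    from hyper_tile import split_attention, flush
except ImportError:  # ModuleNotFoundError is a subclass of ImportError
    def split_attention(*args, **kwargs):
        return contextlib.nullcontext()   # actually usable in a with-statement
    def flush():
        return None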
100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -200,6 +200,8 @@ options_templates.update(options_section(('optimizations', "Optimizations"), { "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"), "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"), "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"), + "hypertile_split_unet_attn" : OptionInfo(False, "Split attention in Unet with HyperTile").link("Github", "https://github.com/tfernd/HyperTile").info("improves performance; changes behavior, but deterministic"), + "hypertile_split_vae_attn": OptionInfo(False, "Split attention in VAE with HyperTile").link("Github", "https://github.com/tfernd/HyperTile").info("improves performance; changes behavior, but deterministic"), })) options_templates.update(options_section(('compatibility', "Compatibility"), { From 7af576e745c79a9539e40bc158e695192ae79f25 Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sat, 11 Nov 2023 10:46:47 -0600 Subject: [PATCH 07/31] remove the assumption of same name --- modules/scripts.py | 81 +++++++++++++++++----------------------------- 1 file changed, 30 insertions(+), 51 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index 7cdf288d..7ad22245 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -335,15 +335,9 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): for ext in extensions.active(): extension_scripts_list = ext.list_files(scriptdirname, extension) for extension_script in extension_scripts_list: - # this is built on the assumption that script name is unique. - # I think bad thing is gonna happen if name collide in the current implementation anyway, but we - # will need to refactor here if this assumption is broken later on. - if extension_script.filename in script_dependency_map: - errors.report(f"Duplicate script name \"{extension_script.filename}\" found in extensions " - f"\"{ext.name}\" and \"{script_dependency_map[extension_script.filename]['extension_dirname'] or 'builtin'}\". 
" - f"The current loading file will be discarded.", exc_info=False) - continue - + script_canonical_name = ext.canonical_name + "/" + extension_script.filename + if ext.is_builtin: + script_canonical_name = "builtin/" + script_canonical_name relative_path = scriptdirname + "/" + extension_script.filename requires = None @@ -359,7 +353,7 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): load_after = [x.strip() for x in load_after.split(',')] if load_after else [] load_before = [x.strip() for x in load_before.split(',')] if load_before else [] - script_dependency_map[extension_script.filename] = { + script_dependency_map[script_canonical_name] = { "extension": ext.canonical_name, "extension_dirname": ext.name, "script_file": extension_script, @@ -374,60 +368,45 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): for ext in extensions.active(): loaded_extensions.add(ext.canonical_name) - for script_filename, script_data in script_dependency_map.items(): + for script_canonical_name, script_data in script_dependency_map.items(): # load before requires inverse dependency # in this case, append the script name into the load_after list of the specified script for load_before_script in script_data['load_before']: - if load_before_script.startswith('ext:'): - # if this requires an extension to be loaded before - required_extension = load_before_script[4:] + # if this requires an individual script to be loaded before + if load_before_script in script_dependency_map: + script_dependency_map[load_before_script]['load_after'].append(script_canonical_name) + elif load_before_script in loaded_extensions: for _, script_data2 in script_dependency_map.items(): - if script_data2['extension'] == required_extension: - script_data2['load_after'].append(script_filename) + if script_data2['extension'] == load_before_script: + script_data2['load_after'].append(script_canonical_name) break - else: - # if this requires an individual script to be loaded before - if load_before_script in script_dependency_map: - script_dependency_map[load_before_script]['load_after'].append(script_filename) # resolve extension name in load_after lists - for load_after_script in script_data['load_after']: - if load_after_script.startswith('ext:'): - # if this requires an extension to be loaded after - required_extension = load_after_script[4:] - for script_file_name2, script_data2 in script_dependency_map.items(): - if script_data2['extension'] == required_extension: - script_data['load_after'].append(script_file_name2) - - # remove all extension names in load_after lists - script_data['load_after'] = [x for x in script_data['load_after'] if not x.startswith('ext:')] + for load_after_script in list(script_data['load_after']): + if load_after_script not in script_dependency_map and load_after_script in loaded_extensions: + script_data['load_after'].remove(load_after_script) + for script_canonical_name2, script_data2 in script_dependency_map.items(): + if script_data2['extension'] == load_after_script: + script_data['load_after'].remove(script_canonical_name2) + break # build the DAG sorter = TopologicalSorter() - for script_filename, script_data in script_dependency_map.items(): + for script_canonical_name, script_data in script_dependency_map.items(): requirement_met = True for required_script in script_data['requires']: - if required_script.startswith('ext:'): - # if this requires an extension to be installed - required_extension = required_script[4:] - if required_extension not in 
loaded_extensions: - errors.report(f"Script \"{script_filename}\" requires extension \"{required_extension}\" to " - f"be loaded, but it is not. Skipping.", - exc_info=False) - requirement_met = False - break - else: - # if this requires an individual script to be loaded - if required_script not in script_dependency_map: - errors.report(f"Script \"{script_filename}\" requires script \"{required_script}\" to " - f"be loaded, but it is not. Skipping.", - exc_info=False) - requirement_met = False - break + # if this requires an individual script to be loaded + if required_script not in script_dependency_map and required_script not in loaded_extensions: + errors.report(f"Script \"{script_canonical_name}\" " + f"requires \"{required_script}\" to " + f"be loaded, but it is not. Skipping.", + exc_info=False) + requirement_met = False + break if not requirement_met: continue - sorter.add(script_filename, *script_data['load_after']) + sorter.add(script_canonical_name, *script_data['load_after']) # sort the scripts try: @@ -436,8 +415,8 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): errors.report("Cycle detected in script dependencies. Scripts will load in ascending order.", exc_info=True) ordered_script = script_dependency_map.keys() - for script_filename in ordered_script: - script_data = script_dependency_map[script_filename] + for script_canonical_name in ordered_script: + script_data = script_dependency_map[script_canonical_name] scripts_list.append(script_data['script_file']) scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)] From 520e52f846892cc2b207b738b4180fa863c7b38f Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sat, 11 Nov 2023 10:58:26 -0600 Subject: [PATCH 08/31] allow comma and whitespace as separator --- modules/extensions.py | 9 ++++++--- modules/scripts.py | 6 +++--- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/modules/extensions.py b/modules/extensions.py index 7583a3b0..795af996 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -2,6 +2,7 @@ import configparser import functools import os import threading +import re from modules import shared, errors, cache, scripts from modules.gitpython_hack import Repo @@ -48,7 +49,8 @@ class Extension: config.read(os.path.join(self.path, "sd_webui_metadata.ini")) return config except Exception: - errors.report(f"Error reading sd_webui_metadata.ini for extension {self.canonical_name}.", exc_info=True) + errors.report(f"Error reading sd_webui_metadata.ini for extension {self.canonical_name}.", + exc_info=True) return None def to_dict(self): @@ -70,6 +72,7 @@ class Extension: self.do_read_info_from_repo() return self.to_dict() + try: d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo) self.from_dict(d) @@ -194,8 +197,8 @@ def list_extensions(): f"The current loading extension will be discarded.", exc_info=False) continue - # we want to wash the data to lowercase and remove whitespaces just in case - requires = [x.strip() for x in requires.lower().split(',')] if requires else [] + # both "," and " " are accepted as separator + requires = list(filter(None, re.split(r"[,\s]+", requires.lower()))) if requires else [] extension_dependency_map[canonical_name] = { "dirname": extension_dirname, diff --git a/modules/scripts.py b/modules/scripts.py index 7ad22245..5dd0555d 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -349,9 +349,9 @@ def 
list_scripts(scriptdirname, extension, *, include_extensions=True): load_before = ext.metadata.get(relative_path, "Before", fallback=None) load_after = ext.metadata.get(relative_path, "After", fallback=None) - requires = [x.strip() for x in requires.split(',')] if requires else [] - load_after = [x.strip() for x in load_after.split(',')] if load_after else [] - load_before = [x.strip() for x in load_before.split(',')] if load_before else [] + requires = list(filter(None, re.split(r"[,\s]+", requires.lower()))) if requires else [] + load_after = list(filter(None, re.split(r"[,\s]+", load_after.lower()))) if load_after else [] + load_before = list(filter(None, re.split(r"[,\s]+", load_before.lower()))) if load_before else [] script_dependency_map[script_canonical_name] = { "extension": ext.canonical_name, From 48d6102b3105bb0179c8eab14ec7930945aca326 Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sat, 11 Nov 2023 11:17:26 -0600 Subject: [PATCH 09/31] fix --- modules/extensions.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/extensions.py b/modules/extensions.py index 795af996..5536db3e 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -183,7 +183,6 @@ def list_extensions(): config.read(os.path.join(path, "sd_webui_metadata.ini")) canonical_name = config.get("Extension", "Name", fallback=canonical_name) requires = config.get("Extension", "Requires", fallback=None) - continue except Exception: errors.report(f"Error reading sd_webui_metadata.ini for extension {extension_dirname}. " f"Will load regardless.", exc_info=True) From 3bb32befe9523a6acefbab7fe099f91660f41ea9 Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sat, 11 Nov 2023 11:58:19 -0600 Subject: [PATCH 10/31] bug fix --- modules/scripts.py | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index 5dd0555d..b1f4504a 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -322,6 +322,9 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): root_script_basedir = os.path.join(paths.script_path, scriptdirname) if os.path.exists(root_script_basedir): for filename in sorted(os.listdir(root_script_basedir)): + if not os.path.isfile(os.path.join(root_script_basedir, filename)): + continue + script_dependency_map[filename] = { "extension": None, "extension_dirname": None, @@ -335,19 +338,27 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): for ext in extensions.active(): extension_scripts_list = ext.list_files(scriptdirname, extension) for extension_script in extension_scripts_list: + if not os.path.isfile(extension_script.path): + continue + script_canonical_name = ext.canonical_name + "/" + extension_script.filename if ext.is_builtin: script_canonical_name = "builtin/" + script_canonical_name relative_path = scriptdirname + "/" + extension_script.filename - requires = None - load_before = None - load_after = None + requires = '' + load_before = '' + load_after = '' if ext.metadata is not None: - requires = ext.metadata.get(relative_path, "Requires", fallback=None) - load_before = ext.metadata.get(relative_path, "Before", fallback=None) - load_after = ext.metadata.get(relative_path, "After", fallback=None) + requires = ext.metadata.get(relative_path, "Requires", fallback='') + load_before = ext.metadata.get(relative_path, "Before", fallback='') + load_after = ext.metadata.get(relative_path, "After", fallback='') + + # propagate directory level metadata + requires = requires + ',' + 
ext.metadata.get(scriptdirname, "Requires", fallback='') + load_before = load_before + ',' + ext.metadata.get(scriptdirname, "Before", fallback='') + load_after = load_after + ',' + ext.metadata.get(scriptdirname, "After", fallback='') requires = list(filter(None, re.split(r"[,\s]+", requires.lower()))) if requires else [] load_after = list(filter(None, re.split(r"[,\s]+", load_after.lower()))) if load_after else [] @@ -387,7 +398,7 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): script_data['load_after'].remove(load_after_script) for script_canonical_name2, script_data2 in script_dependency_map.items(): if script_data2['extension'] == load_after_script: - script_data['load_after'].remove(script_canonical_name2) + script_data['load_after'].append(script_canonical_name2) break # build the DAG From b29fc6d4de8812b25c520a46676cda13c3fe64ca Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Sat, 11 Nov 2023 23:43:13 +0900 Subject: [PATCH 11/31] Implement Hypertile Co-Authored-By: Kieran Hunt --- modules/hypertile.py | 333 ++++++++++++++++++++++++++++++++++++++++++ modules/processing.py | 65 ++++----- 2 files changed, 358 insertions(+), 40 deletions(-) create mode 100644 modules/hypertile.py diff --git a/modules/hypertile.py b/modules/hypertile.py new file mode 100644 index 00000000..ab1c74c0 --- /dev/null +++ b/modules/hypertile.py @@ -0,0 +1,333 @@ +""" +Hypertile module for splitting attention layers in SD-1.5 U-Net and SD-1.5 VAE +Warn : The patch works well only if the input image has a width and height that are multiples of 128 +Author : @tfernd Github : https://github.com/tfernd/HyperTile +""" + +from __future__ import annotations +from typing import Callable +from typing_extensions import Literal + +import logging +from functools import wraps, cache +from contextlib import contextmanager + +import math +import torch.nn as nn +import random + +from einops import rearrange + +# TODO add SD-XL layers +DEPTH_LAYERS = { + 0: [ + # SD 1.5 U-Net (diffusers) + "down_blocks.0.attentions.0.transformer_blocks.0.attn1", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "input_blocks.1.1.transformer_blocks.0.attn1", + "input_blocks.2.1.transformer_blocks.0.attn1", + "output_blocks.9.1.transformer_blocks.0.attn1", + "output_blocks.10.1.transformer_blocks.0.attn1", + "output_blocks.11.1.transformer_blocks.0.attn1", + # SD 1.5 VAE + "decoder.mid_block.attentions.0", + ], + 1: [ + # SD 1.5 U-Net (diffusers) + "down_blocks.1.attentions.0.transformer_blocks.0.attn1", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "input_blocks.4.1.transformer_blocks.0.attn1", + "input_blocks.5.1.transformer_blocks.0.attn1", + "output_blocks.6.1.transformer_blocks.0.attn1", + "output_blocks.7.1.transformer_blocks.0.attn1", + "output_blocks.8.1.transformer_blocks.0.attn1", + ], + 2: [ + # SD 1.5 U-Net (diffusers) + "down_blocks.2.attentions.0.transformer_blocks.0.attn1", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1", + 
"up_blocks.1.attentions.2.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "input_blocks.7.1.transformer_blocks.0.attn1", + "input_blocks.8.1.transformer_blocks.0.attn1", + "output_blocks.3.1.transformer_blocks.0.attn1", + "output_blocks.4.1.transformer_blocks.0.attn1", + "output_blocks.5.1.transformer_blocks.0.attn1", + ], + 3: [ + # SD 1.5 U-Net (diffusers) + "mid_block.attentions.0.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "middle_block.1.transformer_blocks.0.attn1", + ], +} +# XL layers, thanks for GitHub@gel-crabs for the help +DEPTH_LAYERS_XL = { + 0: [ + # SD 1.5 U-Net (diffusers) + "down_blocks.0.attentions.0.transformer_blocks.0.attn1", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "input_blocks.4.1.transformer_blocks.0.attn1", + "input_blocks.5.1.transformer_blocks.0.attn1", + "output_blocks.3.1.transformer_blocks.0.attn1", + "output_blocks.4.1.transformer_blocks.0.attn1", + "output_blocks.5.1.transformer_blocks.0.attn1", + # SD 1.5 VAE + "decoder.mid_block.attentions.0", + "decoder.mid.attn_1", + ], + 1: [ + # SD 1.5 U-Net (diffusers) + #"down_blocks.1.attentions.0.transformer_blocks.0.attn1", + #"down_blocks.1.attentions.1.transformer_blocks.0.attn1", + #"up_blocks.2.attentions.0.transformer_blocks.0.attn1", + #"up_blocks.2.attentions.1.transformer_blocks.0.attn1", + #"up_blocks.2.attentions.2.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "input_blocks.4.1.transformer_blocks.1.attn1", + "input_blocks.5.1.transformer_blocks.1.attn1", + "output_blocks.3.1.transformer_blocks.1.attn1", + "output_blocks.4.1.transformer_blocks.1.attn1", + "output_blocks.5.1.transformer_blocks.1.attn1", + "input_blocks.7.1.transformer_blocks.0.attn1", + "input_blocks.8.1.transformer_blocks.0.attn1", + "output_blocks.0.1.transformer_blocks.0.attn1", + "output_blocks.1.1.transformer_blocks.0.attn1", + "output_blocks.2.1.transformer_blocks.0.attn1", + "input_blocks.7.1.transformer_blocks.1.attn1", + "input_blocks.8.1.transformer_blocks.1.attn1", + "output_blocks.0.1.transformer_blocks.1.attn1", + "output_blocks.1.1.transformer_blocks.1.attn1", + "output_blocks.2.1.transformer_blocks.1.attn1", + "input_blocks.7.1.transformer_blocks.2.attn1", + "input_blocks.8.1.transformer_blocks.2.attn1", + "output_blocks.0.1.transformer_blocks.2.attn1", + "output_blocks.1.1.transformer_blocks.2.attn1", + "output_blocks.2.1.transformer_blocks.2.attn1", + "input_blocks.7.1.transformer_blocks.3.attn1", + "input_blocks.8.1.transformer_blocks.3.attn1", + "output_blocks.0.1.transformer_blocks.3.attn1", + "output_blocks.1.1.transformer_blocks.3.attn1", + "output_blocks.2.1.transformer_blocks.3.attn1", + "input_blocks.7.1.transformer_blocks.4.attn1", + "input_blocks.8.1.transformer_blocks.4.attn1", + "output_blocks.0.1.transformer_blocks.4.attn1", + "output_blocks.1.1.transformer_blocks.4.attn1", + "output_blocks.2.1.transformer_blocks.4.attn1", + "input_blocks.7.1.transformer_blocks.5.attn1", + "input_blocks.8.1.transformer_blocks.5.attn1", + "output_blocks.0.1.transformer_blocks.5.attn1", + "output_blocks.1.1.transformer_blocks.5.attn1", + "output_blocks.2.1.transformer_blocks.5.attn1", + "input_blocks.7.1.transformer_blocks.6.attn1", + "input_blocks.8.1.transformer_blocks.6.attn1", + "output_blocks.0.1.transformer_blocks.6.attn1", + "output_blocks.1.1.transformer_blocks.6.attn1", + 
"output_blocks.2.1.transformer_blocks.6.attn1", + "input_blocks.7.1.transformer_blocks.7.attn1", + "input_blocks.8.1.transformer_blocks.7.attn1", + "output_blocks.0.1.transformer_blocks.7.attn1", + "output_blocks.1.1.transformer_blocks.7.attn1", + "output_blocks.2.1.transformer_blocks.7.attn1", + "input_blocks.7.1.transformer_blocks.8.attn1", + "input_blocks.8.1.transformer_blocks.8.attn1", + "output_blocks.0.1.transformer_blocks.8.attn1", + "output_blocks.1.1.transformer_blocks.8.attn1", + "output_blocks.2.1.transformer_blocks.8.attn1", + "input_blocks.7.1.transformer_blocks.9.attn1", + "input_blocks.8.1.transformer_blocks.9.attn1", + "output_blocks.0.1.transformer_blocks.9.attn1", + "output_blocks.1.1.transformer_blocks.9.attn1", + "output_blocks.2.1.transformer_blocks.9.attn1", + ], + 2: [ + # SD 1.5 U-Net (diffusers) + "mid_block.attentions.0.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "middle_block.1.transformer_blocks.0.attn1", + "middle_block.1.transformer_blocks.1.attn1", + "middle_block.1.transformer_blocks.2.attn1", + "middle_block.1.transformer_blocks.3.attn1", + "middle_block.1.transformer_blocks.4.attn1", + "middle_block.1.transformer_blocks.5.attn1", + "middle_block.1.transformer_blocks.6.attn1", + "middle_block.1.transformer_blocks.7.attn1", + "middle_block.1.transformer_blocks.8.attn1", + "middle_block.1.transformer_blocks.9.attn1", + ], +} + + +RNG_INSTANCE = random.Random() + +def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int: + """ + Returns a random divisor of value that + x * min_value <= value + if max_options is 1, the behavior is deterministic + """ + min_value = min(min_value, value) + + # All big divisors of value (inclusive) + divisors = [i for i in range(min_value, value + 1) if value % i == 0] # divisors in small -> big order + + ns = [value // i for i in divisors[:max_options]] # has at least 1 element # big -> small order + + idx = RNG_INSTANCE.randint(0, len(ns) - 1) + + return ns[idx] + +def set_hypertile_seed(seed: int) -> None: + RNG_INSTANCE.seed(seed) + +def largest_tile_size_available(width:int, height:int) -> int: + """ + Calculates the largest tile size available for a given width and height + Tile size is always a power of 2 + """ + gcd = math.gcd(width, height) + largest_tile_size_available = 1 + while gcd % (largest_tile_size_available * 2) == 0: + largest_tile_size_available *= 2 + return largest_tile_size_available + +def iterative_closest_divisors(hw:int, aspect_ratio:float) -> tuple[int, int]: + """ + Finds h and w such that h*w = hw and h/w = aspect_ratio + We check all possible divisors of hw and return the closest to the aspect ratio + """ + divisors = [i for i in range(2, hw + 1) if hw % i == 0] # all divisors of hw + pairs = [(i, hw // i) for i in divisors] # all pairs of divisors of hw + ratios = [w/h for h, w in pairs] # all ratios of pairs of divisors of hw + closest_ratio = min(ratios, key=lambda x: abs(x - aspect_ratio)) # closest ratio to aspect_ratio + closest_pair = pairs[ratios.index(closest_ratio)] # closest pair of divisors to aspect_ratio + return closest_pair + +@cache +def find_hw_candidates(hw:int, aspect_ratio:float) -> tuple[int, int]: + """ + Finds h and w such that h*w = hw and h/w = aspect_ratio + """ + h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio)) + # find h and w such that h*w = hw and h/w = aspect_ratio + if h * w != hw: + w_candidate = hw / h + # check if w is an integer + if not w_candidate.is_integer(): + h_candidate = hw / w + # check if h 
is an integer + if not h_candidate.is_integer(): + return iterative_closest_divisors(hw, aspect_ratio) + else: + h = int(h_candidate) + else: + w = int(w_candidate) + return h, w + +@contextmanager +def split_attention( + layer: nn.Module, + /, + aspect_ratio: float, # width/height + tile_size: int = 128, # 128 for VAE + swap_size: int = 1, # 1 for VAE + *, + disable: bool = False, + max_depth: Literal[0, 1, 2, 3] = 0, # ! Try 0 or 1 + scale_depth: bool = True, # scale the tile-size depending on the depth + is_sdxl: bool = False, # is the model SD-XL +): + # Hijacks AttnBlock from ldm and Attention from diffusers + + if disable: + logging.info(f"Attention for {layer.__class__.__qualname__} not splitted") + yield + return + + latent_tile_size = max(128, tile_size) // 8 + + def self_attn_forward(forward: Callable, depth: int, layer_name: str, module: nn.Module) -> Callable: + @wraps(forward) + def wrapper(*args, **kwargs): + x = args[0] + + # VAE + if x.ndim == 4: + b, c, h, w = x.shape + + nh = random_divisor(h, latent_tile_size, swap_size) + nw = random_divisor(w, latent_tile_size, swap_size) + + if nh * nw > 1: + x = rearrange(x, "b c (nh h) (nw w) -> (b nh nw) c h w", nh=nh, nw=nw) # split into nh * nw tiles + + out = forward(x, *args[1:], **kwargs) + + if nh * nw > 1: + out = rearrange(out, "(b nh nw) c h w -> b c (nh h) (nw w)", nh=nh, nw=nw) + + # U-Net + else: + hw: int = x.size(1) + h, w = find_hw_candidates(hw, aspect_ratio) + assert h * w == hw, f"Invalid aspect ratio {aspect_ratio} for input of shape {x.shape}, hw={hw}, h={h}, w={w}" + + factor = 2**depth if scale_depth else 1 + nh = random_divisor(h, latent_tile_size * factor, swap_size) + nw = random_divisor(w, latent_tile_size * factor, swap_size) + + module._split_sizes_hypertile.append((nh, nw)) # type: ignore + + if nh * nw > 1: + x = rearrange(x, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw) + + out = forward(x, *args[1:], **kwargs) + + if nh * nw > 1: + out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw) + out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw) + + return out + + return wrapper + + # Handle hijacking the forward method and recovering afterwards + try: + if is_sdxl: + layers = DEPTH_LAYERS_XL + else: + layers = DEPTH_LAYERS + for depth in range(max_depth + 1): + for layer_name, module in layer.named_modules(): + if any(layer_name.endswith(try_name) for try_name in layers[depth]): + # print input shape for debugging + logging.debug(f"HyperTile hijacking attention layer at depth {depth}: {layer_name}") + # hijack + module._original_forward_hypertile = module.forward + module.forward = self_attn_forward(module.forward, depth, layer_name, module) + module._split_sizes_hypertile = [] + yield + finally: + for layer_name, module in layer.named_modules(): + # remove hijack + if hasattr(module, "_original_forward_hypertile"): + if module._split_sizes_hypertile: + logging.debug(f"layer {layer_name} splitted with ({module._split_sizes_hypertile})") + # recover + module.forward = module._original_forward_hypertile + del module._original_forward_hypertile + del module._split_sizes_hypertile diff --git a/modules/processing.py b/modules/processing.py index e2309534..e19a09a3 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -24,6 +24,7 @@ from modules.shared import opts, cmd_opts, state import modules.shared as shared import modules.paths as paths import modules.face_restoration +from modules.hypertile import split_attention, 
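Stripped of the tiling math, split_attention() above is the classic hijack-and-restore pattern: swap a module's forward inside a context manager, then put it back in the finally block so an exception in the body cannot leave the model patched. A toy, self-contained version of just that pattern (hijack_forward and the scaling stand-in are invented for illustration, not part of the patch):

from contextlib import contextmanager

import torch
import torch.nn as nn

@contextmanager
def hijack_forward(module: nn.Module, scale: float):
    original = module.forward
    def wrapped(x):
        return original(x) * scale    # stand-in for the tile/rearrange logic
    module.forward = wrapped          # instance attribute shadows the class method
    try:
        yield
    finally:
        module.forward = original     # restored even if the body raises

layer = nn.Identity()
with hijack_forward(layer, 2.0):
    print(layer(torch.ones(1)))       # tensor([2.])
print(layer(torch.ones(1)))           # tensor([1.]), original forward is back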
set_hypertile_seed, largest_tile_size_available import modules.images as images import modules.styles import modules.sd_models as sd_models @@ -799,17 +800,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: infotexts = [] output_images = [] - unet_object = p.sd_model.model - vae_model = p.sd_model.first_stage_model - try: - from hyper_tile import split_attention, flush - except (ImportError, ModuleNotFoundError): # pip install git+https://github.com/tfernd/HyperTile@2ef64b2800d007d305755c33550537410310d7df - split_attention = lambda *args, **kwargs: lambda x: x # return a no-op context manager - flush = lambda: None - import random - saved_rng_state = random.getstate() - random.seed(p.seed) # hyper_tile uses random, so we need to seed it - with torch.no_grad(), p.sd_model.ema_scope(): with devices.autocast(): p.init(p.all_prompts, p.all_seeds, p.all_subseeds) @@ -871,29 +861,20 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: p.comment(comment) p.extra_generation_params.update(model_hijack.extra_generation_params) - + set_hypertile_seed(p.seed) + # add batch size + hypertile status to information to reproduce the run if p.n_iter > 1: shared.state.job = f"Batch {n+1} out of {p.n_iter}" with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast(): - # get largest tile size available, which is 2^x which is factor of gcd of p.width and p.height - gcd = math.gcd(p.width, p.height) - largest_tile_size_available = 1 - while gcd % (largest_tile_size_available * 2) == 0: - largest_tile_size_available *= 2 - aspect_ratio = p.width / p.height - with split_attention(vae_model, aspect_ratio=aspect_ratio, tile_size=min(largest_tile_size_available, 128), disable=not shared.opts.hypertile_split_vae_attn): - with split_attention(unet_object, aspect_ratio=aspect_ratio, tile_size=min(largest_tile_size_available, 256), swap_size=2, disable=not shared.opts.hypertile_split_unet_attn): - flush() - samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) + samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) if getattr(samples_ddim, 'already_decoded', False): x_samples_ddim = samples_ddim else: if opts.sd_vae_decode_method != 'Full': p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method - with split_attention(vae_model, aspect_ratio=aspect_ratio, tile_size=min(largest_tile_size_available, 128), disable=not shared.opts.hypertile_split_vae_attn): - flush() + with split_attention(p.sd_model.first_stage_model, aspect_ratio = p.width / p.height, tile_size=min(largest_tile_size_available(p.width, p.height), 128), disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) x_samples_ddim = torch.stack(x_samples_ddim).float() @@ -1000,7 +981,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if opts.grid_save: images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(use_main_prompt=True), short_filename=not opts.grid_extended_filename, p=p, grid=True) - random.setstate(saved_rng_state) if not p.disable_extra_networks and p.extra_network_data: extra_networks.deactivate(p, p.extra_network_data) @@ -1161,24 
+1141,25 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) - + aspect_ratio = self.width / self.height x = self.rng.next() - samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) + tile_size = largest_tile_size_available(self.width, self.height) + with split_attention(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 128), swap_size=1, disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + with split_attention(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 256), swap_size=2, disable=not shared.opts.hypertile_split_unet_attn, is_sdxl=shared.sd_model.is_sdxl): + devices.torch_gc() + samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) del x - if not self.enable_hr: return samples if self.latent_scale_mode is None: - decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32) + with split_attention(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 256), swap_size=1, disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32) else: decoded_samples = None with sd_models.SkipWritingToConfig(): sd_models.reload_model_weights(info=self.hr_checkpoint_info) - - devices.torch_gc() - return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts) def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts): @@ -1186,7 +1167,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): return samples self.is_hr_pass = True - target_width = self.hr_upscale_to_x target_height = self.hr_upscale_to_y @@ -1264,18 +1244,19 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.scripts is not None: self.scripts.before_hr(self) - - samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) + tile_size = largest_tile_size_available(target_width, target_height) + with split_attention(self.sd_model.first_stage_model, aspect_ratio=target_width / target_height, tile_size=min(tile_size, 256), swap_size=1, disable=not opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + with split_attention(self.sd_model.model, aspect_ratio=target_width / target_height, tile_size=min(tile_size, 256), swap_size=3, max_depth=1,scale_depth=True, disable=not opts.hypertile_split_unet_attn, is_sdxl=shared.sd_model.is_sdxl): + samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio()) self.sampler = None devices.torch_gc() - - decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True) + with split_attention(self.sd_model.first_stage_model, 
aspect_ratio=target_width / target_height, tile_size=min(tile_size, 256), swap_size=1, disable=not opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True) self.is_hr_pass = False - return decoded_samples def close(self): @@ -1550,8 +1531,12 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): if self.initial_noise_multiplier != 1.0: self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier x *= self.initial_noise_multiplier - - samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) + aspect_ratio = self.width / self.height + tile_size = largest_tile_size_available(self.width, self.height) + with split_attention(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 128), swap_size=1, disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + with split_attention(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 256), swap_size=2, disable=not shared.opts.hypertile_split_unet_attn, is_sdxl=shared.sd_model.is_sdxl): + devices.torch_gc() + samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) if self.mask is not None: samples = samples * self.nmask + self.init_latent * self.mask From af45872fdb8a66ffd6a405d99120e0bacbb4a170 Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Wed, 15 Nov 2023 15:15:14 +0900 Subject: [PATCH 12/31] copy LDM VAE key from XL --- modules/hypertile.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/hypertile.py b/modules/hypertile.py index ab1c74c0..32d8604c 100644 --- a/modules/hypertile.py +++ b/modules/hypertile.py @@ -35,6 +35,7 @@ DEPTH_LAYERS = { "output_blocks.11.1.transformer_blocks.0.attn1", # SD 1.5 VAE "decoder.mid_block.attentions.0", + "decoder.mid.attn_1", ], 1: [ # SD 1.5 U-Net (diffusers) From bcfaf3979a9f93e37c418b58c75b02d9570b4354 Mon Sep 17 00:00:00 2001 From: AngelBottomless Date: Thu, 16 Nov 2023 18:43:16 +0900 Subject: [PATCH 13/31] convert/add hypertile options --- modules/hypertile.py | 36 ++++++++++++++++++++++++++++++++++++ modules/processing.py | 21 +++++++++++---------- modules/shared_options.py | 6 ++++++ 3 files changed, 53 insertions(+), 10 deletions(-) diff --git a/modules/hypertile.py b/modules/hypertile.py index 32d8604c..fee24a8c 100644 --- a/modules/hypertile.py +++ b/modules/hypertile.py @@ -332,3 +332,39 @@ def split_attention( module.forward = module._original_forward_hypertile del module._original_forward_hypertile del module._split_sizes_hypertile + +def hypertile_context_vae(model:nn.Module, aspect_ratio:float, tile_size:int, opts): + """ + Returns context manager for VAE + """ + enabled = not opts.hypertile_split_vae_attn + swap_size = opts.hypertile_swap_size_vae + max_depth = opts.hypertile_max_depth_vae + tile_size_max = opts.hypertile_max_tile_vae + return split_attention( + model, + aspect_ratio=aspect_ratio, + tile_size=min(tile_size, tile_size_max), + swap_size=swap_size, + disable=not enabled, + max_depth=max_depth, + is_sdxl=False, + ) + +def hypertile_context_unet(model:nn.Module, aspect_ratio:float, tile_size:int, opts, is_sdxl:bool): + """ + Returns context manager for U-Net + """ + enabled = not opts.hypertile_split_unet_attn + swap_size = 
opts.hypertile_swap_size_unet + max_depth = opts.hypertile_max_depth_unet + tile_size_max = opts.hypertile_max_tile_unet + return split_attention( + model, + aspect_ratio=aspect_ratio, + tile_size=min(tile_size, tile_size_max), + swap_size=swap_size, + disable=not enabled, + max_depth=max_depth, + is_sdxl=is_sdxl, + ) \ No newline at end of file diff --git a/modules/processing.py b/modules/processing.py index e19a09a3..c622ff33 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -24,7 +24,7 @@ from modules.shared import opts, cmd_opts, state import modules.shared as shared import modules.paths as paths import modules.face_restoration -from modules.hypertile import split_attention, set_hypertile_seed, largest_tile_size_available +from modules.hypertile import set_hypertile_seed, largest_tile_size_available, hypertile_context_unet, hypertile_context_vae import modules.images as images import modules.styles import modules.sd_models as sd_models @@ -874,7 +874,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: else: if opts.sd_vae_decode_method != 'Full': p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method - with split_attention(p.sd_model.first_stage_model, aspect_ratio = p.width / p.height, tile_size=min(largest_tile_size_available(p.width, p.height), 128), disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + with hypertile_context_unet(p.sd_model.first_stage_model, aspect_ratio=p.width / p.height, tile_size=largest_tile_size_available(p.width, p.height), is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) x_samples_ddim = torch.stack(x_samples_ddim).float() @@ -1144,8 +1144,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): aspect_ratio = self.width / self.height x = self.rng.next() tile_size = largest_tile_size_available(self.width, self.height) - with split_attention(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 128), swap_size=1, disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): - with split_attention(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 256), swap_size=2, disable=not shared.opts.hypertile_split_unet_attn, is_sdxl=shared.sd_model.is_sdxl): + with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): + with hypertile_context_unet(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): devices.torch_gc() samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) del x @@ -1153,7 +1153,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): return samples if self.latent_scale_mode is None: - with split_attention(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 256), swap_size=1, disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32) else: decoded_samples = None @@ -1245,15 +1245,16 @@ class 
StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.scripts is not None: self.scripts.before_hr(self) tile_size = largest_tile_size_available(target_width, target_height) - with split_attention(self.sd_model.first_stage_model, aspect_ratio=target_width / target_height, tile_size=min(tile_size, 256), swap_size=1, disable=not opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): - with split_attention(self.sd_model.model, aspect_ratio=target_width / target_height, tile_size=min(tile_size, 256), swap_size=3, max_depth=1,scale_depth=True, disable=not opts.hypertile_split_unet_attn, is_sdxl=shared.sd_model.is_sdxl): + aspect_ratio = self.width / self.height + with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): + with hypertile_context_unet(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio()) self.sampler = None devices.torch_gc() - with split_attention(self.sd_model.first_stage_model, aspect_ratio=target_width / target_height, tile_size=min(tile_size, 256), swap_size=1, disable=not opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True) self.is_hr_pass = False @@ -1533,8 +1534,8 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): x *= self.initial_noise_multiplier aspect_ratio = self.width / self.height tile_size = largest_tile_size_available(self.width, self.height) - with split_attention(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 128), swap_size=1, disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): - with split_attention(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 256), swap_size=2, disable=not shared.opts.hypertile_split_unet_attn, is_sdxl=shared.sd_model.is_sdxl): + with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): + with hypertile_context_unet(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): devices.torch_gc() samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) diff --git a/modules/shared_options.py b/modules/shared_options.py index d9650265..28a48906 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -202,6 +202,12 @@ options_templates.update(options_section(('optimizations', "Optimizations"), { "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond commandline argument"), "hypertile_split_unet_attn" : OptionInfo(False, "Split attention in Unet with HyperTile").link("Github", "https://github.com/tfernd/HyperTile").info("improves performance; 
changes behavior, but deterministic"), "hypertile_split_vae_attn": OptionInfo(False, "Split attention in VAE with HyperTile").link("Github", "https://github.com/tfernd/HyperTile").info("improves performance; changes behavior, but deterministic"), + "hypertile_max_depth_vae" : OptionInfo(3, "Max depth for VAE HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}).link("Github", "https://github.com/tfernd/HyperTile"), + "hypertile_max_depth_unet" : OptionInfo(3, "Max depth for Unet HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}).link("Github", "https://github.com/tfernd/HyperTile"), + "hypertile_max_tile_vae" : OptionInfo(128, "Max tile size for VAE HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).link("Github", "https://github.com/tfernd/HyperTile"), + "hypertile_max_tile_unet" : OptionInfo(256, "Max tile size for Unet HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).link("Github", "https://github.com/tfernd/HyperTile"), + "hypertile_swap_size_unet": OptionInfo(3, "Swap size for Unet HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}).link("Github", "https://github.com/tfernd/HyperTile"), + "hypertile_swap_size_vae": OptionInfo(3, "Swap size for VAE HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}).link("Github", "https://github.com/tfernd/HyperTile"), })) options_templates.update(options_section(('compatibility', "Compatibility"), { From 472c22cc8a46b825545d5c86bd2745269430d7b0 Mon Sep 17 00:00:00 2001 From: AngelBottomless Date: Thu, 16 Nov 2023 19:03:45 +0900 Subject: [PATCH 14/31] fix ruff - add newline --- modules/hypertile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/hypertile.py b/modules/hypertile.py index fee24a8c..86acecdc 100644 --- a/modules/hypertile.py +++ b/modules/hypertile.py @@ -367,4 +367,4 @@ def hypertile_context_unet(model:nn.Module, aspect_ratio:float, tile_size:int, o disable=not enabled, max_depth=max_depth, is_sdxl=is_sdxl, - ) \ No newline at end of file + ) From c40be2252ab1c8c289562db208c5ac6618bd8545 Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Fri, 17 Nov 2023 09:22:27 +0900 Subject: [PATCH 15/31] Fix critical issue - unet apply --- modules/processing.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index c622ff33..2fda7f33 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -874,7 +874,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: else: if opts.sd_vae_decode_method != 'Full': p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method - with hypertile_context_unet(p.sd_model.first_stage_model, aspect_ratio=p.width / p.height, tile_size=largest_tile_size_available(p.width, p.height), is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): + with hypertile_context_unet(p.sd_model.model, aspect_ratio=p.width / p.height, tile_size=largest_tile_size_available(p.width, p.height), is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) x_samples_ddim = torch.stack(x_samples_ddim).float() @@ -1145,7 +1145,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): x = self.rng.next() tile_size = largest_tile_size_available(self.width, self.height) with hypertile_context_vae(self.sd_model.first_stage_model, 
aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): - with hypertile_context_unet(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): + with hypertile_context_unet(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): devices.torch_gc() samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) From c0725ba2d098a6a78610e7d96ee75f63a32d4e52 Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Fri, 17 Nov 2023 09:34:50 +0900 Subject: [PATCH 16/31] Fix inverted option issue I'm pretty sure I was sleepy while implementing this --- modules/hypertile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/hypertile.py b/modules/hypertile.py index 86acecdc..3a1468c6 100644 --- a/modules/hypertile.py +++ b/modules/hypertile.py @@ -337,7 +337,7 @@ def hypertile_context_vae(model:nn.Module, aspect_ratio:float, tile_size:int, op """ Returns context manager for VAE """ - enabled = not opts.hypertile_split_vae_attn + enabled = opts.hypertile_split_vae_attn swap_size = opts.hypertile_swap_size_vae max_depth = opts.hypertile_max_depth_vae tile_size_max = opts.hypertile_max_tile_vae @@ -355,7 +355,7 @@ def hypertile_context_unet(model:nn.Module, aspect_ratio:float, tile_size:int, op """ Returns context manager for U-Net """ - enabled = not opts.hypertile_split_unet_attn + enabled = opts.hypertile_split_unet_attn swap_size = opts.hypertile_swap_size_unet max_depth = opts.hypertile_max_depth_unet tile_size_max = opts.hypertile_max_tile_unet
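Taken together, patches 13-16 settle the intended usage of the two new helpers: the VAE context wraps `sd_model.first_stage_model`, the U-Net context wraps `sd_model.model` (the mix-up that patch 15 corrects), and all tuning comes from `shared.opts`. Below is a minimal sketch of that pattern, not code from the patches; `p` and `run_sampler` are hypothetical stand-ins for the processing object and the sampler invocation used in `modules/processing.py`:

```python
# Sketch only: how hypertile_context_vae/hypertile_context_unet compose after
# patches 15 and 16. `p` and `run_sampler` are illustrative stand-ins.
from modules import shared
from modules.hypertile import (hypertile_context_unet, hypertile_context_vae,
                               largest_tile_size_available, set_hypertile_seed)


def sample_with_hypertile(p, run_sampler):
    set_hypertile_seed(p.seed)  # tile splitting uses a seeded RNG, keeping runs reproducible
    aspect_ratio = p.width / p.height
    tile_size = largest_tile_size_available(p.width, p.height)

    # VAE attention is split on first_stage_model, U-Net attention on model;
    # each context manager restores the original forward methods on exit.
    with hypertile_context_vae(p.sd_model.first_stage_model, aspect_ratio=aspect_ratio,
                               tile_size=tile_size, opts=shared.opts):
        with hypertile_context_unet(p.sd_model.model, aspect_ratio=aspect_ratio,
                                    tile_size=tile_size, is_sdxl=p.sd_model.is_sdxl,
                                    opts=shared.opts):
            return run_sampler()
```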
From ffd0f8ddc309688636ac1ac10d82b72ab6b466bf Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Fri, 17 Nov 2023 09:54:33 +0900 Subject: [PATCH 17/31] set empty value for SD XL 3rd layer --- modules/hypertile.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/hypertile.py b/modules/hypertile.py index 3a1468c6..be898fce 100644 --- a/modules/hypertile.py +++ b/modules/hypertile.py @@ -170,6 +170,7 @@ DEPTH_LAYERS_XL = { "middle_block.1.transformer_blocks.8.attn1", "middle_block.1.transformer_blocks.9.attn1", ], + 3 : [] # TODO - separate layers for SD-XL } From 97431f29feb17ffc96ca95e9b3efec87be9d8b3a Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Fri, 17 Nov 2023 10:05:28 +0900 Subject: [PATCH 18/31] fix double gc and decoding with unet context --- modules/processing.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 2fda7f33..36c2be5e 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -874,7 +874,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: else: if opts.sd_vae_decode_method != 'Full': p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method - with hypertile_context_unet(p.sd_model.model, aspect_ratio=p.width / p.height, tile_size=largest_tile_size_available(p.width, p.height), is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): + with hypertile_context_vae(p.sd_model.first_stage_model, aspect_ratio=p.width / p.height, tile_size=largest_tile_size_available(p.width, p.height), opts=shared.opts): x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) x_samples_ddim = torch.stack(x_samples_ddim).float() @@ -1146,11 +1146,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): tile_size = largest_tile_size_available(self.width, self.height) with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): with hypertile_context_unet(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): - devices.torch_gc() samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) del x if not self.enable_hr: return samples + devices.torch_gc() if self.latent_scale_mode is None: with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): @@ -1536,7 +1536,6 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): tile_size = largest_tile_size_available(self.width, self.height) with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): with hypertile_context_unet(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): - devices.torch_gc() samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) From bde439ef67776be126d6a8c569a23d54dbc3e707 Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sun, 19 Nov 2023 00:58:47 -0600 Subject: [PATCH 19/31] use metadata.ini for meta filename --- modules/extensions.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/modules/extensions.py b/modules/extensions.py index 5536db3e..f3988d02 100644 --- a/modules/extensions.py 
+++ b/modules/extensions.py @@ -43,13 +43,13 @@ class Extension: @functools.cached_property def metadata(self): - if os.path.isfile(os.path.join(self.path, "sd_webui_metadata.ini")): + if os.path.isfile(os.path.join(self.path, "metadata.ini")): try: config = configparser.ConfigParser() - config.read(os.path.join(self.path, "sd_webui_metadata.ini")) + config.read(os.path.join(self.path, "metadata.ini")) return config except Exception: - errors.report(f"Error reading sd_webui_metadata.ini for extension {self.canonical_name}.", + errors.report(f"Error reading metadata.ini for extension {self.canonical_name}.", exc_info=True) return None @@ -177,14 +177,14 @@ def list_extensions(): canonical_name = extension_dirname requires = None - if os.path.isfile(os.path.join(path, "sd_webui_metadata.ini")): + if os.path.isfile(os.path.join(path, "metadata.ini")): try: config = configparser.ConfigParser() - config.read(os.path.join(path, "sd_webui_metadata.ini")) + config.read(os.path.join(path, "metadata.ini")) canonical_name = config.get("Extension", "Name", fallback=canonical_name) requires = config.get("Extension", "Requires", fallback=None) except Exception: - errors.report(f"Error reading sd_webui_metadata.ini for extension {extension_dirname}. " + errors.report(f"Error reading metadata.ini for extension {extension_dirname}. " f"Will load regardless.", exc_info=True) canonical_name = canonical_name.lower().strip() From dea5e43c8359b663d5599efc99278c258747db61 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 18 Nov 2023 04:23:03 +0900 Subject: [PATCH 20/31] Option to show batch img2img results in UI shared.opts.img2img_batch_show_results_limit limits the number of images returned to the UI for batch img2img; default limit 32; 0: no images are shown; -1: unlimited, all images are shown --- modules/img2img.py | 24 ++++++++++++++++++++---- modules/shared_options.py | 1 + 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index 52cb577a..c583290a 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -44,6 +44,8 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal steps = p.steps override_settings = p.override_settings sd_model_checkpoint_override = get_closet_checkpoint_match(override_settings.get("sd_model_checkpoint", None)) + batch_results = None + discard_further_results = False for i, image in enumerate(images): state.job = f"{i+1} out of {len(images)}" if state.skipped: @@ -127,7 +129,21 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal if proc is None: p.override_settings.pop('save_images_replace_action', None) - process_images(p) + proc = process_images(p) + + if not discard_further_results and proc: + if batch_results: + batch_results.images.extend(proc.images) + batch_results.infotexts.extend(proc.infotexts) + else: + batch_results = proc + + if 0 <= shared.opts.img2img_batch_show_results_limit < len(batch_results.images): + discard_further_results = True + batch_results.images = batch_results.images[:int(shared.opts.img2img_batch_show_results_limit)] + batch_results.infotexts = batch_results.infotexts[:int(shared.opts.img2img_batch_show_results_limit)] + + return batch_results def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, 
mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): @@ -212,10 +228,10 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s with closing(p): if is_batch: assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" + processed = process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir) - process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir) - - processed = Processed(p, [], p.seed, "") + if processed is None: + processed = Processed(p, [], p.seed, "") else: processed = modules.scripts.scripts_img2img.run(p, *args) if processed is None: diff --git a/modules/shared_options.py b/modules/shared_options.py index d40db530..1ee8c7ad 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -189,6 +189,7 @@ options_templates.update(options_section(('img2img', "img2img"), { "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(), "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), + "img2img_batch_show_results_limit": OptionInfo(32, "Show the first N batch img2img results in UI", gr.Slider, {"minimum": -1, "maximum": 1000, "step": 1}).info('0: disable, -1: show all images. 
Too many images can cause lag'), })) options_templates.update(options_section(('optimizations', "Optimizations"), { From 6d337bf23dae990e7b6717da4d5f2e54f212685c Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 20 Nov 2023 01:38:31 +0900 Subject: [PATCH 21/31] save sysinfo as .json GitHub now allows uploading of .json files in issues --- modules/launch_utils.py | 2 +- modules/ui.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 8cdbafa5..264ec9ca 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -441,7 +441,7 @@ def dump_sysinfo(): import datetime text = sysinfo.get() - filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" + filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.json" with open(filename, "w", encoding="utf8") as file: file.write(text) diff --git a/modules/ui.py b/modules/ui.py index ba0d8542..b82f3c5e 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1308,7 +1308,7 @@ def setup_ui_api(app): from fastapi.responses import PlainTextResponse text = sysinfo.get() - filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" + filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.json" return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'}) From 9b471436b2226458a767077707ea102e331b5d78 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 20 Nov 2023 14:47:09 +0300 Subject: [PATCH 22/31] rework extensions metadata: use custom sorter that doesn't mess the order as much and ignores cyclic errors, use classes with named fields instead of dictionaries, eliminate some duplicated code --- modules/extensions.py | 132 +++++++++++++++++---------------- modules/scripts.py | 165 +++++++++++++++++++----------------------- 2 files changed, 146 insertions(+), 151 deletions(-) diff --git a/modules/extensions.py b/modules/extensions.py index f3988d02..1899cd52 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -1,5 +1,6 @@ +from __future__ import annotations + import configparser -import functools import os import threading import re @@ -8,7 +9,6 @@ from modules import shared, errors, cache, scripts from modules.gitpython_hack import Repo from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401 -extensions = [] os.makedirs(extensions_dir, exist_ok=True) @@ -22,13 +22,56 @@ def active(): return [x for x in extensions if x.enabled] +class ExtensionMetadata: + filename = "metadata.ini" + config: configparser.ConfigParser + canonical_name: str + requires: list + + def __init__(self, path, canonical_name): + self.config = configparser.ConfigParser() + + filepath = os.path.join(path, self.filename) + if os.path.isfile(filepath): + try: + self.config.read(filepath) + except Exception: + errors.report(f"Error reading {self.filename} for extension {canonical_name}.", exc_info=True) + + self.canonical_name = self.config.get("Extension", "Name", fallback=canonical_name) + self.canonical_name = self.canonical_name.lower().strip() + + self.requires = self.get_script_requirements("Requires", "Extension") + + def get_script_requirements(self, field, section, extra_section=None): + """reads a list of requirements from the config; field is the name of the field in the ini file, + like Requires or Before, and section is the name 
of the [section] in the ini file; additionally, + reads more requirements from [extra_section] if specified.""" + + x = self.config.get(section, field, fallback='') + + if extra_section: + x = x + ', ' + self.config.get(extra_section, field, fallback='') + + return self.parse_list(x.lower()) + + def parse_list(self, text): + """converts a line from config ("ext1 ext2, ext3 ") into a python list (["ext1", "ext2", "ext3"])""" + + if not text: + return [] + + # both "," and " " are accepted as separator + return [x for x in re.split(r"[,\s]+", text.strip()) if x] + + class Extension: lock = threading.Lock() cached_fields = ['remote', 'commit_date', 'branch', 'commit_hash', 'version'] + metadata: ExtensionMetadata - def __init__(self, name, path, enabled=True, is_builtin=False, canonical_name=None): + def __init__(self, name, path, enabled=True, is_builtin=False, metadata=None): self.name = name - self.canonical_name = canonical_name or name.lower() self.path = path self.enabled = enabled self.status = '' @@ -40,18 +83,8 @@ class Extension: self.branch = None self.remote = None self.have_info_from_repo = False - - @functools.cached_property - def metadata(self): - if os.path.isfile(os.path.join(self.path, "metadata.ini")): - try: - config = configparser.ConfigParser() - config.read(os.path.join(self.path, "metadata.ini")) - return config - except Exception: - errors.report(f"Error reading metadata.ini for extension {self.canonical_name}.", - exc_info=True) - return None + self.metadata = metadata if metadata else ExtensionMetadata(self.path, name.lower()) + self.canonical_name = self.metadata.canonical_name def to_dict(self): return {x: getattr(self, x) for x in self.cached_fields} @@ -162,7 +195,7 @@ def list_extensions(): elif shared.opts.disable_all_extensions == "extra": print("*** \"Disable all extensions\" option was set, will only load built-in extensions ***") - extension_dependency_map = {} + loaded_extensions = {} # scan through extensions directory and load metadata for dirname in [extensions_builtin_dir, extensions_dir]: @@ -175,55 +208,30 @@ def list_extensions(): continue canonical_name = extension_dirname - requires = None - - if os.path.isfile(os.path.join(path, "metadata.ini")): - try: - config = configparser.ConfigParser() - config.read(os.path.join(path, "metadata.ini")) - canonical_name = config.get("Extension", "Name", fallback=canonical_name) - requires = config.get("Extension", "Requires", fallback=None) - except Exception: - errors.report(f"Error reading metadata.ini for extension {extension_dirname}. " - f"Will load regardless.", exc_info=True) - - canonical_name = canonical_name.lower().strip() + metadata = ExtensionMetadata(path, canonical_name) # check for duplicated canonical names - if canonical_name in extension_dependency_map: - errors.report(f"Duplicate canonical name \"{canonical_name}\" found in extensions " - f"\"{extension_dirname}\" and \"{extension_dependency_map[canonical_name]['dirname']}\". " - f"The current loading extension will be discarded.", exc_info=False) + already_loaded_extension = loaded_extensions.get(metadata.canonical_name) + if already_loaded_extension is not None: + errors.report(f'Duplicate canonical name "{canonical_name}" found in extensions "{extension_dirname}" and "{already_loaded_extension.name}". 
Former will be discarded.', exc_info=False) continue - # both "," and " " are accepted as separator - requires = list(filter(None, re.split(r"[,\s]+", requires.lower()))) if requires else [] - - extension_dependency_map[canonical_name] = { - "dirname": extension_dirname, - "path": path, - "requires": requires, - } + is_builtin = dirname == extensions_builtin_dir + extension = Extension(name=extension_dirname, path=path, enabled=extension_dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin, metadata=metadata) + extensions.append(extension) + loaded_extensions[canonical_name] = extension # check for requirements - for (_, extension_data) in extension_dependency_map.items(): - dirname, path, requires = extension_data['dirname'], extension_data['path'], extension_data['requires'] - requirement_met = True - for req in requires: - if req not in extension_dependency_map: - errors.report(f"Extension \"{dirname}\" requires \"{req}\" which is not installed. " - f"The current loading extension will be discarded.", exc_info=False) - requirement_met = False - break - dep_dirname = extension_dependency_map[req]['dirname'] - if dep_dirname in shared.opts.disabled_extensions: - errors.report(f"Extension \"{dirname}\" requires \"{dep_dirname}\" which is disabled. " - f"The current loading extension will be discarded.", exc_info=False) - requirement_met = False - break + for extension in extensions: + for req in extension.metadata.requires: + required_extension = loaded_extensions.get(req) + if required_extension is None: + errors.report(f'Extension "{extension.name}" requires "{req}" which is not installed.', exc_info=False) + continue - is_builtin = dirname == extensions_builtin_dir - extension = Extension(name=dirname, path=path, - enabled=dirname not in shared.opts.disabled_extensions and requirement_met, - is_builtin=is_builtin) - extensions.append(extension) + if not required_extension.enabled: + errors.report(f'Extension "{extension.name}" requires "{required_extension.name}" which is disabled.', exc_info=False) + continue + + +extensions: list[Extension] = [] diff --git a/modules/scripts.py b/modules/scripts.py index b1f4504a..b0689a23 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -2,7 +2,6 @@ import os import re import sys import inspect -from graphlib import TopologicalSorter, CycleError from collections import namedtuple from dataclasses import dataclass @@ -312,27 +311,57 @@ scripts_data = [] postprocessing_scripts_data = [] ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedir", "module"]) +def topological_sort(dependencies): + """Accepts a dictionary mapping name to its dependencies, returns a list of names ordered according to dependencies. 
+ Ignores errors relating to missing dependencies or circular dependencies + """ + + visited = {} + result = [] + + def inner(name): + visited[name] = True + + for dep in dependencies.get(name, []): + if dep in dependencies and dep not in visited: + inner(dep) + + result.append(name) + + for depname in dependencies: + if depname not in visited: + inner(depname) + + return result + + +@dataclass +class ScriptWithDependencies: + script_canonical_name: str + file: ScriptFile + requires: list + load_before: list + load_after: list + def list_scripts(scriptdirname, extension, *, include_extensions=True): - scripts_list = [] - script_dependency_map = {} + scripts = {} + + loaded_extensions = {ext.canonical_name: ext for ext in extensions.active()} + loaded_extensions_scripts = {ext.canonical_name: [] for ext in extensions.active()} # build script dependency map - root_script_basedir = os.path.join(paths.script_path, scriptdirname) if os.path.exists(root_script_basedir): for filename in sorted(os.listdir(root_script_basedir)): if not os.path.isfile(os.path.join(root_script_basedir, filename)): continue - script_dependency_map[filename] = { - "extension": None, - "extension_dirname": None, - "script_file": ScriptFile(paths.script_path, filename, os.path.join(root_script_basedir, filename)), - "requires": [], - "load_before": [], - "load_after": [], - } + if os.path.splitext(filename)[1].lower() != extension: + continue + + script_file = ScriptFile(paths.script_path, filename, os.path.join(root_script_basedir, filename)) + scripts[filename] = ScriptWithDependencies(filename, script_file, [], [], []) if include_extensions: for ext in extensions.active(): @@ -341,96 +370,54 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): if not os.path.isfile(extension_script.path): continue - script_canonical_name = ext.canonical_name + "/" + extension_script.filename - if ext.is_builtin: - script_canonical_name = "builtin/" + script_canonical_name + script_canonical_name = ("builtin/" if ext.is_builtin else "") + ext.canonical_name + "/" + extension_script.filename relative_path = scriptdirname + "/" + extension_script.filename - requires = '' - load_before = '' - load_after = '' + script = ScriptWithDependencies( + script_canonical_name=script_canonical_name, + file=extension_script, + requires=ext.metadata.get_script_requirements("Requires", relative_path, scriptdirname), + load_before=ext.metadata.get_script_requirements("Before", relative_path, scriptdirname), + load_after=ext.metadata.get_script_requirements("After", relative_path, scriptdirname), + ) - if ext.metadata is not None: - requires = ext.metadata.get(relative_path, "Requires", fallback='') - load_before = ext.metadata.get(relative_path, "Before", fallback='') - load_after = ext.metadata.get(relative_path, "After", fallback='') + scripts[script_canonical_name] = script + loaded_extensions_scripts[ext.canonical_name].append(script) - # propagate directory level metadata - requires = requires + ',' + ext.metadata.get(scriptdirname, "Requires", fallback='') - load_before = load_before + ',' + ext.metadata.get(scriptdirname, "Before", fallback='') - load_after = load_after + ',' + ext.metadata.get(scriptdirname, "After", fallback='') - - requires = list(filter(None, re.split(r"[,\s]+", requires.lower()))) if requires else [] - load_after = list(filter(None, re.split(r"[,\s]+", load_after.lower()))) if load_after else [] - load_before = list(filter(None, re.split(r"[,\s]+", load_before.lower()))) if load_before else [] - - 
script_dependency_map[script_canonical_name] = { - "extension": ext.canonical_name, - "extension_dirname": ext.name, - "script_file": extension_script, - "requires": requires, - "load_before": load_before, - "load_after": load_after, - } - - # resolve dependencies - - loaded_extensions = set() - for ext in extensions.active(): - loaded_extensions.add(ext.canonical_name) - - for script_canonical_name, script_data in script_dependency_map.items(): + for script_canonical_name, script in scripts.items(): # load before requires inverse dependency # in this case, append the script name into the load_after list of the specified script - for load_before_script in script_data['load_before']: + for load_before in script.load_before: # if this requires an individual script to be loaded before - if load_before_script in script_dependency_map: - script_dependency_map[load_before_script]['load_after'].append(script_canonical_name) - elif load_before_script in loaded_extensions: - for _, script_data2 in script_dependency_map.items(): - if script_data2['extension'] == load_before_script: - script_data2['load_after'].append(script_canonical_name) - break + other_script = scripts.get(load_before) + if other_script: + other_script.load_after.append(script_canonical_name) - # resolve extension name in load_after lists - for load_after_script in list(script_data['load_after']): - if load_after_script not in script_dependency_map and load_after_script in loaded_extensions: - script_data['load_after'].remove(load_after_script) - for script_canonical_name2, script_data2 in script_dependency_map.items(): - if script_data2['extension'] == load_after_script: - script_data['load_after'].append(script_canonical_name2) - break + # if this requires an extension + other_extension_scripts = loaded_extensions_scripts.get(load_before) + if other_extension_scripts: + for other_script in other_extension_scripts: + other_script.load_after.append(script_canonical_name) - # build the DAG - sorter = TopologicalSorter() - for script_canonical_name, script_data in script_dependency_map.items(): - requirement_met = True - for required_script in script_data['requires']: - # if this requires an individual script to be loaded - if required_script not in script_dependency_map and required_script not in loaded_extensions: - errors.report(f"Script \"{script_canonical_name}\" " - f"requires \"{required_script}\" to " - f"be loaded, but it is not. Skipping.", - exc_info=False) - requirement_met = False - break - if not requirement_met: - continue + # if After mentions an extension, remove it and instead add all of its scripts + for load_after in list(script.load_after): + if load_after not in scripts and load_after in loaded_extensions_scripts: + script.load_after.remove(load_after) - sorter.add(script_canonical_name, *script_data['load_after']) + for other_script in loaded_extensions_scripts.get(load_after, []): + script.load_after.append(other_script.script_canonical_name) - # sort the scripts - try: - ordered_script = sorter.static_order() - except CycleError: - errors.report("Cycle detected in script dependencies. 
Scripts will load in ascending order.", exc_info=True) - ordered_script = script_dependency_map.keys() + dependencies = {} - for script_canonical_name in ordered_script: - script_data = script_dependency_map[script_canonical_name] - scripts_list.append(script_data['script_file']) + for script_canonical_name, script in scripts.items(): + for required_script in script.requires: + if required_script not in scripts and required_script not in loaded_extensions: + errors.report(f'Script "{script_canonical_name}" requires "{required_script}" to be loaded, but it is not.', exc_info=False) - scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)] + dependencies[script_canonical_name] = script.load_after + + ordered_scripts = topological_sort(dependencies) + scripts_list = [scripts[script_canonical_name].file for script_canonical_name in ordered_scripts] return scripts_list From 8aa51f682c17d85f4585b9471860224568d25e95 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 21 Nov 2023 08:32:00 +0300 Subject: [PATCH 23/31] fix [Bug]: (Dev Branch) Placing "Dimensions" first in "ui_reorder_list" prevents start #14047 --- modules/ui.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index b82f3c5e..08e0ad77 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -635,12 +635,6 @@ def create_ui(): scale_by.release(**on_change_args) button_update_resize_to.click(**on_change_args) - # the code below is meant to update the resolution label after the image in the image selection UI has changed. - # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. - # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. - for component in [init_img, sketch]: - component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) - tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) @@ -701,6 +695,12 @@ def create_ui(): if category not in {"accordions"}: scripts.scripts_img2img.setup_ui_for_section(category) + # the code below is meant to update the resolution label after the image in the image selection UI has changed. + # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. + # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. + for component in [init_img, sketch]: + component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) + def select_img2img_tab(tab): return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), From 8fe1e195228162a4510925de05015f361efa1087 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Wed, 22 Nov 2023 18:01:34 +0200 Subject: [PATCH 24/31] Update ruff to 0.1.6 --- .github/workflows/on_pull_request.yaml | 2 +- pyproject.toml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/on_pull_request.yaml b/.github/workflows/on_pull_request.yaml index 78e608ee..9e44c806 100644 --- a/.github/workflows/on_pull_request.yaml +++ b/.github/workflows/on_pull_request.yaml @@ -20,7 +20,7 @@ jobs: # not to have GHA download an (at the time of writing) 4 GB cache # of PyTorch and other dependencies. 
- name: Install Ruff - run: pip install ruff==0.0.272 + run: pip install ruff==0.1.6 - name: Run Ruff run: ruff . lint-js: diff --git a/pyproject.toml b/pyproject.toml index 80541a8f..d03036e7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,7 @@ exclude = [ ignore = [ "E501", # Line too long + "E721", # Do not compare types, use `isinstance` "E731", # Do not assign a `lambda` expression, use a `def` "I001", # Import block is un-sorted or un-formatted From 066afda2f6f650fe108d285a239d08d59d92590d Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Wed, 22 Nov 2023 18:02:39 +0200 Subject: [PATCH 25/31] Simplify restart_sampler (suggested by ruff) --- modules/sd_samplers_extra.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_extra.py b/modules/sd_samplers_extra.py index 1b981ca8..72fd0aa5 100644 --- a/modules/sd_samplers_extra.py +++ b/modules/sd_samplers_extra.py @@ -60,7 +60,7 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1] while restart_times > 0: restart_times -= 1 - step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])]) + step_list.extend(zip(sigma_restart[:-1], sigma_restart[1:])) last_sigma = None for old_sigma, new_sigma in tqdm.tqdm(step_list, disable=disable): From 86b99b1e98fcdd6e7e5f6017071944364e01e6ad Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Fri, 24 Nov 2023 11:28:54 -0600 Subject: [PATCH 26/31] Move exception_records related methods to errors.py --- modules/errors.py | 18 ++++++++++++++++-- modules/sysinfo.py | 17 +---------------- 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/modules/errors.py b/modules/errors.py index 192cd8ff..ac9f1ee5 100644 --- a/modules/errors.py +++ b/modules/errors.py @@ -6,6 +6,21 @@ import traceback exception_records = [] +def format_traceback(tb): + return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)] + + +def format_exception(e, tb): + return {"exception": str(e), "traceback": format_traceback(tb)} + + +def get_exceptions(): + try: + return list(reversed(exception_records)) + except Exception as e: + return str(e) + + def record_exception(): _, e, tb = sys.exc_info() if e is None: @@ -14,8 +29,7 @@ def record_exception(): if exception_records and exception_records[-1] == e: return - from modules import sysinfo - exception_records.append(sysinfo.format_exception(e, tb)) + exception_records.append(format_exception(e, tb)) if len(exception_records) > 5: exception_records.pop(0) diff --git a/modules/sysinfo.py b/modules/sysinfo.py index 7d906e1f..226b204d 100644 --- a/modules/sysinfo.py +++ b/modules/sysinfo.py @@ -85,7 +85,7 @@ def get_dict(): "Checksum": checksum_token, "Commandline": sys.argv, "Torch env info": get_torch_sysinfo(), - "Exceptions": get_exceptions(), + "Exceptions": errors.get_exceptions(), "CPU": { "model": platform.processor(), "count logical": psutil.cpu_count(logical=True), @@ -105,21 +105,6 @@ def get_dict(): return res -def format_traceback(tb): - return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)] - - -def format_exception(e, tb): - return {"exception": str(e), "traceback": format_traceback(tb)} - - -def get_exceptions(): - try: - return list(reversed(errors.exception_records)) - except Exception as e: - return str(e) - - def 
get_environment(): return {k: os.environ[k] for k in sorted(os.environ) if k in environment_whitelist} From 5cedc8f9b2b51f392e7c8f5e29286466e3bee8d6 Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Fri, 24 Nov 2023 11:30:30 -0600 Subject: [PATCH 27/31] remove traceback in sysinfo --- modules/sysinfo.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/sysinfo.py b/modules/sysinfo.py index 226b204d..1d058950 100644 --- a/modules/sysinfo.py +++ b/modules/sysinfo.py @@ -1,7 +1,6 @@ import json import os import sys -import traceback import platform import hashlib From 3a9bf4ac10d99feb81b0e637417a108d3fa5ac06 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 26 Nov 2023 08:29:12 +0300 Subject: [PATCH 28/31] move file --- {modules => extensions-builtin/hypertile}/hypertile.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {modules => extensions-builtin/hypertile}/hypertile.py (100%) diff --git a/modules/hypertile.py b/extensions-builtin/hypertile/hypertile.py similarity index 100% rename from modules/hypertile.py rename to extensions-builtin/hypertile/hypertile.py From d2e0c1ca132f4f0d98b77397a9f353d4ad8e7c4b Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 26 Nov 2023 10:51:45 +0300 Subject: [PATCH 29/31] rework hypertile into a built-in extension --- README.md | 1 + extensions-builtin/hypertile/hypertile.py | 215 ++++++++---------- .../hypertile/scripts/hypertile_script.py | 73 ++++++ modules/processing.py | 37 ++- modules/shared_options.py | 8 - 5 files changed, 183 insertions(+), 151 deletions(-) create mode 100644 extensions-builtin/hypertile/scripts/hypertile_script.py diff --git a/README.md b/README.md index 25ba070e..3b3f93ad 100644 --- a/README.md +++ b/README.md @@ -174,5 +174,6 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al - TAESD - Ollin Boer Bohan - https://github.com/madebyollin/taesd - LyCORIS - KohakuBlueleaf - Restart sampling - lambertae - https://github.com/Newbeeer/diffusion_restart_sampling +- Hypertile - tfernd - https://github.com/tfernd/HyperTile - Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user. - (You)
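The rework that follows drops the enter/exit hijacking of `split_attention` in favour of a wrap-once hook: each matching attention layer gets a `HypertileParams` object and a wrapped `forward`, and subsequent generations merely flip `params.enabled` and update the tile settings. Below is a self-contained sketch of that pattern, under stated assumptions: `nn.Linear` stands in for the attention layers the real code matches by name, and the wrapper simply delegates instead of tiling.

```python
# Sketch of the wrap-once/toggle-later pattern used by hypertile_hook_model below.
from dataclasses import dataclass
from typing import Callable, Optional

import torch
import torch.nn as nn


@dataclass
class HookParams:
    forward: Optional[Callable] = None
    enabled: bool = False


def wrapped_forward(params: HookParams) -> Callable:
    def wrapper(*args, **kwargs):
        if not params.enabled:
            return params.forward(*args, **kwargs)
        # the real wrapper splits args[0] into tiles, runs params.forward on
        # each tile and merges the outputs; delegating keeps the sketch runnable
        return params.forward(*args, **kwargs)
    return wrapper


def hook_model(model: nn.Module) -> None:
    """Wrap matching layers once; afterwards, enabling is a cheap flag flip."""
    for module in model.modules():
        if isinstance(module, nn.Linear):
            params = HookParams(forward=module.forward)
            module.hook_params = params           # keep params reachable for later toggling
            module.forward = wrapped_forward(params)


model = nn.Sequential(nn.Linear(8, 8), nn.ReLU())
hook_model(model)
model[0].hook_params.enabled = True               # per-generation toggle, no unhooking needed
out = model(torch.randn(2, 8))
```

Because enabling is now plain attribute assignment, the new `hypertile_script.py` further below can configure Hypertile from its `process` and `before_hr` callbacks instead of nesting context managers around every sampler call.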
diff --git a/extensions-builtin/hypertile/hypertile.py b/extensions-builtin/hypertile/hypertile.py index be898fce..a40c1311 100644 --- a/extensions-builtin/hypertile/hypertile.py +++ b/extensions-builtin/hypertile/hypertile.py @@ -1,10 +1,13 @@ """ Hypertile module for splitting attention layers in SD-1.5 U-Net and SD-1.5 VAE -Warn : The patch works well only if the input image has a width and height that are multiples of 128 -Author : @tfernd Github : https://github.com/tfernd/HyperTile +Warn: The patch works well only if the input image has a width and height that are multiples of 128 +Original author: @tfernd Github: https://github.com/tfernd/HyperTile """ from __future__ import annotations + +import functools +from dataclasses import dataclass from typing import Callable from typing_extensions import Literal @@ -18,6 +21,19 @@ import random from einops import rearrange + +@dataclass +class HypertileParams: + depth = 0 + layer_name = "" + tile_size: int = 0 + swap_size: int = 0 + aspect_ratio: float = 1.0 + forward = None + enabled = False + + + # TODO add SD-XL layers DEPTH_LAYERS = { 0: [ @@ -176,6 +192,7 @@ DEPTH_LAYERS_XL = { RNG_INSTANCE = random.Random() + def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int: """ Returns a random divisor of value that @@ -193,10 +210,13 @@ def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int: return ns[idx] + def set_hypertile_seed(seed: int) -> None: RNG_INSTANCE.seed(seed) -def largest_tile_size_available(width:int, height:int) -> int: + +@functools.cache +def largest_tile_size_available(width: int, height: int) -> int: """ Calculates the largest tile size available for a given width and height Tile size is always a power of 2 @@ -207,6 +227,7 @@ def largest_tile_size_available(width:int, height:int) -> int: largest_tile_size_available *= 2 return largest_tile_size_available + def iterative_closest_divisors(hw:int, aspect_ratio:float) -> tuple[int, int]: """ Finds h and w such that h*w = hw and h/w = aspect_ratio @@ -219,6 +240,7 @@ def iterative_closest_divisors(hw:int, aspect_ratio:float) -> tuple[int, int]: closest_pair = pairs[ratios.index(closest_ratio)] # closest pair of divisors to aspect_ratio return closest_pair + @cache def find_hw_candidates(hw:int, aspect_ratio:float) -> tuple[int, int]: """ @@ -240,132 +262,87 @@ def find_hw_candidates(hw:int, aspect_ratio:float) -> tuple[int, int]: w = int(w_candidate) return h, w -@contextmanager -def split_attention( - layer: nn.Module, - /, - aspect_ratio: float, # width/height - tile_size: int = 128, # 128 for VAE - swap_size: int = 1, # 1 for VAE - *, - disable: bool = False, - max_depth: Literal[0, 1, 2, 3] = 0, # ! 
Try 0 or 1 - scale_depth: bool = True, # scale the tile-size depending on the depth - is_sdxl: bool = False, # is the model SD-XL -): - # Hijacks AttnBlock from ldm and Attention from diffusers - if disable: - logging.info(f"Attention for {layer.__class__.__qualname__} not splitted") - yield - return +def self_attn_forward(params: HypertileParams, scale_depth=True) -> Callable: - latent_tile_size = max(128, tile_size) // 8 + @wraps(params.forward) + def wrapper(*args, **kwargs): + if not params.enabled: + return params.forward(*args, **kwargs) - def self_attn_forward(forward: Callable, depth: int, layer_name: str, module: nn.Module) -> Callable: - @wraps(forward) - def wrapper(*args, **kwargs): - x = args[0] + latent_tile_size = max(128, params.tile_size) // 8 + x = args[0] - # VAE - if x.ndim == 4: - b, c, h, w = x.shape + # VAE + if x.ndim == 4: + b, c, h, w = x.shape - nh = random_divisor(h, latent_tile_size, swap_size) - nw = random_divisor(w, latent_tile_size, swap_size) + nh = random_divisor(h, latent_tile_size, params.swap_size) + nw = random_divisor(w, latent_tile_size, params.swap_size) - if nh * nw > 1: - x = rearrange(x, "b c (nh h) (nw w) -> (b nh nw) c h w", nh=nh, nw=nw) # split into nh * nw tiles + if nh * nw > 1: + x = rearrange(x, "b c (nh h) (nw w) -> (b nh nw) c h w", nh=nh, nw=nw) # split into nh * nw tiles - out = forward(x, *args[1:], **kwargs) + out = params.forward(x, *args[1:], **kwargs) - if nh * nw > 1: - out = rearrange(out, "(b nh nw) c h w -> b c (nh h) (nw w)", nh=nh, nw=nw) + if nh * nw > 1: + out = rearrange(out, "(b nh nw) c h w -> b c (nh h) (nw w)", nh=nh, nw=nw) - # U-Net - else: - hw: int = x.size(1) - h, w = find_hw_candidates(hw, aspect_ratio) - assert h * w == hw, f"Invalid aspect ratio {aspect_ratio} for input of shape {x.shape}, hw={hw}, h={h}, w={w}" - - factor = 2**depth if scale_depth else 1 - nh = random_divisor(h, latent_tile_size * factor, swap_size) - nw = random_divisor(w, latent_tile_size * factor, swap_size) - - module._split_sizes_hypertile.append((nh, nw)) # type: ignore - - if nh * nw > 1: - x = rearrange(x, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw) - - out = forward(x, *args[1:], **kwargs) - - if nh * nw > 1: - out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw) - out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw) - - return out - - return wrapper - - # Handle hijacking the forward method and recovering afterwards - try: - if is_sdxl: - layers = DEPTH_LAYERS_XL + # U-Net else: - layers = DEPTH_LAYERS - for depth in range(max_depth + 1): - for layer_name, module in layer.named_modules(): + hw: int = x.size(1) + h, w = find_hw_candidates(hw, params.aspect_ratio) + assert h * w == hw, f"Invalid aspect ratio {params.aspect_ratio} for input of shape {x.shape}, hw={hw}, h={h}, w={w}" + + factor = 2 ** params.depth if scale_depth else 1 + nh = random_divisor(h, latent_tile_size * factor, params.swap_size) + nw = random_divisor(w, latent_tile_size * factor, params.swap_size) + + if nh * nw > 1: + x = rearrange(x, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw) + + out = params.forward(x, *args[1:], **kwargs) + + if nh * nw > 1: + out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw) + out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw) + + return out + + return wrapper + + +def hypertile_hook_model(model: nn.Module, width, height, *, enable=False, tile_size_max=128, swap_size=1, max_depth=3, 
is_sdxl=False): + hypertile_layers = getattr(model, "__webui_hypertile_layers", None) + if hypertile_layers is None: + if not enable: + return + + hypertile_layers = {} + layers = DEPTH_LAYERS_XL if is_sdxl else DEPTH_LAYERS + + for depth in range(4): + for layer_name, module in model.named_modules(): if any(layer_name.endswith(try_name) for try_name in layers[depth]): - # print input shape for debugging - logging.debug(f"HyperTile hijacking attention layer at depth {depth}: {layer_name}") - # hijack - module._original_forward_hypertile = module.forward - module.forward = self_attn_forward(module.forward, depth, layer_name, module) - module._split_sizes_hypertile = [] - yield - finally: - for layer_name, module in layer.named_modules(): - # remove hijack - if hasattr(module, "_original_forward_hypertile"): - if module._split_sizes_hypertile: - logging.debug(f"layer {layer_name} splitted with ({module._split_sizes_hypertile})") - # recover - module.forward = module._original_forward_hypertile - del module._original_forward_hypertile - del module._split_sizes_hypertile + params = HypertileParams() + module.__webui_hypertile_params = params + params.forward = module.forward + params.depth = depth + params.layer_name = layer_name + module.forward = self_attn_forward(params) -def hypertile_context_vae(model:nn.Module, aspect_ratio:float, tile_size:int, opts): - """ - Returns context manager for VAE - """ - enabled = opts.hypertile_split_vae_attn - swap_size = opts.hypertile_swap_size_vae - max_depth = opts.hypertile_max_depth_vae - tile_size_max = opts.hypertile_max_tile_vae - return split_attention( - model, - aspect_ratio=aspect_ratio, - tile_size=min(tile_size, tile_size_max), - swap_size=swap_size, - disable=not enabled, - max_depth=max_depth, - is_sdxl=False, - ) + hypertile_layers[layer_name] = 1 -def hypertile_context_unet(model:nn.Module, aspect_ratio:float, tile_size:int, opts, is_sdxl:bool): - """ - Returns context manager for U-Net - """ - enabled = opts.hypertile_split_unet_attn - swap_size = opts.hypertile_swap_size_unet - max_depth = opts.hypertile_max_depth_unet - tile_size_max = opts.hypertile_max_tile_unet - return split_attention( - model, - aspect_ratio=aspect_ratio, - tile_size=min(tile_size, tile_size_max), - swap_size=swap_size, - disable=not enabled, - max_depth=max_depth, - is_sdxl=is_sdxl, - ) + model.__webui_hypertile_layers = hypertile_layers + + aspect_ratio = width / height + tile_size = min(largest_tile_size_available(width, height), tile_size_max) + + for layer_name, module in model.named_modules(): + if layer_name in hypertile_layers: + params = module.__webui_hypertile_params + + params.tile_size = tile_size + params.swap_size = swap_size + params.aspect_ratio = aspect_ratio + params.enabled = enable and params.depth <= max_depth diff --git a/extensions-builtin/hypertile/scripts/hypertile_script.py b/extensions-builtin/hypertile/scripts/hypertile_script.py new file mode 100644 index 00000000..3cc29cd1 --- /dev/null +++ b/extensions-builtin/hypertile/scripts/hypertile_script.py @@ -0,0 +1,73 @@ +import hypertile +from modules import scripts, script_callbacks, shared + + +class ScriptHypertile(scripts.Script): + name = "Hypertile" + + def title(self): + return self.name + + def show(self, is_img2img): + return scripts.AlwaysVisible + + def process(self, p, *args): + hypertile.set_hypertile_seed(p.all_seeds[0]) + + configure_hypertile(p.width, p.height, enable_unet=shared.opts.hypertile_enable_unet) + + def before_hr(self, p, *args): + 
configure_hypertile(p.hr_upscale_to_x, p.hr_upscale_to_y, enable_unet=shared.opts.hypertile_enable_unet_secondpass or shared.opts.hypertile_enable_unet)
+
+
+def configure_hypertile(width, height, enable_unet=True):
+    hypertile.hypertile_hook_model(
+        shared.sd_model.first_stage_model,
+        width,
+        height,
+        swap_size=shared.opts.hypertile_swap_size_vae,
+        max_depth=shared.opts.hypertile_max_depth_vae,
+        tile_size_max=shared.opts.hypertile_max_tile_vae,
+        enable=shared.opts.hypertile_enable_vae,
+    )
+
+    hypertile.hypertile_hook_model(
+        shared.sd_model.model,
+        width,
+        height,
+        swap_size=shared.opts.hypertile_swap_size_unet,
+        max_depth=shared.opts.hypertile_max_depth_unet,
+        tile_size_max=shared.opts.hypertile_max_tile_unet,
+        enable=enable_unet,
+        is_sdxl=shared.sd_model.is_sdxl
+    )
+
+
+def on_ui_settings():
+    import gradio as gr
+
+    options = {
+        "hypertile_explanation": shared.OptionHTML("""
+    Hypertile optimizes the self-attention layers within the U-Net and VAE models,
+    cutting computation time by a factor of 1 to 4. The larger the generated image,
+    the greater the benefit.
+    """),
+
+        "hypertile_enable_unet": shared.OptionInfo(False, "Enable Hypertile U-Net").info("noticeable change in details of the generated picture; if enabled, overrides the setting below"),
+        "hypertile_enable_unet_secondpass": shared.OptionInfo(False, "Enable Hypertile U-Net for hires fix second pass"),
+        "hypertile_max_depth_unet": shared.OptionInfo(3, "Hypertile U-Net max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}),
+        "hypertile_max_tile_unet": shared.OptionInfo(256, "Hypertile U-Net max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
+        "hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-Net swap size", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}),
+
+        "hypertile_enable_vae": shared.OptionInfo(False, "Enable Hypertile VAE").info("minimal change in the generated picture"),
+        "hypertile_max_depth_vae": shared.OptionInfo(3, "Hypertile VAE max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}),
+        "hypertile_max_tile_vae": shared.OptionInfo(128, "Hypertile VAE max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
+        "hypertile_swap_size_vae": shared.OptionInfo(3, "Hypertile VAE swap size", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}),
+    }
+
+    for name, opt in options.items():
+        opt.section = ('hypertile', "Hypertile")
+        shared.opts.add_option(name, opt)
+
+
+script_callbacks.on_ui_settings(on_ui_settings)
diff --git a/modules/processing.py b/modules/processing.py
index 36c2be5e..ac58ef86 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -24,7 +24,6 @@ from modules.shared import opts, cmd_opts, state
 import modules.shared as shared
 import modules.paths as paths
 import modules.face_restoration
-from modules.hypertile import set_hypertile_seed, largest_tile_size_available, hypertile_context_unet, hypertile_context_vae
 import modules.images as images
 import modules.styles
 import modules.sd_models as sd_models
@@ -861,8 +860,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
                 p.comment(comment)

             p.extra_generation_params.update(model_hijack.extra_generation_params)
-            set_hypertile_seed(p.seed)
-            # add batch size + hypertile status to information to reproduce the run
+
             if p.n_iter > 1:
                 shared.state.job = f"Batch {n+1} out of {p.n_iter}"

@@ -874,8 +872,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             else:
                 if opts.sd_vae_decode_method != 'Full':
p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method - with hypertile_context_vae(p.sd_model.first_stage_model, aspect_ratio=p.width / p.height, tile_size=largest_tile_size_available(p.width, p.height), opts=shared.opts): - x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) + x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) x_samples_ddim = torch.stack(x_samples_ddim).float() x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) @@ -1141,25 +1138,23 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) - aspect_ratio = self.width / self.height + x = self.rng.next() - tile_size = largest_tile_size_available(self.width, self.height) - with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): - with hypertile_context_unet(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): - samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) + samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) del x + if not self.enable_hr: return samples devices.torch_gc() if self.latent_scale_mode is None: - with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): - decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32) + decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32) else: decoded_samples = None with sd_models.SkipWritingToConfig(): sd_models.reload_model_weights(info=self.hr_checkpoint_info) + return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts) def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts): @@ -1244,18 +1239,15 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.scripts is not None: self.scripts.before_hr(self) - tile_size = largest_tile_size_available(target_width, target_height) - aspect_ratio = self.width / self.height - with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): - with hypertile_context_unet(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): - samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) + + samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio()) self.sampler = None devices.torch_gc() - with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): - decoded_samples = 
decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True) + + decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True) self.is_hr_pass = False return decoded_samples @@ -1532,11 +1524,8 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): if self.initial_noise_multiplier != 1.0: self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier x *= self.initial_noise_multiplier - aspect_ratio = self.width / self.height - tile_size = largest_tile_size_available(self.width, self.height) - with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): - with hypertile_context_unet(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): - samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) + + samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) if self.mask is not None: samples = samples * self.nmask + self.init_latent * self.mask diff --git a/modules/shared_options.py b/modules/shared_options.py index 28a48906..d40db530 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -200,14 +200,6 @@ options_templates.update(options_section(('optimizations', "Optimizations"), { "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"), "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"), "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"), - "hypertile_split_unet_attn" : OptionInfo(False, "Split attention in Unet with HyperTile").link("Github", "https://github.com/tfernd/HyperTile").info("improves performance; changes behavior, but deterministic"), - "hypertile_split_vae_attn": OptionInfo(False, "Split attention in VAE with HyperTile").link("Github", "https://github.com/tfernd/HyperTile").info("improves performance; changes behavior, but deterministic"), - "hypertile_max_depth_vae" : OptionInfo(3, "Max depth for VAE HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}).link("Github", "https://github.com/tfernd/HyperTile"), - "hypertile_max_depth_unet" : OptionInfo(3, "Max depth for Unet HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}).link("Github", "https://github.com/tfernd/HyperTile"), - "hypertile_max_tile_vae" : OptionInfo(128, "Max tile size for VAE HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).link("Github", "https://github.com/tfernd/HyperTile"), - "hypertile_max_tile_unet" : OptionInfo(256, "Max tile size for Unet HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).link("Github", "https://github.com/tfernd/HyperTile"), - "hypertile_swap_size_unet": OptionInfo(3, "Swap size for Unet HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}).link("Github", 
"https://github.com/tfernd/HyperTile"), - "hypertile_swap_size_vae": OptionInfo(3, "Swap size for VAE HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}).link("Github", "https://github.com/tfernd/HyperTile"), })) options_templates.update(options_section(('compatibility', "Compatibility"), { From c5a0c59a83c950c64bc44427d3478aaa78c296cf Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 26 Nov 2023 11:36:17 +0300 Subject: [PATCH 30/31] do not save HTML explanations from options page to config --- modules/options.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/options.py b/modules/options.py index ab40aff7..7703d80e 100644 --- a/modules/options.py +++ b/modules/options.py @@ -76,7 +76,7 @@ class Options: def __init__(self, data_labels: dict[str, OptionInfo], restricted_opts): self.data_labels = data_labels - self.data = {k: v.default for k, v in self.data_labels.items()} + self.data = {k: v.default for k, v in self.data_labels.items() if not v.do_not_save} self.restricted_opts = restricted_opts def __setattr__(self, key, value): @@ -210,7 +210,7 @@ class Options: def add_option(self, key, info): self.data_labels[key] = info - if key not in self.data: + if key not in self.data and not info.do_not_save: self.data[key] = info.default def reorder(self): From d1750e5eca6fd95db3516928cad18b32e557f56f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 26 Nov 2023 11:37:12 +0300 Subject: [PATCH 31/31] fix linter errors --- extensions-builtin/hypertile/hypertile.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/extensions-builtin/hypertile/hypertile.py b/extensions-builtin/hypertile/hypertile.py index a40c1311..feb02fd2 100644 --- a/extensions-builtin/hypertile/hypertile.py +++ b/extensions-builtin/hypertile/hypertile.py @@ -9,11 +9,8 @@ from __future__ import annotations import functools from dataclasses import dataclass from typing import Callable -from typing_extensions import Literal -import logging from functools import wraps, cache -from contextlib import contextmanager import math import torch.nn as nn