Revert "Fix ruff linter (#137)" (#143)

This reverts commit 6b3ad64388.
Chenlei Hu 2024-02-09 02:24:04 +00:00 committed by GitHub
parent b49742354d
commit 388ca351f4
18 changed files with 39 additions and 25 deletions

View File

@@ -3,6 +3,7 @@ import logging
import os
import sys
import warnings
import os
from modules.timer import startup_timer
@@ -37,7 +38,7 @@ def imports():
startup_timer.record("setup paths")
import ldm.modules.encoders.modules # noqa: F401
import ldm.modules.diffusionmodules.model # noqa: F401
import ldm.modules.diffusionmodules.model
startup_timer.record("import ldm")
import sgm.modules.encoders.modules # noqa: F401

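The two hunks above undo import cleanups: a duplicate `import os` comes back, and a `# noqa: F401` marker is dropped. F401 is ruff's unused-import check, and the `noqa` comment is how the linted version marked imports that are referenced by nothing in the module but are kept for their import-time side effects. A minimal, hedged illustration of that pattern, with the stdlib `this` module as a stand-in:

# f401_sketch.py -- hypothetical illustration, not code from this repository.
# `import this` has a visible import-time side effect (it prints the Zen of
# Python), so the module is "used" even though no name from it is referenced.
# The trailing marker tells ruff's F401 (unused import) check to leave it alone.
import this  # noqa: F401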
View File

@@ -1,5 +1,5 @@
import torch
from modules import devices, shared
module_in_gpu = None
cpu = torch.device("cpu")

View File

@@ -3,7 +3,6 @@ import sys
from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, cwd # noqa: F401
import modules.safe # noqa: F401
import ldm_patched.utils.path_utils as ldm_patched_path_utils
def mute_sdxl_imports():
@@ -65,6 +64,8 @@ for d, must_exist, what, options in path_dirs:
paths[what] = d
import ldm_patched.utils.path_utils as ldm_patched_path_utils
ldm_patched_path_utils.base_path = data_path
ldm_patched_path_utils.models_dir = models_path
ldm_patched_path_utils.output_directory = os.path.join(data_path, "output")

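The two hunks above move the `ldm_patched.utils.path_utils` import from the top of the module back down past the `path_dirs` loop. An import placed after executable code is what E402 (module level import not at top of file) reports; the late position can still be deliberate when the imported module must only load after earlier setup has run. A hedged sketch of that trade-off, with `json` standing in for `ldm_patched.utils.path_utils`:

# e402_sketch.py -- hypothetical illustration, not code from this repository.
import os  # imports at the top satisfy E402

def configure_paths():
    # stand-in for the path_dirs loop that fills in paths[what] above
    os.environ.setdefault("DATA_PATH", "/tmp/data")

configure_paths()

# An import placed here trips E402, but it is guaranteed to run only after
# configure_paths(), which matters if the module reads state at import time.
import json  # noqa: E402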
View File

@@ -16,7 +16,7 @@ from skimage import exposure
from typing import Any
import modules.sd_hijack
from modules import devices, prompt_parser, masking, sd_samplers, infotext_utils, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, rng
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, infotext_utils, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng
from modules.rng import slerp # noqa: F401
from modules.sd_hijack import model_hijack
from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes
@@ -517,9 +517,9 @@ class StableDiffusionProcessing:
class Processed:
def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments="", extra_images_list=None):
def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments="", extra_images_list=[]):
self.images = images_list
self.extra_images = extra_images_list if extra_images_list is not None else []
self.extra_images = extra_images_list
self.prompt = p.prompt
self.negative_prompt = p.negative_prompt
self.seed = seed

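The `__init__` hunk reinstates `extra_images_list=[]` in place of the `None`-plus-fallback spelling. A default value is evaluated once, when the function is defined, so a mutable default such as `[]` is shared by every call that omits the argument; that is the pitfall ruff's B006 (mutable argument default) rule targets. A short, self-contained demonstration:

# b006_sketch.py -- hypothetical illustration, not code from this repository.
class Processed:
    def __init__(self, extra_images_list=[]):  # one list, created at def time
        self.extra_images = extra_images_list

a = Processed()
a.extra_images.append("grid.png")
b = Processed()
print(b.extra_images)  # ['grid.png'] -- b silently shares a's list

# The linted spelling creates a fresh list per call instead:
class ProcessedSafe:
    def __init__(self, extra_images_list=None):
        self.extra_images = extra_images_list if extra_images_list is not None else []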
View File

@@ -1,8 +1,11 @@
import torch
from torch.nn.functional import silu
from types import MethodType
from modules import devices, sd_hijack_optimizations, shared, script_callbacks, sd_unet, patches
from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet, patches
from modules.hypernetworks import hypernetwork
from modules.shared import cmd_opts
from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr, xlmr_m18
import ldm.modules.attention
import ldm.modules.diffusionmodules.model

View File

@@ -6,17 +6,22 @@ import threading
import torch
import re
import safetensors.torch
from omegaconf import ListConfig
from omegaconf import OmegaConf, ListConfig
from os import mkdir
from urllib import request
import ldm.modules.midas as midas
import gc
from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, errors, hashes, cache, extra_networks, processing, patches
from ldm.util import instantiate_from_config
from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack, patches
from modules.timer import Timer
import numpy as np
from modules_forge import forge_loader
import modules_forge.ops as forge_ops
from ldm_patched.modules.ops import manual_cast
from ldm_patched.modules import model_management as model_management
import ldm_patched.modules.model_patcher
model_dir = "Stable-diffusion"

View File

@@ -1,9 +1,10 @@
import torch
from modules import prompt_parser, sd_samplers_common
from modules import prompt_parser, devices, sd_samplers_common
from modules.shared import state
from modules.shared import opts, state
import modules.shared as shared
from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback
from modules_forge import forge_sampler

View File

@@ -1,6 +1,6 @@
import torch.nn
from modules import script_callbacks, shared
from modules import script_callbacks, shared, devices
unet_options = []
current_unet_option = None

View File

@@ -2,7 +2,7 @@ import os
import collections
from dataclasses import dataclass
from modules import paths, shared, script_callbacks, sd_models, extra_networks, sd_hijack, hashes
from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks, lowvram, sd_hijack, hashes
import glob
from copy import deepcopy

View File

@@ -1,5 +1,7 @@
import os
import torch
from modules import shared
from modules.shared import cmd_opts

View File

@@ -171,7 +171,7 @@ def update_token_counter(text, steps, *, is_positive=True):
cond_stage_model = sd_models.model_data.sd_model.cond_stage_model
assert cond_stage_model is not None
except Exception:
return "<span class='gr-box gr-text-input'>?/?</span>"
return f"<span class='gr-box gr-text-input'>?/?</span>"
flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules)
prompts = [prompt_text for step, prompt_text in flat_prompts]

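The one-line change above restores an `f` prefix on a string that contains no replacement fields, which is exactly what F541 (f-string without any placeholders) flags. The prefix is harmless at runtime; the check only flags interpolation syntax that is never used:

# f541_sketch.py -- hypothetical illustration, not code from this repository.
plain = "<span class='gr-box gr-text-input'>?/?</span>"
fstr = f"<span class='gr-box gr-text-input'>?/?</span>"  # F541: no {placeholders}
assert plain == fstr  # both build the identical string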
View File

@@ -14,7 +14,7 @@ def prepare_free_memory(aggressive=False):
model_management.unload_all_models()
print('Upscale script freed all memory.')
return
model_management.free_memory(memory_required=1024*1024*3, device=model_management.get_torch_device())
print('Upscale script freed memory successfully.')

View File

@@ -71,6 +71,7 @@ def no_clip():
def load_checkpoint_guess_config(sd, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True):
sd_keys = sd.keys()
clip = None
clipvision = None
vae = None
@@ -98,6 +99,7 @@ def load_checkpoint_guess_config(sd, output_vae=True, output_clip=True, output_c
if output_model:
inital_load_device = model_management.unet_inital_load_device(parameters, unet_dtype)
offload_device = model_management.unet_offload_device()
model = model_config.get_model(sd, "model.diffusion_model.", device=inital_load_device)
model.load_model_weights(sd, "model.diffusion_model.")

View File

@@ -30,10 +30,10 @@ def initialize_forge():
for bad in bad_list:
if bad in sys.argv:
print(f'Arg {bad} is removed in Forge.')
print('Now memory management is fully automatic and you do not need any command flags.')
print('Please just remove this flag.')
print('In extreme cases, if you want to force previous lowvram/medvram behaviors, '
'please use --always-offload-from-vram')
print(f'Now memory management is fully automatic and you do not need any command flags.')
print(f'Please just remove this flag.')
print(f'In extreme cases, if you want to force previous lowvram/medvram behaviors, '
f'please use --always-offload-from-vram')
from ldm_patched.modules import args_parser

View File

@@ -2,6 +2,7 @@ import time
import torch
import contextlib
from ldm_patched.modules import model_management
from ldm_patched.modules.ops import use_patched_ops
@contextlib.contextmanager

View File

@@ -29,8 +29,8 @@ def build_loaded(module, loader_name):
if os.path.exists(path):
os.remove(path)
exp += f'Forge has tried to move the corrupted file to {corrupted_backup_file} \n'
exp += 'You may try again now and Forge will download models again. \n'
raise ValueError(exp) from e
exp += f'You may try again now and Forge will download models again. \n'
raise ValueError(exp)
return result
setattr(module, loader_name, loader)

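Besides the f-string prefix, this hunk drops the `from e` clause on the re-raise. Inside an `except` block, `raise ValueError(exp) from e` records the caught exception as the explicit cause, so the final traceback shows both failures; a bare `raise ValueError(exp)` leaves only the implicit context. Preferring the explicit form is the behavior of B904 (raise without `from` inside `except`), presumably the rule the reverted fix addressed. A small runnable sketch:

# b904_sketch.py -- hypothetical illustration, not code from this repository.
def load(path):
    try:
        with open(path) as f:
            return f.read()
    except OSError as e:
        # `from e` sets __cause__, so the report reads "The above exception
        # was the direct cause of the following exception".
        raise ValueError(f"failed to load model file: {path}") from e

try:
    load("/nonexistent/model.safetensors")
except ValueError as err:
    print(err.__cause__)  # the original FileNotFoundError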
View File

@@ -34,7 +34,7 @@ def try_load_supported_control_model(ckpt_path):
global supported_control_models
state_dict = ldm_patched.modules.utils.load_torch_file(ckpt_path, safe_load=True)
for supported_type in supported_control_models:
state_dict_copy = dict(state_dict)
state_dict_copy = {k: v for k, v in state_dict.items()}
model = supported_type.try_build_from_state_dict(state_dict_copy, ckpt_path)
if model is not None:
return model

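The last code hunk swaps `dict(state_dict)` back to an identity dict comprehension. Both expressions build a fresh shallow copy, so `try_build_from_state_dict` can consume keys without mutating the caller's `state_dict`; ruff's C416 (unnecessary comprehension) simply prefers the constructor as the clearer spelling. The equivalence in brief:

# c416_sketch.py -- hypothetical illustration, not code from this repository.
state_dict = {"model.weight": [1, 2], "model.bias": [3]}

copy_a = dict(state_dict)                       # constructor form, lint-preferred
copy_b = {k: v for k, v in state_dict.items()}  # comprehension form, C416

copy_a.pop("model.bias")
assert "model.bias" in state_dict  # the original mapping is untouched
assert copy_b == state_dict        # both copies start out equal to the source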
View File

@@ -12,15 +12,13 @@ extend-select = [
exclude = [
"extensions",
"extensions-disabled",
"extensions-builtin",
"ldm_patched",
]
ignore = [
"E501", # Line too long
"E721", # Do not compare types, use `isinstance`
"E731", # Do not assign a `lambda` expression, use a `def`
"I001", # Import block is un-sorted or un-formatted
"C901", # Function is too complex
"C408", # Rewrite as a literal