fix many problems

lllyasviel 2024-01-29 09:28:38 -08:00
parent dcdc80810e
commit 7b14cb3f9f
7 changed files with 208 additions and 11 deletions

View File

@@ -25,7 +25,7 @@ class ZoeDetector:
            load_file_from_url(remote_model_path, model_dir=self.model_dir)
        conf = get_config("zoedepth", "infer")
        model = ZoeDepth.build_from_config(conf)
        model.load_state_dict(torch.load(modelpath, map_location=model.device)['model'])
        model.load_state_dict(torch.load(modelpath, map_location=model.device)['model'], strict=False)
        model.eval()
        self.model = model.to(self.device)
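
For context on the strict=False change above: load_state_dict with strict=False loads every key that matches and returns the mismatches instead of raising. A minimal stand-alone sketch with a toy model (not the ZoeDepth code):

import torch
import torch.nn as nn

# Toy model used only to illustrate strict vs. non-strict checkpoint loading.
model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))

state = model.state_dict()
state["extra.unexpected_weight"] = torch.zeros(1)  # a key the model does not define

# With the default strict=True this raises RuntimeError; with strict=False the
# matching keys are loaded and the mismatches are returned for inspection.
result = model.load_state_dict(state, strict=False)
print(result.unexpected_keys)  # ['extra.unexpected_weight']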

View File

@@ -171,7 +171,7 @@ class Resize(object):
    def __call__(self, x):
        width, height = self.get_size(*x.shape[-2:][::-1])
        return nn.functional.interpolate(x, (height, width), mode='bilinear', align_corners=True)
        return nn.functional.interpolate(x, (int(height), int(width)), mode='bilinear', align_corners=True)


class PrepForMidas(object):
    def __init__(self, resize_mode="minimal", keep_aspect_ratio=True, img_size=384, do_resize=True):
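
The int() casts above matter because size helpers like get_size can return numpy or float scalars, and recent PyTorch releases are strict about interpolate sizes being plain Python ints. A small sketch with made-up sizes; the same cast is applied to the F.interpolate call in the following file:

import numpy as np
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 384, 512)

# Size computations often yield numpy scalars rather than Python ints.
height, width = np.int64(256), np.int64(320)

# Casting to int keeps newer PyTorch versions from rejecting the size tuple.
y = F.interpolate(x, (int(height), int(width)), mode='bilinear', align_corners=True)
print(y.shape)  # torch.Size([1, 3, 256, 320])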

View File

@@ -44,7 +44,7 @@ def _get_rel_pos_bias(self, window_size):
    old_sub_table = old_relative_position_bias_table[:old_num_relative_distance - 3]

    old_sub_table = old_sub_table.reshape(1, old_width, old_height, -1).permute(0, 3, 1, 2)
    new_sub_table = F.interpolate(old_sub_table, size=(new_height, new_width), mode="bilinear")
    new_sub_table = F.interpolate(old_sub_table, size=(int(new_height), int(new_width)), mode="bilinear")
    new_sub_table = new_sub_table.permute(0, 2, 3, 1).reshape(new_num_relative_distance - 3, -1)

    new_relative_position_bias_table = torch.cat(

View File

@@ -0,0 +1,145 @@
import launch
import pkg_resources
import sys
import os
import shutil
import platform
from pathlib import Path
from typing import Tuple, Optional


repo_root = Path(__file__).parent
main_req_file = repo_root / "requirements.txt"


def comparable_version(version: str) -> Tuple:
    return tuple(version.split("."))


def get_installed_version(package: str) -> Optional[str]:
    try:
        return pkg_resources.get_distribution(package).version
    except Exception:
        return None


def extract_base_package(package_string: str) -> str:
    base_package = package_string.split("@git")[0]
    return base_package


def install_requirements(req_file):
    with open(req_file) as file:
        for package in file:
            try:
                package = package.strip()
                if "==" in package:
                    package_name, package_version = package.split("==")
                    installed_version = get_installed_version(package_name)
                    if installed_version != package_version:
                        launch.run_pip(
                            f"install -U {package}",
                            f"forge_legacy_preprocessor requirement: changing {package_name} version from {installed_version} to {package_version}",
                        )
                elif ">=" in package:
                    package_name, package_version = package.split(">=")
                    installed_version = get_installed_version(package_name)
                    if not installed_version or comparable_version(
                        installed_version
                    ) < comparable_version(package_version):
                        launch.run_pip(
                            f"install -U {package}",
                            f"forge_legacy_preprocessor requirement: changing {package_name} version from {installed_version} to {package_version}",
                        )
                elif not launch.is_installed(extract_base_package(package)):
                    launch.run_pip(
                        f"install {package}",
                        f"forge_legacy_preprocessor requirement: {package}",
                    )
            except Exception as e:
                print(e)
                print(
                    f"Warning: Failed to install {package}, some preprocessors may not work."
                )


def try_install_from_wheel(pkg_name: str, wheel_url: str):
    if get_installed_version(pkg_name) is not None:
        return
    try:
        launch.run_pip(
            f"install {wheel_url}",
            f"forge_legacy_preprocessor requirement: {pkg_name}",
        )
    except Exception as e:
        print(e)
        print(f"Warning: Failed to install {pkg_name}. Some preprocessors will not work.")


def try_install_insight_face():
    """Attempt to install the insightface library, which is necessary for ip-adapter FaceID.
    Note: building insightface from source requires compiling C++ code, which should be avoided
    in principle. The solution here is to download a precompiled wheel."""
    if get_installed_version("insightface") is not None:
        return

    default_win_wheel = "https://github.com/Gourieff/Assets/raw/main/Insightface/insightface-0.7.3-cp310-cp310-win_amd64.whl"
    wheel_url = os.environ.get("INSIGHTFACE_WHEEL", default_win_wheel)

    system = platform.system().lower()
    architecture = platform.machine().lower()
    python_version = sys.version_info
    if wheel_url != default_win_wheel or (
        system == "windows"
        and "amd64" in architecture
        and python_version.major == 3
        and python_version.minor == 10
    ):
        try:
            launch.run_pip(
                f"install {wheel_url}",
                "forge_legacy_preprocessor requirement: insightface",
            )
        except Exception as e:
            print(e)
            print(
                "Legacy Preprocessor init warning: Unable to install insightface automatically. "
            )
    else:
        print(
            "Legacy Preprocessor init warning: Unable to install insightface automatically. "
            "Please try running `pip install insightface` manually."
        )


def try_remove_legacy_submodule():
    """Try to remove the annotator/hand_refiner_portable submodule dir."""
    submodule = repo_root / "annotator" / "hand_refiner_portable"
    if os.path.exists(submodule):
        try:
            shutil.rmtree(submodule)
        except Exception as e:
            print(e)
            print(
                f"Failed to remove submodule {submodule} automatically. You can manually delete the directory."
            )


install_requirements(main_req_file)

try_install_insight_face()

try_install_from_wheel(
    "handrefinerportable",
    wheel_url=os.environ.get(
        "HANDREFINER_WHEEL",
        "https://github.com/huchenlei/HandRefinerPortable/releases/download/v1.0.0/handrefinerportable-2024.1.18.0-py2.py3-none-any.whl",
    ),
)

try_install_from_wheel(
    "depth_anything",
    wheel_url=os.environ.get(
        "DEPTH_ANYTHING_WHEEL",
        "https://github.com/huchenlei/Depth-Anything/releases/download/v1.0.0/depth_anything-2024.1.22.0-py2.py3-none-any.whl",
    ),
)

try_remove_legacy_submodule()
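
For reference, this is how the helpers above behave when called next to their definitions; the package strings are made up for illustration:

print(extract_base_package("somepkg@git+https://example.com/somepkg.git"))  # 'somepkg'
print(comparable_version("0.7.3"))                                          # ('0', '7', '3')

# Caveat: segments compare lexicographically as strings, so '10' sorts before '9'.
# That is fine for the simple pins in requirements.txt, but it is not a
# general-purpose version comparison.
print(comparable_version("1.10.0") < comparable_version("1.9.0"))           # True

The wheel URLs can also be overridden before launch through the INSIGHTFACE_WHEEL, HANDREFINER_WHEEL and DEPTH_ANYTHING_WHEEL environment variables; pointing INSIGHTFACE_WHEEL at a wheel built for the local platform bypasses the Windows/Python 3.10 check in try_install_insight_face.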

View File

@@ -12,7 +12,7 @@ import json
from functools import lru_cache
from modules import cmd_args, errors
from modules.paths_internal import script_path, extensions_dir
from modules.paths_internal import script_path, extensions_dir, extensions_builtin_dir
from modules.timer import startup_timer
from modules import logging_config
from modules_forge import forge_version
@@ -266,6 +266,27 @@ def list_extensions(settings_file):
    return [x for x in os.listdir(extensions_dir) if x not in disabled_extensions]


def list_extensions_builtin(settings_file):
    settings = {}

    try:
        with open(settings_file, "r", encoding="utf8") as file:
            settings = json.load(file)
    except FileNotFoundError:
        pass
    except Exception:
        errors.report(f'\nCould not load settings\nThe config file "{settings_file}" is likely corrupted\nIt has been moved to the "tmp/config.json"\nReverting config to default\n\n''', exc_info=True)
        os.replace(settings_file, os.path.join(script_path, "tmp", "config.json"))

    disabled_extensions = set(settings.get('disabled_extensions', []))
    disable_all_extensions = settings.get('disable_all_extensions', 'none')

    if disable_all_extensions != 'none' or args.disable_extra_extensions or args.disable_all_extensions or not os.path.isdir(extensions_builtin_dir):
        return []

    return [x for x in os.listdir(extensions_builtin_dir) if x not in disabled_extensions]


def run_extensions_installers(settings_file):
    if not os.path.isdir(extensions_dir):
        return

@@ -280,6 +301,21 @@ def run_extensions_installers(settings_file):
                run_extension_installer(path)
                startup_timer.record(dirname_extension)

    if not os.path.isdir(extensions_builtin_dir):
        return

    with startup_timer.subcategory("run extensions_builtin installers"):
        for dirname_extension in list_extensions_builtin(settings_file):
            logging.debug(f"Installing {dirname_extension}")
            path = os.path.join(extensions_builtin_dir, dirname_extension)
            if os.path.isdir(path):
                run_extension_installer(path)
                startup_timer.record(dirname_extension)

    return


re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")
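
A minimal sketch of the filtering that list_extensions_builtin applies, using a stand-in settings dict and directory name in place of config.json and paths_internal:

import os

settings = {"disabled_extensions": ["some_builtin_extension"], "disable_all_extensions": "none"}
builtin_dir = "extensions-builtin"  # stand-in for extensions_builtin_dir

disabled = set(settings.get("disabled_extensions", []))
if settings.get("disable_all_extensions", "none") != "none" or not os.path.isdir(builtin_dir):
    enabled = []
else:
    enabled = [name for name in os.listdir(builtin_dir) if name not in disabled]

print(enabled)  # every built-in extension directory except the explicitly disabled ones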

View File

@@ -1,3 +1,4 @@
import time
import torch
import contextlib
from ldm_patched.modules import model_management
@@ -48,14 +49,14 @@ def automatic_memory_management():
        torch.nn.Module.__init__ = original_init
        torch.nn.Module.to = original_to

    count = 0
    for module in set(module_list):
        module_params = getattr(module, '_parameters', [])
        if len(module_params) > 0:
            module.cpu()
            count += 1
    start = time.perf_counter()
    module_list = set(module_list)
    for module in module_list:
        module.cpu()
    print(f'Automatic Memory Management: {count} Modules.')
    model_management.soft_empty_cache()
    end = time.perf_counter()
    print(f'Automatic Memory Management: {len(module_list)} Modules in {end - start:.3f} seconds.')
    return
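
Behaviourally, the reworked block moves every captured module back to the CPU, parameters or not, and reports how long the offload took. A self-contained sketch of the same pattern with toy modules:

import time
import torch.nn as nn

# Toy stand-ins for the modules recorded by the patched __init__/to hooks.
module_list = {nn.Linear(8, 8), nn.Conv2d(3, 8, 3), nn.ReLU()}

start = time.perf_counter()
for module in module_list:
    module.cpu()  # offload everything that was captured
end = time.perf_counter()

print(f'Automatic Memory Management: {len(module_list)} Modules in {end - start:.3f} seconds.')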

View File

@@ -1,4 +1,5 @@
import torch
import time
import ldm_patched.modules.samplers
from ldm_patched.modules.controlnet import ControlBase
@@ -183,7 +184,21 @@ def patched_calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, model_op
    return out_cond, out_uncond


def patched_load_models_gpu(*args, **kwargs):
    execution_start_time = time.perf_counter()
    y = model_management.load_models_gpu_origin(*args, **kwargs)
    moving_time = time.perf_counter() - execution_start_time
    if moving_time > 0.1:
        print(f'Moving model(s) has taken {moving_time:.2f} seconds')
    return y


def patch_all_basics():
    if not hasattr(model_management, 'load_models_gpu_origin'):
        model_management.load_models_gpu_origin = model_management.load_models_gpu
        model_management.load_models_gpu = patched_load_models_gpu

    ControlBase.control_merge = patched_control_merge
    ldm_patched.modules.samplers.calc_cond_uncond_batch = patched_calc_cond_uncond_batch
    return
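
patch_all_basics wraps model_management.load_models_gpu exactly once, stashing the original under load_models_gpu_origin so that reapplying the patch cannot double-wrap it. A minimal sketch of that wrap-once pattern against a made-up namespace rather than the real module:

import time
import types

# Hypothetical stand-in for the real model_management module.
fake_mm = types.SimpleNamespace(load_models_gpu=lambda *a, **k: time.sleep(0.2))

def timed_load_models_gpu(*args, **kwargs):
    start = time.perf_counter()
    result = fake_mm.load_models_gpu_origin(*args, **kwargs)
    moving_time = time.perf_counter() - start
    if moving_time > 0.1:
        print(f'Moving model(s) has taken {moving_time:.2f} seconds')
    return result

# Keep a reference to the original exactly once, then swap in the wrapper.
if not hasattr(fake_mm, 'load_models_gpu_origin'):
    fake_mm.load_models_gpu_origin = fake_mm.load_models_gpu
    fake_mm.load_models_gpu = timed_load_models_gpu

fake_mm.load_models_gpu()  # prints the timing warning, since the stand-in sleeps ~0.2 s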