my-sd/modules/devices.py

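# Compatibility shim: preserves the original `modules.devices` API surface while
# delegating all device and dtype decisions to ldm_patched's model_management,
# so most helpers below are thin wrappers or no-op stubs.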
import contextlib

import torch

import ldm_patched.modules.model_management as model_management


def has_xpu() -> bool:
    return model_management.xpu_available


def has_mps() -> bool:
    return model_management.mps_mode()


def cuda_no_autocast(device_id=None) -> bool:
    return False


def get_cuda_device_id():
    return model_management.get_torch_device().index


def get_cuda_device_string():
    return str(model_management.get_torch_device())


def get_optimal_device_name():
    return model_management.get_torch_device().type


def get_optimal_device():
    return model_management.get_torch_device()


def get_device_for(task):
    return model_management.get_torch_device()


def torch_gc():
    # Release cached GPU memory through the shared memory-management layer.
    model_management.soft_empty_cache()


def torch_npu_set_device():
    # No-op stub kept for API compatibility.
    return


def enable_tf32():
    # No-op stub kept for API compatibility.
    return


# Devices and dtypes are resolved once, at import time, by model_management.
cpu: torch.device = torch.device("cpu")
fp8: bool = False
device: torch.device = model_management.get_torch_device()
device_interrogate: torch.device = model_management.text_encoder_device()  # for backward compatibility, not used now
device_gfpgan: torch.device = model_management.get_torch_device()  # will be managed by memory management system
device_esrgan: torch.device = model_management.get_torch_device()  # will be managed by memory management system
device_codeformer: torch.device = model_management.get_torch_device()  # will be managed by memory management system
dtype: torch.dtype = model_management.unet_dtype()
dtype_vae: torch.dtype = model_management.vae_dtype()
dtype_unet: torch.dtype = model_management.unet_dtype()
dtype_inference: torch.dtype = model_management.unet_dtype()
unet_needs_upcast = False


def cond_cast_unet(input):
    # Identity: explicit unet casting is not performed in this module.
    return input


def cond_cast_float(input):
    # Identity: explicit float casting is not performed in this module.
    return input


nv_rng = None
patch_module_list = []


def manual_cast_forward(target_dtype):
    # No-op stub kept for API compatibility.
    return


@contextlib.contextmanager
def manual_cast(target_dtype):
    # A @contextlib.contextmanager function must be a generator that yields
    # exactly once; a bare `return` would raise a TypeError on __enter__.
    yield


def autocast(disable=False):
    # Autocast is not managed here; hand back a do-nothing context manager.
    return contextlib.nullcontext()


def without_autocast(disable=False):
    return contextlib.nullcontext()


class NansException(Exception):
    pass


def test_for_nans(x, where):
    # NaN checking is disabled; NansException is kept for callers that catch it.
    return


def first_time_calculation():
    # No-op stub kept for API compatibility.
    return
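

# Illustrative usage from elsewhere in the webui (assumed call sites, not part
# of this module): tensors are allocated on `device` with `dtype`, and
# `torch_gc()` is called after generation to release cached VRAM, e.g.
#
#     from modules import devices
#     x = torch.zeros((1, 4, 64, 64), device=devices.device, dtype=devices.dtype)
#     devices.torch_gc()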