tune threshold based on more test devices for async mover
lllyasviel 2024-02-22 17:18:31 -08:00
parent 8283774b86
commit eacb14e115

@@ -437,8 +437,8 @@ def load_models_gpu(models, memory_required=0):
         if lowvram_available and (vram_set_state == VRAMState.LOW_VRAM or vram_set_state == VRAMState.NORMAL_VRAM):
             model_size = loaded_model.model_memory_required(torch_dev)
             current_free_mem = get_free_memory(torch_dev)
-            lowvram_model_memory = int(max(64 * (1024 * 1024), (current_free_mem - 1.5 * 1024 * (1024 * 1024)) / 1.3))
-            if model_size > (current_free_mem - inference_memory): #only switch to lowvram if really necessary
+            lowvram_model_memory = int(max(64 * (1024 * 1024), (current_free_mem - 2 * 1024 * (1024 * 1024)) / 1.3))
+            if model_size > (current_free_mem - inference_memory):
                 vram_set_state = VRAMState.LOW_VRAM
         else:
             lowvram_model_memory = 0
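
The tuned line sets the budget the async mover works with: free VRAM minus a fixed reserve (raised here from 1.5 GiB to 2 GiB), divided by 1.3, with a 64 MiB floor. A minimal sketch of that arithmetic for a hypothetical device reporting 8 GiB free (the helper name and numbers are illustrative, not from the repository):

MiB = 1024 * 1024
GiB = 1024 * MiB

def lowvram_budget(current_free_mem):
    # Mirrors the updated line: reserve 2 GiB of headroom (previously 1.5 GiB),
    # scale the remainder by 1/1.3, and never drop below a 64 MiB floor.
    return int(max(64 * MiB, (current_free_mem - 2 * GiB) / 1.3))

print(lowvram_budget(8 * GiB) // MiB)  # -> 4726 (MiB budget handed to the mover)

Under the old 1.5 GiB reserve the same device would get 5120 MiB, so the change trims roughly 394 MiB from the budget, leaving extra headroom on the additional test devices the commit message refers to.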