From b8ffb2937f9daeaead6e9225f8f5d1dde6afc577 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 12 Aug 2024 15:03:33 -0400
Subject: [PATCH] Memory tweaks.

---
 comfy/model_management.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 686f124..fdf3308 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -438,11 +438,11 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
     global vram_state

     inference_memory = minimum_inference_memory()
-    extra_mem = max(inference_memory, memory_required) + 100 * 1024 * 1024
+    extra_mem = max(inference_memory, memory_required + 300 * 1024 * 1024)
     if minimum_memory_required is None:
         minimum_memory_required = extra_mem
     else:
-        minimum_memory_required = max(inference_memory, minimum_memory_required) + 100 * 1024 * 1024
+        minimum_memory_required = max(inference_memory, minimum_memory_required + 300 * 1024 * 1024)

     models = set(models)