From cb7c4b4be3b3ed0602c5d68d06a14c5d8d4f6f45 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Wed, 7 Aug 2024 14:30:54 -0400
Subject: [PATCH] Workaround for lora OOM on lowvram mode.

---
 comfy/model_patcher.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 430b598..1ef4930 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -348,8 +348,8 @@ class ModelPatcher:
                 m.comfy_cast_weights = True
             else:
                 if hasattr(m, "weight"):
-                    self.patch_weight_to_device(weight_key, device_to)
-                    self.patch_weight_to_device(bias_key, device_to)
+                    self.patch_weight_to_device(weight_key) #TODO: speed this up without causing OOM
+                    self.patch_weight_to_device(bias_key)
                     m.to(device_to)
                     mem_counter += comfy.model_management.module_size(m)
                     logging.debug("lowvram: loaded module regularly {} {}".format(n, m))