From 3e5ea74ad356e849ea27f1d766a7b6d90a5acfda Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 24 Nov 2023 03:55:35 -0500
Subject: [PATCH] Make buggy xformers fall back on pytorch attention.

---
 comfy/ldm/modules/attention.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 947e200..d511dda 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -278,9 +278,20 @@ def attention_split(q, k, v, heads, mask=None):
     )
     return r1
 
+BROKEN_XFORMERS = False
+try:
+    x_vers = xformers.__version__
+    #I think 0.0.23 is also broken (q with bs bigger than 65535 gives CUDA error)
+    BROKEN_XFORMERS = x_vers.startswith("0.0.21") or x_vers.startswith("0.0.22") or x_vers.startswith("0.0.23")
+except:
+    pass
+
 def attention_xformers(q, k, v, heads, mask=None):
     b, _, dim_head = q.shape
     dim_head //= heads
+    if BROKEN_XFORMERS:
+        if b * heads > 65535:
+            return attention_pytorch(q, k, v, heads, mask)
     q, k, v = map(
         lambda t: t.unsqueeze(3)
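
Note: the snippet below is a minimal, self-contained sketch of the same fallback idea, not the upstream ComfyUI code. It assumes q, k and v are (batch, sequence, heads * dim_head) tensors, as suggested by the reshapes visible in the diff, and the attention_pytorch body shown here (built on torch.nn.functional.scaled_dot_product_attention) is a plausible stand-in for the real function referenced by the patch.

import torch

try:
    import xformers
    import xformers.ops
    XFORMERS_AVAILABLE = True
except ImportError:
    XFORMERS_AVAILABLE = False

# Versions 0.0.21-0.0.23 reportedly hit a CUDA error once the effective
# batch (batch * heads) exceeds 65535, so flag them as broken.
BROKEN_XFORMERS = False
if XFORMERS_AVAILABLE:
    BROKEN_XFORMERS = xformers.__version__.startswith(("0.0.21", "0.0.22", "0.0.23"))


def attention_pytorch(q, k, v, heads, mask=None):
    # Hypothetical fallback path using PyTorch's built-in SDPA kernel
    # (an assumption about how the upstream fallback is implemented).
    b, _, inner = q.shape
    dim_head = inner // heads
    q, k, v = (t.reshape(b, -1, heads, dim_head).transpose(1, 2) for t in (q, k, v))
    out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask)
    return out.transpose(1, 2).reshape(b, -1, heads * dim_head)


def attention_xformers(q, k, v, heads, mask=None):
    b, _, inner = q.shape
    dim_head = inner // heads
    # The buggy versions crash when b * heads is too large, so route those
    # calls to the PyTorch implementation instead of xformers.
    if BROKEN_XFORMERS and b * heads > 65535:
        return attention_pytorch(q, k, v, heads, mask)
    q, k, v = (t.reshape(b, -1, heads, dim_head) for t in (q, k, v))
    out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=mask)
    return out.reshape(b, -1, heads * dim_head)

The threshold mirrors the 65535 limit mentioned in the patch comment; on unaffected xformers versions the check is a no-op, so the fast path is unchanged.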