@@ -94,6 +94,7 @@ class BaseModel(torch.nn.Module):
         self.concat_keys = ()
         logging.info("model_type {}".format(model_type.name))
         logging.debug("adm {}".format(self.adm_channels))
+        self.memory_usage_factor = model_config.memory_usage_factor

     def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, transformer_options={}, **kwargs):
         sigma = t
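
The hunk above threads a per-model scaling factor through BaseModel.__init__ so the generic memory estimator (next hunk) can be tuned per architecture instead of via per-model overrides. A minimal sketch of the wiring, assuming only what the diff shows; MockConfig and MockModel are hypothetical stand-ins for the real config and model classes:

# Sketch, not ComfyUI code: the one assumption taken from the diff is that
# every model_config now exposes a memory_usage_factor attribute.
class MockConfig:
    memory_usage_factor = 2.8  # e.g. a heavier transformer backbone

class MockModel:
    def __init__(self, model_config):
        # mirrors the line added in the hunk above
        self.memory_usage_factor = model_config.memory_usage_factor

print(MockModel(MockConfig()).memory_usage_factor)  # 2.8
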
@@ -252,11 +253,11 @@ class BaseModel(torch.nn.Module):
                 dtype = self.manual_cast_dtype
             #TODO: this needs to be tweaked
             area = input_shape[0] * math.prod(input_shape[2:])
-            return (area * comfy.model_management.dtype_size(dtype) * 0.01) * (1024 * 1024)
+            return (area * comfy.model_management.dtype_size(dtype) * 0.01 * self.memory_usage_factor) * (1024 * 1024)
         else:
             #TODO: this formula might be too aggressive since I tweaked the sub-quad and split algorithms to use less memory.
             area = input_shape[0] * math.prod(input_shape[2:])
-            return (area * 0.3) * (1024 * 1024)
+            return (area * 0.15 * self.memory_usage_factor) * (1024 * 1024)


 def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0, seed=None):
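
With the factor stored on the model, BaseModel.memory_required becomes the shared estimator: batch size times spatial area, scaled by dtype width and the per-model factor on the flash-attention path, or by a flat 0.15 otherwise. A standalone re-derivation for review; the constants come from the hunk above, while the example shape and factor of 1.0 are placeholders:

import math

def estimated_vram_bytes(input_shape, dtype_bytes, memory_usage_factor, flash_attention=True):
    area = input_shape[0] * math.prod(input_shape[2:])  # batch * H * W; channels ignored
    if flash_attention:
        return area * dtype_bytes * 0.01 * memory_usage_factor * (1024 * 1024)
    return area * 0.15 * memory_usage_factor * (1024 * 1024)

# (1, 4, 128, 128) is a 1024x1024 SDXL-style latent in fp16.
est = estimated_vram_bytes((1, 4, 128, 128), dtype_bytes=2, memory_usage_factor=1.0)
print(f"{est / 2**20:.0f} MiB")  # ~328 MiB
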
@@ -354,6 +355,7 @@ class SDXL(BaseModel):
         flat = torch.flatten(torch.cat(out)).unsqueeze(dim=0).repeat(clip_pooled.shape[0], 1)
         return torch.cat((clip_pooled.to(flat.device), flat), dim=1)

 class SVD_img2vid(BaseModel):
     def __init__(self, model_config, model_type=ModelType.V_PREDICTION_EDM, device=None):
         super().__init__(model_config, model_type, device=device)
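
For orientation, the SDXL context lines above pack the micro-conditioning into the ADM vector: the per-condition embeddings in out are concatenated, flattened to a single row, repeated across the batch, and joined to the pooled CLIP vector. A shape sketch with dummy tensors; six 256-wide embeddings plus the 1280-wide pooled vector give SDXL's 2816 adm_channels:

import torch

out = [torch.randn(1, 256) for _ in range(6)]  # height/width/crop/target embeddings
clip_pooled = torch.randn(2, 1280)             # pooled CLIP output, batch of 2

flat = torch.flatten(torch.cat(out)).unsqueeze(dim=0).repeat(clip_pooled.shape[0], 1)
adm = torch.cat((clip_pooled.to(flat.device), flat), dim=1)
print(adm.shape)  # torch.Size([2, 2816]) == 1280 + 6 * 256
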
@@ -594,17 +596,6 @@ class SD3(BaseModel):
         out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn)
         return out

-    def memory_required(self, input_shape):
-        if comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention():
-            dtype = self.get_dtype()
-            if self.manual_cast_dtype is not None:
-                dtype = self.manual_cast_dtype
-            #TODO: this probably needs to be tweaked
-            area = input_shape[0] * input_shape[2] * input_shape[3]
-            return (area * comfy.model_management.dtype_size(dtype) * 0.012) * (1024 * 1024)
-        else:
-            area = input_shape[0] * input_shape[2] * input_shape[3]
-            return (area * 0.3) * (1024 * 1024)

 class AuraFlow(BaseModel):
     def __init__(self, model_config, model_type=ModelType.FLOW, device=None):
@@ -702,15 +693,3 @@ class Flux(BaseModel):
         out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn)
         out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([kwargs.get("guidance", 3.5)]))
         return out
-
-    def memory_required(self, input_shape):
-        if comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention():
-            dtype = self.get_dtype()
-            if self.manual_cast_dtype is not None:
-                dtype = self.manual_cast_dtype
-            #TODO: this probably needs to be tweaked
-            area = input_shape[0] * input_shape[2] * input_shape[3]
-            return (area * comfy.model_management.dtype_size(dtype) * 0.026) * (1024 * 1024)
-        else:
-            area = input_shape[0] * input_shape[2] * input_shape[3]
-            return (area * 0.3) * (1024 * 1024)
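
The SD3 and Flux hunks delete their bespoke memory_required overrides, so both now inherit the BaseModel estimator. Their old hard-coded constants (0.012 and 0.026) remain recoverable through the new factor; a back-of-envelope check, with factors of 1.2 and 2.6 implied by the arithmetic rather than shown in this diff:

# fp16, one 1024x1024-sized latent: batch * H * W = 16384 (channels ignored).
area, dtype_bytes = 1 * 128 * 128, 2
for name, old_const, factor in [("SD3", 0.012, 1.2), ("Flux", 0.026, 2.6)]:
    old = area * dtype_bytes * old_const * (1024 * 1024)
    new = area * dtype_bytes * 0.01 * factor * (1024 * 1024)
    print(name, round(old / 2**20, 1), round(new / 2**20, 1))  # pairs match: 393.2 / 852.0
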