@@ -3,9 +3,9 @@ import torch
 import os
 
 class SDXLClipG(sd1_clip.SD1ClipModel):
-    def __init__(self, device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None):
+    def __init__(self, device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, textmodel_path=None):
         textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_config_bigg.json")
-        super().__init__(device=device, freeze=freeze, textmodel_json_config=textmodel_json_config)
+        super().__init__(device=device, freeze=freeze, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path)
         self.empty_tokens = [[49406] + [49407] + [0] * 75]
         self.text_projection = torch.nn.Parameter(torch.empty(1280, 1280))
         self.layer_norm_hidden_state = False
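
For reference, a minimal usage sketch of the changed constructor, not part of the diff. It assumes ComfyUI's comfy package is importable and that sd1_clip.SD1ClipModel accepts the textmodel_path keyword, as the updated super().__init__ call implies; the weights path shown is a hypothetical placeholder.

# Usage sketch only: illustrates the new textmodel_path keyword being
# forwarded to sd1_clip.SD1ClipModel. Import path and weights path are
# illustrative assumptions, not taken from the diff.
from comfy.sdxl_clip import SDXLClipG

# With textmodel_path left as None (the default) behaviour is unchanged:
# the text model is still built from clip_config_bigg.json.
clip_g = SDXLClipG(device="cpu")

# Passing a local path presumably lets the parent class load pretrained
# text-model weights from that location instead of the JSON config alone.
clip_g_local = SDXLClipG(device="cpu", textmodel_path="/path/to/clip_g_text_model")  # hypothetical path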