diff --git a/comfy/utils.py b/comfy/utils.py
index 09e05d4..0f7b345 100644
--- a/comfy/utils.py
+++ b/comfy/utils.py
@@ -46,6 +46,65 @@ def transformers_convert(sd, prefix_from, prefix_to, number):
             sd[k_to] = weights[shape_from*x:shape_from*(x + 1)]
     return sd
 
+#slow and inefficient, should be optimized
+def bislerp(samples, width, height):
+    shape = list(samples.shape)
+    width_scale = (shape[3]) / (width)
+    height_scale = (shape[2]) / (height)
+
+    shape[3] = width
+    shape[2] = height
+    out1 = torch.empty(shape, dtype=samples.dtype, layout=samples.layout, device=samples.device)
+
+    def algorithm(in1, w1, in2, w2):
+        dims = in1.shape
+        val = w2
+
+        #flatten to batches
+        low = in1.reshape(dims[0], -1)
+        high = in2.reshape(dims[0], -1)
+
+        low_norm = low/torch.norm(low, dim=1, keepdim=True)
+        high_norm = high/torch.norm(high, dim=1, keepdim=True)
+
+        # in case we divide by zero
+        low_norm[low_norm != low_norm] = 0.0
+        high_norm[high_norm != high_norm] = 0.0
+
+        omega = torch.acos((low_norm*high_norm).sum(1))
+        so = torch.sin(omega)
+        res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
+        return res.reshape(dims)
+
+    for x_dest in range(shape[3]):
+        for y_dest in range(shape[2]):
+            y = (y_dest) * height_scale
+            x = (x_dest) * width_scale
+
+            x1 = max(math.floor(x), 0)
+            x2 = min(x1 + 1, samples.shape[3] - 1)
+            y1 = max(math.floor(y), 0)
+            y2 = min(y1 + 1, samples.shape[2] - 1)
+
+            in1 = samples[:,:,y1,x1]
+            in2 = samples[:,:,y1,x2]
+            in3 = samples[:,:,y2,x1]
+            in4 = samples[:,:,y2,x2]
+
+            if (x1 == x2) and (y1 == y2):
+                out_value = in1
+            elif (x1 == x2):
+                out_value = algorithm(in1, (y2 - y), in3, (y - y1))
+            elif (y1 == y2):
+                out_value = algorithm(in1, (x2 - x), in2, (x - x1))
+            else:
+                o1 = algorithm(in1, (x2 - x), in2, (x - x1))
+                o2 = algorithm(in3, (x2 - x), in4, (x - x1))
+                out_value = algorithm(o1, (y2 - y), o2, (y - y1))
+
+            out1[:,:,y_dest,x_dest] = out_value
+    return out1
+
 def common_upscale(samples, width, height, upscale_method, crop):
     if crop == "center":
         old_width = samples.shape[3]
@@ -61,7 +120,11 @@ def common_upscale(samples, width, height, upscale_method, crop):
         s = samples[:,:,y:old_height-y,x:old_width-x]
     else:
         s = samples
-    return torch.nn.functional.interpolate(s, size=(height, width), mode=upscale_method)
+
+    if upscale_method == "bislerp":
+        return bislerp(s, width, height)
+    else:
+        return torch.nn.functional.interpolate(s, size=(height, width), mode=upscale_method)
 
 def get_tiled_scale_steps(width, height, tile_x, tile_y, overlap):
     return math.ceil((height / (tile_y - overlap))) * math.ceil((width / (tile_x - overlap)))
diff --git a/nodes.py b/nodes.py
index bae330b..e5cec26 100644
--- a/nodes.py
+++ b/nodes.py
@@ -749,7 +749,7 @@ class RepeatLatentBatch:
         return (s,)
 
 class LatentUpscale:
-    upscale_methods = ["nearest-exact", "bilinear", "area"]
+    upscale_methods = ["nearest-exact", "bilinear", "area", "bislerp"]
     crop_methods = ["disabled", "center"]
 
     @classmethod
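
For reference, a minimal sketch of how the new "bislerp" path can be exercised directly, outside the node graph. This assumes a ComfyUI checkout where comfy.utils is importable; the 1x4x64x64 tensor is a made-up stand-in for a latent batch in the NCHW layout that bislerp indexes (samples[:,:,y,x]).

# Hypothetical usage sketch, not part of the diff.
import torch
from comfy.utils import common_upscale

# Stand-in latent batch: (batch, channels, height, width)
latent = torch.randn(1, 4, 64, 64)

# The new branch in common_upscale() routes "bislerp" to bislerp();
# "disabled" skips the center-crop path.
upscaled = common_upscale(latent, 128, 128, "bislerp", "disabled")
print(upscaled.shape)  # torch.Size([1, 4, 128, 128])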