@@ -998,7 +998,7 @@ def sample_deis(model, x, sigmas, extra_args=None, callback=None, disable=None,
     return x_next
 
 @torch.no_grad()
-def sample_euler_pp(model, x, sigmas, extra_args=None, callback=None, disable=None):
+def sample_euler_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None):
     extra_args = {} if extra_args is None else extra_args
 
     temp = [0]
@@ -1013,16 +1013,16 @@ def sample_euler_pp(model, x, sigmas, extra_args=None, callback=None, disable=No
     for i in trange(len(sigmas) - 1, disable=disable):
         sigma_hat = sigmas[i]
         denoised = model(x, sigma_hat * s_in, **extra_args)
-        d = to_d(x - denoised + temp[0], sigma_hat, denoised)
+        d = to_d(x, sigma_hat, temp[0])
         if callback is not None:
             callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
         dt = sigmas[i + 1] - sigma_hat
         # Euler method
-        x = x + d * dt
+        x = denoised + d * sigmas[i + 1]
     return x
 
 @torch.no_grad()
-def sample_euler_ancestral_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
+def sample_euler_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
     """Ancestral sampling with Euler method steps."""
     extra_args = {} if extra_args is None else extra_args
     noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
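For context, the non-ancestral change swaps both the derivative source and the update rule: `d` is now computed against the prediction stored in `temp[0]` (presumably the uncond/post-CFG output set up earlier in these samplers, outside this hunk), and the step is re-anchored on `denoised` instead of being integrated from `x`. Below is a minimal sketch of the removed and added update rules, assuming k-diffusion's `to_d(x, sigma, denoised) = (x - denoised) / sigma` (broadcasting details omitted); `uncond_denoised` stands in for `temp[0]`, and the helper names are illustrative, not part of the patch.

```python
import torch

def to_d(x, sigma, denoised):
    # k-diffusion's helper: turn a denoised prediction into an ODE derivative.
    return (x - denoised) / sigma

def euler_step_old(x, sigma, sigma_next, denoised, uncond_denoised):
    # Removed form: derivative built from a shifted input, integrated with an
    # explicit dt (mirrors the "-" lines above).
    d = to_d(x - denoised + uncond_denoised, sigma, denoised)
    return x + d * (sigma_next - sigma)

def euler_step_cfg_pp(x, sigma, sigma_next, denoised, uncond_denoised):
    # Added form: derivative measured against the uncond prediction, step
    # re-anchored on the CFG-combined denoised estimate (mirrors the "+" lines).
    d = to_d(x, sigma, uncond_denoised)
    return denoised + d * sigma_next
```

Note that `dt = sigmas[i + 1] - sigma_hat` survives as a context line but is no longer consumed: the added form reaches `sigmas[i + 1]` directly via the `denoised + d * sigmas[i + 1]` anchoring. If `temp[0]` were equal to `denoised`, the added form would reduce to the ordinary Euler update, since `denoised + (x - denoised) / sigma * sigma_next == x + (x - denoised) / sigma * (sigma_next - sigma)`.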
@@ -1041,10 +1041,10 @@ def sample_euler_ancestral_pp(model, x, sigmas, extra_args=None, callback=None,
         sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
         if callback is not None:
             callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
-        d = to_d(x - denoised + temp[0], sigmas[i], denoised)
+        d = to_d(x, sigmas[i], temp[0])
         # Euler method
         dt = sigma_down - sigmas[i]
-        x = x + d * dt
+        x = denoised + d * sigma_down
         if sigmas[i + 1] > 0:
             x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
     return x
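The ancestral variant applies the same substitution, but steps to `sigma_down` and then re-injects noise scaled by `sigma_up`. A sketch of one step under the same assumptions (hypothetical helper name, `to_d` inlined as above, `torch.randn_like` standing in for the sampler's `noise_sampler`):

```python
import torch

def euler_ancestral_step_cfg_pp(x, sigma, sigma_next, sigma_down, sigma_up,
                                denoised, uncond_denoised, s_noise=1.0):
    # Deterministic part: derivative against the uncond prediction, update
    # re-anchored on denoised, targeting sigma_down (mirrors the "+" lines).
    d = (x - uncond_denoised) / sigma   # to_d(x, sigma, uncond_denoised)
    x = denoised + d * sigma_down
    # Stochastic part: add sigma_up worth of fresh noise, skipped at the final
    # step, mirroring the `if sigmas[i + 1] > 0` guard in the sampler.
    if sigma_next > 0:
        x = x + torch.randn_like(x) * s_noise * sigma_up
    return x
```

As in the non-ancestral sampler, the context line `dt = sigma_down - sigmas[i]` remains in place but is unused by the new `denoised + d * sigma_down` update.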