Merge pull request #15823 from drhead/patch-3
[Performance] Keep sigmas on CPU
AUTOMATIC1111 authored Jun 9, 2024
2 parents 57e6d05 + d52a1e1 commit 1d0bb39
Showing 2 changed files with 16 additions and 10 deletions.
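
Why keeping the schedule on the CPU helps: the sigma schedule is a short 1-D tensor whose entries the sampling loop consumes one scalar at a time, and converting entries of a GPU tensor to Python numbers forces a host/device synchronization on every access. The sketch below is not code from this commit; it is a simplified Euler-style loop with a hypothetical `model` callable, showing why a CPU-resident schedule mixes cleanly with GPU tensors:

import torch

def euler_loop_sketch(model, x, sigmas):
    # x lives on the GPU; sigmas is a small 1-D CPU tensor.
    # Indexing it yields 0-dim CPU tensors, which PyTorch accepts as
    # scalars in mixed-device arithmetic, so no per-step transfers or
    # synchronizations are required.
    for i in range(len(sigmas) - 1):
        denoised = model(x, sigmas[i])
        d = (x - denoised) / sigmas[i]           # CPU scalar / CUDA tensor: allowed
        x = x + d * (sigmas[i + 1] - sigmas[i])  # step size computed on the CPU
    return x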
2 changes: 1 addition & 1 deletion modules/sd_samplers_kdiffusion.py
@@ -115,7 +115,7 @@ def get_sigmas(self, p, steps):
         if scheduler.need_inner_model:
             sigmas_kwargs['inner_model'] = self.model_wrap
 
-        sigmas = scheduler.function(n=steps, **sigmas_kwargs, device=shared.device)
+        sigmas = scheduler.function(n=steps, **sigmas_kwargs)
 
         if discard_next_to_last_sigma:
             sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
24 changes: 15 additions & 9 deletions modules/sd_schedulers.py
@@ -8,6 +8,12 @@
 
 from modules import shared
 
+def to_d(x, sigma, denoised):
+    """Converts a denoiser output to a Karras ODE derivative."""
+    return (x - denoised) / sigma
+
+k_diffusion.sampling.to_d = to_d
+
 
 @dataclasses.dataclass
 class Scheduler:
     name: str
@@ -19,21 +25,21 @@ class Scheduler:
     aliases: list = None
 
 
-def uniform(n, sigma_min, sigma_max, inner_model, device):
+def uniform(n, sigma_min, sigma_max, inner_model):
     return inner_model.get_sigmas(n)
 
 
-def sgm_uniform(n, sigma_min, sigma_max, inner_model, device):
+def sgm_uniform(n, sigma_min, sigma_max, inner_model):
     start = inner_model.sigma_to_t(torch.tensor(sigma_max))
     end = inner_model.sigma_to_t(torch.tensor(sigma_min))
     sigs = [
         inner_model.t_to_sigma(ts)
         for ts in torch.linspace(start, end, n + 1)[:-1]
     ]
     sigs += [0.0]
-    return torch.FloatTensor(sigs).to(device)
+    return torch.FloatTensor(sigs)
 
-def get_align_your_steps_sigmas(n, sigma_min, sigma_max, device='cpu'):
+def get_align_your_steps_sigmas(n, sigma_min, sigma_max):
     # https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html
     def loglinear_interp(t_steps, num_steps):
         """
@@ -59,12 +65,12 @@ def loglinear_interp(t_steps, num_steps):
     else:
         sigmas.append(0.0)
 
-    return torch.FloatTensor(sigmas).to(device)
+    return torch.FloatTensor(sigmas)
 
-def kl_optimal(n, sigma_min, sigma_max, device):
-    alpha_min = torch.arctan(torch.tensor(sigma_min, device=device))
-    alpha_max = torch.arctan(torch.tensor(sigma_max, device=device))
-    step_indices = torch.arange(n + 1, device=device)
+def kl_optimal(n, sigma_min, sigma_max):
+    alpha_min = torch.arctan(torch.tensor(sigma_min))
+    alpha_max = torch.arctan(torch.tensor(sigma_max))
+    step_indices = torch.arange(n + 1)
     sigmas = torch.tan(step_indices / n * alpha_min + (1.0 - step_indices / n) * alpha_max)
     return sigmas

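A note on the to_d override in this file: upstream k_diffusion defines to_d as (x - denoised) / utils.append_dims(sigma, x.ndim), expanding sigma to a broadcastable 4-dim tensor. With the schedule now kept on the CPU that expansion would fail, since only 0-dim CPU tensors may mix with CUDA tensors in arithmetic; the override instead relies on plain scalar broadcasting. A small illustration (not part of the commit; assumes a CUDA device is available):

import torch

x = torch.randn(2, 4, 64, 64, device="cuda")
sigma = torch.tensor(14.6146)        # 0-dim CPU tensor, as sigmas[i] now is

d = (x - x * 0.5) / sigma            # fine: 0-dim CPU tensors act as scalars
print(d.device)                      # cuda:0

sigma4 = sigma.reshape(1, 1, 1, 1)   # roughly what append_dims would produce
# (x - x * 0.5) / sigma4             # RuntimeError: expected all tensors to be
                                     # on the same device

The kl_optimal rewrite is behavior-preserving apart from device placement: tan(arctan(s)) = s, so the schedule still runs from sigma_max at step 0 down to sigma_min at step n.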
