
Commit 2320793 — Merge branch 'comfyanonymous:master' into deon-mac

deonblaauw authored Jan 4, 2025
2 parents 4d9a850 + caa6476
Showing 88 changed files with 223,138 additions and 181,855 deletions.
.github/workflows/windows_release_nightly_pytorch.yml — 6 changes: 3 additions & 3 deletions

@@ -7,19 +7,19 @@ on:
       description: 'cuda version'
       required: true
       type: string
-      default: "124"
+      default: "126"

     python_minor:
       description: 'python minor version'
       required: true
       type: string
-      default: "12"
+      default: "13"

     python_patch:
       description: 'python patch version'
       required: true
       type: string
-      default: "4"
+      default: "1"
 # push:
 #   branches:
 #     - master
api_server/services/terminal_service.py — 2 changes: 1 addition & 1 deletion

@@ -28,7 +28,7 @@ def update_size(self):

         if columns != self.cols:
             self.cols = columns
-            changed = True 
+            changed = True

         if lines != self.rows:
             self.rows = lines
app/custom_node_manager.py — 2 changes: 1 addition & 1 deletion

@@ -10,7 +10,7 @@ class CustomNodeManager:
     Placeholder to refactor the custom node management features from ComfyUI-Manager.
     Currently it only contains the custom workflow templates feature.
     """
-    def add_routes(self, routes, webapp, loadedModules): 
+    def add_routes(self, routes, webapp, loadedModules):

         @routes.get("/workflow_templates")
         async def get_workflow_templates(request):
comfy/comfy_types/README.md — 2 changes: 1 addition & 1 deletion

@@ -5,7 +5,7 @@ This module provides type hinting and concrete convenience types for node develo
 If cloned to the custom_nodes directory of ComfyUI, types can be imported using:

 ```python
-from comfy_types import IO, ComfyNodeABC, CheckLazyMixin
+from comfy.comfy_types import IO, ComfyNodeABC, CheckLazyMixin

 class ExampleNode(ComfyNodeABC):
     @classmethod
comfy/comfy_types/examples/example_nodes.py — 6 changes: 3 additions & 3 deletions

@@ -1,12 +1,12 @@
-from comfy_types import IO, ComfyNodeABC, InputTypeDict
+from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict
 from inspect import cleandoc


 class ExampleNode(ComfyNodeABC):
     """An example node that just adds 1 to an input integer.
-    * Requires an IDE configured with analysis paths etc to be worth looking at.
-    * Not intended for use in ComfyUI.
+    * Requires a modern IDE to provide any benefit (detail: an IDE configured with analysis paths etc).
+    * This node is intended as an example for developers only.
     """

     DESCRIPTION = cleandoc(__doc__)
comfy/extra_samplers/uni_pc.py — 18 changes: 9 additions & 9 deletions

@@ -226,7 +226,7 @@ def model_wrapper(
         The input `model` has the following format:
         ``
             model(x, t_input, **model_kwargs) -> noise | x_start | v | score
-        ``
+        ``
         The input `classifier_fn` has the following format:
         ``
@@ -240,7 +240,7 @@ def model_wrapper(
         The input `model` has the following format:
         ``
             model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
-        ``
+        ``
         And if cond == `unconditional_condition`, the model output is the unconditional DPM output.

             [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
@@ -254,7 +254,7 @@
         ``
         def model_fn(x, t_continuous) -> noise:
             t_input = get_model_input_time(t_continuous)
-            return noise_pred(model, x, t_input, **model_kwargs)
+            return noise_pred(model, x, t_input, **model_kwargs)
         ``
         where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
@@ -359,7 +359,7 @@ def __init__(
         max_val=1.,
         variant='bh1',
     ):
-        """Construct a UniPC.
+        """Construct a UniPC.
         We support both data_prediction and noise_prediction.
         """
@@ -372,7 +372,7 @@

     def dynamic_thresholding_fn(self, x0, t=None):
         """
-        The dynamic thresholding method.
+        The dynamic thresholding method.
         """
         dims = x0.dim()
         p = self.dynamic_thresholding_ratio
@@ -404,7 +404,7 @@ def data_prediction_fn(self, x, t):

     def model_fn(self, x, t):
         """
-        Convert the model to the noise prediction model or the data prediction model.
+        Convert the model to the noise prediction model or the data prediction model.
         """
         if self.predict_x0:
             return self.data_prediction_fn(x, t)
@@ -461,7 +461,7 @@ def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type

     def denoise_to_zero_fn(self, x, s):
         """
-        Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization.
+        Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization.
         """
         return self.data_prediction_fn(x, s)

@@ -510,7 +510,7 @@ def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order
         col = torch.ones_like(rks)
         for k in range(1, K + 1):
             C.append(col)
-            col = col * rks / (k + 1)
+            col = col * rks / (k + 1)
         C = torch.stack(C, dim=1)

         if len(D1s) > 0:
@@ -626,7 +626,7 @@ def multistep_uni_pc_bh_update(self, x, model_prev_list, t_prev_list, t, order,
             R.append(torch.pow(rks, i - 1))
             b.append(h_phi_k * factorial_i / B_h)
             factorial_i *= (i + 1)
-            h_phi_k = h_phi_k / hh - 1 / factorial_i
+            h_phi_k = h_phi_k / hh - 1 / factorial_i

         R = torch.stack(R)
         b = torch.tensor(b, device=x.device)
comfy/ldm/cascade/stage_b.py — 4 changes: 2 additions & 2 deletions

@@ -138,7 +138,7 @@ def get_block(block_type, c_hidden, nhead, c_skip=0, dropout=0, self_attn=True):
         # nn.init.normal_(self.pixels_mapper[2].weight, std=0.02) # conditionings
         # torch.nn.init.xavier_uniform_(self.embedding[1].weight, 0.02) # inputs
         # nn.init.constant_(self.clf[1].weight, 0) # outputs
-        #
+        #
         # # blocks
         # for level_block in self.down_blocks + self.up_blocks:
         #     for block in level_block:
@@ -148,7 +148,7 @@ def get_block(block_type, c_hidden, nhead, c_skip=0, dropout=0, self_attn=True):
         #     for layer in block.modules():
         #         if isinstance(layer, nn.Linear):
         #             nn.init.constant_(layer.weight, 0)
-        #
+        #
         # def _init_weights(self, m):
         #     if isinstance(m, (nn.Conv2d, nn.Linear)):
         #         torch.nn.init.xavier_uniform_(m.weight)
comfy/ldm/cascade/stage_c.py — 4 changes: 2 additions & 2 deletions

@@ -142,7 +142,7 @@ def get_block(block_type, c_hidden, nhead, c_skip=0, dropout=0, self_attn=True):
         # nn.init.normal_(self.clip_img_mapper.weight, std=0.02) # conditionings
         # torch.nn.init.xavier_uniform_(self.embedding[1].weight, 0.02) # inputs
         # nn.init.constant_(self.clf[1].weight, 0) # outputs
-        #
+        #
         # # blocks
         # for level_block in self.down_blocks + self.up_blocks:
         #     for block in level_block:
@@ -152,7 +152,7 @@ def get_block(block_type, c_hidden, nhead, c_skip=0, dropout=0, self_attn=True):
         #     for layer in block.modules():
         #         if isinstance(layer, nn.Linear):
         #             nn.init.constant_(layer.weight, 0)
-        #
+        #
         # def _init_weights(self, m):
         #     if isinstance(m, (nn.Conv2d, nn.Linear)):
         #         torch.nn.init.xavier_uniform_(m.weight)
comfy/ldm/flux/model.py — 2 changes: 1 addition & 1 deletion

@@ -168,7 +168,7 @@ def block_wrap(args):
                 out = blocks_replace[("single_block", i)]({"img": img,
                                                            "vec": vec,
                                                            "pe": pe,
-                                                            "attn_mask": attn_mask},
+                                                           "attn_mask": attn_mask},
                                                           {"original_block": block_wrap})
                 img = out["img"]
             else:
comfy/ldm/hydit/attn_layers.py — 2 changes: 1 addition & 1 deletion

@@ -159,7 +159,7 @@ def forward(self, x, y, freqs_cis_img=None):

         q = q.transpose(-2, -3).contiguous()    # q ->  B, L1, H, C - B, H, L1, C
         k = k.transpose(-2, -3).contiguous()    # k ->  B, L2, H, C - B, H, C, L2
-        v = v.transpose(-2, -3).contiguous()
+        v = v.transpose(-2, -3).contiguous()

         context = optimized_attention(q, k, v, self.num_heads, skip_reshape=True, attn_precision=self.attn_precision)
comfy/ldm/modules/sub_quadratic_attention.py — 6 changes: 3 additions & 3 deletions

@@ -17,10 +17,10 @@
 import logging

 try:
-    from typing import Optional, NamedTuple, List, Protocol
+    from typing import Optional, NamedTuple, List, Protocol
 except ImportError:
-    from typing import Optional, NamedTuple, List
-    from typing_extensions import Protocol
+    from typing import Optional, NamedTuple, List
+    from typing_extensions import Protocol

 from typing import List
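Note on the hunk above: `typing.Protocol` only exists on Python 3.8+ (PEP 544), so the `try`/`except ImportError` falls back to the `typing_extensions` backport on older interpreters; the diff itself is whitespace-only. The same pattern in isolation, as a minimal sketch:

```python
try:
    from typing import Protocol  # available on Python 3.8+
except ImportError:
    from typing_extensions import Protocol  # backport for older interpreters
```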
comfy/model_base.py — 2 changes: 1 addition & 1 deletion

@@ -787,7 +787,7 @@ def extra_conds(self, **kwargs):
         cross_attn = kwargs.get("cross_attn", None)
         if cross_attn is not None:
             out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn)
-        # upscale the attention mask, since now we
+        # upscale the attention mask, since now we
         attention_mask = kwargs.get("attention_mask", None)
         if attention_mask is not None:
             shape = kwargs["noise"].shape
comfy/model_detection.py — 2 changes: 1 addition & 1 deletion

@@ -576,7 +576,7 @@ def unet_config_from_diffusers_unet(state_dict, dtype=None):
         'dtype': dtype, 'in_channels': 9, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0],
         'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': False, 'context_dim': 768, 'num_heads': 8,
         'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
-        'use_temporal_attention': False, 'use_temporal_resblock': False}
+        'use_temporal_attention': False, 'use_temporal_resblock': False}


 supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint, SSD_1B, Segmind_Vega, KOALA_700M, KOALA_1B, SD09_XS, SD_XS, SDXL_diffusers_ip2p, SD15_diffusers_inpaint]
comfy/model_management.py — 5 changes: 2 additions & 3 deletions

@@ -1121,9 +1121,8 @@ def soft_empty_cache(force=False):
     elif is_ascend_npu():
         torch.npu.empty_cache()
     elif torch.cuda.is_available():
-        if force or is_nvidia(): #This seems to make things worse on ROCm so I only do it for cuda
-            torch.cuda.empty_cache()
-            torch.cuda.ipc_collect()
+        torch.cuda.empty_cache()
+        torch.cuda.ipc_collect()

 def unload_all_models():
     free_memory(1e30, get_torch_device())
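This hunk removes the `is_nvidia()` guard (its comment worried that emptying the cache hurt ROCm), so the CUDA branch now always frees cached memory; ROCm builds of PyTorch expose HIP devices through the same `torch.cuda` API, so the calls apply there too. A standalone sketch of the resulting behavior, assuming nothing beyond stock PyTorch:

```python
import torch

def soft_empty_cache() -> None:
    # Mirror of the merged CUDA branch: release cached allocator blocks
    # and reclaim CUDA IPC memory whenever a CUDA device is present.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
```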
comfy/samplers.py — 57 changes: 32 additions & 25 deletions

@@ -1,12 +1,13 @@
 from __future__ import annotations
 from .k_diffusion import sampling as k_diffusion_sampling
 from .extra_samplers import uni_pc
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Callable, NamedTuple
 if TYPE_CHECKING:
     from comfy.model_patcher import ModelPatcher
     from comfy.model_base import BaseModel
     from comfy.controlnet import ControlBase
 import torch
+from functools import partial
 import collections
 from comfy import model_management
 import math
@@ -224,7 +225,7 @@ def _calc_cond_batch(model: 'BaseModel', conds: list[list[dict]], x_in: torch.Te
             default_conds.append(default_c)

     if has_default_conds:
-        finalize_default_conds(model, hooked_to_run, default_conds, x_in, timestep)
+        finalize_default_conds(model, hooked_to_run, default_conds, x_in, timestep, model_options)

     model.current_patcher.prepare_state(timestep)
@@ -920,31 +921,37 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model
     return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed)


-SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "beta", "linear_quadratic", "kl_optimal"]
 SAMPLER_NAMES = KSAMPLER_NAMES + ["ddim", "uni_pc", "uni_pc_bh2"]

-def calculate_sigmas(model_sampling, scheduler_name, steps):
-    if scheduler_name == "karras":
-        sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=float(model_sampling.sigma_min), sigma_max=float(model_sampling.sigma_max))
-    elif scheduler_name == "exponential":
-        sigmas = k_diffusion_sampling.get_sigmas_exponential(n=steps, sigma_min=float(model_sampling.sigma_min), sigma_max=float(model_sampling.sigma_max))
-    elif scheduler_name == "normal":
-        sigmas = normal_scheduler(model_sampling, steps)
-    elif scheduler_name == "simple":
-        sigmas = simple_scheduler(model_sampling, steps)
-    elif scheduler_name == "ddim_uniform":
-        sigmas = ddim_scheduler(model_sampling, steps)
-    elif scheduler_name == "sgm_uniform":
-        sigmas = normal_scheduler(model_sampling, steps, sgm=True)
-    elif scheduler_name == "beta":
-        sigmas = beta_scheduler(model_sampling, steps)
-    elif scheduler_name == "linear_quadratic":
-        sigmas = linear_quadratic_schedule(model_sampling, steps)
-    elif scheduler_name == "kl_optimal":
-        sigmas = kl_optimal_scheduler(n=steps, sigma_min=float(model_sampling.sigma_min), sigma_max=float(model_sampling.sigma_max))
-    else:
-        logging.error("error invalid scheduler {}".format(scheduler_name))
-    return sigmas
+class SchedulerHandler(NamedTuple):
+    handler: Callable[..., torch.Tensor]
+    # Boolean indicates whether to call the handler like:
+    #  scheduler_function(model_sampling, steps) or
+    #  scheduler_function(n, sigma_min: float, sigma_max: float)
+    use_ms: bool = True
+
+SCHEDULER_HANDLERS = {
+    "normal": SchedulerHandler(normal_scheduler),
+    "karras": SchedulerHandler(k_diffusion_sampling.get_sigmas_karras, use_ms=False),
+    "exponential": SchedulerHandler(k_diffusion_sampling.get_sigmas_exponential, use_ms=False),
+    "sgm_uniform": SchedulerHandler(partial(normal_scheduler, sgm=True)),
+    "simple": SchedulerHandler(simple_scheduler),
+    "ddim_uniform": SchedulerHandler(ddim_scheduler),
+    "beta": SchedulerHandler(beta_scheduler),
+    "linear_quadratic": SchedulerHandler(linear_quadratic_schedule),
+    "kl_optimal": SchedulerHandler(kl_optimal_scheduler, use_ms=False),
+}
+SCHEDULER_NAMES = list(SCHEDULER_HANDLERS)
+
+def calculate_sigmas(model_sampling: object, scheduler_name: str, steps: int) -> torch.Tensor:
+    handler = SCHEDULER_HANDLERS.get(scheduler_name)
+    if handler is None:
+        err = f"error invalid scheduler {scheduler_name}"
+        logging.error(err)
+        raise ValueError(err)
+    if handler.use_ms:
+        return handler.handler(model_sampling, steps)
+    return handler.handler(n=steps, sigma_min=float(model_sampling.sigma_min), sigma_max=float(model_sampling.sigma_max))

 def sampler_object(name):
     if name == "uni_pc":
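The samplers.py refactor replaces the `if`/`elif` scheduler chain with a `SchedulerHandler` table: `SCHEDULER_NAMES` is now derived from the registry, and an unknown name raises `ValueError` instead of falling through to `return sigmas` with `sigmas` unbound (an `UnboundLocalError` in the old code). A self-contained sketch of the same dispatch pattern; `linear_sigmas`, `ms_sigmas`, and `ToyModelSampling` are invented stand-ins, not code from the repo:

```python
from typing import Callable, NamedTuple

class SchedulerHandler(NamedTuple):
    handler: Callable[..., list]
    # True: call as handler(model_sampling, steps);
    # False: call as handler(n=..., sigma_min=..., sigma_max=...).
    use_ms: bool = True

class ToyModelSampling:
    sigma_min = 0.03
    sigma_max = 14.6

def linear_sigmas(n: int, sigma_min: float, sigma_max: float) -> list:
    # Toy stand-in for a schedule such as get_sigmas_karras.
    step = (sigma_max - sigma_min) / max(n - 1, 1)
    return [sigma_max - i * step for i in range(n)]

def ms_sigmas(model_sampling, steps: int) -> list:
    # Toy stand-in for a scheduler that reads bounds off model_sampling.
    return linear_sigmas(steps, float(model_sampling.sigma_min),
                         float(model_sampling.sigma_max))

SCHEDULER_HANDLERS = {
    "linear": SchedulerHandler(linear_sigmas, use_ms=False),
    "normal": SchedulerHandler(ms_sigmas),
}
SCHEDULER_NAMES = list(SCHEDULER_HANDLERS)

def calculate_sigmas(model_sampling, scheduler_name: str, steps: int) -> list:
    handler = SCHEDULER_HANDLERS.get(scheduler_name)
    if handler is None:
        raise ValueError(f"error invalid scheduler {scheduler_name}")
    if handler.use_ms:
        return handler.handler(model_sampling, steps)
    return handler.handler(n=steps, sigma_min=float(model_sampling.sigma_min),
                           sigma_max=float(model_sampling.sigma_max))

print(calculate_sigmas(ToyModelSampling(), "normal", 5))
```

Registering a new scheduler is now a one-line dictionary entry, and the name list can no longer drift out of sync with the implementations.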
comfy/utils.py — 4 changes: 2 additions & 2 deletions

@@ -727,7 +727,7 @@ def slerp(b1, b2, r):
     res *= (b1_norms * (1.0-r) + b2_norms * r).expand(-1,c)

     #edge cases for same or polar opposites
-    res[dot > 1 - 1e-5] = b1[dot > 1 - 1e-5]
+    res[dot > 1 - 1e-5] = b1[dot > 1 - 1e-5]
     res[dot < 1e-5 - 1] = (b1 * (1.0-r) + b2 * r)[dot < 1e-5 - 1]
     return res

@@ -893,7 +893,7 @@ def mult_list_upscale(a):
     out = torch.zeros([s.shape[0], out_channels] + mult_list_upscale(s.shape[2:]), device=output_device)
     out_div = torch.zeros([s.shape[0], out_channels] + mult_list_upscale(s.shape[2:]), device=output_device)

-    positions = [range(0, s.shape[d+2], tile[d] - overlap[d]) if s.shape[d+2] > tile[d] else [0] for d in range(dims)]
+    positions = [range(0, s.shape[d+2] - overlap[d], tile[d] - overlap[d]) if s.shape[d+2] > tile[d] else [0] for d in range(dims)]

     for it in itertools.product(*positions):
         s_in = s
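The `positions` change above stops each tile scan one overlap short of the edge, dropping a final start whose tile would hang past the input. A toy 1-D illustration with invented sizes (not values from the repo):

```python
# Hypothetical extent of 10 with tile 4 and overlap 2.
size, tile, overlap = 10, 4, 2

old_starts = list(range(0, size, tile - overlap))            # [0, 2, 4, 6, 8]
new_starts = list(range(0, size - overlap, tile - overlap))  # [0, 2, 4, 6]

# Old: the tile starting at 8 would cover 8..12, past the edge at 10.
# New: the last tile (6..10) already reaches the edge exactly.
print(old_starts, new_starts)
```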
comfy_extras/nodes_gits.py — 4 changes: 2 additions & 2 deletions

@@ -162,7 +162,7 @@ def loglinear_interp(t_steps, num_steps):
         [14.61464119, 7.49001646, 5.85520077, 4.45427561, 3.46139455, 2.84484982, 2.19988537, 1.72759056, 1.36964464, 1.08895338, 0.86115354, 0.69515091, 0.54755926, 0.43325692, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
         [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.46139455, 2.84484982, 2.19988537, 1.72759056, 1.36964464, 1.08895338, 0.86115354, 0.69515091, 0.54755926, 0.43325692, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
         [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.46139455, 2.84484982, 2.19988537, 1.72759056, 1.36964464, 1.08895338, 0.89115214, 0.72133851, 0.59516323, 0.4783645, 0.38853383, 0.29807833, 0.22545385, 0.17026083, 0.09824532, 0.02916753],
-    ],
+    ],
     1.15: [
         [14.61464119, 0.83188516, 0.02916753],
         [14.61464119, 1.84880662, 0.59516323, 0.02916753],
@@ -246,7 +246,7 @@ def loglinear_interp(t_steps, num_steps):
         [14.61464119, 5.85520077, 2.84484982, 1.72759056, 1.162866, 0.83188516, 0.64427125, 0.52423614, 0.43325692, 0.36617002, 0.32104823, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
         [14.61464119, 5.85520077, 2.84484982, 1.78698075, 1.24153244, 0.92192322, 0.72133851, 0.57119018, 0.45573691, 0.38853383, 0.34370604, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
         [14.61464119, 5.85520077, 2.84484982, 1.78698075, 1.24153244, 0.92192322, 0.72133851, 0.57119018, 0.4783645, 0.41087446, 0.36617002, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
-    ],
+    ],
     1.35: [
         [14.61464119, 0.69515091, 0.02916753],
         [14.61464119, 0.95350921, 0.34370604, 0.02916753],
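The rows above are precomputed GITS sigma schedules keyed by coefficient; when a requested step count has no stored row, `loglinear_interp` resamples a row in log-space. A hedged sketch of that resampling idea (numpy-based, assuming a decreasing input sequence; not necessarily the repo's exact implementation):

```python
import numpy as np

def loglinear_interp(t_steps, num_steps):
    # Resample a decreasing sigma sequence to num_steps points by
    # interpolating linearly in log-space (np.interp wants increasing xs).
    xs = np.linspace(0, 1, len(t_steps))
    ys = np.log(np.asarray(t_steps)[::-1])
    new_xs = np.linspace(0, 1, num_steps)
    return np.exp(np.interp(new_xs, xs, ys))[::-1]

print(loglinear_interp([14.61464119, 0.83188516, 0.02916753], 5))
```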
(The remaining changed files in this commit were not loaded in this view.)
