Move unet to device right after loading on highvram mode.
comfyanonymous committed Jun 30, 2023
1 parent e7ed507 commit 62db116
Showing 2 changed files with 9 additions and 4 deletions.
12 changes: 8 additions & 4 deletions comfy/model_management.py
```diff
@@ -216,6 +216,11 @@ def get_torch_device_name(device):
 
 model_accelerated = False
 
+def unet_offload_device():
+    if vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.SHARED:
+        return get_torch_device()
+    else:
+        return torch.device("cpu")
 
 def unload_model():
     global current_loaded_model
```
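
The new helper centralizes the decision of where an idle unet should live: on the compute device when VRAM is plentiful or shared with system RAM, on the CPU otherwise. Below is a minimal standalone sketch of that routing, not the ComfyUI source: the `VRAMState` enum is a trimmed-down assumption (the real module defines more states), and the state is passed as a parameter here rather than read from a module-level global.

```python
# Standalone sketch (not the ComfyUI source) of the routing that
# unet_offload_device() performs. The VRAMState enum here is a
# trimmed-down assumption; the real module defines more states.
from enum import Enum

import torch

class VRAMState(Enum):
    NORMAL_VRAM = 0
    HIGH_VRAM = 1
    SHARED = 2  # CPU and GPU share one memory pool (e.g. Apple silicon)

def get_torch_device() -> torch.device:
    return torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

def unet_offload_device(vram_state: VRAMState) -> torch.device:
    # With plentiful (or shared) memory, "offloading" keeps the unet on
    # the compute device, avoiding a CPU round trip between generations.
    if vram_state in (VRAMState.HIGH_VRAM, VRAMState.SHARED):
        return get_torch_device()
    return torch.device("cpu")

print(unet_offload_device(VRAMState.HIGH_VRAM))    # compute device
print(unet_offload_device(VRAMState.NORMAL_VRAM))  # cpu
```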
```diff
@@ -228,10 +233,9 @@ def unload_model():
             accelerate.hooks.remove_hook_from_submodules(current_loaded_model.model)
             model_accelerated = False
 
-        #never unload models from GPU on high vram
-        if vram_state != VRAMState.HIGH_VRAM:
-            current_loaded_model.model.cpu()
-        current_loaded_model.model_patches_to("cpu")
+
+        current_loaded_model.model.to(unet_offload_device())
+        current_loaded_model.model_patches_to(unet_offload_device())
         current_loaded_model.unpatch_model()
         current_loaded_model = None
```
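
Routing the unload path through `unet_offload_device()` makes the old `HIGH_VRAM` special case unnecessary: `nn.Module.to()` does not copy tensors that already sit on the target device, so in high-VRAM mode the model simply stays on the GPU, while the other modes move it to the CPU exactly as before. A small sketch of that no-op behavior:

```python
# Sketch of why no HIGH_VRAM special case is needed any more:
# nn.Module.to() leaves parameters alone when they already live on
# the target device, so sending the model to unet_offload_device()
# is a real transfer only when that device differs from the current one.
import torch

model = torch.nn.Linear(4, 4)  # parameters start on the CPU

before = model.weight.data_ptr()
model.to(torch.device("cpu"))             # already on CPU: no copy happens
assert model.weight.data_ptr() == before  # same storage as before

if torch.cuda.is_available():
    model.to("cuda")                      # one real transfer
    gpu_ptr = model.weight.data_ptr()
    model.to("cuda")                      # no-op: storage is unchanged
    assert model.weight.data_ptr() == gpu_ptr
```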
1 change: 1 addition & 0 deletions comfy/sd.py
```diff
@@ -1142,6 +1142,7 @@ class WeightsLoader(torch.nn.Module):
         clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True)
 
     model = model_config.get_model(sd)
+    model = model.to(model_management.unet_offload_device())
     model.load_model_weights(sd, "model.diffusion_model.")
 
     if output_vae:
```
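
This is the change the commit title describes: the freshly built unet is moved to its long-term device before the checkpoint weights are loaded into it, so in high-VRAM mode the weights are copied into GPU-resident parameters once rather than parked on the CPU and transferred later. A sketch of that ordering, using `nn.Linear` and `load_state_dict()` as hypothetical stand-ins for `model_config.get_model(sd)` and `model.load_model_weights(...)`:

```python
# Sketch of the loading order the sd.py change establishes. nn.Linear
# and load_state_dict() are hypothetical stand-ins for
# model_config.get_model(sd) and model.load_model_weights(...).
import torch

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

model = torch.nn.Linear(8, 8)   # stand-in for model_config.get_model(sd)
model = model.to(device)        # the added line: place the module first

# Checkpoint tensors typically arrive on the CPU; load_state_dict()
# copies each one straight into the device-resident parameters, so no
# whole-model transfer is needed afterwards.
checkpoint = torch.nn.Linear(8, 8).state_dict()
model.load_state_dict(checkpoint)
assert model.weight.device.type == device.type
```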
