Set model to fp16 before loading the state dict to lower the peak RAM usage.
comfyanonymous committed Jun 14, 2023
1 parent 0c7cad4 commit 6b77458
Showing 1 changed file with 2 additions and 2 deletions.
4 changes: 2 additions & 2 deletions comfy/sd.py
@@ -1155,9 +1155,9 @@ class WeightsLoader(torch.nn.Module):
     else:
         model = model_base.BaseModel(unet_config, v_prediction=v_prediction)
 
-    model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)
-
     if fp16:
         model = model.half()
 
+    model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)
+
     return (ModelPatcher(model), clip, vae, clipvision)
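Why moving load_model_weights after the half() call lowers the peak: in the old order the full fp32 checkpoint and an fp32 model coexisted in RAM while the weights were copied in, and the fp16 conversion only ran afterwards. Converting first means the copy casts each fp32 checkpoint tensor into a half-size parameter, so the resident model is half the size while the checkpoint is still in memory. A minimal sketch of the idea in plain PyTorch (load_fp16_sketch and its arguments are illustrative, not ComfyUI's load_model_weights):

import torch

def load_fp16_sketch(model: torch.nn.Module, sd: dict, fp16: bool) -> torch.nn.Module:
    # Old order: load_state_dict first, then model.half(); peak RAM holds
    # the fp32 model and the fp32 checkpoint at the same time.
    # New order (this commit): convert to fp16 first; load_state_dict then
    # casts each fp32 tensor from `sd` into an fp16 parameter as it copies,
    # trimming the transient memory bump during checkpoint load.
    if fp16:
        model = model.half()
    model.load_state_dict(sd)
    return model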
