
Commit ff64a76
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci

Signed-off-by: ataghibakhsh <[email protected]>
pre-commit-ci[bot] authored and JRD971000 committed Feb 16, 2024
1 parent eafb9c3 commit ff64a76
Showing 3 changed files with 21 additions and 19 deletions.
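The changes are mechanical formatting fixes applied by the configured hooks. As a hedged illustration of what such a hook does, the sketch below reformats a source string through black's public API; the assumption that black is among this repository's hooks, and the line_length value, are illustrative only (the actual hook set lives in the repo's .pre-commit-config.yaml):

    # Minimal sketch: reformat a source string the way a black-based
    # pre-commit hook would. line_length=119 is illustrative, not the
    # repository's actual setting.
    import black

    src = "def f(a,b = 1.0,c=None):\n    return a\n"
    print(black.format_str(src, mode=black.Mode(line_length=119)))
    # -> def f(a, b=1.0, c=None):
    #        return a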
@@ -279,23 +279,24 @@ def sampling_fn(
             intermediates["pred_x0"].append(pred_x0)
         return img, intermediates
 
-    def single_ddim_denoise_step(self,
-                                 img,
-                                 total_steps,
-                                 i,
-                                 b,
-                                 device,
-                                 step,
-                                 cond,
-                                 ddim_use_original_steps=None,
-                                 quantize_denoised=None,
-                                 temperature=1.0,
-                                 noise_dropout=0.0,
-                                 score_corrector=None,
-                                 corrector_kwargs=None,
-                                 unconditional_guidance_scale=1.0,
-                                 unconditional_conditioning=None):
-
+    def single_ddim_denoise_step(
+        self,
+        img,
+        total_steps,
+        i,
+        b,
+        device,
+        step,
+        cond,
+        ddim_use_original_steps=None,
+        quantize_denoised=None,
+        temperature=1.0,
+        noise_dropout=0.0,
+        score_corrector=None,
+        corrector_kwargs=None,
+        unconditional_guidance_scale=1.0,
+        unconditional_conditioning=None,
+    ):
 
         index = total_steps - i - 1
         ts = torch.full((b,), step, device=device, dtype=torch.long)
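For orientation, here is a hedged sketch, not code from this commit, of how a DDIM sampling loop might drive the reformatted method; the names run_ddim_loop, sampler, and time_range, and the assumed (img, pred_x0) return pair, are illustrative assumptions:

    import torch

    def run_ddim_loop(sampler, img, time_range, cond, device):
        # b is the batch size; total_steps mirrors the index arithmetic
        # in the method body (index = total_steps - i - 1).
        b = img.shape[0]
        total_steps = len(time_range)
        for i, step in enumerate(time_range):
            # Assumed to return the updated latents and the x0 prediction.
            img, pred_x0 = sampler.single_ddim_denoise_step(
                img, total_steps, i, b, device, step, cond,
                unconditional_guidance_scale=7.5,
            )
        return img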
@@ -65,7 +65,6 @@ def p_sampling_fn(
             noise_dropout,
         )
         return x_prev, pred_x0
-
 
     def grad_p_sampling_fn(
         self,
4 changes: 3 additions & 1 deletion nemo/collections/nlp/parts/megatron_trainer_builder.py
@@ -150,6 +150,7 @@ def create_trainer(self) -> Trainer:
             callbacks=[ModelSummary(max_depth=3), CustomProgressBar()]
         )
 
+
 class MegatronStableDiffusionTrainerBuilder(MegatronTrainerBuilder):
     """Builder for SD model Trainer with overrides."""
 
@@ -171,7 +172,8 @@ def _training_strategy(self) -> NLPDDPStrategy:
             gradient_as_bucket_view=self.cfg.model.gradient_as_bucket_view,
             find_unused_parameters=False,
         )
-
+
+
 class MegatronLMPPTrainerBuilder(MegatronTrainerBuilder):
     """Builder for scripts where grad scaler is turned off for pipeline parallel LM model. E.g. PEFT tuning scripts"""
 
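The blank-line edits in this file follow PEP 8's rule of two blank lines around top-level definitions, the kind of fix formatting hooks apply automatically; a generic before-and-after illustration, not taken from this repository:

    # Before the hook: a single blank line precedes the top-level class.
    # After the hook: exactly two blank lines, as in the diff above.
    def build():
        pass


    class Builder:
        """Two blank lines now separate this class from the function above."""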
