diff --git a/nemo/collections/multimodal/models/text_to_image/stable_diffusion/samplers/base_sampler.py b/nemo/collections/multimodal/models/text_to_image/stable_diffusion/samplers/base_sampler.py
index 10a1a5907b29..08ecdcb830d9 100644
--- a/nemo/collections/multimodal/models/text_to_image/stable_diffusion/samplers/base_sampler.py
+++ b/nemo/collections/multimodal/models/text_to_image/stable_diffusion/samplers/base_sampler.py
@@ -279,23 +279,24 @@ def sampling_fn(
             intermediates["pred_x0"].append(pred_x0)
         return img, intermediates
 
-    def single_ddim_denoise_step(self,
-                                 img,
-                                 total_steps,
-                                 i,
-                                 b,
-                                 device,
-                                 step,
-                                 cond,
-                                 ddim_use_original_steps=None,
-                                 quantize_denoised=None,
-                                 temperature=1.0,
-                                 noise_dropout=0.0,
-                                 score_corrector=None,
-                                 corrector_kwargs=None,
-                                 unconditional_guidance_scale=1.0,
-                                 unconditional_conditioning=None):
-
+    def single_ddim_denoise_step(
+        self,
+        img,
+        total_steps,
+        i,
+        b,
+        device,
+        step,
+        cond,
+        ddim_use_original_steps=None,
+        quantize_denoised=None,
+        temperature=1.0,
+        noise_dropout=0.0,
+        score_corrector=None,
+        corrector_kwargs=None,
+        unconditional_guidance_scale=1.0,
+        unconditional_conditioning=None,
+    ):
         index = total_steps - i - 1
         ts = torch.full((b,), step, device=device, dtype=torch.long)
 
diff --git a/nemo/collections/multimodal/models/text_to_image/stable_diffusion/samplers/ddim.py b/nemo/collections/multimodal/models/text_to_image/stable_diffusion/samplers/ddim.py
index 175f65624b68..761401d11658 100644
--- a/nemo/collections/multimodal/models/text_to_image/stable_diffusion/samplers/ddim.py
+++ b/nemo/collections/multimodal/models/text_to_image/stable_diffusion/samplers/ddim.py
@@ -65,7 +65,6 @@ def p_sampling_fn(
             noise_dropout,
         )
         return x_prev, pred_x0
-
     def grad_p_sampling_fn(
         self,
diff --git a/nemo/collections/nlp/parts/megatron_trainer_builder.py b/nemo/collections/nlp/parts/megatron_trainer_builder.py
index b8fe95d94c42..28e93e8bac68 100644
--- a/nemo/collections/nlp/parts/megatron_trainer_builder.py
+++ b/nemo/collections/nlp/parts/megatron_trainer_builder.py
@@ -150,6 +150,7 @@ def create_trainer(self) -> Trainer:
             callbacks=[ModelSummary(max_depth=3), CustomProgressBar()]
         )
 
+
 class MegatronStableDiffusionTrainerBuilder(MegatronTrainerBuilder):
     """Builder for SD model Trainer with overrides."""
 
@@ -171,7 +172,8 @@ def _training_strategy(self) -> NLPDDPStrategy:
             gradient_as_bucket_view=self.cfg.model.gradient_as_bucket_view,
             find_unused_parameters=False,
        )
-
+
+
 class MegatronLMPPTrainerBuilder(MegatronTrainerBuilder):
     """Builder for scripts where grad scaler is turned off for pipeline parallel LM model. E.g. PEFT tuning scripts"""
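
Reviewer note (not part of the diff): the reformatted `single_ddim_denoise_step` performs one reverse step of DDIM sampling. For context, a minimal sketch of the update rule such a step computes -- the names `ddim_update`, `e_t`, `alpha_prev`, and `sigma` are illustrative assumptions, not taken from this PR, and the schedule tensors are presumed precomputed elsewhere:

    import torch

    def ddim_update(x_t, e_t, pred_x0, alpha_prev, sigma, temperature=1.0):
        """One DDIM step: combine the predicted noise e_t and the predicted
        clean sample pred_x0 into the previous latent x_{t-1}."""
        # Deterministic direction pointing back toward x_t.
        dir_xt = (1.0 - alpha_prev - sigma ** 2) ** 0.5 * e_t
        # Optional stochasticity; sigma == 0 recovers deterministic DDIM.
        noise = sigma * temperature * torch.randn_like(x_t)
        return alpha_prev ** 0.5 * pred_x0 + dir_xt + noise

The `unconditional_guidance_scale` / `unconditional_conditioning` parameters in the signature correspond to classifier-free guidance, which schematically forms e_t = e_uncond + unconditional_guidance_scale * (e_cond - e_uncond) from two model evaluations before an update like the one above.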