diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py
index 80988f3c717..9cca0bd7446 100644
--- a/invokeai/app/invocations/latent.py
+++ b/invokeai/app/invocations/latent.py
@@ -367,36 +367,31 @@ def prep_control_data(
     # original idea by https://github.com/AmericanPresidentJimmyCarter
     # TODO: research more for second order schedulers timesteps
     def init_scheduler(self, scheduler, device, steps, denoising_start, denoising_end):
-        num_inference_steps = steps
         if scheduler.config.get("cpu_only", False):
-            scheduler.set_timesteps(num_inference_steps, device="cpu")
+            scheduler.set_timesteps(steps, device="cpu")
             timesteps = scheduler.timesteps.to(device=device)
         else:
-            scheduler.set_timesteps(num_inference_steps, device=device)
+            scheduler.set_timesteps(steps, device=device)
             timesteps = scheduler.timesteps
 
-        # apply denoising_start
-        t_start_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_start)))
-        t_start_idx = len(list(filter(lambda ts: ts >= t_start_val, timesteps)))
-        timesteps = timesteps[t_start_idx:]
-        if scheduler.order == 2 and t_start_idx > 0:
-            timesteps = timesteps[1:]
+        # strip out the duplicate timesteps added by higher-order schedulers
+        _timesteps = timesteps[:: scheduler.order]
 
-        # save start timestep to apply noise
-        init_timestep = timesteps[:1]
+        # get start timestep index
+        t_start_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_start)))
+        t_start_idx = len(list(filter(lambda ts: ts >= t_start_val, _timesteps)))
 
-        # apply denoising_end
+        # get end timestep index
         t_end_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_end)))
-        t_end_idx = len(list(filter(lambda ts: ts >= t_end_val, timesteps)))
-        if scheduler.order == 2 and t_end_idx > 0:
-            t_end_idx += 1
-        timesteps = timesteps[:t_end_idx]
-
-        # calculate step count based on scheduler order
-        num_inference_steps = len(timesteps)
-        if scheduler.order == 2:
-            num_inference_steps += num_inference_steps % 2
-            num_inference_steps = num_inference_steps // 2
+        t_end_idx = len(list(filter(lambda ts: ts >= t_end_val, _timesteps[t_start_idx:])))
+
+        # scale the indexes back up by the scheduler order
+        t_start_idx *= scheduler.order
+        t_end_idx *= scheduler.order
+
+        init_timestep = timesteps[t_start_idx : t_start_idx + 1]
+        timesteps = timesteps[t_start_idx : t_start_idx + t_end_idx]
+        num_inference_steps = len(timesteps) // scheduler.order
 
         return num_inference_steps, timesteps, init_timestep
 
diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index 2d1894c8963..d88313f455a 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -558,12 +558,22 @@ def step(
         # compute the previous noisy sample x_t -> x_t-1
         step_output = self.scheduler.step(noise_pred, timestep, latents, **conditioning_data.scheduler_args)
 
+        # TODO: file an issue with diffusers?
+        # undo the internal counter increment done by scheduler.step, so the timestep resolves as it did before the call
+        # this is needed to be able to call scheduler.add_noise with the current timestep
+        if self.scheduler.order == 2:
+            self.scheduler._index_counter[timestep.item()] -= 1
+
         # TODO: this additional_guidance extension point feels redundant with InvokeAIDiffusionComponent.
         # But the way things are now, scheduler runs _after_ that, so there was
         # no way to use it to apply an operation that happens after the last scheduler.step.
         for guidance in additional_guidance:
             step_output = guidance(step_output, timestep, conditioning_data)
 
+        # restore the internal counter
+        if self.scheduler.order == 2:
+            self.scheduler._index_counter[timestep.item()] += 1
+
         return step_output
 
     def _unet_forward(
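Review note: the new windowing logic in `init_scheduler()` is easy to check in isolation. Below is a minimal standalone sketch of the same index math (not the PR's code verbatim), assuming `diffusers` is installed; `HeunDiscreteScheduler` is used purely as an example of an `order == 2` scheduler, and the `denoising_start`/`denoising_end` values are arbitrary.

```python
# Standalone sketch of the new init_scheduler() index math. Heun duplicates
# every timestep value after the first, so scheduler.timesteps holds the
# per-step values interleaved with their 2nd-order repeats.
from diffusers import HeunDiscreteScheduler

scheduler = HeunDiscreteScheduler()
scheduler.set_timesteps(10)
timesteps = scheduler.timesteps  # e.g. [999, 888, 888, 777, 777, ..., 0, 0]

denoising_start, denoising_end = 0.2, 0.8  # arbitrary example window

# deduplicated view: one entry per real inference step
_timesteps = timesteps[:: scheduler.order]

# start/end indexes are computed on the deduplicated view...
t_start_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_start)))
t_start_idx = len(list(filter(lambda ts: ts >= t_start_val, _timesteps)))

t_end_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_end)))
t_end_idx = len(list(filter(lambda ts: ts >= t_end_val, _timesteps[t_start_idx:])))

# ...then scaled back up before slicing the duplicated list
t_start_idx *= scheduler.order
t_end_idx *= scheduler.order

init_timestep = timesteps[t_start_idx : t_start_idx + 1]
window = timesteps[t_start_idx : t_start_idx + t_end_idx]
print(len(window) // scheduler.order, init_timestep, window)
```

Computing both indexes on the deduplicated view and scaling by `scheduler.order` keeps the start of the window aligned to a step boundary, which is what lets the old parity fix-up collapse into a single integer division.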
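On the `diffusers_pipeline.py` side: at the diffusers version this PR targets, second-order schedulers (Heun, KDPM2, ...) disambiguate the duplicated timestep values with a private per-value counter, `_index_counter`, which `scheduler.step()` increments. After that increment, resolving the same timestep again, as `scheduler.add_noise()` does, lands on the *next* occurrence of the value. The sketch below isolates the save/restore pattern with the pipeline stripped away; `step_with_guidance` and its arguments are illustrative names, not the PR's API, while `order` and `_index_counter` are the actual diffusers attributes the diff touches.

```python
from typing import Callable, List

import torch


def step_with_guidance(
    scheduler,                      # a diffusers scheduler instance
    noise_pred: torch.Tensor,
    timestep: torch.Tensor,
    latents: torch.Tensor,
    additional_guidance: List[Callable],
):
    """Sketch: one scheduler step followed by post-step guidance callbacks."""
    step_output = scheduler.step(noise_pred, timestep, latents)

    # 2nd-order schedulers keep a per-timestep-value counter that step() has
    # just incremented; roll it back so a guidance callback that calls
    # scheduler.add_noise() resolves `timestep` to the index it had *during*
    # this step, not to the next occurrence of the same value.
    if scheduler.order == 2:
        scheduler._index_counter[timestep.item()] -= 1

    for guidance in additional_guidance:
        step_output = guidance(step_output, timestep)

    # restore the counter so the next scheduler.step() sees the state it expects
    if scheduler.order == 2:
        scheduler._index_counter[timestep.item()] += 1

    return step_output
```

Restoring the counter afterwards matters because the second `step()` call for the same timestep value relies on the incremented counter to pick the second occurrence.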