diff --git a/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/ITREX_StableDiffusionInstructPix2PixPipeline.py b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/ITREX_StableDiffusionInstructPix2PixPipeline.py new file mode 100644 index 00000000000..a00ccb896cb --- /dev/null +++ b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/ITREX_StableDiffusionInstructPix2PixPipeline.py @@ -0,0 +1,738 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Pipeline Modificaiton based from the diffusers 0.12.1 StableDiffusionInstructPix2PixPipeline""" + +import inspect +from typing import Callable, List, Optional, Union + +import numpy as np +import torch +import copy +import PIL +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer + +from diffusers.models import AutoencoderKL, UNet2DConditionModel +from diffusers.schedulers import KarrasDiffusionSchedulers +from diffusers.utils import ( + PIL_INTERPOLATION, + deprecate, + is_accelerate_available, + logging, + randn_tensor, + replace_example_docstring, +) +from diffusers.pipeline_utils import DiffusionPipeline +from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput +from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess +def preprocess(image): + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = map(lambda x: x - x % 8, (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +class StableDiffusionInstructPix2PixPipeline(DiffusionPipeline): + r""" + Pipeline for pixel-level image editing by following text instructions. Based on Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. 
Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPFeatureExtractor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + """ + _optional_components = ["safety_checker", "feature_extractor"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPFeatureExtractor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: Union[torch.FloatTensor, PIL.Image.Image] = None, + num_inference_steps: int = 100, + guidance_scale: float = 7.5, + image_guidance_scale: float = 1.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + engine_graph: Optional[list] = [], + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch which will be repainted according to `prompt`. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. This pipeline requires a value of at least `1`. + image_guidance_scale (`float`, *optional*, defaults to 1.5): + Image guidance scale is to push the generated image towards the initial image `image`. Image guidance + scale is enabled by setting `image_guidance_scale > 1`. Higher image guidance scale encourages to + generate images that are closely linked to the source image `image`, usually at the expense of lower + image quality. This pipeline requires a value of at least `1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` + is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. 
+ latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + + Examples: + + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + + >>> from diffusers import StableDiffusionInstructPix2PixPipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" + + >>> image = download_image(img_url).resize((512, 512)) + + >>> pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( + ... "timbrooks/instruct-pix2pix", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "make the mountains snowy" + >>> image = pipe(prompt=prompt, image=image).images[0] + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + # 0. Check inputs + self.check_inputs(prompt, callback_steps) + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + # 1. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
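+        # For pix2pix, classifier-free guidance is applied with respect to both the
+        # text prompt and the input image, so it is only enabled below when
+        # `image_guidance_scale >= 1.0` as well.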
+ do_classifier_free_guidance = guidance_scale > 1.0 and image_guidance_scale >= 1.0 + # check if scheduler is in sigmas space + scheduler_is_in_sigma_space = hasattr(self.scheduler, "sigmas") + + # 2. Encode input prompt + prompt_embeds = self._encode_prompt( + engine_graph[0], + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # 3. Preprocess image + image = preprocess(image) + height, width = image.shape[-2:] + + # 4. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare Image latents + image_latents = self.prepare_image_latents( + image, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + do_classifier_free_guidance, + generator, + ) + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 7. Check that shapes of latents and image match the UNet channels + num_channels_image = image_latents.shape[1] + if num_channels_latents + num_channels_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_image`: {num_channels_image} " + f" = {num_channels_latents+num_channels_image}. Please verify the config of" + " `pipeline.unet` or your `image` input." + ) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 9. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Expand the latents if we are doing classifier free guidance. + # The latents are expanded 3 times because for pix2pix the guidance\ + # is applied for both the text and the input image. + latent_model_input = torch.cat([latents] * 3) if do_classifier_free_guidance else latents + + # concat latents, image_latents in the channel dimension + scaled_latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + scaled_latent_model_input = torch.cat([scaled_latent_model_input, image_latents], dim=1) + + # Original Pytorch Diffuser Unet Code: predict the noise residual + #noise_pred = self.unet(scaled_latent_model_input, t, encoder_hidden_states=prompt_embeds).sample + + # The ITREX Unet Code + scaled_latent_model_input = scaled_latent_model_input.contiguous() + prompt_embeds = prompt_embeds.contiguous() + t_1d = torch.tensor([t], dtype=torch.float32).contiguous() + engine_output = engine_graph[1].inference([scaled_latent_model_input, t_1d, prompt_embeds]) + noise_pred = torch.from_numpy(engine_output['out_sample:0']) + + # Hack: + # For karras style schedulers the model does classifier free guidance using the + # predicted_original_sample instead of the noise_pred. So we need to compute the + # predicted_original_sample here if we are using a karras style scheduler. 
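+                # i.e. treat the engine output as eps and convert it into a predicted original
+                # sample: pred_original_sample = sample - sigma * eps. The inverse mapping is
+                # applied again after the guidance step below.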
+ if scheduler_is_in_sigma_space: + step_index = (self.scheduler.timesteps == t).nonzero().item() + sigma = self.scheduler.sigmas[step_index] + noise_pred = latent_model_input - sigma * noise_pred + + # perform guidance + if do_classifier_free_guidance: + noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(3) + noise_pred = ( + noise_pred_uncond + + guidance_scale * (noise_pred_text - noise_pred_image) + + image_guidance_scale * (noise_pred_image - noise_pred_uncond) + ) + + # Hack: + # For karras style schedulers the model does classifier free guidance using the + # predicted_original_sample instead of the noise_pred. But the scheduler.step function + # expects the noise_pred and computes the predicted_original_sample internally. So we + # need to overwrite the noise_pred here such that the value of the computed + # predicted_original_sample is correct. + if scheduler_is_in_sigma_space: + noise_pred = (noise_pred - latents) / (-sigma) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + + # 10. Post-processing + image = self.decode_latents(latents, engine_graph[2]) + + # 11. Run safety checker + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 12. Convert to PIL + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload + def enable_sequential_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, + text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a + `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. + """ + if is_accelerate_available(): + from accelerate import cpu_offload + else: + raise ImportError("Please install accelerate via `pip install accelerate`") + + device = torch.device(f"cuda:{gpu_id}") + + for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: + cpu_offload(cpu_offloaded_model, device) + + if self.safety_checker is not None: + cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True) + + @property + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device + def _execution_device(self): + r""" + Returns the device on which the pipeline's models will be executed. After calling + `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module + hooks. 
+ """ + if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): + return self.device + for module in self.unet.modules(): + if ( + hasattr(module, "_hf_hook") + and hasattr(module._hf_hook, "execution_device") + and module._hf_hook.execution_device is not None + ): + return torch.device(module._hf_hook.execution_device) + return self.device + + def _encode_prompt( + self, + text_encoder_graph, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_ prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
+ """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + # The ITREX Text Encdoer Code + # prompt_embeds = text_encoder_graph.inference([text_input_ids]) + # bsz, seq_length = text_input_ids.shape + # encoder_hidden_state = prompt_embeds['last_hidden_state:0'] + # if encoder_hidden_state.dtype == np.int16: + # prompt_embeds['last_hidden_state:0'] = bf16_to_fp32(encoder_hidden_state) + # prompt_embeds = torch.from_numpy(prompt_embeds['last_hidden_state:0']).reshape( + # bsz, seq_length, -1) + + #Original Pytorch Diffuser Text Encoder Code + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + # The ITREX Text Encdoer Code + negative_prompt_embeds = text_encoder_graph.inference([uncond_input.input_ids]) + bsz, seq_length = uncond_input.input_ids.shape + encoder_hidden_state = negative_prompt_embeds['last_hidden_state:0'] + if encoder_hidden_state.dtype == np.int16: + negative_prompt_embeds['last_hidden_state:0'] = bf16_to_fp32(encoder_hidden_state) + negative_prompt_embeds = torch.from_numpy(negative_prompt_embeds['last_hidden_state:0']).reshape( + bsz, seq_length, -1) + + # Original Pytorch Diffuser Text Encoder Code + # negative_prompt_embeds = self.text_encoder( + # uncond_input.input_ids.to(device), + # attention_mask=attention_mask, + # ) + # negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + # pix2pix has two negative embeddings, and unlike in other pipelines latents are ordered [prompt_embeds, negative_prompt_embeds, negative_prompt_embeds] + prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds, negative_prompt_embeds]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + else: + has_nsfw_concept = None + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents, vae_decoder_graph): + latents = 1 / 0.18215 * latents + + # Original Pytorch Diffuser Vae Code + image = self.vae.decode(latents).sample + + # The ITREX Vae Codes + latents = latents.contiguous() + output = vae_decoder_graph.inference([latents]) + image = torch.from_numpy(output['sample:0']) + + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def check_inputs(self, prompt, callback_steps): + if not isinstance(prompt, str) and not isinstance(prompt, list): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def prepare_image_latents( + self, image, batch_size, num_images_per_prompt, dtype, device, do_classifier_free_guidance, generator=None + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if isinstance(generator, list): + image_latents = [self.vae.encode(image[i : i + 1]).latent_dist.mode() for i in range(batch_size)] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = self.vae.encode(image).latent_dist.mode() + + if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: + # expand image_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // image_latents.shape[0] + image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) + elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." + ) + else: + image_latents = torch.cat([image_latents], dim=0) + + if do_classifier_free_guidance: + uncond_image_latents = torch.zeros_like(image_latents) + image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0) + + return image_latents + +def fp32_to_bf16(fp32_np): + assert(fp32_np.dtype==np.float32) + temp = copy.deepcopy(fp32_np) + int32_np = temp.view(dtype=np.int32) + int32_np = int32_np >> 16 + bf16_np = int32_np.astype(np.uint16) + return bf16_np + +def bf16_to_fp32(bf16_np): + assert(bf16_np.dtype==np.int16) + temp = copy.deepcopy(bf16_np) + int32_np = temp.astype(dtype=np.int32) + int32_np = int32_np << 16 + fp32_np = int32_np.view(np.float32) + return fp32_np diff --git a/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/README.md b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/README.md new file mode 100644 index 00000000000..ade21d9086b --- /dev/null +++ b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/README.md @@ -0,0 +1,290 @@ +Step-by-Step +========= +This document describes the end-to-end workflow for Text-to-image generative AI models across the Neural Engine backend. + +Supported Text-to-image Generative AI models: +1. [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) +2. [runwayml/stable-diffusion-v1-5](https://github.com/runwayml/stable-diffusion) +3. [stabilityai/stable-diffusion-2-1](https://huggingface.co/stabilityai/stable-diffusion-2-1) +4. [instruction-tuning-sd](https://huggingface.co/instruction-tuning-sd) + * [scratch-low-level-img-proc](https://huggingface.co/instruction-tuning-sd/scratch-low-level-img-proc) + * [scratch-cartoonizer](https://huggingface.co/instruction-tuning-sd/scratch-cartoonizer) + * [cartoonizer](https://huggingface.co/instruction-tuning-sd/cartoonizer) + * [low-level-img-proc](https://huggingface.co/instruction-tuning-sd/low-level-img-proc) + +The inference and accuracy of the above pretrained models are verified in the default configs. + +# Prerequisite + +## Prepare Python Environment +Create a python environment, optionally with autoconf for jemalloc support. 
+```shell
+conda create -n <env_name> python=3.10 [autoconf]
+conda activate <env_name>
+```
+>**Note**: Make sure the pip version is <= 23.2.2.
+
+Check that the `gcc` version is higher than 9.0.
+```shell
+gcc -v
+```
+
+To install Intel® Extension for Transformers, please refer to [installation](/docs/installation.md).
+```shell
+# Install from pypi
+pip install intel-extension-for-transformers
+
+# Or, install from source code
+cd <intel_extension_for_transformers_folder>
+pip install -r requirements.txt
+pip install -v .
+```
+
+Install the required dependencies for this example:
+```shell
+cd <intel_extension_for_transformers_folder>/examples/huggingface/pytorch/text-to-image/deployment/stable_diffusion
+
+pip install -r requirements.txt
+pip install transformers==4.34.1
+pip install diffusers==0.12.1
+```
+>**Note**: Please use a transformers version no higher than 4.34.1.
+
+## Environment Variables (Optional)
+```shell
+# Preloading libjemalloc.so may improve performance for multi-instance inference.
+conda install jemalloc==5.2.1 -c conda-forge -y
+export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libjemalloc.so
+
+# Weight sharing can save memory and may improve performance when running multiple instances.
+export WEIGHT_SHARING=1
+export INST_NUM=<instance_num>
+```
+>**Note**: This step is optional.
+
+# End-to-End Workflow
+## 1. Prepare Models
+
+The stable diffusion pipeline mainly includes three sub-models:
+1. Text Encoder
+2. Unet
+3. Vae Decoder
+
+Here we take [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) as an example.
+
+### 1.1 Download Models
+Export FP32 ONNX models from the huggingface diffusers module with the following command:
+
+```python
+python prepare_model.py --input_model=CompVis/stable-diffusion-v1-4 --output_path=./model
+```
+
+Set --bf16 to export both FP32 and BF16 models.
+```python
+python prepare_model.py --input_model=CompVis/stable-diffusion-v1-4 --output_path=./model --bf16
+```
+
+For the INT8 quantized mode, we **only support runwayml/stable-diffusion-v1-5** for now.
+You need to get a quantized INT8 model first through QAT; please refer to this [link](https://github.com/intel/intel-extension-for-transformers/blob/main/examples/huggingface/pytorch/text-to-image/quantization/qat/README.md).
+Then set --qat_int8 to export the INT8 models.
+```python
+python prepare_model.py --input_model=runwayml/stable-diffusion-v1-5 --output_path=./model --qat_int8
+```
+
+### 1.2 Compile Models
+
+Export the three FP32 ONNX sub-models of the stable diffusion pipeline to Neural Engine IR.
+
+```bash
+# Run the following bash command to get all IR.
+bash export_model.sh --input_model=model --precision=fp32
+```
+
+Export the three BF16 ONNX sub-models of the stable diffusion pipeline to Neural Engine IR.
+
+```bash
+# Run the following bash command to get all IR.
+bash export_model.sh --input_model=model --precision=bf16
+```
+
+Export mixed FP32 & dynamic quantized Int8 IR.
+
+```bash
+bash export_model.sh --input_model=model --precision=fp32 --cast_type=dynamic_int8
+```
+
+Export mixed BF16 & QAT quantized Int8 IR.
+```bash
+bash export_model.sh --input_model=model --precision=qat_int8
+```
+
+## 2. Performance
+
+Python API command as follows:
+```python
+# FP32 IR
+python run_executor.py --ir_path=./fp32_ir --mode=latency --input_model=CompVis/stable-diffusion-v1-4
+
+# Mixed FP32 & dynamic quantized Int8 IR
+python run_executor.py --ir_path=./fp32_dynamic_int8_ir --mode=latency --input_model=CompVis/stable-diffusion-v1-4 + +# BF16 IR +python run_executor.py --ir_path=./bf16_ir --mode=latency --input_model=CompVis/stable-diffusion-v1-4 + +# QAT INT8 IR +python run_executor.py --ir_path=./qat_int8_ir --mode=latency --input_model=runwayml/stable-diffusion-v1-5 +``` + +## 3. Accuracy +Frechet Inception Distance(FID) metric is used to evaluate the accuracy. This case we check the FID scores between the pytorch image and engine image. + +By setting --accuracy to check FID socre. +Python API command as follows: +```python +# FP32 IR +python run_executor.py --ir_path=./fp32_ir --mode=accuracy --input_model=CompVis/stable-diffusion-v1-4 + +# Mixed FP32 & dynamic quantized Int8 IR +python run_executor.py --ir_path=./fp32_dynamic_int8_ir --mode=accuracy --input_model=CompVis/stable-diffusion-v1-4 + +# BF16 IR +python run_executor.py --ir_path=./bf16_ir --mode=accuracy --input_model=CompVis/stable-diffusion-v1-4 + +# QAT INT8 IR +python run_executor.py --ir_path=./qat_int8_ir --mode=accuracy --input_model=runwayml/stable-diffusion-v1-5 +``` + +## 4. Try Text to Image + +### 4.1 Text2Img + +Try using one sentence to create a picture! + +```python +# Running FP32 models or BF16 models, just import different IR. +# FP32 models +# Note: +# 1. Using --image to set the path of your image, here we use the default download link. +# 2. The default image is "https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png". +# 3. The default prompt is "Cartoonize the following image". + +python run_executor.py --ir_path=./fp32_ir --input_model=CompVis/stable-diffusion-v1-4 +``` +![picture1](./images/astronaut_rides_horse.png) + +```python +# BF16 models +python run_executor.py --ir_path=./bf16_ir --input_model=CompVis/stable-diffusion-v1-4 +``` +![picture2](./images/astronaut_rides_horse_from_engine_1.png) + + +### 4.2 Img2Img: instruction-tuning-sd + +Try using one image and prompts to create a new picture! + +```python +# Running FP32 models or BF16 models, just import different IR. +# BF16 models +python run_executor.py --ir_path=./bf16_ir --input_model=instruction-tuning-sd/cartoonizer --pipeline=instruction-tuning-sd --prompts="Cartoonize the following image" --steps=100 +``` + +Original image: + +![picture3](./images/mountain.png) + +Cartoonized image: + +![picture4](./images/mountain_cartoonized.png) + + +## 5. Validated Result + + +### 5.1 Latency (s) + + +Input: a photo of an astronaut riding a horse on mars + +Batch Size: 1 + + +| Model | FP32 | BF16 | +|---------------------|:----------------------:|-----------------------| +| [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) | 10.33 (s) | 3.02 (s) | + +> Note: Performance results test on ​​06/09/2023 with Intel(R) Xeon(R) Platinum 8480+. +Performance varies by use, configuration and other factors. See platform configuration for configuration details. For more complete information about performance and benchmark results, visit www.intel.com/benchmarks + + + +### 5.2 Platform Configuration + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Item | Value |
+|---|---|
+| Manufacturer | Quanta Cloud Technology Inc |
+| Product Name | QuantaGrid D54Q-2U |
+| OS | CentOS Stream 8 |
+| Kernel | 5.16.0-rc1-intel-next-00543-g5867b0a2a125 |
+| Microcode | 0x2b000111 |
+| IRQ Balance | Enabled |
+| CPU Model | Intel(R) Xeon(R) Platinum 8480+ |
+| Base Frequency | 2.0GHz |
+| Maximum Frequency | 3.8GHz |
+| CPU(s) | 224 |
+| Thread(s) per Core | 2 |
+| Core(s) per Socket | 56 |
+| Socket(s) | 2 |
+| NUMA Node(s) | 2 |
+| Turbo | Enabled |
+| Frequency Governor | Performance |
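+
+For reference, below is a minimal sketch of how the three compiled IR graphs can be wired into the modified `StableDiffusionPipeline` from `diffusion_utils.py` in this directory (similar in spirit to what `run_executor.py` drives). The per-submodel IR directory names and the `Graph()`/`graph_init()` loading calls are assumptions about the export layout; adjust them to match the actual output of `export_model.sh`.
+
+```python
+from intel_extension_for_transformers.transformers.runtime.compile.graph import Graph
+from diffusion_utils import StableDiffusionPipeline
+
+
+def load_graph(ir_dir):
+    # Assumed loading API and IR layout: each sub-model directory holds a
+    # conf.yaml and a model.bin produced by export_model.sh.
+    graph = Graph()
+    graph.graph_init(f"{ir_dir}/conf.yaml", f"{ir_dir}/model.bin")
+    return graph
+
+
+# Expected order: [text_encoder, unet, vae_decoder], matching how the
+# modified pipelines index engine_graph.
+engine_graph = [load_graph(f"./fp32_ir/{name}")
+                for name in ("text_encoder", "unet", "vae_decoder")]
+
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
+prompt = "a photo of an astronaut riding a horse on mars"
+image = pipe(prompt, engine_graph=engine_graph).images[0]
+image.save("astronaut_rides_horse.png")
+```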
diff --git a/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/diffusion_utils.py b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/diffusion_utils.py new file mode 100644 index 00000000000..f7e7b0ddd6d --- /dev/null +++ b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/diffusion_utils.py @@ -0,0 +1,696 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Callable, List, Optional, Union +import torch +import numpy as np +import copy + +from packaging import version +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer + +from intel_extension_for_transformers.transformers.runtime.compile.graph import Graph + +from diffusers.configuration_utils import FrozenDict +from diffusers.models import AutoencoderKL, UNet2DConditionModel +from diffusers.schedulers import KarrasDiffusionSchedulers +from diffusers.utils import deprecate, is_accelerate_available, logging, randn_tensor, replace_example_docstring +from diffusers.pipeline_utils import DiffusionPipeline +from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput +from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionPipeline + + >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt).images[0] + ``` +""" + + +class StableDiffusionPipeline(DiffusionPipeline): + r""" + Pipeline for text-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. 
Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPFeatureExtractor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + """ + _optional_components = ["safety_checker", "feature_extractor"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPFeatureExtractor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file") + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file") + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 .") + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file") + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2**(len(self.vae.config.block_out_channels) - 1) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + def enable_vae_slicing(self): + r""" + Enable sliced VAE decoding. + + When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several + steps. This is useful to save some memory and allow larger batch sizes. + """ + self.vae.enable_slicing() + + def disable_vae_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to + computing decoding in one step. + """ + self.vae.disable_slicing() + + def enable_sequential_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, + text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a + `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. + """ + if is_accelerate_available(): + from accelerate import cpu_offload + else: + raise ImportError("Please install accelerate via `pip install accelerate`") + + device = torch.device(f"cuda:{gpu_id}") + + for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: + cpu_offload(cpu_offloaded_model, device) + + if self.safety_checker is not None: + cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True) + + @property + def _execution_device(self): + r""" + Returns the device on which the pipeline's models will be executed. After calling + `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module + hooks. 
+ """ + if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): + return self.device + for module in self.unet.modules(): + if (hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") + and module._hf_hook.execution_device is not None): + return torch.device(module._hf_hook.execution_device) + return self.device + + def _encode_prompt( + self, + text_encoder_graph, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_ prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
+ """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}") + + if hasattr(self.text_encoder.config, + "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + # The ITREX Text Encdoer Code + prompt_embeds = text_encoder_graph.inference([text_input_ids]) + bsz, seq_length = text_input_ids.shape + encoder_hidden_state = prompt_embeds['last_hidden_state:0'] + if encoder_hidden_state.dtype == np.int16: + prompt_embeds['last_hidden_state:0'] = bf16_to_fp32(encoder_hidden_state) + prompt_embeds = torch.from_numpy(prompt_embeds['last_hidden_state:0']).reshape( + bsz, seq_length, -1) + + # Original Pytorch Diffuser Text Encoder Code + # prompt_embeds = self.text_encoder( + # text_input_ids.to(device), + # attention_mask=attention_mask, + # ) + # prompt_embeds = prompt_embeds[0] + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}.") + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`.") + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, + "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + # The ITREX Text Encdoer Code + negative_prompt_embeds = text_encoder_graph.inference([uncond_input.input_ids]) + bsz, seq_length = uncond_input.input_ids.shape + encoder_hidden_state = negative_prompt_embeds['last_hidden_state:0'] + if encoder_hidden_state.dtype == np.int16: + negative_prompt_embeds['last_hidden_state:0'] = bf16_to_fp32(encoder_hidden_state) + negative_prompt_embeds = torch.from_numpy(negative_prompt_embeds['last_hidden_state:0']).reshape( + bsz, seq_length, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, + -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), + return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) + else: + has_nsfw_concept = None + return image, has_nsfw_concept + + def decode_latents(self, latents, vae_decoder_graph): + latents = 1 / 0.18215 * latents + # Original Pytorch Diffuser Vae Code + # image = self.vae.decode(latents).sample + + # The ITREX Vae Codes + output = vae_decoder_graph.inference([latents]) + image = torch.from_numpy(output['sample:0']) + + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or (callback_steps is not None and + (not isinstance(callback_steps, int) or callback_steps <= 0)): + raise ValueError(f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}.") + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two.") + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two.") + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}.") + + def prepare_latents(self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, + width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. 
Make sure the batch size matches the length of the generators.") + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + engine_graph: Optional[list] = [], + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, + negative_prompt_embeds) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds = self._encode_prompt( + engine_graph[0], + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + # 5. Prepare latent variables + num_channels_latents = self.unet.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # Original Pytorch Diffuser Unet Code: predict the noise residual + # noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample + + # The ITREX Unet Code + t_1d = torch.tensor([t], dtype=torch.float32) + engine_output = engine_graph[1].inference([latent_model_input, t_1d, prompt_embeds]) + noise_pred = torch.from_numpy(engine_output['out_sample:0']) + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and + (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + + # 8. Post-processing + image = self.decode_latents(latents, engine_graph[2]) + + # 9. Run safety checker + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 10. Convert to PIL + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + +def neural_engine_init(ir_path): + text_encoder_graph = Graph() + text_encoder_path = ir_path + '/text_encoder/' + text_encoder_conf = text_encoder_path + 'conf.yaml' + text_encoder_bin = text_encoder_path + 'model.bin' + text_encoder_graph.graph_init(text_encoder_conf, text_encoder_bin) + + unet_graph = Graph() + uent_path = ir_path + '/unet/' + unet_conf = uent_path + 'conf.yaml' + unet_bin = uent_path + 'model.bin' + unet_graph.graph_init(unet_conf, unet_bin, True) + + vae_decoder_graph = Graph() + vae_decoder_path = ir_path + '/vae_decoder/' + vae_decoder_conf = vae_decoder_path + 'conf.yaml' + vae_decoder_bin = vae_decoder_path + 'model.bin' + vae_decoder_graph.graph_init(vae_decoder_conf, vae_decoder_bin) + + return [text_encoder_graph, unet_graph, vae_decoder_graph] + +def fp32_to_bf16(fp32_np): + assert(fp32_np.dtype==np.float32) + temp = copy.deepcopy(fp32_np) + int32_np = temp.view(dtype=np.int32) + int32_np = int32_np >> 16 + bf16_np = int32_np.astype(np.uint16) + return bf16_np + +def bf16_to_fp32(bf16_np): + assert(bf16_np.dtype==np.int16) + temp = copy.deepcopy(bf16_np) + int32_np = temp.astype(dtype=np.int32) + int32_np = int32_np << 16 + fp32_np = int32_np.view(np.float32) + return fp32_np diff --git a/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/diffusion_utils_img2img.py b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/diffusion_utils_img2img.py new file mode 100644 index 00000000000..e464b07acef --- /dev/null +++ 
b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/diffusion_utils_img2img.py @@ -0,0 +1,755 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Pipeline Modificaiton based from the diffusers 0.12.1 StableDiffusionImg2ImgPipeline""" + +import inspect +from typing import Callable, List, Optional, Union + +import numpy as np +import torch +import copy +import PIL +from packaging import version +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer + +from diffusers.configuration_utils import FrozenDict +from diffusers.models import AutoencoderKL, UNet2DConditionModel +from diffusers.schedulers import KarrasDiffusionSchedulers +from diffusers.utils import ( + PIL_INTERPOLATION, + deprecate, + is_accelerate_available, + logging, + randn_tensor, + replace_example_docstring, +) +from diffusers.pipeline_utils import DiffusionPipeline +from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput +from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import requests + >>> import torch + >>> from PIL import Image + >>> from io import BytesIO + + >>> from diffusers import StableDiffusionImg2ImgPipeline + + >>> device = "cuda" + >>> model_id_or_path = "runwayml/stable-diffusion-v1-5" + >>> pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) + >>> pipe = pipe.to(device) + + >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + + >>> response = requests.get(url) + >>> init_image = Image.open(BytesIO(response.content)).convert("RGB") + >>> init_image = init_image.resize((768, 512)) + + >>> prompt = "A fantasy landscape, trending on artstation" + + >>> images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images + >>> images[0].save("fantasy_landscape.png") + ``` +""" + + +def preprocess(image): + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = map(lambda x: x - x % 8, (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +class StableDiffusionImg2ImgPipeline(DiffusionPipeline): + r""" + Pipeline for text-guided image to image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPFeatureExtractor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + """ + _optional_components = ["safety_checker", "feature_extractor"] + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.__init__ + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPFeatureExtractor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload + def enable_sequential_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, significantly reducing memory usage. 
When called, unet,
+        text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
+        `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
+        """
+        if is_accelerate_available():
+            from accelerate import cpu_offload
+        else:
+            raise ImportError("Please install accelerate via `pip install accelerate`")
+
+        device = torch.device(f"cuda:{gpu_id}")
+
+        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
+            cpu_offload(cpu_offloaded_model, device)
+
+        if self.safety_checker is not None:
+            cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
+
+    @property
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
+    def _execution_device(self):
+        r"""
+        Returns the device on which the pipeline's models will be executed. After calling
+        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
+        hooks.
+        """
+        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
+            return self.device
+        for module in self.unet.modules():
+            if (
+                hasattr(module, "_hf_hook")
+                and hasattr(module._hf_hook, "execution_device")
+                and module._hf_hook.execution_device is not None
+            ):
+                return torch.device(module._hf_hook.execution_device)
+        return self.device
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
+    def _encode_prompt(
+        self,
+        text_encoder_graph,
+        prompt,
+        device,
+        num_images_per_prompt,
+        do_classifier_free_guidance,
+        negative_prompt=None,
+        prompt_embeds: Optional[torch.FloatTensor] = None,
+        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+    ):
+        r"""
+        Encodes the prompt into text encoder hidden states.
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                prompt to be encoded
+            device: (`torch.device`):
+                torch device
+            num_images_per_prompt (`int`):
+                number of images that should be generated per prompt
+            do_classifier_free_guidance (`bool`):
+                whether to use classifier free guidance or not
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if
+                `guidance_scale` is less than `1`).
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
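+            text_encoder_graph:
+                The compiled ITREX Neural Engine graph for the text encoder; its `inference` method is called here
+                in place of the PyTorch `self.text_encoder` forward pass.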
+        """
+        if prompt is not None and isinstance(prompt, str):
+            batch_size = 1
+        elif prompt is not None and isinstance(prompt, list):
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+
+        if prompt_embeds is None:
+            text_inputs = self.tokenizer(
+                prompt,
+                padding="max_length",
+                max_length=self.tokenizer.model_max_length,
+                truncation=True,
+                return_tensors="pt",
+            )
+            text_input_ids = text_inputs.input_ids
+            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
+
+            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
+                text_input_ids, untruncated_ids
+            ):
+                removed_text = self.tokenizer.batch_decode(
+                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
+                )
+                logger.warning(
+                    "The following part of your input was truncated because CLIP can only handle sequences up to"
+                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
+                )
+
+            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
+                attention_mask = text_inputs.attention_mask.to(device)
+            else:
+                attention_mask = None
+
+            # The ITREX Text Encoder Code
+            prompt_embeds = text_encoder_graph.inference([text_input_ids])
+            bsz, seq_length = text_input_ids.shape
+            encoder_hidden_state = prompt_embeds['last_hidden_state:0']
+            if encoder_hidden_state.dtype == np.int16:
+                prompt_embeds['last_hidden_state:0'] = bf16_to_fp32(encoder_hidden_state)
+            prompt_embeds = torch.from_numpy(prompt_embeds['last_hidden_state:0']).reshape(
+                bsz, seq_length, -1)
+
+            # Original Pytorch Diffuser Text Encoder Code
+            # prompt_embeds = self.text_encoder(
+            #     text_input_ids.to(device),
+            #     attention_mask=attention_mask,
+            # )
+            # prompt_embeds = prompt_embeds[0]
+
+        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
+
+        bs_embed, seq_len, _ = prompt_embeds.shape
+        # duplicate text embeddings for each generation per prompt, using mps friendly method
+        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
+
+        # get unconditional embeddings for classifier free guidance
+        if do_classifier_free_guidance and negative_prompt_embeds is None:
+            uncond_tokens: List[str]
+            if negative_prompt is None:
+                uncond_tokens = [""] * batch_size
+            elif type(prompt) is not type(negative_prompt):
+                raise TypeError(
+                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
+                    f" {type(prompt)}."
+                )
+            elif isinstance(negative_prompt, str):
+                uncond_tokens = [negative_prompt]
+            elif batch_size != len(negative_prompt):
+                raise ValueError(
+                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+                    " the batch size of `prompt`."
+                )
+            else:
+                uncond_tokens = negative_prompt
+
+            max_length = prompt_embeds.shape[1]
+            uncond_input = self.tokenizer(
+                uncond_tokens,
+                padding="max_length",
+                max_length=max_length,
+                truncation=True,
+                return_tensors="pt",
+            )
+
+            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
+                attention_mask = uncond_input.attention_mask.to(device)
+            else:
+                attention_mask = None
+
+            # The ITREX Text Encoder Code
+            negative_prompt_embeds = text_encoder_graph.inference([uncond_input.input_ids])
+            bsz, seq_length = uncond_input.input_ids.shape
+            encoder_hidden_state = negative_prompt_embeds['last_hidden_state:0']
+            if encoder_hidden_state.dtype == np.int16:
+                negative_prompt_embeds['last_hidden_state:0'] = bf16_to_fp32(encoder_hidden_state)
+            negative_prompt_embeds = torch.from_numpy(negative_prompt_embeds['last_hidden_state:0']).reshape(
+                bsz, seq_length, -1)
+
+            # Original Pytorch Diffuser Text Encoder Code
+            # negative_prompt_embeds = self.text_encoder(
+            #     uncond_input.input_ids.to(device),
+            #     attention_mask=attention_mask,
+            # )
+            # negative_prompt_embeds = negative_prompt_embeds[0]
+
+        if do_classifier_free_guidance:
+            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
+            seq_len = negative_prompt_embeds.shape[1]
+
+            negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
+
+            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
+            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+            # For classifier free guidance, we need to do two forward passes.
+            # Here we concatenate the unconditional and text embeddings into a single batch
+            # to avoid doing two forward passes
+            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+        return prompt_embeds
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
+    def run_safety_checker(self, image, device, dtype):
+        if self.safety_checker is not None:
+            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
+            image, has_nsfw_concept = self.safety_checker(
+                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
+            )
+        else:
+            has_nsfw_concept = None
+        return image, has_nsfw_concept
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
+    def decode_latents(self, latents, vae_decoder_graph):
+        latents = 1 / 0.18215 * latents
+        # Original Pytorch Diffuser Vae Code
+        # image = self.vae.decode(latents).sample
+
+        # The ITREX Vae Code
+        latents = latents.contiguous()
+        output = vae_decoder_graph.inference([latents])
+        image = torch.from_numpy(output['sample:0'])
+
+        image = (image / 2 + 0.5).clamp(0, 1)
+        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
+        return image
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
+    def prepare_extra_step_kwargs(self, generator, eta):
+        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None + ): + if not isinstance(prompt, str) and not isinstance(prompt, list): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if isinstance(generator, list): + init_latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = self.vae.encode(image).latent_dist.sample(generator) + + init_latents = 0.18215 * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: Union[torch.FloatTensor, PIL.Image.Image] = None, + strength: float = 0.8, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + engine_graph: Optional[list] = [], + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`torch.FloatTensor` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter will be modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` + is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + message = "Please use `image` instead of `init_image`." + init_image = deprecate("init_image", "0.13.0", message, take_from=kwargs) + image = init_image or image + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) + + # 2. 
Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds = self._encode_prompt( + engine_graph[0], + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # 4. Preprocess image + image = preprocess(image) + + # 5. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 6. Prepare latent variables + latents = self.prepare_latents( + image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # Original Pytorch Diffuser Unet Code: predict the noise residual + # noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample + + # The ITREX Unet Code + latent_model_input = latent_model_input.contiguous() + prompt_embeds = prompt_embeds.contiguous() + t_1d = torch.tensor([t], dtype=torch.float32).contiguous() + engine_output = engine_graph[1].inference([latent_model_input, t_1d, prompt_embeds]) + noise_pred = torch.from_numpy(engine_output['out_sample:0']) + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + + # 9. Post-processing + image = self.decode_latents(latents, engine_graph[2]) + + # 10. Run safety checker + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 11. 
Convert to PIL + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + +def fp32_to_bf16(fp32_np): + assert(fp32_np.dtype==np.float32) + temp = copy.deepcopy(fp32_np) + int32_np = temp.view(dtype=np.int32) + int32_np = int32_np >> 16 + bf16_np = int32_np.astype(np.uint16) + return bf16_np + +def bf16_to_fp32(bf16_np): + assert(bf16_np.dtype==np.int16) + temp = copy.deepcopy(bf16_np) + int32_np = temp.astype(dtype=np.int32) + int32_np = int32_np << 16 + fp32_np = int32_np.view(np.float32) + return fp32_np diff --git a/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/export_ir.py b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/export_ir.py new file mode 100644 index 00000000000..35cf29d247d --- /dev/null +++ b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/export_ir.py @@ -0,0 +1,311 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +from intel_extension_for_transformers.transformers.runtime.compile import compile, autocast + +text_encoder_pattern_config = { + 'pattern_switch': { + # General Pattern + 'PaddingSequence': False, + 'AttentionReshape': False, + 'QKVReshape': False, + 'ReshapeFusion': False, + 'InsertBF16Node': False, + 'OperatorAdaptor': False, + + # transpose_int8 + 'QKVMerge': False, + + # 'TextEncoder + 'TextEncoder_WordEmbedding': True, + 'TextEncoder_QReshape': True, + 'TextEncoder_KVReshape': True, + 'TextEncoder_AttentionMaskAddReshape': True, + 'TextEncoder_SoftmaxReshape': True, + 'TextEncoder_MulReshape': True, + 'TextEncoder_AttentionReshape': True, + 'TextEncoder_CasualAttentionMask': True, + + # for unet and vae decoder + 'GroupNorm': False, + + # vae decoder & Transformer2Dmodel + 'AttentionBlock_Resize2Gather': False, + 'AttentionBlock_QKVPreReshape': False, + 'AttentionBlock_AttentionMaskAddReshape': False, + 'AttentionBlock_ConstantOfShapeWithMul': False, + + 'Transformer2Dmodel_GetSampleBatch': False, + 'Transformer2Dmodel_SampleSlice': False, + 'Transformer2Dmodel_EncoderHiddenStatesReshape': False, + 'Transformer2Dmodel_ConstantOfShapeWithMul': False, + 'Transformer2Dmodel_QKVPreReshape': False, + 'Transformer2Dmodel_QKVReshape': False, + 'AttentionBlock_QKVReshape': False, + 'Transformer2Dmodel_QKVReshapeTo4D': False, + 'Transformer2Dmodel_AttentionMaskAddReshape': False, + 'Transformer2Dmodel_FFNInputSlice': False, + 'Transformer2Dmodel_FFNInputSlice_1': False, + 'Transformer2DModel_UpBlockResize': False, + + # for all stable diffusion models + 'StableDiffusion_bf16Convert': True, + 'StableDiffusion_ReshapeFusion': True, + + # MHA + 'TorchInsertBF16Node': False, + 'StableDiffusion_MHAReshape': True, + 'StableDiffusion_MHA': False, + 'ExplicitNHWCTransposeForConv': True, + 
'ExplicitNHWCTransposeForConvQAT': False, + 'MultiHeadAttention': False, + + # Channel_last + 'ConvReshape': False + } +} + +unet_pattern_config = { + 'pattern_switch': { + # General Pattern + 'PaddingSequence': False, + 'AttentionReshape': False, + 'QKVReshape': False, + 'ReshapeFusion': False, + 'InsertBF16Node': False, + 'OperatorAdaptor': False, + + # transpose_int8 + 'QKVMerge': False, + + # 'TextEncoder + 'TextEncoder_WordEmbedding': False, + 'TextEncoder_QReshape': False, + 'TextEncoder_KVReshape': False, + 'TextEncoder_AttentionMaskAddReshape': False, + 'TextEncoder_SoftmaxReshape': False, + 'TextEncoder_MulReshape': False, + 'TextEncoder_AttentionReshape': False, + 'TextEncoder_CasualAttentionMask': False, + + # for unet and vae decoder + 'GroupNorm': True, + + # vae decoder & Transformer2Dmodel + 'AttentionBlock_Resize2Gather': True, + 'AttentionBlock_QKVPreReshape': True, + 'AttentionBlock_AttentionMaskAddReshape': True, + 'AttentionBlock_ConstantOfShapeWithMul': True, + + 'Transformer2Dmodel_GetSampleBatch': True, + 'Transformer2Dmodel_SampleSlice': True, + 'Transformer2Dmodel_EncoderHiddenStatesReshape': True, + 'Transformer2Dmodel_ConstantOfShapeWithMul': True, + 'Transformer2Dmodel_QKVPreReshape': True, + 'Transformer2Dmodel_QKVReshape': True, + 'AttentionBlock_QKVReshape': False, + 'Transformer2Dmodel_QKVReshapeTo4D': True, + 'Transformer2Dmodel_AttentionMaskAddReshape': True, + 'Transformer2Dmodel_FFNInputSlice': True, + 'Transformer2Dmodel_FFNInputSlice_1': True, + 'Transformer2DModel_UpBlockResize': True, + + # for all stable diffusion models + 'StableDiffusion_bf16Convert': True, + 'StableDiffusion_ReshapeFusion': True, + + # MHA + 'TorchInsertBF16Node': False, + 'StableDiffusion_MHAReshape': True, + 'StableDiffusion_MHA': False, + 'ExplicitNHWCTransposeForConv': True, + 'ExplicitNHWCTransposeForConvQAT': False, + 'MultiHeadAttention': False, + + # Channel_last + 'ConvReshape': False + } +} + +qat_unet_pattern_config = { + 'pattern_switch': { + # General Pattern + 'PaddingSequence': False, + 'AttentionReshape': False, + 'QKVReshape': False, + 'ReshapeFusion': False, + 'InsertBF16Node': False, + 'OperatorAdaptor': False, + + # transpose_int8 + 'QKVMerge': False, + + # 'TextEncoder + 'TextEncoder_WordEmbedding': False, + 'TextEncoder_QReshape': False, + 'TextEncoder_KVReshape': False, + 'TextEncoder_AttentionMaskAddReshape': False, + 'TextEncoder_SoftmaxReshape': False, + 'TextEncoder_MulReshape': False, + 'TextEncoder_AttentionReshape': False, + 'TextEncoder_CasualAttentionMask': False, + + # for unet and vae decoder + 'GroupNorm': True, + + # vae decoder & Transformer2Dmodel + 'AttentionBlock_Resize2Gather': True, + 'AttentionBlock_QKVPreReshape': True, + 'AttentionBlock_AttentionMaskAddReshape': True, + 'AttentionBlock_ConstantOfShapeWithMul': True, + + 'Transformer2Dmodel_GetSampleBatch': True, + 'Transformer2Dmodel_SampleSlice': True, + 'Transformer2Dmodel_EncoderHiddenStatesReshape': True, + 'Transformer2Dmodel_ConstantOfShapeWithMul': True, + 'Transformer2Dmodel_QKVPreReshape': True, + 'Transformer2Dmodel_QKVReshape': True, + 'AttentionBlock_QKVReshape': False, + 'Transformer2Dmodel_QKVReshapeTo4D': True, + 'Transformer2Dmodel_AttentionMaskAddReshape': True, + 'Transformer2Dmodel_FFNInputSlice': True, + 'Transformer2Dmodel_FFNInputSlice_1': True, + 'Transformer2DModel_UpBlockResize': True, + + # for all stable diffusion models + 'StableDiffusion_bf16Convert': True, + 'StableDiffusion_ReshapeFusion': True, + + # MHA + 'TorchInsertBF16Node': False, + 
'StableDiffusion_MHAReshape': True, + 'StableDiffusion_MHA': True, + 'ExplicitNHWCTransposeForConv': False, + 'ExplicitNHWCTransposeForConvQAT': True, + 'MultiHeadAttention': False, + + # QAT for the stable diffusion + 'StableDiffusion_InsertQuantNode': True, + 'StableDiffusion_CollectQuantInfo': True, + 'CollectQuantInfo': False, + 'InsertQuantNode': False, + 'QuantizeFusion': False, + 'StableDiffusion_QuantizeFusion': True, + + # Channel_last + 'ConvReshape': False + } +} + +vae_decoder_pattern_config = { + 'pattern_switch': { + # General Pattern + 'PaddingSequence': False, + 'AttentionReshape': False, + 'QKVReshape': False, + 'ReshapeFusion': False, + 'InsertBF16Node': False, + 'OperatorAdaptor': False, + + # transpose_int8 + 'QKVMerge': False, + + # 'TextEncoder + 'TextEncoder_WordEmbedding': False, + 'TextEncoder_QReshape': False, + 'TextEncoder_KVReshape': False, + 'TextEncoder_AttentionMaskAddReshape': False, + 'TextEncoder_SoftmaxReshape': False, + 'TextEncoder_MulReshape': False, + 'TextEncoder_AttentionReshape': False, + 'TextEncoder_CasualAttentionMask': False, + + # for unet and vae decoder + 'GroupNorm': True, + + # vae decoder & Transformer2Dmodel + 'AttentionBlock_Resize2Gather': True, + 'AttentionBlock_QKVPreReshape': True, + 'AttentionBlock_AttentionMaskAddReshape': True, + 'AttentionBlock_ConstantOfShapeWithMul': True, + + 'Transformer2Dmodel_GetSampleBatch': True, + 'Transformer2Dmodel_SampleSlice': True, + 'Transformer2Dmodel_EncoderHiddenStatesReshape': True, + 'Transformer2Dmodel_ConstantOfShapeWithMul': True, + 'Transformer2Dmodel_QKVPreReshape': True, + 'Transformer2Dmodel_QKVReshape': True, + 'AttentionBlock_QKVReshape': True, + 'Transformer2Dmodel_QKVReshapeTo4D': False, + 'Transformer2Dmodel_AttentionMaskAddReshape': True, + 'Transformer2Dmodel_FFNInputSlice': True, + 'Transformer2Dmodel_FFNInputSlice_1': True, + 'Transformer2DModel_UpBlockResize': True, + + # for all stable diffusion models + 'StableDiffusion_bf16Convert': True, + 'StableDiffusion_ReshapeFusion': True, + + # MHA + 'TorchInsertBF16Node': False, + 'StableDiffusion_MHAReshape': True, + 'StableDiffusion_MHA': False, + 'ExplicitNHWCTransposeForConv': True, + 'ExplicitNHWCTransposeForConvQAT': False, + 'MultiHeadAttention': False, + + # Channel_last + 'ConvReshape': False + } +} + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--onnx_model", default="./model", + type=str, help="onnx model path.") + parser.add_argument("--pattern_config", default="./", + type=str, help="pattern graph path.") + parser.add_argument("--output_path", default="./ir", + type=str, help="pattern graph path.") + parser.add_argument("--dtype", default="fp32", type=str) + args = parser.parse_args() + + if args.pattern_config == 'text_encoder': + args.pattern_config = text_encoder_pattern_config + if args.pattern_config == 'unet': + args.pattern_config = unet_pattern_config + if args.pattern_config == 'vae_decoder': + args.pattern_config = vae_decoder_pattern_config + + if args.dtype == "bf16": + args.pattern_config['pattern_switch']['StableDiffusion_MHA'] = True + with autocast(args.dtype): + graph = compile(args.onnx_model, args.pattern_config) + graph.save(args.output_path) + elif args.dtype == "dynamic_int8": + with autocast(args.dtype): + graph = compile(args.onnx_model, args.pattern_config) + graph.save(args.output_path) + elif args.dtype == "qat_int8": + args.pattern_config = qat_unet_pattern_config + with autocast(args.dtype): + graph = compile(args.onnx_model, args.pattern_config) 
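+ # the saved IR directory contains conf.yaml and model.bin, which is the layout neural_engine_init() in the pipeline scripts loads at inference time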
+ graph.save(args.output_path) + else: + graph = compile(args.onnx_model, args.pattern_config) + graph.save(args.output_path) diff --git a/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/export_model.sh b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/export_model.sh new file mode 100644 index 00000000000..d6b74e204d9 --- /dev/null +++ b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/export_model.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --precision=*) + precision=$(echo $var |cut -f2 -d=) + ;; + --cast_type=*) + cast_type=$(echo $var |cut -f2 -d=) + ;; + esac + done + +if [[ ${cast_type} == 'dynamic_int8' ]]; then + echo "[INFO] cast_type is dynamic int8 and model will be dynamic quantized based on $precision" +# 1. text encoder +echo "[INFO] Text encoder ir will be $precision ..." +echo "[INFO] Start to export text encoder ir..." +python export_ir.py --onnx_model=${input_model}/text_encoder_fp32/model.onnx --pattern_config=text_encoder --output_path=./${precision}_${cast_type}_ir/text_encoder/ --dtype=${precision} + +# 2. unet +echo "[INFO] Start to export unet ir..." +python export_ir.py --onnx_model=${input_model}/unet_fp32/model.onnx --pattern_config=unet --output_path=./${precision}_${cast_type}_ir/unet/ --dtype=${cast_type} + +# 3. vae_decoder +echo "[INFO] start to export vae_decoder ir..." +python export_ir.py --onnx_model=${input_model}/vae_decoder_fp32/model.onnx --pattern_config=vae_decoder --output_path=./${precision}_${cast_type}_ir/vae_decoder/ --dtype=${cast_type} +exit +fi + +if [[ ${precision} == 'qat_int8' ]]; then +cast_type=qat_int8 +echo "[INFO] cast_type is qat int8" +# 1. text encoder +echo "[INFO] Start to export text encoder bf16 ir..." +python export_ir.py --onnx_model=${input_model}/text_encoder_bf16/model.onnx --pattern_config=text_encoder --output_path=./${cast_type}_ir/text_encoder/ --dtype=bf16 + +# 2. unet +echo "[INFO] Start to export unet qat int8 ir..." +python export_ir.py --onnx_model=${input_model}/unet_${cast_type}/model.onnx --pattern_config=unet --output_path=./${cast_type}_ir/unet/ --dtype=${cast_type} + +# 3. vae_decoder +echo "[INFO] start to export vae_decoder bf16 ir..." +python export_ir.py --onnx_model=${input_model}/vae_decoder_bf16/model.onnx --pattern_config=vae_decoder --output_path=./${cast_type}_ir/vae_decoder/ --dtype=bf16 +exit +fi + +# 1. text encoder +echo "[INFO] Start to export text encoder ir..." +python export_ir.py --onnx_model=${input_model}/text_encoder_${precision}/model.onnx --pattern_config=text_encoder --output_path=./${precision}_ir/text_encoder/ --dtype=${precision} + +# 2. unet +echo "[INFO] Start to export unet ir..." 
+python export_ir.py --onnx_model=${input_model}/unet_${precision}/model.onnx --pattern_config=unet --output_path=./${precision}_ir/unet/ --dtype=${precision} + +# 3. vae_decoder +echo "[INFO] start to export vae_decoder ir..." +python export_ir.py --onnx_model=${input_model}/vae_decoder_${precision}/model.onnx --pattern_config=vae_decoder --output_path=./${precision}_ir/vae_decoder/ --dtype=${precision} diff --git a/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/images/astronaut_rides_horse.png b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/images/astronaut_rides_horse.png new file mode 100644 index 00000000000..0374ac3d663 Binary files /dev/null and b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/images/astronaut_rides_horse.png differ diff --git a/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/images/astronaut_rides_horse_from_engine_1.png b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/images/astronaut_rides_horse_from_engine_1.png new file mode 100644 index 00000000000..dd5cbb6a0b5 Binary files /dev/null and b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/images/astronaut_rides_horse_from_engine_1.png differ diff --git a/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/images/mountain.png b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/images/mountain.png new file mode 100644 index 00000000000..680f0033ef5 Binary files /dev/null and b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/images/mountain.png differ diff --git a/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/images/mountain_cartoonized.png b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/images/mountain_cartoonized.png new file mode 100644 index 00000000000..f653addaf3e Binary files /dev/null and b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/images/mountain_cartoonized.png differ diff --git a/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/prepare_model.py b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/prepare_model.py new file mode 100644 index 00000000000..5977c186e01 --- /dev/null +++ b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/prepare_model.py @@ -0,0 +1,337 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
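+"""Export the Stable Diffusion text encoder, UNet and VAE decoder to ONNX, with optional BF16 weight casting and a QAT INT8 UNet for the Neural Engine."""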
+ +import argparse +import os +import shutil +from pathlib import Path + +import torch +import onnx +from diffusers import StableDiffusionPipeline +import shlex + + +@torch.no_grad() +def _export_bf16_onnx_model(fp32_model_path, bf16_model_path): + model = onnx.load(fp32_model_path) + bf16_type_list = ['MatMul', 'Gemm', 'Conv'] + bf16_tensor_name_list = [] + for node in model.graph.node: + if node.op_type in bf16_type_list: + for inp in node.input: + bf16_tensor_name_list.append(inp) + import numpy as np + from onnx import TensorProto, numpy_helper + for tensor in model.graph.initializer: + if tensor.name in bf16_tensor_name_list: + + def fp32_to_bf16(fp32_np): + assert (fp32_np.dtype == np.float32) + int32_np = fp32_np.view(dtype=np.int32) + int32_np = int32_np >> 16 + bf16_np = int32_np.astype(np.int16) + return bf16_np + + fp16_data = fp32_to_bf16(numpy_helper.to_array(tensor)) + tensor.raw_data = fp16_data.tobytes() + tensor.data_type = TensorProto.BFLOAT16 + onnx.save(model, bf16_model_path) + + +def prepare_model( + model_name: str, + output_path: Path, + opset: int, + expected_dtype: str, + fake_quant_model_qinit_path: str, + fake_quant_model_qinit_name: str +): + device = 'cpu' + dtype = torch.float32 + output_path = Path(output_path) + pipeline = StableDiffusionPipeline.from_pretrained(model_name, torch_dtype=dtype).to(device) + + # TEXT ENCODER + num_tokens = pipeline.text_encoder.config.max_position_embeddings + text_hidden_size = pipeline.text_encoder.config.hidden_size + text_input = pipeline.tokenizer( + "A sample prompt", + padding="max_length", + max_length=pipeline.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_encoder = output_path / "text_encoder_fp32" / "model.onnx" + text_encoder.parent.mkdir(parents=True, exist_ok=True) + + torch.onnx.export( + pipeline.text_encoder, + args=(text_input.input_ids.to(device=device, dtype=torch.int32)), + f=text_encoder.as_posix(), + input_names=["input_ids"], + output_names=["last_hidden_state", "pooler_output"], + dynamic_axes={ + "input_ids": { + 0: "batch", + 1: "sequence" + }, + }, + do_constant_folding=True, + opset_version=opset, + ) + + if expected_dtype == 'bf16' or expected_dtype == 'qat_int8': + text_encoder_bf16 = output_path / "text_encoder_bf16" / "model.onnx" + text_encoder_bf16_dir = output_path / "text_encoder_bf16" + if os.path.exists(text_encoder_bf16_dir): + shutil.rmtree(text_encoder_bf16_dir) + os.mkdir(shlex.quote(text_encoder_bf16_dir.as_posix())) + _export_bf16_onnx_model(text_encoder.as_posix(), text_encoder_bf16.as_posix()) + + del pipeline.text_encoder + + # UNET + if expected_dtype == 'qat_int8': + prepare_qat_model(model_name, output_path, fake_quant_model_qinit_path, fake_quant_model_qinit_name) + + unet_in_channels = pipeline.unet.config.in_channels + unet_sample_size = pipeline.unet.config.sample_size + unet_path = output_path / "unet_fp32" / "model.onnx" + unet_path.parent.mkdir(parents=True, exist_ok=True) + torch.onnx.export( + pipeline.unet, + args=( + torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, + dtype=dtype), + torch.randn(2).to(device=device, dtype=dtype), + torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype), + False, + ), + f=unet_path.as_posix(), + input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"], + output_names=["out_sample"], # has to be different from "sample" for correct tracing + dynamic_axes={ + "sample": { + 0: "batch", + 1: "channels", + 2: "height", + 3: 
"width" + }, + "timestep": { + 0: "batch" + }, + "encoder_hidden_states": { + 0: "batch", + 1: "sequence" + }, + }, + do_constant_folding=True, + opset_version=opset, + ) + + unet_model_path = str(unet_path.absolute().as_posix()) + + if expected_dtype == 'bf16' or expected_dtype == 'qat_int8': + unet_bf16_model_path = output_path / "unet_bf16" / "model.onnx" + unet_bf16_dir = output_path / "unet_bf16" + if os.path.exists(unet_bf16_dir): + shutil.rmtree(unet_bf16_dir) + os.mkdir(shlex.quote(unet_bf16_dir.as_posix())) + _export_bf16_onnx_model(unet_path.as_posix(), unet_bf16_model_path.as_posix()) + unet_bf16_model = onnx.load(unet_bf16_model_path) + + unet_dir = os.path.dirname(unet_model_path) + unet = onnx.load(unet_model_path) + # clean up existing tensor files + shutil.rmtree(unet_dir) + os.mkdir(shlex.quote(unet_dir)) + # collate external tensor files into one + onnx.save_model( + unet, + unet_model_path, + save_as_external_data=True, + all_tensors_to_one_file=True, + location="weights.pb", + convert_attribute=False, + ) + if expected_dtype == 'bf16' or expected_dtype == 'qat_int8': + unet_bf16_model_path = str(unet_bf16_model_path.absolute().as_posix()) + onnx.save_model( + unet_bf16_model, + unet_bf16_model_path, + save_as_external_data=True, + all_tensors_to_one_file=True, + location="weights.pb", + ) + del pipeline.unet + + # VAE DECODER + vae_decoder = pipeline.vae + vae_latent_channels = vae_decoder.config.latent_channels + # forward only through the decoder part + vae_decoder.forward = pipeline.vae.decode + + vae_decoder_path = output_path / "vae_decoder_fp32" / "model.onnx" + vae_decoder_path.parent.mkdir(parents=True, exist_ok=True) + + torch.onnx.export( + vae_decoder, + args=( + torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, + dtype=dtype), + False, + ), + f=vae_decoder_path.as_posix(), + input_names=["latent_sample", "return_dict"], + output_names=["sample"], + dynamic_axes={ + "latent_sample": { + 0: "batch", + 1: "channels", + 2: "height", + 3: "width" + }, + }, + do_constant_folding=True, + opset_version=opset, + ) + + if expected_dtype == 'bf16' or expected_dtype == 'qat_int8': + vae_decoder_bf16_model = output_path / "vae_decoder_bf16" / "model.onnx" + vae_decoder_bf16_dir = output_path / "vae_decoder_bf16" + if os.path.exists(vae_decoder_bf16_dir): + shutil.rmtree(vae_decoder_bf16_dir) + os.mkdir(shlex.quote(vae_decoder_bf16_dir.as_posix())) + _export_bf16_onnx_model(vae_decoder_path.as_posix(), vae_decoder_bf16_model.as_posix()) + del pipeline.vae + + +def prepare_qat_model( + model_name: str, + output_path: Path, + fake_quant_model_qinit_path: str = "./", + fake_quant_model_qinit_name: str = "fake_quant_model_qinit.pt" +): + device = 'cpu' + output_path = Path(output_path) + pipeline = StableDiffusionPipeline.from_pretrained(model_name).to(device) + unet = pipeline.unet + + from quantization_modules import find_and_replace, convert2quantized_model + find_and_replace(unet) + unet.load_state_dict(torch.load(os.path.join(fake_quant_model_qinit_path, fake_quant_model_qinit_name))) + unet = convert2quantized_model(unet) + unet.eval() + setattr(pipeline, "unet", unet) + + onnx_model_path = output_path / "unet_qat_int8" / "model.onnx" + os.makedirs(os.path.dirname(onnx_model_path), exist_ok=True) + if os.path.exists(os.path.dirname(onnx_model_path)): + def model_wrapper(model_fn): + # export doesn't support a dictionary output, so manually turn it into a tuple + # refer to 
https://discuss.tvm.apache.org/t/how-to-deal-with-prim-dictconstruct/11978 + def wrapper(*args, **kwargs): + output = model_fn(*args, **kwargs) + if isinstance(output, dict): + return tuple(v for v in output.values() if v is not None) + else: + return output + return wrapper + unet.forward = model_wrapper(unet.forward) + + torch.onnx.export( + unet, + args=( + torch.randn(2, 4, 64, 64).to(device=device,dtype=torch.float32), + torch.randn(2).to(device=device, dtype=torch.float32), + torch.randn(2, 77, 768).to(device=device, dtype=torch.float32), + ), + f=onnx_model_path, + input_names=["sample", "timestep", "encoder_hidden_states"],# "return_dict"], + output_names=["out_sample"], # has to be different from "sample" for correct tracing + dynamic_axes={ + "sample": { + 0: "batch", + 1: "channels", + 2: "height", + 3: "width" + }, + "timestep": { + 0: "batch" + }, + "encoder_hidden_states": { + 0: "batch", + 1: "sequence" + }, + }, + do_constant_folding=True, + opset_version=14, + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--input_model", + type=str, + required=True, + help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", + ) + parser.add_argument('--pattern_config', + default="./pattern_config", + type=str, + help="The fusion pattern config path for the nerual engine.") + parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") + parser.add_argument( + "--opset", + default=14, + type=int, + help="The version of the ONNX operator set to use.", + ) + parser.add_argument("--bf16", action="store_true", help="Export the models in `bfloat16` mode") + parser.add_argument("--qat_int8", action="store_true", help="Export the models in `bfloat16` mode") + parser.add_argument( + "--fake_quant_model_qinit_path", + type=str, + default="./", + help="Path to the fake_quant_model_qinit", + ) + parser.add_argument( + "--fake_quant_model_qinit_name", + type=str, + default="fake_quant_model_qinit.pt", + help="Name of the fake_quant_model_qinit", + ) + + args = parser.parse_args() + + expected_dtype = 'fp32' + if args.bf16: + expected_dtype = 'bf16' + elif args.qat_int8: + expected_dtype = 'qat_int8' + + prepare_model( + args.input_model, + args.output_path, + args.opset, + expected_dtype, + args.fake_quant_model_qinit_path, + args.fake_quant_model_qinit_name + ) diff --git a/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/quantization_modules.py b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/quantization_modules.py new file mode 100644 index 00000000000..c50b20b12b3 --- /dev/null +++ b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/quantization_modules.py @@ -0,0 +1,146 @@ +import torch +import torch.nn.functional as F +from torch.ao.quantization import FakeQuantize, FakeQuantizeBase, default_fake_quant, default_per_channel_weight_fake_quant, default_fused_act_fake_quant, default_fused_per_channel_wt_fake_quant +from torch.ao.nn.quantized import Linear, Conv2d, Quantize +try: + from peft.tuners.lora import Linear as LoRALinear + from peft.utils import transpose + peft_available = True +except: + peft_available = False + +ACT_REDUCE_RANGE = False +WEIGHT_REDUCE_RANGE = False + + +class QuantizedLinear(Linear): + def forward(self, x): + return super().forward( + self.input_quant(x) + ).dequantize() + + +class FakeQuantLinear(torch.nn.Linear): + def __init__(self, module: torch.nn.Linear): 
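+        # reuse the wrapped nn.Linear's parameters/buffers by copying its __dict__, then register fake-quant observers: per-tensor for activations and per-channel for weights (QAT)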
+ self.__dict__.update(module.__dict__.copy()) + self.add_module('activation_pre_process', default_fake_quant(reduce_range=ACT_REDUCE_RANGE)) + self.add_module('weight_fake_quant', default_per_channel_weight_fake_quant(reduce_range=WEIGHT_REDUCE_RANGE)) + self.add_module('activation_post_process', default_fake_quant(reduce_range=ACT_REDUCE_RANGE)) + self.is_lora_layer = True if peft_available and isinstance(module, LoRALinear) else False + + def forward(self, x): + x = self.activation_pre_process(x) + weight = self.weight + if self.is_lora_layer and not self.disable_adapters and self.r[self.active_adapter] > 0: + lora_weight = transpose( + self.lora_B[self.active_adapter].weight @ self.lora_A[self.active_adapter].weight, + self.fan_in_fan_out, + ) * self.scaling[self.active_adapter] + weight = weight + lora_weight + x = F.linear(x, self.weight_fake_quant(weight), self.bias) + x = self.activation_post_process(x) + return x + + def convert(self): + if self.is_lora_layer and not self.disable_adapters and self.r[self.active_adapter] > 0: + lora_weight = transpose( + self.lora_B[self.active_adapter].weight @ self.lora_A[self.active_adapter].weight, + self.fan_in_fan_out, + ) * self.scaling[self.active_adapter] + self.weight.data += lora_weight.data + module = QuantizedLinear.from_float(self) + input_quant = torch.quantization.QuantStub() + input_quant.add_module('activation_post_process', self.activation_pre_process) + input_quant = Quantize.from_float(input_quant) + module.add_module('input_quant', input_quant) + return module + + +class QuantizedConv2d(Conv2d): + def forward(self, x): + return super().forward( + self.input_quant(x) + ).dequantize() + + +class FakeQuantConv2d(torch.nn.Conv2d): + def __init__(self, module: torch.nn.Conv2d): + self.__dict__.update(module.__dict__.copy()) + self.add_module('activation_pre_process', default_fake_quant(reduce_range=ACT_REDUCE_RANGE)) + self.add_module('weight_fake_quant', default_per_channel_weight_fake_quant(reduce_range=WEIGHT_REDUCE_RANGE)) + self.add_module('activation_post_process', default_fake_quant(reduce_range=ACT_REDUCE_RANGE)) + + def forward(self, x): + x = self.activation_pre_process(x) + x = self._conv_forward(x, self.weight_fake_quant(self.weight), self.bias) + x = self.activation_post_process(x) + return x + + def convert(self): + module = QuantizedConv2d.from_float(self) + input_quant = torch.quantization.QuantStub() + input_quant.add_module('activation_post_process', self.activation_pre_process) + input_quant = Quantize.from_float(input_quant) + module.add_module('input_quant', input_quant) + return module + + +def get_submodules(model, key): + parent = model.get_submodule(".".join(key.split(".")[:-1])) + target_name = key.split(".")[-1] + target = model.get_submodule(key) + return parent, target, target_name + +def find_and_replace(model, fake_quant=True): + assert isinstance(model, torch.nn.Module), "Only support torch Module." 
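+    # walk every named submodule: with fake_quant=True, swap nn.Linear/nn.Conv2d for their fake-quant wrappers; otherwise convert previously wrapped modules into quantized ones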
+ key_list = [key for key, _ in model.named_modules()] + for key in key_list: + try: + parent, target, target_name = get_submodules(model, key) + except: + continue + if fake_quant: + if isinstance(target, torch.nn.Linear): + setattr(parent, target_name, FakeQuantLinear(target)) + elif isinstance(target, torch.nn.Conv2d): + setattr(parent, target_name, FakeQuantConv2d(target)) + else: + if isinstance(target, FakeQuantLinear): + setattr(parent, target_name, target.convert()) + elif isinstance(target, FakeQuantConv2d): + setattr(parent, target_name, target.convert()) + +def convert2quantized_model(model): + model.to(torch.device("cpu")) + find_and_replace(model, fake_quant=False) + return model + +def disable_all_observers(model): + assert isinstance(model, torch.nn.Module), "Only support torch Module." + for name, module in model.named_modules(): + if isinstance(module, FakeQuantizeBase): + module.disable_observer() + +def sync_all_observers(model): + assert isinstance(model, torch.nn.Module), "Only support torch Module." + for name, module in model.named_modules(): + if isinstance(module, FakeQuantize): + _scale, _zero_point = module.calculate_qparams() + _scale, _zero_point = _scale.to(module.scale.device), _zero_point.to(module.zero_point.device) + if module.scale.shape != _scale.shape: + module.scale.resize_(_scale.shape) + module.zero_point.resize_(_zero_point.shape) + module.scale.copy_(_scale) + module.zero_point.copy_(_zero_point) + +def load_int8_model(fp32_model, int8_model_path, fake_quantize_model=False): + find_and_replace(fp32_model) + if fake_quantize_model: + fp32_model.load_state_dict(torch.load(int8_model_path)) + disable_all_observers(fp32_model) + sync_all_observers(fp32_model) + int8_model = convert2quantized_model(fp32_model) + print('Converted to quantized model.') + if not fake_quantize_model: + int8_model.load_state_dict(torch.load(int8_model_path)) + return int8_model \ No newline at end of file diff --git a/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/requirements.txt b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/requirements.txt new file mode 100644 index 00000000000..7295f511f4f --- /dev/null +++ b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/requirements.txt @@ -0,0 +1,12 @@ +neural-compressor +transformers +accelerate +datasets >= 1.8.0 +sentencepiece != 0.1.92 +protobuf +torch==2.3.0 +onnx>=1.12 +onnxruntime==1.13.1 +diffusers==0.12.1 +pytorch_fid +optimum \ No newline at end of file diff --git a/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/run_executor.py b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/run_executor.py new file mode 100644 index 00000000000..dbb7fd99e41 --- /dev/null +++ b/examples/huggingface/pytorch/image-classification/deployment/stable_diffusion/run_executor.py @@ -0,0 +1,236 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + +import diffusion_utils +import torch +import time +from pytorch_fid import fid_score +from diffusers import DPMSolverMultistepScheduler +import os + + +def benchmark(pipe, neural_engine_graph, generator, steps=20): + print('Benchmark start...') + warmup = 4 + total = 8 + total_time = 0 + with torch.no_grad(): + prompt = "a photo of an astronaut riding a horse on mars" + for i in range(total): + start2 = time.time() + pipe(prompt, + engine_graph=neural_engine_graph, + num_inference_steps=steps, + generator=generator).images[0] + end2 = time.time() + if i >= warmup: + total_time += end2 - start2 + print("Total inference latency: ", str(end2 - start2) + "s") + print("Average Latency: ", (total_time) / (total - warmup), "s") + print("Average Throughput: {:.5f} samples/sec".format( + (total - warmup) / (total_time))) + + +def accuracy(pipe, original_pipe, neural_engine_graph, generator): + with torch.no_grad(): + prompt = "a photo of an astronaut riding a horse on mars" + + save_time = time.strftime("_%H_%M_%S") + # Engine + engine_image = pipe(prompt, + engine_graph=neural_engine_graph, + generator=generator).images[0] + engine_image.save("astronaut_rides_horse_from_engine" + save_time + + '.png') + + engine_image_dir = "engine_image" + os.makedirs(engine_image_dir, exist_ok=True) + if os.path.isfile( + os.path.join(engine_image_dir, "astronaut_rides_horse.png")): + os.remove( + os.path.join(engine_image_dir, "astronaut_rides_horse.png")) + engine_image.save(engine_image_dir + "/astronaut_rides_horse.png") + + # Pytorch + pytorch_image = original_pipe(prompt, generator=generator).images[0] + pytorch_image.save("astronaut_rides_horse_from_pytorch" + save_time + + '.png') + + pytorch_image_dir = "pytorch_image" + os.makedirs(pytorch_image_dir, exist_ok=True) + if os.path.isfile( + os.path.join(pytorch_image_dir, "astronaut_rides_horse.png")): + os.remove( + os.path.join(pytorch_image_dir, "astronaut_rides_horse.png")) + pytorch_image.save(pytorch_image_dir + "/astronaut_rides_horse.png") + + fid = fid_score.calculate_fid_given_paths( + (pytorch_image_dir, engine_image_dir), 1, "cpu", 2048, 2) + print("Finally FID score Accuracy: {}".format(fid)) + return fid + + +def executor(pipe, neural_engine_graph, prompt, name, size, generator): + print('Executor start...') + for i in range(size): + save_time = time.strftime("_%H_%M_%S") + image = pipe(prompt, + engine_graph=neural_engine_graph, + generator=generator).images[0] + image.save(name + str(i) + save_time + '.png') + return + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--input_model", + default="runwayml/stable-diffusion-v1-5", + type=str, + help= + "Path to pretrained model or model identifier from huggingface.co/models." + ) + parser.add_argument( + "--prompt", + default="a photo of an astronaut riding a horse on mars", + type=str, + help= + "The input of the model, like: 'a photo of an astronaut riding a horse on mars'." 
+ ) + parser.add_argument("--ir_path", + default="./ir", + type=str, + help="Neural engine IR path.") + parser.add_argument("--name", + default="output_image", + type=str, + help="output image name.") + parser.add_argument("--mode", + type=str, + help="Benchmark mode of latency or accuracy.") + parser.add_argument("--seed", type=int, default=666, help="random seed") + parser.add_argument("--steps", + type=int, + default=20, + help="denoising steps") + parser.add_argument("--size", + type=int, + default=1, + help="the number of output images per prompt") + + # for img2img + parser.add_argument("--pipeline", + default="text2img", + type=str, + help="text2img or img2img pipeline.") + parser.add_argument("--prompts", + type=str, + default="Cartoonize the following image", + help="prompts for img2img") + parser.add_argument( + "--image", + type=str, + default= + "https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png", + help="the original image for the img2img pipeline") + return parser.parse_args() + + +def main(): + args = parse_args() + neural_engine_graph = diffusion_utils.neural_engine_init(args.ir_path) + if args.pipeline == "text2img": + dpm = DPMSolverMultistepScheduler.from_pretrained( + args.input_model, subfolder="scheduler") + pipe = diffusion_utils.StableDiffusionPipeline.from_pretrained( + args.input_model, scheduler=dpm) + pipe.safety_checker = lambda images, clip_input: (images, False) + generator = torch.Generator("cpu").manual_seed(args.seed) + if args.mode == "latency": + benchmark(pipe, neural_engine_graph, generator, args.steps) + return + + if args.mode == "accuracy": + from diffusers import StableDiffusionPipeline + original_pipe = StableDiffusionPipeline.from_pretrained( + args.input_model) + accuracy(pipe, original_pipe, neural_engine_graph, generator) + return + + executor(pipe, neural_engine_graph, args.prompt, args.name, args.size, + generator) + + if args.pipeline == "img2img": + from diffusion_utils_img2img import StableDiffusionImg2ImgPipeline + import requests + from PIL import Image + from io import BytesIO + pipe = StableDiffusionImg2ImgPipeline.from_pretrained(args.input_model) + url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + response = requests.get(url) + init_image = Image.open(BytesIO(response.content)).convert("RGB") + init_image = init_image.resize((768, 512)) + + prompt = "A fantasy landscape, trending on artstation" + images = pipe(prompt=prompt, + image=init_image, + engine_graph=neural_engine_graph, + strength=0.75, + guidance_scale=7.5).images + images[0].save("fantasy_landscape.png") + + if args.pipeline == "instruction-tuning-sd": + """ + # officical Example: https://huggingface.co/instruction-tuning-sd/cartoonizer + import torch + from diffusers import StableDiffusionInstructPix2PixPipeline + from diffusers.utils import load_image + + model_id = "instruction-tuning-sd/cartoonizer" + pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained( + model_id, torch_dtype=torch.float16, use_auth_token=True + ).to("cuda") + + image_path = "https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" + image = load_image(image_path) + + image = pipeline("Cartoonize the following image", image=image).images[0] + image.save("image.png") + """ + from ITREX_StableDiffusionInstructPix2PixPipeline import StableDiffusionInstructPix2PixPipeline + from diffusers.utils import load_image + pipeline = 
StableDiffusionInstructPix2PixPipeline.from_pretrained( + args.input_model, torch_dtype=torch.float32, use_auth_token=True) + + image_path = args.image + image = load_image(image_path) + image = image.resize((512, 512)) + + image = pipeline(args.prompts, + image=image, + engine_graph=neural_engine_graph, + num_inference_steps=args.steps).images[0] + save_time = time.strftime("_%H_%M_%S") + image.save("image" + save_time + '.png') + + return + + +if __name__ == '__main__': + main() diff --git a/examples/huggingface/pytorch/question-answering/deployment/squad/bert_large/run_bert_large.sh b/examples/huggingface/pytorch/question-answering/deployment/squad/bert_large/run_bert_large.sh index 017b7a2efd3..5c65ca50e0b 100644 --- a/examples/huggingface/pytorch/question-answering/deployment/squad/bert_large/run_bert_large.sh +++ b/examples/huggingface/pytorch/question-answering/deployment/squad/bert_large/run_bert_large.sh @@ -123,7 +123,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'bf16' ]]; then mode_cmd=$mode_cmd" --enable_bf16" fi diff --git a/examples/huggingface/pytorch/question-answering/deployment/squad/bert_large/run_qa.py b/examples/huggingface/pytorch/question-answering/deployment/squad/bert_large/run_qa.py index 39bb8c074ac..4ce35df134d 100644 --- a/examples/huggingface/pytorch/question-answering/deployment/squad/bert_large/run_qa.py +++ b/examples/huggingface/pytorch/question-answering/deployment/squad/bert_large/run_qa.py @@ -26,13 +26,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics , OptimizedModel -from neural_compressor.config import ( - PostTrainingQuantConfig, - QuantizationAwareTrainingConfig, - TuningCriterion, - AccuracyCriterion -) +from intel_extension_for_transformers.transformers import metrics , OptimizedModel, QuantizationConfig from trainer_qa import QuestionAnsweringTrainer from transformers import ( AutoConfig, @@ -217,9 +211,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic and qat."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default="eval_f1", @@ -652,43 +646,25 @@ def compute_metrics(p: EvalPrediction): if not training_args.do_eval: raise ValueError("do_eval must be set to True for quantization.") - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." 
) + if optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - if optim_args.quantization_approach != "qat": - tuning_criterion = TuningCriterion(max_trials=600) - accuracy_criterion = AccuracyCriterion( - higher_is_better=False, # optional. - criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". - tolerable_loss=optim_args.perf_tol, # optional. - ) - quantization_config = PostTrainingQuantConfig( - approach=optim_args.quantization_approach, - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - else: - tuning_criterion = TuningCriterion(max_trials=600) - accuracy_criterion = AccuracyCriterion( - higher_is_better=False, # optional. - criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". - tolerable_loss=optim_args.perf_tol, # optional. - ) - quantization_config = QuantizationAwareTrainingConfig( - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - early_stopping_patience = 2 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ - early_stopping_threshold)) + quantization_config = QuantizationConfig( + approach=optim_args.quantization_approach, + max_trials=200, + metrics=[tune_metric], + ) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: @@ -698,7 +674,7 @@ def compute_metrics(p: EvalPrediction): max_eval_samples = data_args.max_eval_samples \ if data_args.max_eval_samples is not None else len(eval_dataset) eval_samples = min(max_eval_samples, len(eval_dataset)) - samples = eval_samples - (eval_samples % training_args.per_device_eval_batch_size) \ + samples = eval_samples - (eval_samples % batch_size) \ if training_args.dataloader_drop_last else eval_samples logger.info("metrics keys: {}".format(results.keys())) bert_task_acc_keys = ['eval_f1', 'eval_accuracy', 'eval_matthews_correlation', diff --git a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/bert_large/run_bert_large.sh b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/bert_large/run_bert_large.sh index 0d93e12079b..7f3253746ab 100644 --- a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/bert_large/run_bert_large.sh +++ b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/bert_large/run_bert_large.sh @@ -121,7 +121,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'fp32' ]]; then mode_cmd=$mode_cmd" --fp32" fi diff --git a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/bert_large/run_qa.py b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/bert_large/run_qa.py index 46122402c6e..d34418e0dd8 100644 --- 
a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/bert_large/run_qa.py +++ b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/bert_large/run_qa.py @@ -26,8 +26,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics , OptimizedModel -from neural_compressor.config import PostTrainingQuantConfig +from intel_extension_for_transformers.transformers import metrics , OptimizedModel, QuantizationConfig from trainer_qa import QuestionAnsweringTrainer from transformers import ( AutoConfig, @@ -212,9 +211,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default="eval_f1", @@ -641,21 +640,27 @@ def compute_metrics(p: EvalPrediction): trainer.save_model(training_args.output_dir) trainer.calib_dataloader = trainer.get_eval_dataloader() - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) + if optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - quantization_config = PostTrainingQuantConfig( - backend="ipex", + quantization_config = QuantizationConfig( approach=optim_args.quantization_approach, - excluded_precisions=["bf16"] + max_trials=200, + metrics=[tune_metric], + use_bf16=False ) + quantization_config.framework = "pytorch_ipex" model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: diff --git a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased/run_distilbert.sh b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased/run_distilbert.sh index d0d2bbb9db7..1c0ca172eba 100644 --- a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased/run_distilbert.sh +++ b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased/run_distilbert.sh @@ -121,7 +121,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'fp32' ]]; then mode_cmd=$mode_cmd" --fp32" fi diff --git a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased/run_qa.py b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased/run_qa.py index 46122402c6e..9417cf4f65f 100644 --- 
a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased/run_qa.py +++ b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased/run_qa.py @@ -26,8 +26,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics , OptimizedModel -from neural_compressor.config import PostTrainingQuantConfig +from intel_extension_for_transformers.transformers import metrics , OptimizedModel, QuantizationConfig from trainer_qa import QuestionAnsweringTrainer from transformers import ( AutoConfig, @@ -212,9 +211,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default="eval_f1", @@ -641,21 +640,28 @@ def compute_metrics(p: EvalPrediction): trainer.save_model(training_args.output_dir) trainer.calib_dataloader = trainer.get_eval_dataloader() - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) + + elif optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - quantization_config = PostTrainingQuantConfig( - backend="ipex", + quantization_config = QuantizationConfig( approach=optim_args.quantization_approach, - excluded_precisions=["bf16"] + max_trials=200, + metrics=[tune_metric], + use_bf16=False ) + quantization_config.framework = "pytorch_ipex" model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: diff --git a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased_sparse/run_distilbert_sparse.sh b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased_sparse/run_distilbert_sparse.sh index 7621827eef9..3e82836a0d2 100644 --- a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased_sparse/run_distilbert_sparse.sh +++ b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased_sparse/run_distilbert_sparse.sh @@ -121,7 +121,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'fp32' ]]; then mode_cmd=$mode_cmd" --fp32" fi diff --git a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased_sparse/run_qa.py 
b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased_sparse/run_qa.py index 1c250201c9d..25c51b01192 100644 --- a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased_sparse/run_qa.py +++ b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased_sparse/run_qa.py @@ -26,8 +26,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics , OptimizedModel -from neural_compressor.config import PostTrainingQuantConfig +from intel_extension_for_transformers.transformers import metrics , OptimizedModel, QuantizationConfig from trainer_qa import QuestionAnsweringTrainer from transformers import ( AutoConfig, @@ -212,9 +211,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default="eval_f1", @@ -641,21 +640,27 @@ def compute_metrics(p: EvalPrediction): trainer.save_model(training_args.output_dir) trainer.calib_dataloader = trainer.get_eval_dataloader() - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) + elif optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - quantization_config = PostTrainingQuantConfig( - backend="ipex", + quantization_config = QuantizationConfig( approach=optim_args.quantization_approach, - excluded_precisions=["bf16"], + max_trials=200, + metrics=[tune_metric], + use_bf16=False ) + quantization_config.framework = "pytorch_ipex" model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: diff --git a/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/README.md b/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/README.md index 98f9050cdc5..d7082e1194a 100644 --- a/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/README.md +++ b/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/README.md @@ -76,7 +76,7 @@ python run_qa.py --model_name_or_path "sguskin/dynamic-minilmv2-L6-H384-squad1.1 For INT8: ```shell -python run_qa.py --model_name_or_path "sguskin/dynamic-minilmv2-L6-H384-squad1.1" --dataset_name squad --do_train --do_eval --output_dir model_and_tokenizer --overwrite_output_dir --length_config "(269, 253, 252, 202, 104, 34)" --overwrite_cache --to_onnx --tune --quantization_approach static +python run_qa.py --model_name_or_path "sguskin/dynamic-minilmv2-L6-H384-squad1.1" 
--dataset_name squad --do_train --do_eval --output_dir model_and_tokenizer --overwrite_output_dir --length_config "(269, 253, 252, 202, 104, 34)" --overwrite_cache --to_onnx --tune --quantization_approach PostTrainingStatic ``` For BF16: diff --git a/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/run_LAT.sh b/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/run_LAT.sh index a2119fd1dc3..163c8894cf7 100644 --- a/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/run_LAT.sh +++ b/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/run_LAT.sh @@ -123,7 +123,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'bf16' ]]; then mode_cmd=$mode_cmd" --enable_bf16" fi diff --git a/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/run_qa.py b/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/run_qa.py index 81209b7d4f7..eb41bf85a32 100644 --- a/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/run_qa.py +++ b/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/run_qa.py @@ -29,8 +29,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics , OptimizedModel, DynamicLengthConfig -from neural_compressor.config import PostTrainingQuantConfig +from intel_extension_for_transformers.transformers import metrics , OptimizedModel, QuantizationConfig, DynamicLengthConfig from trainer_qa import QuestionAnsweringTrainer from intel_extension_for_transformers.transformers.modeling.modeling_roberta_dynamic import RobertaForQuestionAnswering @@ -222,9 +221,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default="eval_f1", @@ -781,20 +780,24 @@ def compute_metrics(p: EvalPrediction): trainer.save_model(training_args.output_dir) trainer.calib_dataloader = trainer.get_eval_dataloader() - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." 
) + if optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - quantization_config = PostTrainingQuantConfig( - backend="ipex", + quantization_config = QuantizationConfig( approach=optim_args.quantization_approach, - excluded_precision=["bf16"] + max_trials=200, + metrics=[tune_metric], ) model = trainer.quantize(quant_config=quantization_config) diff --git a/examples/huggingface/pytorch/text-classification/deployment/emotion/distilbert_base_uncased/run_emotion.py b/examples/huggingface/pytorch/text-classification/deployment/emotion/distilbert_base_uncased/run_emotion.py index fc8b047c58d..720e4109657 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/emotion/distilbert_base_uncased/run_emotion.py +++ b/examples/huggingface/pytorch/text-classification/deployment/emotion/distilbert_base_uncased/run_emotion.py @@ -26,13 +26,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel -from neural_compressor.config import ( - PostTrainingQuantConfig, - QuantizationAwareTrainingConfig, - TuningCriterion, - AccuracyCriterion -) +from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel, QuantizationConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( AutoConfig, @@ -204,9 +198,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic and qat."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default=None, @@ -527,37 +521,27 @@ def compute_metrics(p: EvalPrediction): if not training_args.do_eval: raise ValueError("do_eval must be set to True for quantization.") - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) + if optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - tuning_criterion = TuningCriterion(max_trials=600) - accuracy_criterion = AccuracyCriterion( - higher_is_better=False, # optional. - criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". - tolerable_loss=optim_args.perf_tol, # optional. 
+ objective = objectives.performance + quantization_config = QuantizationConfig( + approach=optim_args.quantization_approach, + max_trials=600, + metrics=[tune_metric], + objectives=[objective] ) - if optim_args.quantization_approach != "qat": - quantization_config = PostTrainingQuantConfig( - approach=optim_args.quantization_approach, - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - else: - quantization_config = QuantizationAwareTrainingConfig( - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - early_stopping_patience = 2 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ - early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: diff --git a/examples/huggingface/pytorch/text-classification/deployment/emotion/distilbert_base_uncased/run_emotion.sh b/examples/huggingface/pytorch/text-classification/deployment/emotion/distilbert_base_uncased/run_emotion.sh index 79c91ee5b18..57cee9c4494 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/emotion/distilbert_base_uncased/run_emotion.sh +++ b/examples/huggingface/pytorch/text-classification/deployment/emotion/distilbert_base_uncased/run_emotion.sh @@ -121,7 +121,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'bf16' ]]; then mode_cmd=$mode_cmd" --enable_bf16" fi diff --git a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base/run_bert_base.sh b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base/run_bert_base.sh index d1648a0c661..f2e133f1d9e 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base/run_bert_base.sh +++ b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base/run_bert_base.sh @@ -123,7 +123,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'bf16' ]]; then mode_cmd=$mode_cmd" --enable_bf16" fi diff --git a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base/run_glue.py b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base/run_glue.py index 7267b845f98..eb8e47583e0 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base/run_glue.py +++ b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base/run_glue.py @@ -26,13 +26,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel -from neural_compressor.config import ( - PostTrainingQuantConfig, - QuantizationAwareTrainingConfig, - TuningCriterion, - AccuracyCriterion -) +from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel, QuantizationConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( AutoConfig, 
@@ -203,9 +197,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic and qat."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default=None, @@ -530,37 +524,27 @@ def compute_metrics(p: EvalPrediction): if not training_args.do_eval: raise ValueError("do_eval must be set to True for quantization.") - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) + if optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - tuning_criterion = TuningCriterion(max_trials=600) - accuracy_criterion = AccuracyCriterion( - higher_is_better=False, # optional. - criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". - tolerable_loss=optim_args.perf_tol, # optional. + objective = objectives.performance + quantization_config = QuantizationConfig( + approach=optim_args.quantization_approach, + max_trials=600, + metrics=[tune_metric], + objectives=[objective] ) - if optim_args.quantization_approach != "qat": - quantization_config = PostTrainingQuantConfig( - approach=optim_args.quantization_approach, - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - else: - quantization_config = QuantizationAwareTrainingConfig( - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - early_stopping_patience = 2 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ - early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: diff --git a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base_cased/run_bert_base_cased.sh b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base_cased/run_bert_base_cased.sh index cca1268e646..4daabbfe41a 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base_cased/run_bert_base_cased.sh +++ b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base_cased/run_bert_base_cased.sh @@ -123,7 +123,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'bf16' ]]; then mode_cmd=$mode_cmd" --enable_bf16" fi diff --git a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base_cased/run_glue.py b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base_cased/run_glue.py index 
c68d3527abc..9374620302a 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base_cased/run_glue.py +++ b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base_cased/run_glue.py @@ -26,13 +26,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel -from neural_compressor.config import ( - PostTrainingQuantConfig, - QuantizationAwareTrainingConfig, - TuningCriterion, - AccuracyCriterion -) +from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel, QuantizationConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( AutoConfig, @@ -203,9 +197,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic and qat."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default=None, @@ -530,37 +524,27 @@ def compute_metrics(p: EvalPrediction): if not training_args.do_eval: raise ValueError("do_eval must be set to True for quantization.") - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) + if optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - tuning_criterion = TuningCriterion(max_trials=600) - accuracy_criterion = AccuracyCriterion( - higher_is_better=False, # optional. - criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". - tolerable_loss=optim_args.perf_tol, # optional. 
+ objective = objectives.performance + quantization_config = QuantizationConfig( + approach=optim_args.quantization_approach, + max_trials=600, + metrics=[tune_metric], + objectives=[objective] ) - if optim_args.quantization_approach != "qat": - quantization_config = PostTrainingQuantConfig( - approach=optim_args.quantization_approach, - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - else: - quantization_config = QuantizationAwareTrainingConfig( - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - early_stopping_patience = 2 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ - early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: diff --git a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_mini/run_bert_mini.sh b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_mini/run_bert_mini.sh index fa9f005fa54..6e9db50fddd 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_mini/run_bert_mini.sh +++ b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_mini/run_bert_mini.sh @@ -123,7 +123,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'bf16' ]]; then mode_cmd=$mode_cmd" --enable_bf16" fi diff --git a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_mini/run_glue.py b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_mini/run_glue.py index 6e3bc04cd0d..eb8e47583e0 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_mini/run_glue.py +++ b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_mini/run_glue.py @@ -26,13 +26,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel -from neural_compressor.config import ( - PostTrainingQuantConfig, - QuantizationAwareTrainingConfig, - TuningCriterion, - AccuracyCriterion -) +from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel, QuantizationConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( AutoConfig, @@ -203,9 +197,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic and qat."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. 
Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default=None, @@ -530,36 +524,27 @@ def compute_metrics(p: EvalPrediction): if not training_args.do_eval: raise ValueError("do_eval must be set to True for quantization.") - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) + if optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) + tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - tuning_criterion = TuningCriterion(max_trials=600) - accuracy_criterion = AccuracyCriterion( - higher_is_better=False, # optional. - criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". - tolerable_loss=optim_args.perf_tol, # optional. + objective = objectives.performance + quantization_config = QuantizationConfig( + approach=optim_args.quantization_approach, + max_trials=600, + metrics=[tune_metric], + objectives=[objective] ) - if optim_args.quantization_approach != "qat": - quantization_config = PostTrainingQuantConfig( - approach=optim_args.quantization_approach, - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - else: - quantization_config = QuantizationAwareTrainingConfig( - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - early_stopping_patience = 2 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ - early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: diff --git a/examples/huggingface/pytorch/text-classification/deployment/mrpc/distilbert_base_uncased/run_distilbert_base.sh b/examples/huggingface/pytorch/text-classification/deployment/mrpc/distilbert_base_uncased/run_distilbert_base.sh index b20cffed3b5..8c75385ea5b 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/mrpc/distilbert_base_uncased/run_distilbert_base.sh +++ b/examples/huggingface/pytorch/text-classification/deployment/mrpc/distilbert_base_uncased/run_distilbert_base.sh @@ -123,7 +123,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'bf16' ]]; then mode_cmd=$mode_cmd" --enable_bf16" fi diff --git a/examples/huggingface/pytorch/text-classification/deployment/mrpc/distilbert_base_uncased/run_glue.py b/examples/huggingface/pytorch/text-classification/deployment/mrpc/distilbert_base_uncased/run_glue.py index 6e3bc04cd0d..eb8e47583e0 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/mrpc/distilbert_base_uncased/run_glue.py +++ b/examples/huggingface/pytorch/text-classification/deployment/mrpc/distilbert_base_uncased/run_glue.py @@ -26,13 +26,7 @@ import transformers from dataclasses 
import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel -from neural_compressor.config import ( - PostTrainingQuantConfig, - QuantizationAwareTrainingConfig, - TuningCriterion, - AccuracyCriterion -) +from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel, QuantizationConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( AutoConfig, @@ -203,9 +197,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic and qat."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default=None, @@ -530,36 +524,27 @@ def compute_metrics(p: EvalPrediction): if not training_args.do_eval: raise ValueError("do_eval must be set to True for quantization.") - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) + if optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) + tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - tuning_criterion = TuningCriterion(max_trials=600) - accuracy_criterion = AccuracyCriterion( - higher_is_better=False, # optional. - criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". - tolerable_loss=optim_args.perf_tol, # optional. 
+ objective = objectives.performance + quantization_config = QuantizationConfig( + approach=optim_args.quantization_approach, + max_trials=600, + metrics=[tune_metric], + objectives=[objective] ) - if optim_args.quantization_approach != "qat": - quantization_config = PostTrainingQuantConfig( - approach=optim_args.quantization_approach, - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - else: - quantization_config = QuantizationAwareTrainingConfig( - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - early_stopping_patience = 2 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ - early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: diff --git a/examples/huggingface/pytorch/text-classification/deployment/mrpc/roberta_base/run_glue.py b/examples/huggingface/pytorch/text-classification/deployment/mrpc/roberta_base/run_glue.py index 6f899f98ed4..efc762b5c59 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/mrpc/roberta_base/run_glue.py +++ b/examples/huggingface/pytorch/text-classification/deployment/mrpc/roberta_base/run_glue.py @@ -26,13 +26,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel -from neural_compressor.config import ( - PostTrainingQuantConfig, - QuantizationAwareTrainingConfig, - TuningCriterion, - AccuracyCriterion -) +from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel, QuantizationConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( AutoConfig, @@ -203,9 +197,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic and qat."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default=None, @@ -530,36 +524,27 @@ def compute_metrics(p: EvalPrediction): if not training_args.do_eval: raise ValueError("do_eval must be set to True for quantization.") - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) + if optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) + tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - tuning_criterion = TuningCriterion(max_trials=600) - accuracy_criterion = AccuracyCriterion( - higher_is_better=False, # optional. - criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". - tolerable_loss=optim_args.perf_tol, # optional. 
+ objective = objectives.performance + quantization_config = QuantizationConfig( + approach=optim_args.quantization_approach, + max_trials=600, + metrics=[tune_metric], + objectives=[objective] ) - if optim_args.quantization_approach != "qat": - quantization_config = PostTrainingQuantConfig( - approach=optim_args.quantization_approach, - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - else: - quantization_config = QuantizationAwareTrainingConfig( - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - early_stopping_patience = 2 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ - early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: diff --git a/examples/huggingface/pytorch/text-classification/deployment/mrpc/roberta_base/run_roberta_base.sh b/examples/huggingface/pytorch/text-classification/deployment/mrpc/roberta_base/run_roberta_base.sh index 4228e299003..37110ee356c 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/mrpc/roberta_base/run_roberta_base.sh +++ b/examples/huggingface/pytorch/text-classification/deployment/mrpc/roberta_base/run_roberta_base.sh @@ -123,7 +123,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'bf16' ]]; then mode_cmd=$mode_cmd" --enable_bf16" fi diff --git a/examples/huggingface/pytorch/text-classification/deployment/sparse/bert_mini/run_bert_mini.sh b/examples/huggingface/pytorch/text-classification/deployment/sparse/bert_mini/run_bert_mini.sh index 4e36c1330a1..48b24477a34 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/sparse/bert_mini/run_bert_mini.sh +++ b/examples/huggingface/pytorch/text-classification/deployment/sparse/bert_mini/run_bert_mini.sh @@ -128,7 +128,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'bf16' ]]; then mode_cmd=$mode_cmd" --enable_bf16" fi diff --git a/examples/huggingface/pytorch/text-classification/deployment/sparse/bert_mini/run_glue.py b/examples/huggingface/pytorch/text-classification/deployment/sparse/bert_mini/run_glue.py index b5c05350e84..4400593f0c2 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/sparse/bert_mini/run_glue.py +++ b/examples/huggingface/pytorch/text-classification/deployment/sparse/bert_mini/run_glue.py @@ -26,13 +26,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel -from neural_compressor.config import ( - PostTrainingQuantConfig, - QuantizationAwareTrainingConfig, - TuningCriterion, - AccuracyCriterion -) +from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel, QuantizationConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( AutoConfig, @@ -203,9 +197,9 @@ 
class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic and qat."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default=None, @@ -530,38 +524,27 @@ def compute_metrics(p: EvalPrediction): if not training_args.do_eval: raise ValueError("do_eval must be set to True for quantization.") - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) + if optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric objective = objectives.performance - tuning_criterion = TuningCriterion(max_trials=600, objective=[objective.name]) - accuracy_criterion = AccuracyCriterion( - higher_is_better=True, # optional. - criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". - tolerable_loss=optim_args.perf_tol, # optional. + quantization_config = QuantizationConfig( + approach=optim_args.quantization_approach, + max_trials=600, + metrics=[tune_metric], + objectives=[objective] ) - if optim_args.quantization_approach != "qat": - quantization_config = PostTrainingQuantConfig( - approach=optim_args.quantization_approach, - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - else: - quantization_config = QuantizationAwareTrainingConfig( - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - early_stopping_patience = 2 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ - early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: diff --git a/examples/huggingface/pytorch/text-classification/deployment/sparse/distilbert_base_uncased/run_distilbert.sh b/examples/huggingface/pytorch/text-classification/deployment/sparse/distilbert_base_uncased/run_distilbert.sh index 43d99aa3419..53c694f0a89 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/sparse/distilbert_base_uncased/run_distilbert.sh +++ b/examples/huggingface/pytorch/text-classification/deployment/sparse/distilbert_base_uncased/run_distilbert.sh @@ -128,7 +128,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'bf16' ]]; then mode_cmd=$mode_cmd" --enable_bf16" fi diff --git a/examples/huggingface/pytorch/text-classification/deployment/sparse/distilbert_base_uncased/run_qa.py 
b/examples/huggingface/pytorch/text-classification/deployment/sparse/distilbert_base_uncased/run_qa.py index af1e10149f3..373669fb338 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/sparse/distilbert_base_uncased/run_qa.py +++ b/examples/huggingface/pytorch/text-classification/deployment/sparse/distilbert_base_uncased/run_qa.py @@ -26,13 +26,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics , OptimizedModel -from neural_compressor.config import ( - PostTrainingQuantConfig, - QuantizationAwareTrainingConfig, - TuningCriterion, - AccuracyCriterion -) +from intel_extension_for_transformers.transformers import metrics , OptimizedModel, QuantizationConfig from trainer_qa import QuestionAnsweringTrainer from transformers import ( AutoConfig, @@ -217,9 +211,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic and qat."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default="eval_f1", @@ -652,38 +646,25 @@ def compute_metrics(p: EvalPrediction): if not training_args.do_eval: raise ValueError("do_eval must be set to True for quantization.") - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) + if optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - objective = objectives.performance - tuning_criterion = TuningCriterion(max_trials=600, objective=[objective.name]) - accuracy_criterion = AccuracyCriterion( - higher_is_better=True, # optional. - criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". - tolerable_loss=optim_args.perf_tol, # optional. 
+ quantization_config = QuantizationConfig( + approach=optim_args.quantization_approach, + max_trials=200, + metrics=[tune_metric], ) - if optim_args.quantization_approach != "qat": - quantization_config = PostTrainingQuantConfig( - approach=optim_args.quantization_approach, - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - else: - quantization_config = QuantizationAwareTrainingConfig( - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - early_stopping_patience = 2 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ - early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: diff --git a/examples/huggingface/pytorch/text-classification/deployment/sst2/bert_mini/run_bert_mini.sh b/examples/huggingface/pytorch/text-classification/deployment/sst2/bert_mini/run_bert_mini.sh index d756a945cf6..4a5a986fdc1 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/sst2/bert_mini/run_bert_mini.sh +++ b/examples/huggingface/pytorch/text-classification/deployment/sst2/bert_mini/run_bert_mini.sh @@ -123,7 +123,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'bf16' ]]; then mode_cmd=$mode_cmd" --enable_bf16" fi diff --git a/examples/huggingface/pytorch/text-classification/deployment/sst2/bert_mini/run_glue.py b/examples/huggingface/pytorch/text-classification/deployment/sst2/bert_mini/run_glue.py index 6f899f98ed4..efc762b5c59 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/sst2/bert_mini/run_glue.py +++ b/examples/huggingface/pytorch/text-classification/deployment/sst2/bert_mini/run_glue.py @@ -26,13 +26,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel -from neural_compressor.config import ( - PostTrainingQuantConfig, - QuantizationAwareTrainingConfig, - TuningCriterion, - AccuracyCriterion -) +from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel, QuantizationConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( AutoConfig, @@ -203,9 +197,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic and qat."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default=None, @@ -530,36 +524,27 @@ def compute_metrics(p: EvalPrediction): if not training_args.do_eval: raise ValueError("do_eval must be set to True for quantization.") - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." 
) + if optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) + tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - tuning_criterion = TuningCriterion(max_trials=600) - accuracy_criterion = AccuracyCriterion( - higher_is_better=False, # optional. - criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". - tolerable_loss=optim_args.perf_tol, # optional. + objective = objectives.performance + quantization_config = QuantizationConfig( + approach=optim_args.quantization_approach, + max_trials=600, + metrics=[tune_metric], + objectives=[objective] ) - if optim_args.quantization_approach != "qat": - quantization_config = PostTrainingQuantConfig( - approach=optim_args.quantization_approach, - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - else: - quantization_config = QuantizationAwareTrainingConfig( - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - early_stopping_patience = 2 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ - early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: diff --git a/examples/huggingface/pytorch/text-classification/deployment/sst2/distilbert_base_uncased/run_distilbert_base.sh b/examples/huggingface/pytorch/text-classification/deployment/sst2/distilbert_base_uncased/run_distilbert_base.sh index 962a0044ca0..d2be2d43c50 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/sst2/distilbert_base_uncased/run_distilbert_base.sh +++ b/examples/huggingface/pytorch/text-classification/deployment/sst2/distilbert_base_uncased/run_distilbert_base.sh @@ -123,7 +123,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'bf16' ]]; then mode_cmd=$mode_cmd" --enable_bf16" fi diff --git a/examples/huggingface/pytorch/text-classification/deployment/sst2/distilbert_base_uncased/run_glue.py b/examples/huggingface/pytorch/text-classification/deployment/sst2/distilbert_base_uncased/run_glue.py index 6e3bc04cd0d..eb8e47583e0 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/sst2/distilbert_base_uncased/run_glue.py +++ b/examples/huggingface/pytorch/text-classification/deployment/sst2/distilbert_base_uncased/run_glue.py @@ -26,13 +26,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel -from neural_compressor.config import ( - PostTrainingQuantConfig, - QuantizationAwareTrainingConfig, - TuningCriterion, - AccuracyCriterion -) +from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel, QuantizationConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( AutoConfig, @@ -203,9 
+197,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic and qat."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default=None, @@ -530,36 +524,27 @@ def compute_metrics(p: EvalPrediction): if not training_args.do_eval: raise ValueError("do_eval must be set to True for quantization.") - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) + if optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) + tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - tuning_criterion = TuningCriterion(max_trials=600) - accuracy_criterion = AccuracyCriterion( - higher_is_better=False, # optional. - criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". - tolerable_loss=optim_args.perf_tol, # optional. + objective = objectives.performance + quantization_config = QuantizationConfig( + approach=optim_args.quantization_approach, + max_trials=600, + metrics=[tune_metric], + objectives=[objective] ) - if optim_args.quantization_approach != "qat": - quantization_config = PostTrainingQuantConfig( - approach=optim_args.quantization_approach, - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - else: - quantization_config = QuantizationAwareTrainingConfig( - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - early_stopping_patience = 2 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ - early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: diff --git a/examples/huggingface/pytorch/text-classification/deployment/sst2/minilm_l6_h384_uncased/run_glue.py b/examples/huggingface/pytorch/text-classification/deployment/sst2/minilm_l6_h384_uncased/run_glue.py index 9eaa0cb0ecd..4400593f0c2 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/sst2/minilm_l6_h384_uncased/run_glue.py +++ b/examples/huggingface/pytorch/text-classification/deployment/sst2/minilm_l6_h384_uncased/run_glue.py @@ -26,13 +26,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel -from neural_compressor.config import ( - PostTrainingQuantConfig, - QuantizationAwareTrainingConfig, - TuningCriterion, - AccuracyCriterion -) +from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel, QuantizationConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( AutoConfig, @@ -203,9 
+197,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic and qat."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default=None, @@ -530,37 +524,27 @@ def compute_metrics(p: EvalPrediction): if not training_args.do_eval: raise ValueError("do_eval must be set to True for quantization.") - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) + if optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - tuning_criterion = TuningCriterion(max_trials=600) - accuracy_criterion = AccuracyCriterion( - higher_is_better=False, # optional. - criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". - tolerable_loss=optim_args.perf_tol, # optional. + objective = objectives.performance + quantization_config = QuantizationConfig( + approach=optim_args.quantization_approach, + max_trials=600, + metrics=[tune_metric], + objectives=[objective] ) - if optim_args.quantization_approach != "qat": - quantization_config = PostTrainingQuantConfig( - approach=optim_args.quantization_approach, - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - else: - quantization_config = QuantizationAwareTrainingConfig( - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - early_stopping_patience = 2 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ - early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: diff --git a/examples/huggingface/pytorch/text-classification/deployment/sst2/minilm_l6_h384_uncased/run_minilm.sh b/examples/huggingface/pytorch/text-classification/deployment/sst2/minilm_l6_h384_uncased/run_minilm.sh index 31f6033ce6e..20417a96738 100644 --- a/examples/huggingface/pytorch/text-classification/deployment/sst2/minilm_l6_h384_uncased/run_minilm.sh +++ b/examples/huggingface/pytorch/text-classification/deployment/sst2/minilm_l6_h384_uncased/run_minilm.sh @@ -123,7 +123,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'bf16' ]]; then mode_cmd=$mode_cmd" --enable_bf16" fi diff --git a/examples/huggingface/pytorch/text-embedding/deployment/mteb/bge/run_bge.sh b/examples/huggingface/pytorch/text-embedding/deployment/mteb/bge/run_bge.sh index 5b62734d906..8c24dbcffef 100644 --- 
a/examples/huggingface/pytorch/text-embedding/deployment/mteb/bge/run_bge.sh +++ b/examples/huggingface/pytorch/text-embedding/deployment/mteb/bge/run_bge.sh @@ -123,7 +123,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach static" + mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" elif [[ ${PRECISION} = 'bf16' ]]; then mode_cmd=$mode_cmd" --enable_bf16" fi diff --git a/examples/huggingface/pytorch/text-embedding/deployment/mteb/bge/run_mteb.py b/examples/huggingface/pytorch/text-embedding/deployment/mteb/bge/run_mteb.py index 225c8012b34..557524c6590 100644 --- a/examples/huggingface/pytorch/text-embedding/deployment/mteb/bge/run_mteb.py +++ b/examples/huggingface/pytorch/text-embedding/deployment/mteb/bge/run_mteb.py @@ -28,13 +28,7 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel -from neural_compressor.config import ( - PostTrainingQuantConfig, - QuantizationAwareTrainingConfig, - TuningCriterion, - AccuracyCriterion -) +from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel, QuantizationConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( @@ -207,9 +201,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="static", - metadata={"help": "Quantization approach. Supported approach are static, " - "dynamic and qat."}, + default="PostTrainingStatic", + metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " + "PostTrainingDynamic and QuantizationAwareTraining."}, ) metric_name: Optional[str] = field( default=None, @@ -577,36 +571,28 @@ def preprocess_function(example): if not training_args.do_eval: raise ValueError("do_eval must be set to True for quantization.") - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach != "PostTrainingDynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) + if optim_args.quantization_approach == "QuantizationAwareTraining": + early_stopping_patience = 6 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, + early_stopping_threshold)) + tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - trainer.metrics = tune_metric - tuning_criterion = TuningCriterion(max_trials=600) - accuracy_criterion = AccuracyCriterion( - higher_is_better=False, # optional. - criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". - tolerable_loss=optim_args.perf_tol, # optional. 
+ objective = objectives.performance + quantization_config = QuantizationConfig( + approach=optim_args.quantization_approach, + max_trials=600, + metrics=[tune_metric], + objectives=[objective], + sampling_size = len(train_dataset)//20 ) - if optim_args.quantization_approach != "qat": - quantization_config = PostTrainingQuantConfig( - approach=optim_args.quantization_approach, - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - else: - quantization_config = QuantizationAwareTrainingConfig( - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion - ) - early_stopping_patience = 2 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ - early_stopping_threshold)) stmodel = SentenceTransformer(model_args.model_name_or_path) def eval_func(model):