
Commit

Merge pull request #5 from vinesmsuic/main
Adding LCM (Latent-Consistency-Models)
vinesmsuic authored Nov 11, 2023
2 parents 791bc60 + 639a64b commit 348df83
Showing 2 changed files with 42 additions and 1 deletion.
2 changes: 1 addition & 1 deletion src/imagen_hub/infermodels/__init__.py
@@ -1,7 +1,7 @@

# ==========================================================
# Text-to-Image Generation
-from .sd import SD, OpenJourney
+from .sd import SD, OpenJourney, LCM
from .sdxl import SDXL
from .deepfloydif import DeepFloydIF
from .dalle import DALLE2, DALLE3, StableUnCLIP
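
With this change, LCM is exported from imagen_hub.infermodels alongside SD and OpenJourney, so it can be imported at the package level (a minimal sketch, assuming imagen_hub is installed; a fuller usage sketch follows the sd.py diff below):

from imagen_hub.infermodels import LCM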
41 changes: 41 additions & 0 deletions src/imagen_hub/infermodels/sd.py
@@ -101,3 +101,44 @@ def __init__(self, device="cuda", weight="prompthero/openjourney"):

def infer_one_image(self, prompt: str = None, seed: int = 42):
return super().infer_one_image(prompt, seed)

class LCM():
    def __init__(self, device="cuda", weight="SimianLuo/LCM_Dreamshaper_v7"):
        """
        A class for the Latent Consistency Model (LCM). Requires diffusers >= 0.22.
        Reference: https://github.com/luosiallen/latent-consistency-model#latent-consistency-models

        Args:
            device (str, optional): The device on which the model should run. Default is "cuda".
            weight (str, optional): The pretrained model weights for LCM. Default is "SimianLuo/LCM_Dreamshaper_v7".
        """
        self.pipe = DiffusionPipeline.from_pretrained(
            weight,
            torch_dtype=torch.float16,
        ).to(device)

    def infer_one_image(self, prompt: str = None, seed: int = 42, num_inference_steps: int = 4):
        """
        Infer an image based on the given prompt and seed.

        Args:
            prompt (str, optional): The prompt for the image generation. Default is None.
            seed (int, optional): The seed for the random generator. Default is 42.
            num_inference_steps (int, optional): Number of inference steps. Recommended: 1~8 steps; the paper used 4.

        Returns:
            PIL.Image.Image: The inferred image.

        Notes:
            num_inference_steps can be set to 1~50 steps. LCM supports fast inference even with <= 4 steps.
            (Max) I personally found that 8 steps give a better result, but the paper focuses on using 4 steps.
        """
        # torch.manual_seed seeds the global RNG and returns a Generator for reproducible sampling.
        generator = torch.manual_seed(seed)
        images = self.pipe(prompt=prompt,
                           num_inference_steps=num_inference_steps,
                           guidance_scale=8.0,
                           lcm_origin_steps=50,
                           generator=generator,
                           output_type="pil").images
        return images[0]
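
For reference, a minimal usage sketch of the new class (illustrative only, not part of this commit; it assumes diffusers >= 0.22, a CUDA GPU, and network access to download the SimianLuo/LCM_Dreamshaper_v7 weights on first use):

from imagen_hub.infermodels import LCM

# Load the LCM pipeline onto the GPU.
model = LCM(device="cuda")

# Generate one image; 4 steps matches the setting used in the paper.
image = model.infer_one_image(
    prompt="a photo of an astronaut riding a horse on mars",
    seed=42,
    num_inference_steps=4,
)
image.save("lcm_sample.png")  # infer_one_image returns a PIL.Image.Image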
