diff --git a/aicon/backend/models/big_sleep/big_sleep.py b/aicon/backend/models/big_sleep/big_sleep.py
index e92b59d..e6632ab 100644
--- a/aicon/backend/models/big_sleep/big_sleep.py
+++ b/aicon/backend/models/big_sleep/big_sleep.py
@@ -302,7 +302,7 @@ def __init__(
         gradient_accumulate_every: int = int(self.client_data[RECEIVED_DATA][JSON_GAE])
         model_name: str = self.client_data[RECEIVED_DATA][JSON_BACKBONE]
         if self.client_data[RECEIVED_DATA][JSON_SOURCE_IMG] is not None:
-            source_img: Image = Image.open(BytesIO(b64decode((self.client_data[RECEIVED_DATA][JSON_SOURCE_IMG]))))
+            source_img: Image = Image.open(BytesIO(b64decode((self.client_data[RECEIVED_DATA][JSON_SOURCE_IMG])))).convert('RGB')
         else:
             source_img = None
 
diff --git a/aicon/backend/models/deep_daze/deep_daze.py b/aicon/backend/models/deep_daze/deep_daze.py
index 18c59b3..4ce4d97 100644
--- a/aicon/backend/models/deep_daze/deep_daze.py
+++ b/aicon/backend/models/deep_daze/deep_daze.py
@@ -422,7 +422,7 @@ def __init__(
         self.start_image_lr: float = start_image_lr
 
         if source_img is not None:
-            image: Image = Image.open(source_img)
+            image: Image = Image.open(source_img).convert('RGB')
             start_img_transform: Compose = T.Compose([
                 T.Resize(image_width),
                 T.CenterCrop((image_width, image_width)),
@@ -457,7 +457,7 @@ def create_text_encoding(self, text: str) -> torch.Tensor:
         return text_encoding
 
     def create_img_encoding(self, target_img: str) -> torch.Tensor:
-        target_img: Image = Image.open(target_img)
+        target_img: Image = Image.open(target_img).convert('RGB')
         normed_img: torch.Tensor = self.clip_transform(target_img).unsqueeze(0).to(self.device)
 
         with torch.no_grad():
diff --git a/aicon/backend/server.py b/aicon/backend/server.py
index b1feebb..47916d5 100644
--- a/aicon/backend/server.py
+++ b/aicon/backend/server.py
@@ -57,7 +57,7 @@
 _lock: Lock = Lock()
 _twitter_database: Dict[str, str] = {}
 
-_translator: Translation = Translation("deepl")
+_translator: Translation = Translation("google")
 
 
 def _reset_valid_response() -> None:
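
Context for the .convert('RGB') changes, as a minimal sketch outside the patch: base64-uploaded PNGs often decode as RGBA (or palette "P") images and grayscale files as "L", while the torchvision pipelines above assume three channels, so Normalize fails with a channel-count mismatch. The snippet below reproduces the failure mode and the fix; the in-memory RGBA PNG stands in for a real upload, and the normalization constants are the standard CLIP statistics, assumed to match what these models use.

    from base64 import b64decode, b64encode
    from io import BytesIO

    from PIL import Image
    import torchvision.transforms as T

    # Hypothetical payload: an RGBA PNG built in memory, standing in for
    # a base64-encoded source image sent by the frontend.
    buf = BytesIO()
    Image.new("RGBA", (64, 64), (255, 0, 0, 128)).save(buf, format="PNG")
    payload = b64encode(buf.getvalue())

    img = Image.open(BytesIO(b64decode(payload)))
    print(img.mode)  # "RGBA" -- four channels, not three

    transform = T.Compose([
        T.Resize(224),
        T.CenterCrop((224, 224)),
        T.ToTensor(),
        # Standard CLIP normalization statistics (3 channels).
        T.Normalize((0.48145466, 0.4578275, 0.40821073),
                    (0.26862954, 0.26130258, 0.27577711)),
    ])

    # Without the conversion, ToTensor() produces a 4-channel tensor and
    # Normalize() raises a channel mismatch; convert('RGB') drops the alpha
    # channel (and expands palette/grayscale images) to exactly 3 channels.
    tensor = transform(img.convert("RGB"))
    print(tensor.shape)  # torch.Size([3, 224, 224])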