diff --git a/__init__.py b/__init__.py index 12c1d3dc..ba2958e8 100644 --- a/__init__.py +++ b/__init__.py @@ -30,13 +30,13 @@ from .help_section import register_section_props -from .async_loop import * from .prompt_engineering import * from .operators.open_latest_version import check_for_updates from .absolute_path import absolute_path from .classes import CLASSES, PREFERENCE_CLASSES from .tools import TOOLS from .operators.install_dependencies import are_dependencies_installed, set_dependencies_installed +from .operators.dream_texture import kill_generator from .property_groups.dream_prompt import DreamPrompt from .ui import panel @@ -48,14 +48,6 @@ ) def register(): - async_loop.setup_asyncio_executor() - bpy.utils.register_class(AsyncLoopModalOperator) - - sys.path.append(absolute_path("stable_diffusion/")) - sys.path.append(absolute_path("stable_diffusion/src/clip")) - sys.path.append(absolute_path("stable_diffusion/src/k-diffusion")) - sys.path.append(absolute_path("stable_diffusion/src/taming-transformers")) - set_dependencies_installed(False) bpy.types.Scene.dream_textures_requirements_path = EnumProperty(name="Platform", items=requirements_path_items, description="Specifies which set of dependencies to install", default='stable_diffusion/requirements-mac-MPS-CPU.txt' if sys.platform == 'darwin' else 'requirements-win-torch-1-11-0.txt') @@ -79,6 +71,7 @@ def register(): bpy.types.Scene.init_mask = PointerProperty(name="Init Mask", type=bpy.types.Image) bpy.types.Scene.dream_textures_history_selection = IntProperty() bpy.types.Scene.dream_textures_progress = bpy.props.IntProperty(name="Progress", default=0, min=0, max=0) + bpy.types.Scene.dream_textures_info = bpy.props.StringProperty(name="Info") for cls in CLASSES: bpy.utils.register_class(cls) @@ -87,8 +80,6 @@ def register(): bpy.utils.register_tool(tool) def unregister(): - bpy.utils.unregister_class(AsyncLoopModalOperator) - for cls in PREFERENCE_CLASSES: bpy.utils.unregister_class(cls) @@ -97,6 +88,7 @@ def unregister(): bpy.utils.unregister_class(cls) for tool in TOOLS: bpy.utils.unregister_tool(tool) + kill_generator() if __name__ == "__main__": register() \ No newline at end of file diff --git a/async_loop.py b/async_loop.py deleted file mode 100644 index 83f70d22..00000000 --- a/async_loop.py +++ /dev/null @@ -1,280 +0,0 @@ -# ##### BEGIN GPL LICENSE BLOCK ##### -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -# ##### END GPL LICENSE BLOCK ##### - -"""Manages the asyncio loop. (Copied from Blender Cloud plugin with minor changes)""" - -import asyncio -import traceback -import concurrent.futures -import logging -import gc -import typing - -import bpy - -log = logging.getLogger(__name__) - -# Keeps track of whether a loop-kicking operator is already running. 
-_loop_kicking_operator_running = False - - -def setup_asyncio_executor(): - """Sets up AsyncIO to run properly on each platform.""" - - import sys - - if sys.platform == 'win32': - asyncio.get_event_loop().close() - # On Windows, the default event loop is SelectorEventLoop, which does - # not support subprocesses. ProactorEventLoop should be used instead. - # Source: https://docs.python.org/3/library/asyncio-subprocess.html - loop = asyncio.ProactorEventLoop() - asyncio.set_event_loop(loop) - else: - loop = asyncio.get_event_loop() - - executor = concurrent.futures.ThreadPoolExecutor(max_workers=10) - loop.set_default_executor(executor) - # loop.set_debug(True) - - -def kick_async_loop(*args) -> bool: - """Performs a single iteration of the asyncio event loop. - - :return: whether the asyncio loop should stop after this kick. - """ - - loop = asyncio.get_event_loop() - - # Even when we want to stop, we always need to do one more - # 'kick' to handle task-done callbacks. - stop_after_this_kick = False - - if loop.is_closed(): - log.warning('loop closed, stopping immediately.') - return True - - all_tasks = asyncio.all_tasks(loop) - if not len(all_tasks): - log.debug('no more scheduled tasks, stopping after this kick.') - stop_after_this_kick = True - - elif all(task.done() for task in all_tasks): - log.debug('all %i tasks are done, fetching results and stopping after this kick.', - len(all_tasks)) - stop_after_this_kick = True - - # Clean up circular references between tasks. - gc.collect() - - for task_idx, task in enumerate(all_tasks): - if not task.done(): - continue - - # noinspection PyBroadException - try: - res = task.result() - log.debug(' task #%i: result=%r', task_idx, res) - except asyncio.CancelledError: - # No problem, we want to stop anyway. - log.debug(' task #%i: cancelled', task_idx) - except Exception: - print('{}: resulted in exception'.format(task)) - traceback.print_exc() - - # for ref in gc.get_referrers(task): - # log.debug(' - referred by %s', ref) - - loop.stop() - loop.run_forever() - - return stop_after_this_kick - - -def ensure_async_loop(): - log.debug('Starting asyncio loop') - result = bpy.ops.asyncio.loop() - log.debug('Result of starting modal operator is %r', result) - - -def erase_async_loop(): - global _loop_kicking_operator_running - - log.debug('Erasing async loop') - - loop = asyncio.get_event_loop() - loop.stop() - - -class AsyncLoopModalOperator(bpy.types.Operator): - bl_idname = 'asyncio.loop' - bl_label = 'Runs the asyncio main loop' - - timer = None - log = logging.getLogger(__name__ + '.AsyncLoopModalOperator') - - def __del__(self): - global _loop_kicking_operator_running - - # This can be required when the operator is running while Blender - # (re)loads a file. The operator then doesn't get the chance to - # finish the async tasks, hence stop_after_this_kick is never True. 
- _loop_kicking_operator_running = False - - def execute(self, context): - return self.invoke(context, None) - - def invoke(self, context, event): - global _loop_kicking_operator_running - - if _loop_kicking_operator_running: - self.log.debug('Another loop-kicking operator is already running.') - return {'PASS_THROUGH'} - - context.window_manager.modal_handler_add(self) - _loop_kicking_operator_running = True - - wm = context.window_manager - self.timer = wm.event_timer_add(0.00001, window=context.window) - - return {'RUNNING_MODAL'} - - def modal(self, context, event): - global _loop_kicking_operator_running - - # If _loop_kicking_operator_running is set to False, someone called - # erase_async_loop(). This is a signal that we really should stop - # running. - if not _loop_kicking_operator_running: - return {'FINISHED'} - - if event.type != 'TIMER': - return {'PASS_THROUGH'} - - # self.log.debug('KICKING LOOP') - stop_after_this_kick = kick_async_loop() - if stop_after_this_kick: - context.window_manager.event_timer_remove(self.timer) - _loop_kicking_operator_running = False - - self.log.debug('Stopped asyncio loop kicking') - return {'FINISHED'} - - return {'RUNNING_MODAL'} - - -# noinspection PyAttributeOutsideInit -class AsyncModalOperatorMixin: - async_task = None # asyncio task for fetching thumbnails - signalling_future = None # asyncio future for signalling that we want to cancel everything. - log = logging.getLogger('%s.AsyncModalOperatorMixin' % __name__) - - _state = 'INITIALIZING' - stop_upon_exception = False - - def invoke(self, context, event): - context.window_manager.modal_handler_add(self) - self.timer = context.window_manager.event_timer_add(1 / 15, window=context.window) - - self.log.info('Starting') - self._new_async_task(self.async_execute(context)) - - return {'RUNNING_MODAL'} - - async def async_execute(self, context): - """Entry point of the asynchronous operator. - - Implement in a subclass. - """ - return - - def quit(self): - """Signals the state machine to stop this operator from running.""" - self._state = 'QUIT' - - def execute(self, context): - return self.invoke(context, None) - - def modal(self, context, event): - task = self.async_task - - if self._state != 'EXCEPTION' and task and task.done() and not task.cancelled(): - ex = task.exception() - if ex is not None: - self._state = 'EXCEPTION' - self.log.error('Exception while running task: %s', ex) - if self.stop_upon_exception: - self.quit() - self._finish(context) - return {'FINISHED'} - - return {'RUNNING_MODAL'} - - if self._state == 'QUIT': - self._finish(context) - return {'FINISHED'} - - return {'PASS_THROUGH'} - - def _finish(self, context): - self._stop_async_task() - context.window_manager.event_timer_remove(self.timer) - - def _new_async_task(self, async_task: typing.Coroutine, future: asyncio.Future = None): - """Stops the currently running async task, and starts another one.""" - - self.log.debug('Setting up a new task %r, so any existing task must be stopped', async_task) - self._stop_async_task() - - # Download the previews asynchronously. - self.signalling_future = future or asyncio.Future() - self.async_task = asyncio.ensure_future(async_task) - self.log.debug('Created new task %r', self.async_task) - - # Start the async manager so everything happens. - ensure_async_loop() - - def _stop_async_task(self): - self.log.debug('Stopping async task') - if self.async_task is None: - self.log.debug('No async task, trivially stopped') - return - - # Signal that we want to stop. 
- self.async_task.cancel() - if not self.signalling_future.done(): - self.log.info("Signalling that we want to cancel anything that's running.") - self.signalling_future.cancel() - - # Wait until the asynchronous task is done. - if not self.async_task.done(): - self.log.info("blocking until async task is done.") - loop = asyncio.get_event_loop() - try: - loop.run_until_complete(self.async_task) - except asyncio.CancelledError: - self.log.info('Asynchronous task was cancelled') - return - - # noinspection PyBroadException - try: - self.async_task.result() # This re-raises any exception of the task. - except asyncio.CancelledError: - self.log.info('Asynchronous task was cancelled') - except Exception: - self.log.exception("Exception from asynchronous task") \ No newline at end of file diff --git a/generator_process.py b/generator_process.py new file mode 100644 index 00000000..72abc992 --- /dev/null +++ b/generator_process.py @@ -0,0 +1,243 @@ +import json +import subprocess +import sys +import os +import threading +import numpy as np +from enum import IntEnum as Lawsuit + +# IPC message types from subprocess +class Action(Lawsuit): # can't help myself + UNKNOWN = -1 + CLOSED = 0 + INFO = 1 + IMAGE = 2 + STEP_IMAGE = 3 + STEP_NO_SHOW = 4 + EXCEPTION = 5 + + @classmethod + def _missing_(cls, value): + return cls.UNKNOWN + +class GeneratorProcess(): + def __init__(self): + self.process = subprocess.Popen([sys.executable,'generator_process.py'],cwd=os.path.dirname(os.path.realpath(__file__)),stdin=subprocess.PIPE,stdout=subprocess.PIPE) + self.reader = self.process.stdout + self.queue = [] + self.args = None + self.killed = False + self.thread = threading.Thread(target=self._run,daemon=True,name="BackgroundReader") + self.thread.start() + + def kill(self): + self.killed = True + self.process.kill() + + def prompt2image(self, args, step_callback, image_callback, info_callback, exception_callback): + self.args = args + stdin = self.process.stdin + b = bytes(json.dumps(args), encoding='utf-8') + stdin.write(len(b).to_bytes(8,sys.byteorder,signed=False)) + stdin.write(b) + stdin.flush() + + queue = self.queue + callbacks = { + Action.INFO: info_callback, + Action.IMAGE: image_callback, + Action.STEP_IMAGE: step_callback, + Action.STEP_NO_SHOW: step_callback, + Action.EXCEPTION: exception_callback + } + + for i in range(0,args['iterations']): + while True: + while len(queue) == 0: + yield # nothing in queue, let blender resume + tup = queue.pop() + action = tup[0] + callbacks[action](*tup[1:]) + if action == Action.IMAGE: + break + elif action == Action.EXCEPTION: + return + + def _run(self): + reader = self.reader + def readStr(): + return str(reader.read(readUInt(4)), encoding='utf-8') + def readUInt(length): + return int.from_bytes(reader.read(length),sys.byteorder,signed=False) + + queue = self.queue + def queue_exception(fatal, err): + queue.append((Action.EXCEPTION, fatal, err)) + + image_buffer = bytearray(512*512*16) + while not self.killed: + action = readUInt(1) + if action == Action.CLOSED: + if not self.killed: + queue_exception(True, "Process closed unexpectedly") + return + elif action == Action.INFO: + queue.append((action, readStr())) + elif action == Action.IMAGE or action == Action.STEP_IMAGE: + seed = readUInt(4) + width = readUInt(4) + height = readUInt(4) + length = width*height*16 + import math + w = math.floor(self.args['width']/64)*64 # stable diffusion rounds down internally + h = math.floor(self.args['height']/64)*64 + if width != w or height != h: + queue_exception(True, 
f"Internal error, received image of wrong resolution {width}x{height}, expected {w}x{h}") + return + if length > len(image_buffer): + image_buffer = bytearray(length) + m = memoryview(image_buffer)[:length] + reader.readinto(m) + image = np.frombuffer(m,dtype=np.float32) + queue.append((action, seed, width, height, image)) + elif action == Action.STEP_NO_SHOW: + queue.append((action, readUInt(4))) + elif action == Action.EXCEPTION: + fatal = readUInt(1) != 0 + queue_exception(fatal, readStr()) + if fatal: + return + else: + queue_exception(True, f"Internal error, unexpected action id: {action}") + return + +def main(): + from absolute_path import absolute_path + # Support Apple Silicon GPUs as much as possible. + os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" + sys.path.append(absolute_path("stable_diffusion/")) + sys.path.append(absolute_path("stable_diffusion/src/clip")) + sys.path.append(absolute_path("stable_diffusion/src/k-diffusion")) + sys.path.append(absolute_path("stable_diffusion/src/taming-transformers")) + from stable_diffusion.ldm.generate import Generate + from omegaconf import OmegaConf + from PIL import ImageOps + from io import StringIO + + models_config = absolute_path('stable_diffusion/configs/models.yaml') + model = 'stable-diffusion-1.4' + + models = OmegaConf.load(models_config) + config = absolute_path('stable_diffusion/' + models[model].config) + weights = absolute_path('stable_diffusion/' + models[model].weights) + + stdin = sys.stdin.buffer + stdout = sys.stdout.buffer + sys.stdout = open(os.devnull, 'w') # prevent stable diffusion logs from breaking ipc + stderr = sys.stderr + + def writeUInt(length, value): + stdout.write(value.to_bytes(length,sys.byteorder,signed=False)) + + def writeStr(string): + b = bytes(string,encoding='utf-8') + writeUInt(4,len(b)) + stdout.write(b) + + def writeInfo(msg): + writeUInt(1,Action.INFO) + writeStr(msg) + stdout.flush() + + def writeException(fatal, e): + writeUInt(1,Action.EXCEPTION) + writeUInt(1,1 if fatal else 0) + writeStr(e) + stdout.flush() + + byte_to_normalized = 1.0 / 255.0 + def write_pixels(image): + writeUInt(4,image.width) + writeUInt(4,image.height) + b = (np.asarray(ImageOps.flip(image).convert('RGBA'),dtype=np.float32) * byte_to_normalized).tobytes() + for i in range(0,len(b),1024*64): + stdout.write(b[i:i+1024*64]) + # stdout.write(memoryview(b)[i:i+1024*64]) # won't accept memoryview for some reason, writer thinks it needs serialized but fails + # stdout.write(b) # writing full image has caused the subprocess to crash without raising any exception, safer not to use + stdout.flush() + + def image_writer(image, seed, upscaled=False): + # Only use the non-upscaled texture, as upscaling is currently unsupported by the addon. 
+ if not upscaled: + writeUInt(1,Action.IMAGE) + writeUInt(4,seed) + write_pixels(image) + stdout.flush() + + def view_step(samples, step): + if args['show_steps']: + pixels = generator._sample_to_image(samples) # May run out of memory, keep before any writing + writeUInt(1,Action.STEP_IMAGE) + writeUInt(4,step) + write_pixels(pixels) + else: + writeUInt(1,Action.STEP_NO_SHOW) + writeUInt(4,step) + stdout.flush() + + generator = None + while True: + json_len = int.from_bytes(stdin.read(8),sys.byteorder,signed=False) + if json_len == 0: + return # stdin closed + args = json.loads(stdin.read(json_len)) + + if generator is None or generator.full_precision != args['full_precision']: + writeInfo("Initializing Generator") + try: + generator = Generate( + conf=models_config, + model=model, + # These args are deprecated, but we need them to specify an absolute path to the weights. + weights=weights, + config=config, + full_precision=args['full_precision'] + ) + generator.load_model() + except Exception as e: + writeException(True, str(e)) + return + writeInfo("Starting") + + try: + tmp_stderr = sys.stderr = StringIO() # prompt2image writes exceptions straight to stderr, intercepting + generator.prompt2image( + # a function or method that will be called each step + step_callback=view_step, + # a function or method that will be called each time an image is generated + image_callback=image_writer, + **args + ) + if tmp_stderr.tell() > 0: + tmp_stderr.seek(0) + s = tmp_stderr.read() + i = s.find("Traceback") # progress also gets printed to stderr so check for an actual exception + if i != -1: + s = s[i:] + import re + low_ram = re.search(r"(Not enough memory, use lower resolution)( \(max approx. \d+x\d+\))",s,re.IGNORECASE) + if low_ram: + writeException(False, f"{low_ram[1]}{' or disable full precision' if args['full_precision'] else ''}{low_ram[2]}") + elif s.find("CUDA out of memory. Tried to allocate") != -1: + writeException(False, f"Not enough memory, use lower resolution{' or disable full precision' if args['full_precision'] else ''}") + else: + writeException(True, s) # consider all unknown exceptions to be fatal so the generator process is fully restarted next time + return + except Exception as e: + writeException(True, str(e)) + return + finally: + sys.stderr = stderr + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/operators/dream_texture.py b/operators/dream_texture.py index 7423b2e1..1f14f9cc 100644 --- a/operators/dream_texture.py +++ b/operators/dream_texture.py @@ -1,14 +1,14 @@ from importlib.resources import path import bpy -import asyncio import os import math from ..preferences import StableDiffusionPreferences -from ..async_loop import * from ..pil_to_image import * from ..prompt_engineering import * from ..absolute_path import WEIGHTS_PATH, absolute_path +from ..generator_process import GeneratorProcess +from ..property_groups.dream_prompt import DreamPrompt from .install_dependencies import are_dependencies_installed import tempfile @@ -17,6 +17,9 @@ # This allows the slow model loading process to happen once, # and re-use the model on subsequent calls. 
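
The piece that ties the two files together is the shape of `prompt2image` on the Blender side: it is a generator that yields whenever the background reader thread has not queued anything yet, so Blender can keep drawing the UI and simply call `next()` again later. Below is a stripped-down model of that pattern, with a plain thread and list standing in for the reader thread and queue; the names, message shapes, and timings are invented for illustration.

```python
# Toy model (not the add-on's code) of the yield-until-queued pattern:
# a background thread fills a shared list, the consumer generator yields
# whenever the list is empty, and the caller advances it with next();
# in the add-on that call happens on each TIMER event of a modal operator.
import threading
import time

queue = []

def producer():
    for step in range(3):
        time.sleep(0.1)              # pretend to do one denoising step
        queue.append(('step', step))
    queue.append(('image', 42))      # final result, like Action.IMAGE

def consume(step_callback, image_callback):
    while True:
        while not queue:
            yield                    # nothing ready yet; hand control back
        kind, value = queue.pop(0)
        if kind == 'step':
            step_callback(value)
        elif kind == 'image':
            image_callback(value)
            return                   # StopIteration tells the caller to stop

threading.Thread(target=producer, daemon=True).start()
advance = consume(lambda s: print('step', s), lambda seed: print('image with seed', seed))
try:
    while True:
        next(advance)
        time.sleep(0.05)
except StopIteration:
    print('done')
```
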
generator = None +generator_advance = None +last_data_block = None +timer = None def image_has_alpha(img): b = 32 if img.is_float else 8 @@ -30,53 +33,52 @@ class DreamTexture(bpy.types.Operator): bl_label = "Dream Texture" bl_description = "Generate a texture with AI" bl_options = {'REGISTER'} - + + @classmethod + def poll(cls, context): + global timer + return timer is None + def invoke(self, context, event): weights_installed = os.path.exists(WEIGHTS_PATH) if not weights_installed or not are_dependencies_installed(): self.report({'ERROR'}, "Please complete setup in the preferences window.") - return {"FINISHED"} - else: - return self.execute(context) + return {'CANCELLED'} + return self.execute(context) + + def modal(self, context, event): + if event.type != 'TIMER': + return {'PASS_THROUGH'} + try: + next(generator_advance) + except StopIteration: + remove_timer(context) + return {'FINISHED'} + except Exception as e: + remove_timer(context) + raise e + return {'RUNNING_MODAL'} - async def dream_texture(self, context): + def execute(self, context): history_entry = context.preferences.addons[StableDiffusionPreferences.bl_idname].preferences.history.add() for prop in context.scene.dream_textures_prompt.__annotations__.keys(): if hasattr(history_entry, prop): setattr(history_entry, prop, getattr(context.scene.dream_textures_prompt, prop)) - generated_prompt = context.scene.dream_textures_prompt.generate_prompt() - - # Support Apple Silicon GPUs as much as possible. - os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" - - from ..stable_diffusion.ldm.generate import Generate - from omegaconf import OmegaConf - - models_config = absolute_path('stable_diffusion/configs/models.yaml') - model = 'stable-diffusion-1.4' - - models = OmegaConf.load(models_config) - config = absolute_path('stable_diffusion/' + models[model].config) - weights = absolute_path('stable_diffusion/' + models[model].weights) - - global generator - if generator is None or generator.full_precision != context.scene.dream_textures_prompt.full_precision: - generator = Generate( - conf=models_config, - model=model, - # These args are deprecated, but we need them to specify an absolute path to the weights. 
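
For readers less familiar with Blender's operator API, the `modal()` above is the standard timer-driven pattern: `execute()` registers a window-manager timer and a modal handler, each TIMER event advances the long-running work by one slice, and `StopIteration` (or an exception) tears the timer down. A minimal self-contained version of that skeleton, with a made-up `bl_idname` and work generator, could look like this:

```python
# Minimal Blender modal-operator skeleton showing the timer-driven pattern:
# a window-manager timer fires TIMER events, modal() advances a generator one
# step per event, and StopIteration finishes the operator.
# (Stand-alone illustration; the idname and slow_work() are hypothetical.)
import bpy

def slow_work(total=100):
    for i in range(total):
        # one small slice of work per call to next()
        yield

class EXAMPLE_OT_modal_generator(bpy.types.Operator):
    bl_idname = "example.modal_generator"
    bl_label = "Modal Generator Example"

    def execute(self, context):
        self._advance = slow_work()
        self._timer = context.window_manager.event_timer_add(1 / 15, window=context.window)
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}

    def modal(self, context, event):
        if event.type != 'TIMER':
            return {'PASS_THROUGH'}
        try:
            next(self._advance)
        except StopIteration:
            context.window_manager.event_timer_remove(self._timer)
            return {'FINISHED'}
        return {'RUNNING_MODAL'}

bpy.utils.register_class(EXAMPLE_OT_modal_generator)
# then invoke it with: bpy.ops.example.modal_generator()
```
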
- weights=weights, - config=config, - full_precision=context.scene.dream_textures_prompt.full_precision - ) - generator.load_model() - node_tree = context.material.node_tree if hasattr(context, 'material') else None screen = context.screen - last_data_block = None scene = context.scene + def info(msg=""): + scene.dream_textures_info = msg + + def handle_exception(fatal, err): + info() # clear variable + if fatal: + kill_generator() + self.report({'ERROR'},err) + + def step_progress_update(self, context): if hasattr(context.area, "regions"): for region in context.area.regions: @@ -84,16 +86,33 @@ def step_progress_update(self, context): region.tag_redraw() return None - bpy.types.Scene.dream_textures_progress = bpy.props.IntProperty(name="Progress", default=1, min=0, max=context.scene.dream_textures_prompt.steps + 1, update=step_progress_update) + bpy.types.Scene.dream_textures_progress = bpy.props.IntProperty(name="Progress", default=0, min=0, max=context.scene.dream_textures_prompt.steps + 1, update=step_progress_update) + bpy.types.Scene.dream_textures_info = bpy.props.StringProperty(name="Info", update=step_progress_update) - def image_writer(image, seed, upscaled=False): - nonlocal last_data_block + global generator + if generator is None: + info("Initializing Process") + generator = GeneratorProcess() + else: + info("Waiting For Process") + + def bpy_image(name, width, height, pixels): + image = bpy.data.images.new(name, width=width, height=height) + image.pixels[:] = pixels + image.pack() + return image + + def image_writer(seed, width, height, pixels, upscaled=False): + info() # clear variable + global last_data_block # Only use the non-upscaled texture, as upscaling is currently unsupported by the addon. if not upscaled: if last_data_block is not None: bpy.data.images.remove(last_data_block) last_data_block = None - image = pil_to_image(image, name=f"{seed}") + if generator is None or generator.process.poll() or width == 0 or height == 0: + return # process was closed + image = bpy_image(f"{seed}", width, height, pixels) if node_tree is not None: nodes = node_tree.nodes texture_node = nodes.new("ShaderNodeTexImage") @@ -105,20 +124,20 @@ def image_writer(image, seed, upscaled=False): scene.dream_textures_progress = 0 scene.dream_textures_prompt.seed = str(seed) # update property in case seed was sourced randomly or from hash - def view_step(samples, step): - step_progress(samples, step) - nonlocal last_data_block + def view_step(step, width=None, height=None, pixels=None): + info() # clear variable + scene.dream_textures_progress = step + 1 + if pixels is None: + return # show steps disabled + global last_data_block for area in screen.areas: if area.type == 'IMAGE_EDITOR': - step_image = pil_to_image(generator._sample_to_image(samples), name=f'Step {step + 1}/{scene.dream_textures_prompt.steps}') + step_image = bpy_image(f'Step {step + 1}/{scene.dream_textures_prompt.steps}', width, height, pixels) area.spaces.active.image = step_image if last_data_block is not None: bpy.data.images.remove(last_data_block) last_data_block = step_image return # Only perform this on the first image editor found. 
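
`bpy_image()` above replaces the old PIL-based `pil_to_image` helper: the subprocess now ships raw float32 RGBA pixels, and the add-on only has to pour them into a new datablock. A stand-alone sketch of that conversion, using a generated gradient instead of IPC data, is below; Blender expects width*height*4 floats in [0, 1] with rows ordered bottom-to-top, which is why the child process flips the PIL image before writing pixels.

```python
# Sketch of turning a flat float RGBA buffer into a Blender image datablock,
# as bpy_image() above does. Stand-alone example with a generated gradient.
import bpy
import numpy as np

width, height = 64, 64
rgba = np.zeros((height, width, 4), dtype=np.float32)
rgba[..., 0] = np.linspace(0.0, 1.0, width)  # red ramp, left to right
rgba[..., 3] = 1.0                           # opaque alpha

image = bpy.data.images.new("ipc_example", width=width, height=height)
image.pixels[:] = rgba.ravel()               # flat buffer, 4 floats per pixel
image.pack()                                 # keep it inside the .blend file
```
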
- - def step_progress(samples, step): - scene.dream_textures_progress = step + 1 def save_temp_image(img, path=None): path = path if path is not None else tempfile.NamedTemporaryFile().name @@ -140,62 +159,53 @@ def save_temp_image(img, path=None): return path - def perform(): - init_img = scene.init_img if scene.dream_textures_prompt.use_init_img else None - if scene.dream_textures_prompt.use_inpainting: - for area in screen.areas: - if area.type == 'IMAGE_EDITOR': - if area.spaces.active.image is not None and image_has_alpha(area.spaces.active.image): - init_img = area.spaces.active.image - init_img_path = None - if init_img is not None: - init_img_path = save_temp_image(init_img) - - generator.prompt2image( - # prompt string (no default) - prompt=generated_prompt, - # iterations (1); image count=iterations - iterations=scene.dream_textures_prompt.iterations, - # refinement steps per iteration - steps=scene.dream_textures_prompt.steps, - # seed for random number generator - seed=scene.dream_textures_prompt.get_seed(), - # width of image, in multiples of 64 (512) - width=scene.dream_textures_prompt.width, - # height of image, in multiples of 64 (512) - height=scene.dream_textures_prompt.height, - # how strongly the prompt influences the image (7.5) (must be >1) - cfg_scale=scene.dream_textures_prompt.cfgscale, - # path to an initial image - its dimensions override width and height - init_img=init_img_path, - - # generate tileable/seamless textures - seamless=scene.dream_textures_prompt.seamless, - - fit=scene.dream_textures_prompt.fit, - # strength for noising/unnoising init_img. 0.0 preserves image exactly, 1.0 replaces it completely - strength=scene.dream_textures_prompt.strength, - # strength for GFPGAN. 0.0 preserves image exactly, 1.0 replaces it completely - gfpgan_strength=0.0, # 0 disables upscaling, which is currently not supported by the addon. 
- # image randomness (eta=0.0 means the same seed always produces the same image) - ddim_eta=0.0, - # a function or method that will be called each step - step_callback=view_step if scene.dream_textures_prompt.show_steps else step_progress, - # a function or method that will be called each time an image is generated - image_callback=image_writer, - - sampler_name=scene.dream_textures_prompt.sampler - ) - - loop = asyncio.get_running_loop() - await loop.run_in_executor(None, perform) - - def execute(self, context): - async_task = asyncio.ensure_future(self.dream_texture(context)) - # async_task.add_done_callback(done_callback) - ensure_async_loop() - - return {'FINISHED'} + init_img = scene.init_img if scene.dream_textures_prompt.use_init_img else None + if scene.dream_textures_prompt.use_inpainting: + for area in screen.areas: + if area.type == 'IMAGE_EDITOR': + if area.spaces.active.image is not None and image_has_alpha(area.spaces.active.image): + init_img = area.spaces.active.image + init_img_path = None + if init_img is not None: + init_img_path = save_temp_image(init_img) + + args = {key: getattr(scene.dream_textures_prompt,key) for key in DreamPrompt.__annotations__} + args['prompt'] = context.scene.dream_textures_prompt.generate_prompt() + args['seed'] = scene.dream_textures_prompt.get_seed() + args['init_img'] = init_img_path + + global generator_advance + generator_advance = generator.prompt2image(args, + # a function or method that will be called each step + step_callback=view_step, + # a function or method that will be called each time an image is generated + image_callback=image_writer, + # a function or method that will recieve messages + info_callback=info, + exception_callback=handle_exception + ) + context.window_manager.modal_handler_add(self) + self.timer = context.window_manager.event_timer_add(1 / 15, window=context.window) + return {'RUNNING_MODAL'} + +def remove_timer(context): + global timer + if timer: + context.window_manager.event_timer_remove(timer) + timer = None + +def kill_generator(context=bpy.context): + global generator + if generator: + generator.kill() + generator = None + remove_timer(context) + bpy.context.scene.dream_textures_progress = 0 + bpy.context.scene.dream_textures_info = "" + global last_data_block + if last_data_block is not None: + bpy.data.images.remove(last_data_block) + last_data_block = None class ReleaseGenerator(bpy.types.Operator): bl_idname = "shade.dream_textures_release_generator" @@ -204,7 +214,5 @@ class ReleaseGenerator(bpy.types.Operator): bl_options = {'REGISTER'} def execute(self, context): - global generator - generator = None - context.scene.dream_textures_progress = 0 - return {'FINISHED'} + kill_generator(context) + return {'FINISHED'} \ No newline at end of file diff --git a/property_groups/dream_prompt.py b/property_groups/dream_prompt.py index 1f0e18a9..8bba8704 100644 --- a/property_groups/dream_prompt.py +++ b/property_groups/dream_prompt.py @@ -19,7 +19,7 @@ def seed_clamp(self, ctx): s = str(max(0,min(int(float(self.seed)),2**32-1))) # float() first to make sure any seed that is a number gets clamped, not just ints if s != self.seed: self.seed = s - except ValueError: + except (ValueError, OverflowError): pass # will get hashed once generated attributes = { @@ -27,8 +27,8 @@ def seed_clamp(self, ctx): "prompt_structure": EnumProperty(name="Preset", items=prompt_structures_items, description="Fill in a few simple options to create interesting images quickly"), # Size - "width": IntProperty(name="Width", 
default=512), - "height": IntProperty(name="Height", default=512), + "width": IntProperty(name="Width", default=512, min=64, step=64), + "height": IntProperty(name="Height", default=512, min=64, step=64), # Simple Options "seamless": BoolProperty(name="Seamless", default=False, description="Enables seamless/tilable image generation"), @@ -40,7 +40,7 @@ def seed_clamp(self, ctx): "full_precision": BoolProperty(name="Full Precision", default=False, description="Whether to use full precision or half precision floats. Full precision is slower, but required by some GPUs"), "iterations": IntProperty(name="Iterations", default=1, min=1, description="How many images to generate"), "steps": IntProperty(name="Steps", default=25, min=1), - "cfgscale": FloatProperty(name="CFG Scale", default=7.5, min=1, description="How strongly the prompt influences the image"), + "cfg_scale": FloatProperty(name="CFG Scale", default=7.5, min=1, description="How strongly the prompt influences the image"), "sampler": EnumProperty(name="Sampler", items=sampler_options, default=3), "show_steps": BoolProperty(name="Show Steps", description="Displays intermediate steps in the Image Viewer. Disabling can speed up generation", default=True), @@ -99,7 +99,7 @@ def get_seed(self): return None # let stable diffusion automatically pick one try: return max(0,min(int(float(self.seed)),2**32-1)) # clamp int - except ValueError: + except (ValueError, OverflowError): h = hash(self.seed) # not an int, let's hash it! if h < 0: h = ~h diff --git a/ui/panel.py b/ui/panel.py index 3e8f6283..af329400 100644 --- a/ui/panel.py +++ b/ui/panel.py @@ -1,6 +1,5 @@ import bpy from bpy.types import Panel -from ..async_loop import * from ..pil_to_image import * from ..prompt_engineering import * from ..operators.dream_texture import DreamTexture, image_has_alpha, ReleaseGenerator @@ -65,7 +64,7 @@ def draw_panel(self, context): seed_row.enabled = not scene.dream_textures_prompt.random_seed # advanced_box.prop(self, "iterations") # Disabled until supported by the addon. advanced_box.prop(scene.dream_textures_prompt, "steps") - advanced_box.prop(scene.dream_textures_prompt, "cfgscale") + advanced_box.prop(scene.dream_textures_prompt, "cfg_scale") advanced_box.prop(scene.dream_textures_prompt, "sampler") advanced_box.prop(scene.dream_textures_prompt, "show_steps") @@ -76,7 +75,10 @@ def draw_panel(self, context): row = layout.row() row.scale_y = 1.5 if context.scene.dream_textures_progress <= 0: - row.operator(DreamTexture.bl_idname, icon="PLAY", text="Generate") + if context.scene.dream_textures_info != "": + row.label(text=context.scene.dream_textures_info, icon="INFO") + else: + row.operator(DreamTexture.bl_idname, icon="PLAY", text="Generate") else: row.prop(context.scene, 'dream_textures_progress', slider=True) row.enabled = False
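
One last detail worth spelling out is the seed handling that the `OverflowError` change above touches: `dream_prompt.py` accepts any string, clamps numeric input to the unsigned 32-bit range (going through `float()` first so values like `"1e99"` or `"42.7"` count as numbers, with `OverflowError` now covering `"inf"`), and falls back to hashing non-numeric text. The sketch below mirrors that flow; the final reduction into the 32-bit range is illustrative, since the tail of the add-on's own hash folding falls outside the quoted hunk, and note that `hash()` on strings is randomized per process unless `PYTHONHASHSEED` is pinned.

```python
# Stand-alone sketch (not the add-on's exact code) of the seed resolution
# implied by dream_prompt.py: clamp numeric seeds, hash everything else.
def resolve_seed(seed_text: str):
    if seed_text == '':
        return None                              # let the generator pick one
    try:
        # float() first so "42.7" or "1e99" are accepted; int(float('inf'))
        # raises OverflowError, which is why the except clause was widened.
        return max(0, min(int(float(seed_text)), 2**32 - 1))
    except (ValueError, OverflowError):
        h = hash(seed_text)                      # not a number: hash the text
        if h < 0:
            h = ~h                               # flip negatives to non-negative
        return h % 2**32                         # illustrative fold into seed range

print(resolve_seed("42"), resolve_seed("1e99"), resolve_seed("a green hillside"))
```
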