diff --git a/generator/generator.py b/generator/generator.py
index ce259533..05f3b329 100644
--- a/generator/generator.py
+++ b/generator/generator.py
@@ -1,3 +1,4 @@
+import bpy
 import os
 import sys
 import traceback
@@ -5,9 +6,9 @@
 from ..operators.install_dependencies import load_dependencies
 from ..utils import absolute_path
 
 
-
 class Generator:
     _instance = None
+
     def __new__(cls):
         if not cls._instance:
@@ -69,14 +70,21 @@ def load_generator(self):
         self._ensure_dependencies()
 
         try:
-            import llama_cpp
-
-            self.llm = llama_cpp.Llama(
-                model_path=absolute_path(".models/LLaMA-Mesh-Q4_K_M.gguf"),
-                n_gpu_layers=-1,
-                seed=1337,
-                n_ctx=4096,
-            )
+            if bpy.context.scene.meshgen_props.use_ollama_backend:
+                from ollama import Client
+                self.llm = Client(
+                    host=bpy.context.scene.meshgen_props.ollama_host,
+                )
+                self.llm.pull(model='hf.co/bartowski/LLaMA-Mesh-GGUF:Q4_K_M')
+            else:
+                import llama_cpp
+
+                self.llm = llama_cpp.Llama(
+                    model_path=absolute_path(".models/LLaMA-Mesh-Q4_K_M.gguf"),
+                    n_gpu_layers=-1,
+                    seed=1337,
+                    n_ctx=4096,
+                )
 
             print("Finished loading generator.")
diff --git a/operators/generate_mesh.py b/operators/generate_mesh.py
index c052046b..f9a3ec60 100644
--- a/operators/generate_mesh.py
+++ b/operators/generate_mesh.py
@@ -33,22 +33,41 @@ def execute(self, context):
         self.generated_text = ""
         self.line_buffer = ""
 
-        self._iterator = generator.llm.create_chat_completion(
-            messages=messages,
-            stream=True,
-            temperature=props.temperature
-        )
+        if not context.scene.meshgen_props.use_ollama_backend:
+            self._iterator = generator.llm.create_chat_completion(
+                messages=messages,
+                stream=True,
+                temperature=props.temperature
+            )
+
         props.is_running = True
 
         self._queue = queue.Queue()
 
         def run_in_thread():
             try:
-                for chunk in generator.llm.create_chat_completion(
-                    messages=messages,
-                    stream=True,
-                    temperature=props.temperature
-                ):
+                if props.use_ollama_backend:
+                    template="""<|begin_of_text|><|start_header_id|>system<|end_header_id|>
+                    {{ .System }}<|eot_id|><|start_header_id|>user<|end_header_id|>
+                    {{ .Prompt }}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
+                    """
+                    options = {"temperature": props.temperature}
+                    stream = generator.llm.generate(
+                        model='hf.co/bartowski/LLaMA-Mesh-GGUF:Q4_K_M',
+                        prompt=props.prompt,
+                        stream=True,
+                        template=template,
+                        system="You are a helpful assistant that can generate 3D obj files.",
+                        options=options
+                    )
+                else:
+                    stream = generator.llm.create_chat_completion(
+                        messages=messages,
+                        stream=True,
+                        temperature=props.temperature
+                    )
+
+                for chunk in stream:
                     if props.cancelled:
                         return
                     self._queue.put(chunk)
@@ -80,10 +99,13 @@ def modal(self, context, event):
                 chunk = self._queue.get_nowait()
                 if chunk is None:
                     break
-                delta = chunk["choices"][0]["delta"]
-                if "content" not in delta:
-                    continue
-                content = delta["content"]
+                if props.use_ollama_backend:
+                    content = chunk["response"]
+                else:
+                    delta = chunk["choices"][0]["delta"]
+                    if "content" not in delta:
+                        continue
+                    content = delta["content"]
                 self.generated_text += content
                 self.line_buffer += content
                 props.generated_text = self.generated_text
diff --git a/operators/install_dependencies.py b/operators/install_dependencies.py
index c35455ac..a00c1edd 100644
--- a/operators/install_dependencies.py
+++ b/operators/install_dependencies.py
@@ -105,9 +105,9 @@ def install_and_load_dependencies():
     os.makedirs(dependencies_dir, exist_ok=True)
 
     if (sys.platform == "win32" or sys.platform == "linux") and check_cuda():
-        requirements_file = "./requirements/cuda.txt"
+        requirements_file = absolute_path("./requirements/cuda.txt")
     else:
-        requirements_file = "./requirements/cpu.txt"
+        requirements_file = absolute_path("./requirements/cpu.txt")
 
     subprocess.run(
         [
diff --git a/preferences.py b/preferences.py
index 9d411893..083951a5 100644
--- a/preferences.py
+++ b/preferences.py
@@ -23,14 +23,17 @@ def draw(self, context):
             layout.label(text="Dependencies not installed.", icon="ERROR")
             box = layout.box()
             box.operator(MESHGEN_OT_InstallDependencies.bl_idname, icon="IMPORT")
-            return
+            #return
+        else:
+            layout.label(text="Dependencies installed.")
 
         if not generator.has_required_models():
             layout.label(text="Required models not downloaded.", icon="ERROR")
             layout.operator(MESHGEN_OT_DownloadRequiredModels.bl_idname, icon="IMPORT")
-            return
+            #return
+        else:
+            layout.label(text="Ready to generate. Press 'N' -> MeshGen to get started.")
 
-        layout.label(text="Ready to generate. Press 'N' -> MeshGen to get started.")
         layout.separator()
         layout.prop(context.scene.meshgen_props, "show_developer_options", text="Show Developer Options")
@@ -38,3 +41,10 @@ def draw(self, context):
         if context.scene.meshgen_props.show_developer_options:
             box = layout.box()
             box.operator(MESHGEN_OT_UninstallDependencies.bl_idname, icon="IMPORT")
+
+            if bpy.app.online_access:
+                box.prop(context.scene.meshgen_props, "use_ollama_backend", text="Use Ollama Backend")
+
+                if context.scene.meshgen_props.use_ollama_backend:
+                    ollama_options_box = box.box()
+                    ollama_options_box.prop(context.scene.meshgen_props, "ollama_host", text="Ollama Host")
diff --git a/property_groups/meshgen.py b/property_groups/meshgen.py
index 406b893f..eb98663a 100644
--- a/property_groups/meshgen.py
+++ b/property_groups/meshgen.py
@@ -45,4 +45,14 @@ class MeshGenProperties(bpy.types.PropertyGroup):
             description="Whether to show developer options.",
             default=False,
         ),
+        "use_ollama_backend": bpy.props.BoolProperty(
+            name="Use Ollama for Backend",
+            description="Use Ollama for backend processing",
+            default=False,
+        ),
+        "ollama_host": bpy.props.StringProperty(
+            name="Ollama Host",
+            description="Host address for Ollama backend",
+            default="http://localhost:11434",
+        )
     }
diff --git a/requirements/cpu.txt b/requirements/cpu.txt
index aefc1205..9b42c10d 100644
--- a/requirements/cpu.txt
+++ b/requirements/cpu.txt
@@ -2,3 +2,4 @@
 llama_cpp_python==0.2.90
 huggingface_hub
+ollama
\ No newline at end of file
diff --git a/requirements/cuda.txt b/requirements/cuda.txt
index e81631a6..ce092abc 100644
--- a/requirements/cuda.txt
+++ b/requirements/cuda.txt
@@ -2,3 +2,4 @@
 llama_cpp_python==0.2.90
 huggingface_hub
+ollama
\ No newline at end of file