feat: Add MultiContextProcessor class
This commit introduces the MultiContextProcessor class, which processes multiple contexts with a selected prompt group and output settings.

MultiContextProcessor provides the following functionality:

- Processes multiple context files grouped by name.
- Reads the content of each file and runs it through the selected prompt group.
- Organizes the results into a dictionary whose keys are group names and whose values are lists of results for each file in the group.
- Saves the results to separate text files named by group and result index.

Separating the prompt-building, launching, and result-saving logic for multi-context processing makes the code more flexible and modular.

Signed-off-by: Fred Zimmerman <[email protected]>
Fred Zimmerman committed Aug 25, 2024
1 parent 610e9f6 commit 4f70a8a
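The MultiContextProcessor source itself is outside the files shown in this diff, so the following is a hedged illustration only: a minimal sketch of the behavior the commit message describes. The class shape, method names, and the plan.run() hook are assumptions, not the committed code.

# Hedged sketch only: the real MultiContextProcessor is not shown in this
# diff. Class shape, method names, and the plan "run" hook are assumptions.
import os
from collections import defaultdict


class MultiContextProcessor:
    def __init__(self, context_groups, plan, output_dir="output"):
        self.context_groups = context_groups  # {group_name: [file_path, ...]}
        self.plan = plan                      # a PromptGroups-style plan
        self.output_dir = output_dir

    def process_contexts(self):
        """Read each grouped context file and process it with the plan."""
        results = defaultdict(list)
        for group_name, paths in self.context_groups.items():
            for path in paths:
                with open(path, "r", encoding="utf-8") as f:
                    context = f.read()
                # Assumed hook: run the selected prompt group on this context.
                results[group_name].append(self.plan.run(context))
        return dict(results)  # {group_name: [result, ...]}

    def save_results(self, results):
        """Write each result to <group>_result_<index>.txt."""
        os.makedirs(self.output_dir, exist_ok=True)
        for group_name, group_results in results.items():
            for i, text in enumerate(group_results):
                out_path = os.path.join(self.output_dir, f"{group_name}_result_{i}.txt")
                with open(out_path, "w", encoding="utf-8") as f:
                    f.write(text)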
Showing 5 changed files with 62 additions and 10 deletions.
16 changes: 14 additions & 2 deletions Codexes2Gemini/classes/Codexes/Builders/Codexes2PartsOfTheBook.py
@@ -3,6 +3,7 @@
import logging
import os
import traceback
import uuid
from importlib import resources
from time import sleep
from typing import List
@@ -162,8 +163,19 @@ def process_codex_to_book_part(self, plan: PromptGroups):
                    f"Output for prompt {i + 1} does not meet desired length of {plan.minimum_required_output_tokens}. Discarding.")

        if satisfactory_results:
            self.logger.info(f"Returning satisfactory results of {self.count_tokens(satisfactory_results)}")

            if plan.require_json_output:
                output_data = {
                    "text": response.text,
                    # NOTE: prompt_feedback objects are not always JSON-serializable;
                    # wrap in str() if json.dump raises a TypeError.
                    "prompt_feedback": response.prompt_feedback,
                    # ... (Add other relevant data to the dictionary) ...
                }
                json_output_path = f"{plan.thisdoc_dir}/{plan.output_file_path}_{str(uuid.uuid4())[:6]}.json"
                with open(json_output_path, 'w') as json_file:
                    json.dump(output_data, json_file, indent=4)
                self.logger.info(f"JSON output saved to {json_output_path}")
                st.info(f"JSON output saved to {json_output_path}")
        else:
            self.logger.warning("No satisfactory results were generated.")
            satisfactory_results = "No satisfactory results were generated."
4 changes: 3 additions & 1 deletion Codexes2Gemini/classes/Codexes/Builders/PromptGroups.py
@@ -31,7 +31,8 @@ def __init__(self, context: str = "", context_file_paths: List[str] = None, user
                 selected_system_instruction_keys: List[str] = None,
                 selected_user_prompt_values: List[str] = None,
                 selected_user_prompts_dict: Dict[str, Any] = None,
                 config_file: str = None, use_all_user_keys: bool = False, add_system_prompt: str = "") -> None:
                 config_file: str = None, use_all_user_keys: bool = False, add_system_prompt: str = "",
                 require_json_output=False) -> None:

        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(log_level)
@@ -59,6 +60,7 @@ def __init__(self, context: str = "", context_file_paths: List[str] = None, user
        self.list_of_user_keys_to_use = list_of_user_keys_to_use or []
        self.continuation_prompts = continuation_prompts
        self.output_file_path = output_file_base_name
        self.require_json_output = require_json_output
        self.number_to_run = number_to_run
        self.minimum_required_output = minimum_required_output
        self.minimum_required_output_tokens = minimum_required_output_tokens
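Together with the Codexes2PartsOfTheBook.py change above, the new flag is threaded from the plan into output handling. A hedged usage sketch follows; the keyword names appear in this diff, but the values are placeholders:

# Sketch: enabling the new flag on a plan. Keyword names come from this
# diff; the file path and instruction key are placeholders.
plan = PromptGroups(
    context_file_paths=["manuscript.txt"],
    selected_system_instruction_keys=["default"],
    require_json_output=True,
)
# process_codex_to_book_part(plan) will then also write
# {thisdoc_dir}/{output_file_base_name}_<6-char-uuid>.json alongside the text output.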
9 changes: 9 additions & 0 deletions Codexes2Gemini/resources/prompts/user_prompts_dict.json
@@ -186,6 +186,15 @@
      "book highlights"
    ]
  },
  "outline2prompts": {
    "prompt": "Expand this outline for a book into the following objects:\n1) A document that may be used as context for model requests. This contains factual information about plot, characters, themes, and other elements that will help ensure continuity.\n2) System instructions to guide the model in how to create responses.\n3) A series of user prompts which each generate a chapter in the book. Each prompt can be expanded with additional details and specific actions for each character. You can also introduce new characters or locations as the story progresses. Remember to maintain consistency with the themes, plot twists, and overall tone of the book.",
    "tags": [
      "book outline",
      "AI writing",
      "style",
      "prompts"
    ]
  },
  "page_by_page_analysis": {
    "prompt": "Now do a page-by-page analysis of the uploaded manuscript. Award +1 for each positive, -1 for each negative, calculate totals and net.",
    "tags": [
24 changes: 17 additions & 7 deletions Codexes2Gemini/ui/streamlit_ui.py
@@ -39,6 +39,7 @@
from Codexes2Gemini.classes.Utilities.utilities import configure_logger
from Codexes2Gemini.classes.user_space import UserSpace, save_user_space, load_user_space
from Codexes2Gemini import __version__, __announcements__
from Codexes2Gemini.ui.multi_context_page import MultiContextUI as MCU


logger = configure_logger("DEBUG")
@@ -449,6 +450,7 @@ def multiplan_builder(user_space: UserSpace):
        maximum_output_tokens = 10000000
        minimum_required_output = False
        minimum_required_output_tokens = 50
        require_json_output = st.checkbox("Require JSON Output", value=False)

    with st.expander("Set Output Destinations"):
        thisdoc_dir = st.text_input("Output directory", value=os.path.join(os.getcwd(), 'output', 'c2g'))
@@ -477,6 +479,7 @@
            "minimum_required_output": minimum_required_output,
            "minimum_required_output_tokens": minimum_required_output_tokens,
            "log_level": log_level,
            "require_json_output": require_json_output
        })
        st.session_state.multiplan.append(st.session_state.current_plan)
        st.success(f"Plan '{plan_name}' added to multiplan")
@@ -556,6 +559,9 @@ def run_multiplan(multiplan, user_space):
        'complete_system_instruction': plan['complete_system_instruction'],
        'selected_system_instructions': plan['selected_system_instruction_keys'],
        'selected_user_prompts_dict': plan['selected_user_prompts_dict'],
        'user_prompts_dict': plan['user_prompts_dict'],
        'log_level': plan['log_level'],
        'require_json_output': plan['require_json_output'],
    }

    try:
@@ -898,15 +904,19 @@ def run_streamlit_app():
        user_space = UserSpace()
        save_user_space(user_space)

    tab1, tab2, tab4 = st.tabs(["Create Build Plans", "Run Saved Plans", "UserSpace"])

    with tab1:
    # Create pages using st.sidebar.selectbox
    page = st.sidebar.selectbox(
        "Select a page",
        ["Create Build Plans", "Run Saved Plans", "Multi-Context Processing", "UserSpace"],
    )
    if page == "Create Build Plans":
        multiplan_builder(user_space)

    with tab2:
    elif page == "Run Saved Plans":
        upload_build_plan()

    with tab4:
    elif page == "Multi-Context Processing":
        multi_context_app = MCU(user_space)
        multi_context_app.render()
    elif page == "UserSpace":
        user_space_app(user_space)


19 changes: 19 additions & 0 deletions Codexes2Gemini/ui/utils.py
@@ -0,0 +1,19 @@
import json
from importlib import resources

import streamlit as st


def filter_dict(dictionary, filter_text):
    return {k: v for k, v in dictionary.items() if
            filter_text.lower() in k.lower() or (
                    isinstance(v, dict) and filter_text.lower() in v.get('prompt', '').lower())}


def load_json_file(file_name):
    try:
        with resources.files('Codexes2Gemini.resources.prompts').joinpath(file_name).open('r') as file:
            return json.load(file)
    except Exception as e:
        st.error(f"Error loading JSON file: {e}")
        return {}
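As a usage sketch, the two helpers compose naturally for searching the packaged prompt files; the file name comes from the resource updated in this commit, and the search term is illustrative:

# Load the packaged user prompts, then keep entries whose key or
# "prompt" text mentions "outline" -- e.g. the new "outline2prompts" entry.
prompts = load_json_file("user_prompts_dict.json")
outline_prompts = filter_dict(prompts, "outline")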
