prompt_refiner.py
# Set your OpenAI API key as an environment variable, or pass it to prompt_refiner() directly.
import os

from openai import OpenAI


def prompt_refiner(key):
    # Initialize the OpenAI client with the supplied key
    client = OpenAI(api_key=key)

    class ChatCompletionMessage:
        """Lightweight wrapper around the message returned by the API."""

        def __init__(self, content, role, function_call=None, tool_calls=None):
            self.content = content
            self.role = role
            self.function_call = function_call
            self.tool_calls = tool_calls

        def get_text_content(self):
            # Return only the text content of the message
            return self.content

    # Read the image prompts produced by the previous step
    with open("./output_text_visual.txt", "r") as file:
        image_prompts = file.read()

    # Ask GPT-3.5 Turbo to rewrite the prompts using concrete, imageable words
    completion = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "you will take in a python command, and edit the prompt in the following way. silently count how many abstract, unimageable words are in the image prompt text (a word is imageable if, when put into an image-generator AI, it produces a corresponding visual). regenerate the python command, in the same command format, with as close to zero abstract words as possible in the text prompt. use associations to help you here: if the word is neurofeedback, use words like brain, electrodes, television_show, movie_screen, ocean_waves, etc. your output will be parsed as raw code, so do not include any commentary or numbering, and do not use bullet points. separate your output commands with new lines only, and be very careful so that it parses as correct code. focus on replacing unimageable words with imageable associations. keep the prompts very short and direct, and always include the word 'photorealistic' in responses."},
            {"role": "user", "content": str(image_prompts)},
        ],
    )

    # Extract the content generated by GPT-3.5 Turbo
    generated_message = completion.choices[0].message
    generated_message_object = ChatCompletionMessage(
        content=generated_message.content,
        role="assistant",  # chat completion replies always carry the assistant role
    )

    # Save the refined prompt text to a file, then print it for inspection
    text_content = generated_message_object.get_text_content()
    with open("output_text_visual_refined.txt", "w") as file:
        file.write(text_content)

    print("Text to be read:", text_content)
    return text_content
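

# A minimal usage sketch, not part of the original call chain: it assumes the key
# is exported as OPENAI_API_KEY and that ./output_text_visual.txt already exists
# next to this script.
if __name__ == "__main__":
    api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        raise SystemExit("Set the OPENAI_API_KEY environment variable first.")
    refined = prompt_refiner(api_key)
    print("Refined prompt saved to output_text_visual_refined.txt "
          f"({len(refined)} characters)")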