# scene_generator_webui.py
import re
import os
import copy
import openai
import gradio as gr
os.environ["http_proxy"] = "http://127.0.0.1:7890"
os.environ["https_proxy"] = "http://127.0.0.1:7890"
os.environ["socks_proxy"] = "http://127.0.0.1:7890"
openai.api_base = "https://api.ai-yyds.com/v1"
openai.api_key = os.getenv("OPENAI_KEY")  # read the API key from the environment; never hardcode credentials


class CodeGenerator:
    def __init__(
        self,
        role="robot",
        file_path=None,
        preprompt=None,
        model="gpt-3.5-turbo",
        oncecall=False,
    ):
        """
        Initialize the LLM prompt from a file or a string; the file takes precedence.
        """
        robot_role = "You are a desktop robotic arm with 6 degrees of freedom and a gripper end effector. You need to understand my instructions and assist me in completing the assembly of the parts."
        scene_role = "You should act as a scene detector used to detect new scene observations after a desktop robot completes its actions."
        valid_role = "You need to act as a validator and answer the validation questions based on given robot code."
        if role == "robot":
            role = robot_role
        elif role == "scene":
            role = scene_role
        elif role == "valid":
            role = valid_role
        else:
            role = ""
        self.model = model
        self.file_path = file_path
        self.preprompt = preprompt
        self.system = [
            {
                "role": "system",
                "content": role,
            },
        ]
        self.history = copy.deepcopy(self.system)
        if file_path:
            with open(file_path, "r", encoding="utf-8") as file:
                self.preprompt = file.read()
        if oncecall:
            # Prime the conversation with the preprompt once at construction time.
            self.get_llm_response(user_input=self.preprompt)

    def get_llm_response(self, user_input=None):
        """
        Query the LLM API and return its reply, keeping the chat history.
        """
        if user_input:
            self.history.append({"role": "user", "content": user_input})
        completion = openai.ChatCompletion.create(
            model=self.model,  # e.g. "gpt-3.5-turbo"
            messages=self.history,  # full prompt: system role + chat history
            temperature=0.2,  # 0~2; larger values are more creative, smaller values more deterministic
            n=1,  # number of completions to generate
            # top_p=0.1,  # nucleus sampling; 0.1 keeps only the top 10% probability mass
            # presence_penalty=0,  # default 0, between -2 and 2; larger values encourage new topics
            # frequency_penalty=0,  # default 0, between -2 and 2; larger values reduce repeated tokens
            # stream=False,
            # logprobs=1,  # modify the likelihood of specified tokens appearing in the completion
            # stop="\n"  # stop sequence
        )
        answer = completion.choices[0].message.content
        self.history.append({"role": "assistant", "content": answer})
        # print(f"ChatGPT: {answer}")
        return answer
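
    # openai.ChatCompletion.create can raise openai.error.RateLimitError or
    # openai.error.APIConnectionError; a minimal retry sketch around the call
    # above (assuming the openai 0.x error classes) could look like:
    #
    #   for attempt in range(3):
    #       try:
    #           completion = openai.ChatCompletion.create(model=self.model, messages=self.history)
    #           break
    #       except openai.error.RateLimitError:
    #           time.sleep(2 ** attempt)  # requires `import time`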

    def clear_history(self):
        """
        Clear the chat history and re-send the preprompt.
        """
        print("CLEAR")
        self.history = copy.deepcopy(self.system)
        self.get_llm_response(user_input=self.preprompt)


# only scene generation
# codeg = CodeGenerator(role="scene", file_path="src/workspaces/scene_description_prompt.yml", model="gpt-3.5-turbo", oncecall=True)
# codeg = CodeGenerator(role="scene", file_path="src/workspaces/scene_description_prompt.yml", model="gpt-4-0613", oncecall=True)
# code & scene generation
codeg = CodeGenerator(role="robot", file_path="experiments/prompts/cot_3shot_sot_scene.yml", model="gpt-4-0613", oncecall=True)
# codeg = CodeGenerator(role="robot", file_path="eval/experiments/prompts/cot_3shot_comment_scene.yml", model="gpt-4-0613", oncecall=True)
# codeg = CodeGenerator(role="valid", file_path="eval/prompts/validation_prompt.yml", model="gpt-3.5-turbo", oncecall=True)
# codeg = CodeGenerator(role="valid", file_path="eval/prompts/validation_prompt.yml", model="gpt-4-0613", oncecall=True)
# codeg = CodeGenerator()
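
# A minimal standalone usage sketch (the instruction text is hypothetical):
# resp = codeg.get_llm_response("Pick up the red block and place it on the base plate.")
# print(resp)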


def write_to_file(question, answer):
    # Append the question/answer pair to a log file, collapsing blank lines in the answer.
    result = ""
    answer = re.sub(r'\n\s*\n', '\n', answer)
    result += question + '\n' + answer + '\n\n'
    with open("experiments/tmp.yml", "a") as f:
        f.write(result)


def answer(question, history=[]):
    history.append(question)
    message = codeg.get_llm_response(question)
    write_to_file(question, message)
    history.append(message)
    responses = [(u, b) for u, b in zip(history[::2], history[1::2])]
    # print(responses)
    return responses, history
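
# `answer` stores the dialogue flat as [user, bot, user, bot, ...] and re-pairs it
# into (user, bot) tuples, the format gr.Chatbot expects:
# ["hi", "hello"] -> [("hi", "hello")].
# Caveat: the mutable default `history=[]` is shared across bare calls; Gradio
# always passes the `state` list in, so the default is never hit in this UI.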


with gr.Blocks(css="#chatbot{height:1000px !important}") as demo:
    chatbot = gr.Chatbot(elem_id="chatbot", label="Assembly Helper")
    state = gr.State([])
    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="Please input your instructions...").style(container=False)
        with gr.Column(scale=16, min_width=0):
            clear = gr.Button("Clear")
    txt.submit(answer, [txt, state], [chatbot, state])
    # txt.submit(lambda: None, None, txt)
    clear.click(codeg.clear_history)
    clear.click(lambda: [], None, chatbot)
    clear.click(lambda: [], None, state)

demo.launch()
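
# demo.launch() serves the UI locally (Gradio defaults to http://127.0.0.1:7860);
# pass share=True or server_port=... to override.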