qq.py
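
"""QQ chat bot.

Receives CQHTTP/go-cqhttp message events on a FastAPI webhook, builds an
OpenAI-style message list (system prompt plus recent context), asks the chat
model for a reply via utils.get_resp, and sends the answer back through the
CQHTTP HTTP API as either a private message or a group reply.
"""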
from typing import List, Optional
from fastapi import FastAPI
from fastapi.responses import HTMLResponse
from datetime import datetime
import uvicorn
import requests
from utils import get_resp, gen_openai_message, Role
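
# The helpers from utils.py are not shown here: get_resp presumably sends an
# OpenAI-style message list to the chat model and returns the reply text, and
# gen_openai_message presumably wraps text into a {"role": ..., "content": ...}
# dict for the roles defined in Role.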

MODEL_NAME = 'ningyu'                   # model name passed to get_resp
GROUP_LIST = [110, 120, 119]            # QQ groups the bot is allowed to answer in
BOT_ID = 1145141919                     # the bot's own QQ number
CQHTTP_URL = 'http://127.0.0.1:5700'    # HTTP API endpoint of the CQHTTP/go-cqhttp client
BOT_NAME = "凝语"                        # display name the bot reacts to in group chats
CTX_LEN = 10                            # maximum number of context messages kept per conversation
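
# The system prompt is rebuilt on every call so the embedded timestamp stays
# current; it defines the bot persona in English and Chinese.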
def GET_SYSTEM_PROMPT():
return f"""Consider a conversation between User (a human) and Assistant (named {BOT_NAME}).
{BOT_NAME} is a cute, friendly, intelligent and multilingual AI assistant, by Vtuber-plan team.
{BOT_NAME} cannot access the Internet.
{BOT_NAME} can fluently speak the user's language (e.g. English, Chinese).
{BOT_NAME} can generate poems, stories, code, essays, songs, parodies, and more.
{BOT_NAME} possesses vast knowledge about the world, history, and culture.
{BOT_NAME}'s responses are always safe, creative, high-quality, human-like, and interesting.
{BOT_NAME} strictly refuses to discuss political, NSFW, or other unsafe topics.
{BOT_NAME} 是一个虚拟猫娘,她偶尔会在句子的末尾添加一个"喵~"。
{BOT_NAME} 经常会在对话中使用一些颜文字,如 ฅ՞•ﻌ•՞ฅ
{BOT_NAME} 对于不知道的问题不会随意编造,会如实回答"我不知道呢~"。
The current time is: {datetime.now().strftime("%Y/%m/%d %H:%M:%S %A")}.
User: Hi.
Assistant: 你好喵~
"""

PRIVATE_CTX = {}  # per-user chat history for private (friend) conversations
app = FastAPI()
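
# Resolve the message that `msg` replies to (if any) via the get_msg API.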
def get_last_msg(msg: dict) -> Optional[dict]:
for item in msg['message']:
if item['type'] == 'reply':
return get_msg(item['data']['id'])
return None

def find_ctx(msg: dict) -> List[dict]:
    '''
    Walk the reply chain to rebuild up to CTX_LEN messages of context;
    `msg` must be in the format returned by get_msg.
    '''
res = []
source = msg
for i in range(CTX_LEN):
if source is None:
break
if source['sender']['user_id'] == BOT_ID:
res.insert(0, gen_openai_message(
get_text_from_msg(source['message']), Role.Bot))
else:
res.insert(0, gen_openai_message(
get_text_from_msg(source['message']), Role.User))
source = get_last_msg(source)
return res
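
# Concatenate the plain-text segments of a message, ignoring CQ segments such
# as images, @-mentions, and replies.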
def get_text_from_msg(message: List[dict]) -> str:
res = ''
for item in message:
if item['type'] == 'text':
res += item['data']['text']
return res.strip()
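
# Fetch a stored message by id from the CQHTTP HTTP API (/get_msg).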
def get_msg(msg_id) -> dict:
packet = {
"message_id": msg_id
}
res = requests.post(
url=f"{CQHTTP_URL}/get_msg", json=packet).json()
return res['data']
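
# A group message is addressed to the bot if it @-mentions the bot, contains
# the bot's name, or replies to one of the bot's own messages.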
def check_for_me(message: List[dict]) -> bool:
for item in message:
if item['type'] == 'at':
if item['data']['qq'] == str(BOT_ID):
return True
if item['type'] == 'text':
if item['data']['text'].find(BOT_NAME) != -1:
return True
if item['type'] == 'reply':
source = get_msg(item['data']['id'])
if source['sender']['user_id'] == BOT_ID:
return True
return False
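
# Send a plain private message via /send_private_msg.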
def send_private_msg(msg: str, qq: int):
packet = {
"user_id": qq,
"message": msg,
}
requests.post(url=f"{CQHTTP_URL}/send_private_msg", json=packet)
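
# Reply in a group using a CQ reply code so the answer quotes the triggering
# message. (`user_id` is accepted but currently unused.)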
def send_group_reply(msg: str, group_id: int, message_id: str, user_id: str):
packet = {
"group_id": group_id,
"auto_escape": False,
"message": "[CQ:reply,id={}] {}".format(message_id, msg)
}
requests.post(url=f"{CQHTTP_URL}/send_group_msg", json=packet)
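
# Private chat: keep a rolling per-user context in PRIVATE_CTX; '/clear'
# resets it, and the window is capped at CTX_LEN messages.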
def handle_friend(msg: dict):
qq = msg['user_id']
text = get_text_from_msg(msg['message'])
if text == '/clear':
PRIVATE_CTX[qq] = []
send_private_msg('已清空上下文', qq)
return
if len(text) == 0:
return
if qq in PRIVATE_CTX:
PRIVATE_CTX[qq].append(gen_openai_message(text, Role.User))
else:
PRIVATE_CTX[qq] = [gen_openai_message(text, Role.User)]
res = get_resp([gen_openai_message(GET_SYSTEM_PROMPT(), Role.System)]+PRIVATE_CTX[qq], MODEL_NAME)
PRIVATE_CTX[qq].append(gen_openai_message(res, Role.Bot))
if len(PRIVATE_CTX[qq]) == 2:
send_private_msg(
f'聊天上下文只有 {CTX_LEN} 条哦~,超出后会刷掉最早的对话信息,当然也可以使用 /clear 清除上下文~', qq)
    # Each turn appends two messages (user + bot), so trim in a loop to keep
    # the rolling window at CTX_LEN entries.
    while len(PRIVATE_CTX[qq]) > CTX_LEN:
        PRIVATE_CTX[qq].pop(0)
send_private_msg(res, qq)
return
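
# Group chat: answer only in whitelisted groups and only when addressed; the
# context is rebuilt from the reply chain instead of being stored server-side.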
def handle_group(msg: dict):
group_id = msg['group_id']
if group_id not in GROUP_LIST:
return
user_id = msg['user_id']
message_id = msg['message_id']
    if not check_for_me(msg['message']):
        return
ctx = find_ctx(msg)
if len(ctx) != 0:
res = get_resp([gen_openai_message(GET_SYSTEM_PROMPT(), Role.System)]+ctx, MODEL_NAME)
send_group_reply(res, group_id, message_id, user_id)
return
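
# Dispatch by sub_type: 'friend' means a private chat, 'normal' a group message.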
def handle_msg(msg: dict):
    if msg['sub_type'] == 'friend':
        handle_friend(msg)
    elif msg['sub_type'] == 'normal':
        handle_group(msg)
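
# go-cqhttp (or another OneBot-compatible client) is assumed to be configured
# to POST every event to this endpoint via reverse HTTP.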
@app.post('/')
async def main(msg: dict):
    # Only 'message' events are handled; meta events (heartbeats) and other
    # post types are acknowledged with an empty 204 response.
    if msg['post_type'] == 'message':
        print(f"{msg['sender']['user_id']} >>> {msg['raw_message']}")
        handle_msg(msg)
    return HTMLResponse(status_code=204)

if __name__ == '__main__':
    uvicorn.run(app, host='0.0.0.0', port=5701, log_level="info")