Basic plugin support: add sdwebui (NovelAI image generation), godcmd (admin command enhancement), and Banwords (sensitive-word filtering) plugins #442

Merged
21 commits merged on Mar 14, 2023
1 change: 1 addition & 0 deletions .gitignore
@@ -7,3 +7,4 @@ config.json
QR.png
nohup.out
tmp
plugins.json
7 changes: 5 additions & 2 deletions app.py
@@ -4,14 +4,17 @@
from channel import channel_factory
from common.log import logger


from plugins import *
if __name__ == '__main__':
try:
# load config
config.load_config()

# create channel
channel = channel_factory.create_channel("wx")
channel_name='wx'
channel = channel_factory.create_channel(channel_name)
if channel_name=='wx':
PluginManager().load_plugins()

# startup channel
channel.startup()
4 changes: 3 additions & 1 deletion bot/baidu/baidu_unit_bot.py
@@ -2,6 +2,7 @@

import requests
from bot.bot import Bot
from bridge.reply import Reply, ReplyType


# Baidu Unit对话接口 (可用, 但能力较弱)
@@ -14,7 +15,8 @@ def reply(self, query, context=None):
headers = {'content-type': 'application/x-www-form-urlencoded'}
response = requests.post(url, data=post_data.encode(), headers=headers)
if response:
return response.json()['result']['context']['SYS_PRESUMED_HIST'][1]
reply = Reply(ReplyType.TEXT, response.json()['result']['context']['SYS_PRESUMED_HIST'][1])
return reply

def get_token(self):
access_key = 'YOUR_ACCESS_KEY'
6 changes: 5 additions & 1 deletion bot/bot.py
@@ -3,8 +3,12 @@
"""


from bridge.context import Context
from bridge.reply import Reply


class Bot(object):
def reply(self, query, context=None):
def reply(self, query, context : Context =None) -> Reply:
"""
bot auto-reply content
:param req: received message
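With the base class now typed against Context and Reply, every bot is expected to return a Reply object instead of a bare string. Below is a minimal sketch (illustrative only, not part of this PR's diff) of a subclass conforming to the new signature, assuming only the Context, ContextType, Reply and ReplyType classes referenced in this change set; EchoBot itself is hypothetical.

# Hypothetical bot, shown only to illustrate the new reply() contract.
from bot.bot import Bot
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType


class EchoBot(Bot):
    def reply(self, query, context: Context = None) -> Reply:
        if context and context.type == ContextType.TEXT:
            # Wrap the answer in a typed Reply instead of returning a raw string.
            return Reply(ReplyType.TEXT, query)
        # Unsupported context types are reported back as an ERROR reply.
        return Reply(ReplyType.ERROR, 'EchoBot only handles TEXT messages')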
114 changes: 66 additions & 48 deletions bot/chatgpt/chat_gpt_bot.py
@@ -1,41 +1,42 @@
# encoding:utf-8

from bot.bot import Bot
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from config import conf, load_config
from common.log import logger
from common.expired_dict import ExpiredDict
import openai
import time

if conf().get('expires_in_seconds'):
all_sessions = ExpiredDict(conf().get('expires_in_seconds'))
else:
all_sessions = dict()

# OpenAI对话模型API (可用)
class ChatGPTBot(Bot):
def __init__(self):
openai.api_key = conf().get('open_ai_api_key')
proxy = conf().get('proxy')
self.sessions = SessionManager()
if proxy:
openai.proxy = proxy

def reply(self, query, context=None):
# acquire reply content
if not context or not context.get('type') or context.get('type') == 'TEXT':
if context.type == ContextType.TEXT:
logger.info("[OPEN_AI] query={}".format(query))
session_id = context.get('session_id') or context.get('from_user_id')
session_id = context['session_id']
reply = None
if query == '#清除记忆':
Session.clear_session(session_id)
return '记忆已清除'
self.sessions.clear_session(session_id)
reply = Reply(ReplyType.INFO, '记忆已清除')
elif query == '#清除所有':
Session.clear_all_session()
return '所有人记忆已清除'
self.sessions.clear_all_session()
reply = Reply(ReplyType.INFO, '所有人记忆已清除')
elif query == '#更新配置':
load_config()
return '配置已更新'

session = Session.build_session_query(query, session_id)
reply = Reply(ReplyType.INFO, '配置已更新')
if reply:
return reply
session = self.sessions.build_session_query(query, session_id)
logger.debug("[OPEN_AI] session query={}".format(session))

# if context.get('stream'):
@@ -44,14 +45,29 @@ def reply(self, query, context=None):

reply_content = self.reply_text(session, session_id, 0)
logger.debug("[OPEN_AI] new_query={}, session_id={}, reply_cont={}".format(session, session_id, reply_content["content"]))
if reply_content["completion_tokens"] > 0:
Session.save_session(reply_content["content"], session_id, reply_content["total_tokens"])
return reply_content["content"]

elif context.get('type', None) == 'IMAGE_CREATE':
return self.create_img(query, 0)
if reply_content['completion_tokens'] == 0 and len(reply_content['content']) > 0:
reply = Reply(ReplyType.ERROR, reply_content['content'])
elif reply_content["completion_tokens"] > 0:
self.sessions.save_session(reply_content["content"], session_id, reply_content["total_tokens"])
reply = Reply(ReplyType.TEXT, reply_content["content"])
else:
reply = Reply(ReplyType.ERROR, reply_content['content'])
logger.debug("[OPEN_AI] reply {} used 0 tokens.".format(reply_content))
return reply

elif context.type == ContextType.IMAGE_CREATE:
ok, retstring = self.create_img(query, 0)
reply = None
if ok:
reply = Reply(ReplyType.IMAGE_URL, retstring)
else:
reply = Reply(ReplyType.ERROR, retstring)
return reply
else:
reply = Reply(ReplyType.ERROR, 'Bot不支持处理{}类型的消息'.format(context.type))
return reply

def reply_text(self, session, session_id, retry_count=0) ->dict:
def reply_text(self, session, session_id, retry_count=0) -> dict:
'''
call openai's ChatCompletion to get the answer
:param session: a conversation session
@@ -70,8 +86,8 @@ def reply_text(self, session, session_id, retry_count=0) ->dict:
presence_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
)
# logger.info("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
return {"total_tokens": response["usage"]["total_tokens"],
"completion_tokens": response["usage"]["completion_tokens"],
return {"total_tokens": response["usage"]["total_tokens"],
"completion_tokens": response["usage"]["completion_tokens"],
"content": response.choices[0]['message']['content']}
except openai.error.RateLimitError as e:
# rate limit exception
@@ -86,15 +102,15 @@ def reply_text(self, session, session_id, retry_count=0) ->dict:
# api connection exception
logger.warn(e)
logger.warn("[OPEN_AI] APIConnection failed")
return {"completion_tokens": 0, "content":"我连接不到你的网络"}
return {"completion_tokens": 0, "content": "我连接不到你的网络"}
except openai.error.Timeout as e:
logger.warn(e)
logger.warn("[OPEN_AI] Timeout")
return {"completion_tokens": 0, "content":"我没有收到你的消息"}
return {"completion_tokens": 0, "content": "我没有收到你的消息"}
except Exception as e:
# unknown exception
logger.exception(e)
Session.clear_session(session_id)
self.sessions.clear_session(session_id)
return {"completion_tokens": 0, "content": "请再问我一次吧"}

def create_img(self, query, retry_count=0):
@@ -107,22 +123,29 @@ def create_img(self, query, retry_count=0):
)
image_url = response['data'][0]['url']
logger.info("[OPEN_AI] image_url={}".format(image_url))
return image_url
return True, image_url
except openai.error.RateLimitError as e:
logger.warn(e)
if retry_count < 1:
time.sleep(5)
logger.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count+1))
return self.create_img(query, retry_count+1)
else:
return "提问太快啦,请休息一下再问我吧"
return False, "提问太快啦,请休息一下再问我吧"
except Exception as e:
logger.exception(e)
return None
return False, str(e)


class SessionManager(object):
def __init__(self):
if conf().get('expires_in_seconds'):
sessions = ExpiredDict(conf().get('expires_in_seconds'))
else:
sessions = dict()
self.sessions = sessions

class Session(object):
@staticmethod
def build_session_query(query, session_id):
def build_session_query(self, query, session_id):
'''
build query with conversation history
e.g. [
@@ -135,36 +158,33 @@ def build_session_query(query, session_id):
:param session_id: session id
:return: query content with conversaction
'''
session = all_sessions.get(session_id, [])
session = self.sessions.get(session_id, [])
if len(session) == 0:
system_prompt = conf().get("character_desc", "")
system_item = {'role': 'system', 'content': system_prompt}
session.append(system_item)
all_sessions[session_id] = session
self.sessions[session_id] = session
user_item = {'role': 'user', 'content': query}
session.append(user_item)
return session

@staticmethod
def save_session(answer, session_id, total_tokens):
def save_session(self, answer, session_id, total_tokens):
max_tokens = conf().get("conversation_max_tokens")
if not max_tokens:
# default 3000
max_tokens = 1000
max_tokens=int(max_tokens)
max_tokens = int(max_tokens)

session = all_sessions.get(session_id)
session = self.sessions.get(session_id)
if session:
# append conversation
gpt_item = {'role': 'assistant', 'content': answer}
session.append(gpt_item)

# discard exceed limit conversation
Session.discard_exceed_conversation(session, max_tokens, total_tokens)

self.discard_exceed_conversation(session, max_tokens, total_tokens)

@staticmethod
def discard_exceed_conversation(session, max_tokens, total_tokens):
def discard_exceed_conversation(self, session, max_tokens, total_tokens):
dec_tokens = int(total_tokens)
# logger.info("prompt tokens used={},max_tokens={}".format(used_tokens,max_tokens))
while dec_tokens > max_tokens:
@@ -173,13 +193,11 @@ def discard_exceed_conversation(session, max_tokens, total_tokens):
session.pop(1)
session.pop(1)
else:
break
break
dec_tokens = dec_tokens - max_tokens

@staticmethod
def clear_session(session_id):
all_sessions[session_id] = []
def clear_session(self, session_id):
self.sessions[session_id] = []

@staticmethod
def clear_all_session():
all_sessions.clear()
def clear_all_session(self):
self.sessions.clear()
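This hunk replaces the module-level Session helper and the global all_sessions dict with a per-bot SessionManager instance. A rough usage sketch follows (hypothetical, not taken from the PR; it assumes the config file has been loaded so conf() returns the expected keys, and the session id and message strings are made up).

# Hypothetical illustration of the SessionManager flow introduced above.
from config import load_config
from bot.chatgpt.chat_gpt_bot import SessionManager

load_config()        # SessionManager reads expires_in_seconds and character_desc from conf()
manager = SessionManager()

# Build the message list (system prompt + history + the new user turn) for one session.
messages = manager.build_session_query('hello', session_id='user_123')

# After the model answers, store the assistant turn and trim history against the token budget.
manager.save_session('Hi, how can I help?', session_id='user_123', total_tokens=42)

# Clear one session, or all of them.
manager.clear_session('user_123')
manager.clear_all_session()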
47 changes: 25 additions & 22 deletions bot/openai/open_ai_bot.py
@@ -1,6 +1,8 @@
# encoding:utf-8

from bot.bot import Bot
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from config import conf
from common.log import logger
import openai
@@ -13,30 +15,31 @@ class OpenAIBot(Bot):
def __init__(self):
openai.api_key = conf().get('open_ai_api_key')


def reply(self, query, context=None):
# acquire reply content
if not context or not context.get('type') or context.get('type') == 'TEXT':
logger.info("[OPEN_AI] query={}".format(query))
from_user_id = context.get('from_user_id') or context.get('session_id')
if query == '#清除记忆':
Session.clear_session(from_user_id)
return '记忆已清除'
elif query == '#清除所有':
Session.clear_all_session()
return '所有人记忆已清除'

new_query = Session.build_session_query(query, from_user_id)
logger.debug("[OPEN_AI] session query={}".format(new_query))

reply_content = self.reply_text(new_query, from_user_id, 0)
logger.debug("[OPEN_AI] new_query={}, user={}, reply_cont={}".format(new_query, from_user_id, reply_content))
if reply_content and query:
Session.save_session(query, reply_content, from_user_id)
return reply_content

elif context.get('type', None) == 'IMAGE_CREATE':
return self.create_img(query, 0)
if context and context.type:
if context.type == ContextType.TEXT:
logger.info("[OPEN_AI] query={}".format(query))
from_user_id = context['session_id']
reply = None
if query == '#清除记忆':
Session.clear_session(from_user_id)
reply = Reply(ReplyType.INFO, '记忆已清除')
elif query == '#清除所有':
Session.clear_all_session()
reply = Reply(ReplyType.INFO, '所有人记忆已清除')
else:
new_query = Session.build_session_query(query, from_user_id)
logger.debug("[OPEN_AI] session query={}".format(new_query))

reply_content = self.reply_text(new_query, from_user_id, 0)
logger.debug("[OPEN_AI] new_query={}, user={}, reply_cont={}".format(new_query, from_user_id, reply_content))
if reply_content and query:
Session.save_session(query, reply_content, from_user_id)
reply = Reply(ReplyType.TEXT, reply_content)
return reply
elif context.type == ContextType.IMAGE_CREATE:
return self.create_img(query, 0)

def reply_text(self, query, user_id, retry_count=0):
try:
40 changes: 33 additions & 7 deletions bridge/bridge.py
@@ -1,16 +1,42 @@
from bridge.context import Context
from bridge.reply import Reply
from common.log import logger
from bot import bot_factory
from common.singleton import singleton
from voice import voice_factory


@singleton
class Bridge(object):
def __init__(self):
pass
self.btype={
"chat": "chatGPT",
"voice_to_text": "openai",
"text_to_voice": "baidu"
}
self.bots={}

def fetch_reply_content(self, query, context):
return bot_factory.create_bot("chatGPT").reply(query, context)
def get_bot(self,typename):
if self.bots.get(typename) is None:
logger.info("create bot {} for {}".format(self.btype[typename],typename))
if typename == "text_to_voice":
self.bots[typename] = voice_factory.create_voice(self.btype[typename])
elif typename == "voice_to_text":
self.bots[typename] = voice_factory.create_voice(self.btype[typename])
elif typename == "chat":
self.bots[typename] = bot_factory.create_bot(self.btype[typename])
return self.bots[typename]

def get_bot_type(self,typename):
return self.btype[typename]

def fetch_voice_to_text(self, voiceFile):
return voice_factory.create_voice("openai").voiceToText(voiceFile)

def fetch_text_to_voice(self, text):
return voice_factory.create_voice("baidu").textToVoice(text)
def fetch_reply_content(self, query, context : Context) -> Reply:
return self.get_bot("chat").reply(query, context)

def fetch_voice_to_text(self, voiceFile) -> Reply:
return self.get_bot("voice_to_text").voiceToText(voiceFile)

def fetch_text_to_voice(self, text) -> Reply:
return self.get_bot("text_to_voice").textToVoice(text)
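Bridge is now a singleton that lazily creates one bot per capability and caches it in self.bots, rather than constructing a new bot on every call. A minimal usage sketch (assumed, not part of this PR's diff) of the refactored class:

# Hypothetical illustration: callers resolve every capability through the Bridge singleton.
from bridge.bridge import Bridge

bridge = Bridge()                     # @singleton: every call returns the same cached instance
print(bridge.get_bot_type("chat"))    # "chatGPT", per the btype mapping above

chat_bot = bridge.get_bot("chat")     # created on first use via bot_factory, then reused

# Voice transcription and synthesis are routed to their own cached voice bots.
text_reply = bridge.fetch_voice_to_text("voice.mp3")
voice_reply = bridge.fetch_text_to_voice("hello world")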
