From 22ac4b0d5e13fc20d05bcd690a8af40d792b92c1 Mon Sep 17 00:00:00 2001
From: winter <664946893@qq.com>
Date: Fri, 5 May 2023 14:37:13 +0800
Subject: [PATCH] add python

---
 python/config.py   |  23 ++++++++
 python/gptAgent.py |  90 ++++++++++++++++++++++++++++++
 python/server.py   | 134 +++++++++++++++++++++++++++++++++++++++++++++
 python/start.sh    |   4 ++
 python/stop.sh     |  16 ++++++
 5 files changed, 267 insertions(+)
 create mode 100644 python/config.py
 create mode 100644 python/gptAgent.py
 create mode 100644 python/server.py
 create mode 100644 python/start.sh
 create mode 100644 python/stop.sh

diff --git a/python/config.py b/python/config.py
new file mode 100644
index 0000000..08282cf
--- /dev/null
+++ b/python/config.py
@@ -0,0 +1,23 @@
+#! /usr/bin/python
+### Server settings ###
+agentServer = {
+    'port': 9999,
+    'threads': 1,
+    'agentApiKey': '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'
+}
+
+### Reply filtering ###
+agentFilterModel = {
+    'isOpen': True,
+    'filterKeys': ['Gpt','gPt','gpT','GPt','GpT','GPT','gpt','Openai','openAI','oPENAI','OpEnAi','OPENAI','oPenAi','OpENAI','openai','oPEnAi','OpEnai','oPENai','OPENai','OpENai','openAi','oPENAi','OpENaI','OPENaI','oPEnaI','OpEnAI','OPENAI','OpenAI','OpenAi'],
+    'oneMessageSplit': [',', '.', '，', '。', '?', '？', '!', '！']
+}
+
+### Conversation context ###
+agentTextual = {
+    'isOpen': False,
+    'talkCounts': 3,
+    'resultCounts': 3
+}
+
+
diff --git a/python/gptAgent.py b/python/gptAgent.py
new file mode 100644
index 0000000..47193dc
--- /dev/null
+++ b/python/gptAgent.py
@@ -0,0 +1,90 @@
+#! /usr/bin/python
+import tornado.web
+import tornado.ioloop
+import tornado.httpserver
+import tornado.options
+import os
+import datetime
+import openai
+import asyncio
+from tornado.web import RequestHandler
+from tornado.options import define, options
+from tornado.websocket import WebSocketHandler
+import openai_async
+import config
+import re
+
+openai.api_key = config.agentServer['agentApiKey']
+
+def clearContent(content):
+    for ck in config.agentFilterModel['filterKeys']:
+        content = content.replace(ck, 'SmartChat')
+    return content
+
+
+async def talkGPTWithStream(self, message):
+    print("{} says {}".format(self.request.remote_ip, message))
+    try:
+        #currentLineContents = ''
+        #currentIsSend = False
+        async for chunk in await openai.ChatCompletion.acreate(
+            model='gpt-3.5-turbo',
+            #model='gpt-4-0314',
+            messages=[{'role': 'user', 'content': message}],
+            temperature=0.8,
+            #max_tokens=400,
+            stream=True  # this time, we set stream=True
+        ):
+            content = chunk["choices"][0].get("delta", {}).get("content")
+            role_content = chunk["choices"][0].get("delta", {}).get("role")
+            #print("response {}", content)
+            if config.agentFilterModel['isOpen']:
+                if content is not None:
+                    ### does this chunk end a sentence?
+                    if content in config.agentFilterModel['oneMessageSplit']:
+                        self.my_reply_content += content
+                        self.currentIsSend = True
+                    else:
+                        self.my_reply_content += content
+                        self.currentIsSend = False
+
+                    if self.currentIsSend:
+                        #### filter keywords before sending the buffered sentence
+                        cContent = clearContent(self.my_reply_content)
+                        self.my_reply_content = ''
+                        await self.write_message(cContent)
+                        #await self.write_message(currentLineContents)
+                elif role_content is None:  # stream finished
+                    if self.my_reply_content:  # flush any buffered tail first
+                        await self.write_message(clearContent(self.my_reply_content))
+                        self.my_reply_content = ''
+                    await self.write_message("___talk_end___")
+            else:
+                if content is not None:
+                    await self.write_message(content)
+                else:
+                    if role_content is not None:
+                        pass  # the first delta only carries the role
+                    else:
+                        await self.write_message("___talk_end___")
+    except Exception as es:
+        print('onMessage error', es)
+        self.close()
+
+
+async def talkGPT(self, message):
+    print("{} says {}".format(self.request.remote_ip, message))
+    response = await openai.Completion.acreate(
+        model="text-davinci-003",
+        prompt=message,
+        temperature=0,
+        max_tokens=4000,
+        top_p=1,
+        frequency_penalty=0.5,
+        presence_penalty=0
+    )
+    #print(f"response:{response}")
+    response_text = response['choices'][0]['text']
+    await self.write_message(response_text + "___talk_end___")
+
+
diff --git a/python/server.py b/python/server.py
new file mode 100644
index 0000000..77b6e09
--- /dev/null
+++ b/python/server.py
@@ -0,0 +1,134 @@
+#! /usr/bin/python
+import tornado.web
+import tornado.ioloop
+import tornado.httpserver
+import tornado.options
+import os
+import datetime
+import openai
+import asyncio
+from tornado.web import RequestHandler
+from tornado.options import define, options
+from tornado.websocket import WebSocketHandler
+import openai_async
+import config
+import gptAgent
+
+#define("port", default=9999, type=int)
+
+class IndexHandler(RequestHandler):
+    def get(self):
+        self.render("index.html")
+
+class ChatHandler(WebSocketHandler):
+
+    users = set()  # container holding the online users
+
+    def open(self):
+        print('client connected')
+        self.users.add(self)  # add the new connection to the online-user set
+        for u in self.users:
+            print("user count {}".format(len(self.users)))
+            u.write_message(u"[%s]-[%s]-joined" % (self.request.remote_ip, datetime.datetime.now().strftime("%d/%m/%Y")))
+        print("{} enter".format(self.request.remote_ip))
+
+    def on_message(self, message):
+        for u in self.users:  # broadcast the message to every online user
+            u.write_message(u"[%s]-[%s]-says: %s" % (self.request.remote_ip, datetime.datetime.now().strftime("%d/%m/%Y"), message))
+        print("{} says {}".format(self.request.remote_ip, message))
+
+    def on_close(self):
+        print('client disconnected')
+        self.users.remove(self)
+        for u in self.users:
+            u.write_message(u"[%s]-[%s]-left" % (self.request.remote_ip, datetime.datetime.now().strftime("%d/%m/%Y")))
+        print("{} leave".format(self.request.remote_ip))
+
+    def check_origin(self, origin):
+        # accept requests from any origin
+        return True
+
+        # parsed_origin = urllib.parse.urlparse(origin)
+        # return parsed_origin.netloc.endswith(".malls.iformall.com")
+
+#openai.api_key = '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';
+class gptAgentStreamHandler(WebSocketHandler):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.my_reply_content = ""
+        self.currentIsSend = False
+        self.currentIsAll = False
+
+    async def open(self):
+        print("{} connect".format(self.request.remote_ip))
+
+    async def on_message(self, message):
+        await gptAgent.talkGPTWithStream(self, message)
+
+    def on_close(self):
+        print("{} leave".format(self.request.remote_ip))
+
+    def check_origin(self, origin):
+        # accept requests from any origin
+        return True
+
+        # parsed_origin = urllib.parse.urlparse(origin)
+        # return parsed_origin.netloc.endswith(".malls.iformall.com")
+
+class gptAgentHandler(WebSocketHandler):
+
+    async def open(self):
+        print("{} connect".format(self.request.remote_ip))
+
+    async def on_message(self, message):
+        await gptAgent.talkGPT(self, message)
+
+    def on_close(self):
+        print("{} leave".format(self.request.remote_ip))
+
+    def check_origin(self, origin):
+        # accept requests from any origin
+        return True
+
+        # parsed_origin = urllib.parse.urlparse(origin)
+        # return parsed_origin.netloc.endswith(".malls.iformall.com")
+
+async def main():
+    tornado.options.parse_command_line()
+    app = tornado.web.Application([
+        (r"/index", IndexHandler),
+        (r"/wss/chat", ChatHandler),
+        #(r"/", gptAgentHandler)
+        (r"/", gptAgentStreamHandler)
+        ],
+        static_path=os.path.join(os.path.dirname(__file__), "static"),
+        template_path=os.path.join(os.path.dirname(__file__), "template"),
+        debug=True
+    )
+    http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
+    #http_server.listen(options.port)
+    http_server.bind(config.agentServer['port'])
+    http_server.start(config.agentServer['threads'])
+    await asyncio.Event().wait()
+
+if __name__ == '__main__':
+    asyncio.run(main())
+
+#if __name__ == '__main__':
+    #tornado.options.parse_command_line()
+    #app = tornado.web.Application([
+    #    (r"/index", IndexHandler),
+    #    (r"/wss/chat", ChatHandler),
+    #    #(r"/", gptAgentHandler)
+    #    (r"/", gptAgentStreamHandler)
+    #    ],
+    #    static_path=os.path.join(os.path.dirname(__file__), "static"),
+    #    template_path=os.path.join(os.path.dirname(__file__), "template"),
+    #    debug=True
+    #)
+    #http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
+    ##http_server.listen(options.port)
+    #http_server.bind(options.port)
+    #http_server.start(1)
+    #tornado.ioloop.IOLoop.current().start()
diff --git a/python/start.sh b/python/start.sh
new file mode 100644
index 0000000..7b66d15
--- /dev/null
+++ b/python/start.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+nohup ./server.py > nohup.log 2>&1 &
+
+
diff --git a/python/stop.sh b/python/stop.sh
new file mode 100644
index 0000000..05deb23
--- /dev/null
+++ b/python/stop.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+PID=`ps -ef | grep /usr/bin/python | grep server.py | awk '{print \$2}'`
+echo "ps -ef | grep /usr/bin/python | grep server.py | awk '{print \$2}'";
+echo "KILL PID: $PID";
+if [ "$PID"x != ""x ]; then
+    kill -9 $PID
+    if [ $? -eq 0 ]; then
+        echo "Kill $PID SUCCESS"
+    else
+        echo "Kill $PID FAILED"
+        exit 1;
+    fi
+fi
+
+
+
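
Usage note (not part of the patch): the sketch below is one way to exercise the streaming endpoint that server.py mounts at "/". It assumes the server is running locally on the port from config.agentServer and that the tornado package is installed; the script name and the ask() helper are illustrative only and are not included in this change. The "___talk_end___" sentinel and the port come from gptAgent.py and config.py above.

#! /usr/bin/python
# client_example.py -- hypothetical helper, not part of this patch.
import asyncio
from tornado.websocket import websocket_connect

async def ask(question):
    # gptAgentStreamHandler is routed at "/" and streams the answer back
    # sentence by sentence, finishing with the "___talk_end___" sentinel.
    conn = await websocket_connect("ws://127.0.0.1:9999/")
    await conn.write_message(question)
    while True:
        msg = await conn.read_message()
        if msg is None:                      # server closed the connection
            break
        if "___talk_end___" in msg:          # end-of-answer marker from gptAgent.py
            print(msg.replace("___talk_end___", ""), end="")
            break
        print(msg, end="", flush=True)
    print()
    conn.close()

if __name__ == '__main__':
    asyncio.run(ask("Hello"))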