@@ -0,0 +1,112 @@
#! /usr/bin/python
import json
import aiohttp
import config
def clearBaiChuanContent(content):
    # Mask every configured filter keyword before the text reaches the client.
    for ck in config.agentBaiChuanFilterMoel['filterKeys']:
        content = content.replace(ck, 'xx')
    return content

async def talkBaiChuanWithStream(self, message):
    print("{} says {}".format(self.request.remote_ip, message))
    url = "https://api.baichuan-ai.com/v1/chat/completions"
    api_key = config.agentServer['agentBaiChuanApiKey']
    try:
        _msg = json.loads(message)
        # Accept either {"model": ..., "messages": [...]} or a bare message list.
        if isinstance(_msg, dict) and 'model' in _msg and 'messages' in _msg:
            _baichuanModel = _msg['model']
            _real_msg = _msg['messages']
        else:
            _baichuanModel = 'Baichuan2'
            _real_msg = _msg
        _data = {
            "model": _baichuanModel,
            "messages": _real_msg,
            "stream": True
        }
        headers = {
            "Content-Type": "application/json",
            "Authorization": "Bearer " + api_key
        }
        async with aiohttp.ClientSession() as http_client:
            async with http_client.post(url, headers=headers, json=_data) as resp:
                isDone = False
                # Read the SSE stream line by line so a JSON event can never
                # be split across an arbitrary fixed-size chunk boundary.
                async for raw_line in resp.content:
                    con = raw_line.decode().strip()
                    if con.startswith('data:'):
                        con = con[len('data:'):].strip()
                    if con == '':
                        continue
                    if con == '[DONE]':
                        isDone = True
                    else:
                        con_dict = json.loads(con)
                        for con_cho in con_dict['choices']:
                            # A delta may carry only a role, so guard the lookup.
                            self.my_replay_content += con_cho['delta'].get('content', '')
                    print("response {}".format(self.my_replay_content))
                    if config.agentBaiChuanFilterMoel['isOpen']:
                        cContent = clearBaiChuanContent(self.my_replay_content)
                    else:
                        cContent = self.my_replay_content
                    self.my_replay_content = ''
                    if isDone:
                        await self.write_message(cContent + "___talk_end___")
                        break
                    if cContent:
                        await self.write_message(cContent)
    except Exception as es:
        print('onMessage error', es)
        self.close()
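
# A minimal client sketch for the /baichuan endpoint served by server.py,
# assuming the server is running locally on config.agentServer['port'] and
# the third-party `websockets` package is installed (an assumption; any
# WebSocket client works). The server streams text frames and marks the
# final frame with the literal suffix ___talk_end___.
if __name__ == '__main__':
    import asyncio
    import websockets

    async def demo():
        uri = "ws://127.0.0.1:{}/baichuan".format(config.agentServer['port'])
        async with websockets.connect(uri) as ws:
            await ws.send(json.dumps([{"role": "user", "content": "Hello"}]))
            reply = ""
            while not reply.endswith("___talk_end___"):
                reply += await ws.recv()
            print(reply[:-len("___talk_end___")])

    asyncio.run(demo())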
@@ -0,0 +1,41 @@
#! /usr/bin/python
import requests
import json
import config

def do_request():
    url = "https://api.baichuan-ai.com/v1/chat/completions"
    api_key = config.agentServer['agentBaiChuanApiKey']
    data = {
        "model": "Baichuan2",
        "messages": [
            {
                "role": "user",
                "content": "What is the highest mountain in the world?"
            }
        ],
        "stream": True
    }
    json_data = json.dumps(data)
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer " + api_key
    }
    response = requests.post(url, data=json_data, headers=headers, timeout=60)
    if response.status_code == 200:
        print("Request succeeded!")
        print("Response body:", response.text)
        print("Request succeeded, X-BC-Request-Id:", response.headers.get("X-BC-Request-Id"))
    else:
        print("Request failed, status code:", response.status_code)
        print("Request failed, body:", response.text)
        print("Request failed, X-BC-Request-Id:", response.headers.get("X-BC-Request-Id"))

if __name__ == "__main__":
    do_request()
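
# A sketch of consuming the same endpoint incrementally instead of buffering
# the whole body. Note that "stream": True in the JSON payload only asks the
# API to answer as an SSE stream; requests itself also needs stream=True as a
# keyword argument to read it line by line. The delta format assumed here is
# the one the agent code above parses.
def do_request_streaming():
    url = "https://api.baichuan-ai.com/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer " + config.agentServer['agentBaiChuanApiKey']
    }
    data = {
        "model": "Baichuan2",
        "messages": [{"role": "user", "content": "What is the highest mountain in the world?"}],
        "stream": True
    }
    with requests.post(url, json=data, headers=headers, stream=True, timeout=60) as response:
        for raw_line in response.iter_lines():
            line = raw_line.decode().strip()
            if line.startswith("data:"):
                line = line[len("data:"):].strip()
            if not line or line == "[DONE]":
                continue
            event = json.loads(line)
            for choice in event["choices"]:
                # Print each delta as it arrives.
                print(choice["delta"].get("content", ""), end="", flush=True)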
@@ -0,0 +1,30 @@
#! /usr/bin/python
### Server configuration ###
agentServer = {
    'port': 9999,
    'threads': 1,
    # Keys live in this file for convenience; a production deployment should
    # load them from an environment variable or a secret store instead.
    'agentApiKey': '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',
    'agentBaiChuanApiKey': '9f7d5847897f8090195cdc2c3249d0a7'
}
### Filtering ###
agentFilterMoel = {
    'isOpen': True,
    'filterKeys': ['Gpt','gPt','gpT','GPt','GpT','GPT','gpt','Openai','openAI','oPENAI','OpEnAi','OPENAI','oPenAi','OpENAI','openai','oPEnAi','OpEnai','oPENai','OPENai','OpENai','openAi','oPENAi','OpENaI','OPENaI','oPEnaI','OpEnAI','OPENAI','OpenAI','OpenAi'],
    'oneMessageSplit': [',','.',',','。','?','?','!','!']
}
agentBaiChuanFilterMoel = {
    'isOpen': False,
    'filterKeys': ['Gpt','gPt','gpT','GPt','GpT','GPT','gpt','Openai','openAI','oPENAI','OpEnAi','OPENAI','oPenAi','OpENAI','openai','oPEnAi','OpEnai','oPENai','OPENai','OpENai','openAi','oPENAi','OpENaI','OPENaI','oPEnaI','OpEnAI','OPENAI','OpenAI','OpenAi'],
    'oneMessageSplit': [',','.',',','。','?','?','!','!']
}
### Conversation context ###
agentTextual = {
    'isOpen': False,
    'talkCounts': 3,
    'resultCounts': 3
}
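
# Note: the filterKeys lists above spell out case variants of "gpt"/"openai"
# by hand. A case-insensitive regex covers all of them in one rule; this is
# an illustrative sketch only and is not wired into the agents.
import re

_FILTER_PATTERN = re.compile(r'gpt|openai', re.IGNORECASE)

def filter_sketch(content, replacement='xx'):
    # Equivalent substring masking, e.g. filter_sketch('Ask ChatGPT')
    # returns 'Ask Chatxx'.
    return _FILTER_PATTERN.sub(replacement, content)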
@@ -0,0 +1,130 @@
#! /usr/bin/python
import json
import openai
import config

openai.api_key = config.agentServer['agentApiKey']

def clearContent(content):
    # Mask every configured filter keyword before the text reaches the client.
    for ck in config.agentFilterMoel['filterKeys']:
        content = content.replace(ck, 'SmartChat')
    return content

async def talkGPTWithStream(self, message):
    print("{} says {}".format(self.request.remote_ip, message))
    try:
        _msg = json.loads(message)
        # Accept either {"model": ..., "messages": [...]} or a bare message list.
        if isinstance(_msg, dict) and 'model' in _msg and 'messages' in _msg:
            _gptModel = _msg['model']
            _real_msg = _msg['messages']
            # Map the short client aliases; any other value is passed through.
            if _gptModel == '4':
                _gptModel = 'gpt-4-0314'
            elif _gptModel == '3':
                _gptModel = 'gpt-3.5-turbo'
        else:
            _gptModel = 'gpt-3.5-turbo'
            _real_msg = _msg
        async for chunk in await openai.ChatCompletion.acreate(
            model=_gptModel,
            messages=_real_msg,
            temperature=0.8,
            max_tokens=4000,
            stream=True  # stream tokens back as they are generated
        ):
            content = chunk["choices"][0].get("delta", {}).get("content")
            role_content = chunk["choices"][0].get("delta", {}).get("role")
            if config.agentFilterMoel['isOpen']:
                if content is not None:
                    # Buffer deltas until a sentence-ending character from
                    # oneMessageSplit arrives, then filter and flush the
                    # buffered sentence as one WebSocket frame.
                    self.my_replay_content = self.my_replay_content + content
                    self.currentIsSend = content in config.agentFilterMoel['oneMessageSplit']
                    if self.currentIsSend:
                        cContent = clearContent(self.my_replay_content)
                        self.my_replay_content = ''
                        await self.write_message(cContent)
                elif role_content is None:
                    # Stream finished: flush any unterminated tail, then
                    # signal the end of the reply.
                    if self.my_replay_content:
                        await self.write_message(clearContent(self.my_replay_content))
                        self.my_replay_content = ''
                    await self.write_message("___talk_end___")
            else:
                if content is not None:
                    await self.write_message(content)
                elif role_content is None:
                    await self.write_message("___talk_end___")
    except Exception as es:
        print('onMessage error', es)
        self.close()

async def talkGPT(self, message):
    print("{} says {}".format(self.request.remote_ip, message))
    try:
        _msg = json.loads(message)
        # Accept either {"model": ..., "messages": [...]} or a bare message list.
        if isinstance(_msg, dict) and 'model' in _msg and 'messages' in _msg:
            _gptModel = _msg['model']
            _real_msg = _msg['messages']
            if _gptModel == '4':
                _gptModel = 'gpt-4-0314'
            else:
                _gptModel = 'gpt-3.5-turbo-0125'
        else:
            _gptModel = 'gpt-3.5-turbo-0125'
            _real_msg = _msg
        # The payload is a chat-style message list, so it has to go to the
        # chat endpoint; openai.Completion expects a plain-text prompt.
        response = await openai.ChatCompletion.acreate(
            model=_gptModel,
            messages=_real_msg,
            temperature=0,
            max_tokens=4000,
            top_p=1,
            frequency_penalty=0.5,
            presence_penalty=0
        )
        response_text = response['choices'][0]['message']['content']
        await self.write_message(response_text + "___talk_end___")
    except Exception as es:
        print('onMessage error', es)
        self.close()
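
# The streaming path above buffers deltas until a character from
# config.agentFilterMoel['oneMessageSplit'] arrives, then flushes one
# filtered sentence per frame. A pure-function sketch of that policy
# (a hypothetical helper, handy for unit-testing the buffering rule):
def split_into_sends(deltas, splitters):
    sends, buf = [], ""
    for d in deltas:
        buf += d
        if d in splitters:   # the delta itself is a sentence terminator
            sends.append(buf)
            buf = ""
    if buf:
        sends.append(buf)    # unterminated tail flushed at stream end
    return sends

# Example: split_into_sends(["Hi", ",", " there", "."], [",", "."])
# yields ["Hi,", " there."].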
@@ -0,0 +1,157 @@
#! /usr/bin/python
import tornado.web
import tornado.httpserver
import tornado.options
import os
import datetime
import asyncio
from tornado.web import RequestHandler
from tornado.websocket import WebSocketHandler
import config
import gptAgent
import baichuanAgent

class IndexHandler(RequestHandler):
    def get(self):
        self.render("index.html")

class ChatHandler(WebSocketHandler):
    users = set()  # container for the currently connected users

    def open(self):
        print('client connected')
        self.users.add(self)  # register the connection once it is established
        print("user count {}".format(len(self.users)))
        for u in self.users:
            u.write_message(u"[%s]-[%s]-joined" % (self.request.remote_ip, datetime.datetime.now().strftime("%d/%m/%Y")))
        print("{} enter".format(self.request.remote_ip))

    def on_message(self, message):
        for u in self.users:  # broadcast the message to every user
            u.write_message(u"[%s]-[%s]-says: %s" % (self.request.remote_ip, datetime.datetime.now().strftime("%d/%m/%Y"), message))
        print("{} says {}".format(self.request.remote_ip, message))

    def on_close(self):
        print('client left')
        self.users.remove(self)
        for u in self.users:
            u.write_message(u"[%s]-[%s]-left" % (self.request.remote_ip, datetime.datetime.now().strftime("%d/%m/%Y")))
        print("{} leave".format(self.request.remote_ip))

    def check_origin(self, origin):
        # Accept all cross-origin connections.
        return True
        # parsed_origin = urllib.parse.urlparse(origin)
        # return parsed_origin.netloc.endswith(".malls.iformall.com")

class gptAgentStreamHandler(WebSocketHandler):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.my_replay_content = ""
        self.currentIsSend = False
        self.currentIsAll = False

    def open(self):
        print("{} connect".format(self.request.remote_ip))

    async def on_message(self, message):
        await gptAgent.talkGPTWithStream(self, message)

    def on_close(self):
        print("{} leave".format(self.request.remote_ip))

    def check_origin(self, origin):
        # Accept all cross-origin connections.
        return True

class gptAgentHandler(WebSocketHandler):
    def open(self):
        print("{} connect".format(self.request.remote_ip))

    async def on_message(self, message):
        await gptAgent.talkGPT(self, message)

    def on_close(self):
        print("{} leave".format(self.request.remote_ip))

    def check_origin(self, origin):
        # Accept all cross-origin connections.
        return True

class gptBaiChuanStreamHandler(WebSocketHandler):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.my_replay_content = ""
        self.currentIsSend = False
        self.currentIsAll = False

    def open(self):
        print("{} connect".format(self.request.remote_ip))

    async def on_message(self, message):
        await baichuanAgent.talkBaiChuanWithStream(self, message)

    def on_close(self):
        print("{} leave".format(self.request.remote_ip))

    def check_origin(self, origin):
        # Accept all cross-origin connections.
        return True

async def main():
    tornado.options.parse_command_line()
    app = tornado.web.Application([
        (r"/index", IndexHandler),
        (r"/wss/chat", ChatHandler),
        (r"/talk", gptAgentHandler),
        (r"/", gptAgentStreamHandler),
        (r"/baichuan", gptBaiChuanStreamHandler)
    ],
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        template_path=os.path.join(os.path.dirname(__file__), "template"),
        debug=True
    )
    http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
    http_server.bind(config.agentServer['port'])
    http_server.start(config.agentServer['threads'])
    await asyncio.Event().wait()  # keep the event loop alive

if __name__ == '__main__':
    asyncio.run(main())
@@ -0,0 +1,4 @@
#!/bin/bash
nohup ./server.py > nohup.log 2>&1 &
@@ -0,0 +1,16 @@
#!/bin/bash
PID=`ps -ef | grep /usr/bin/python | grep server.py | awk '{print \$2}'`
echo "ps -ef | grep /usr/bin/python | grep server.py | awk '{print \$2}'";
echo "KILL PID: $PID";
if [ "$PID"x != ""x ]; then
    kill -9 $PID
    if [ $? -eq 0 ]; then
        echo "Kill $PID SUCCESS"
    else
        echo "Kill $PID FAILED"
        exit 1;
    fi
fi