#!/usr/bin/python
import tornado.web
import tornado.ioloop
import tornado.httpserver
import tornado.options
import os
import datetime
import openai
import asyncio
from tornado.web import RequestHandler
from tornado.options import define, options
from tornado.websocket import WebSocketHandler
import openai_async
import config
import re

openai.api_key = config.agentServer['agentApiKey']

def clearContent(content):
    # replace every configured keyword with 'SmartChat' before sending to the client
    for ck in config.agentFilterMoel['filterKeys']:
        content = content.replace(ck, 'SmartChat')
    return content

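# config.py is not shown in this listing. A minimal sketch of the keys the code
# above and below expects; all names and values here are assumptions for
# illustration, not the author's actual configuration:
#
#   agentServer = {'agentApiKey': 'sk-...'}                       # OpenAI API key
#   agentFilterMoel = {
#       'isOpen': True,                        # enable keyword filtering/buffering
#       'filterKeys': ['foo', 'bar'],          # substrings replaced with 'SmartChat'
#       'oneMessageSplit': ['。', '！', '？', '\n'],  # chunks that end a segment
#   }
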
async def talkGPTWithStream(self, message):
    print("{} says {}".format(self.request.remote_ip, message))
    try:
        async for chunk in await openai.ChatCompletion.acreate(
            model='gpt-3.5-turbo',
            # model='gpt-4-0314',
            messages=[{'role': 'user', 'content': message}],
            temperature=0.8,
            stream=True  # stream the reply back chunk by chunk
        ):
            content = chunk["choices"][0].get("delta", {}).get("content")
            role_content = chunk["choices"][0].get("delta", {}).get("role")
            if config.agentFilterMoel['isOpen']:
                if content is not None:
                    # buffer the chunk; it is ready to send once it ends a segment
                    self.my_replay_content = self.my_replay_content + content
                    self.currentIsSend = content in config.agentFilterMoel['oneMessageSplit']

                    if self.currentIsSend:
                        # filter keywords, then flush the buffered segment
                        cContent = clearContent(self.my_replay_content)
                        self.my_replay_content = ''
                        await self.write_message(cContent)
                else:
                    if role_content is not None:
                        # the first delta only carries the role; nothing to send yet
                        print('')
                    else:
                        # an empty delta marks the end of the stream
                        await self.write_message("___talk_end___")
            else:
                # filtering disabled: forward every chunk as soon as it arrives
                if content is not None:
                    await self.write_message(content)
                else:
                    if role_content is not None:
                        print('')
                    else:
                        await self.write_message("___talk_end___")
    except Exception as es:
        print('onMessage error', es)
        self.close()

async def talkGPT(self, message):
    # non-streaming variant: request the full completion, then send it in one message
    print("{} says {}".format(self.request.remote_ip, message))
    response = await openai.Completion.acreate(
        model="text-davinci-003",
        prompt=message,
        temperature=0,
        max_tokens=4000,
        top_p=1,
        frequency_penalty=0.5,
        presence_penalty=0
    )
    response_text = response['choices'][0]['text']
    await self.write_message(response_text + "___talk_end___")
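
# The two coroutines above expect to run as methods of a Tornado WebSocketHandler
# that defines my_replay_content and currentIsSend. The wiring below is a minimal
# sketch under that assumption; the handler name ChatHandler, the /chat route and
# the default port are illustrative, not taken from the original project.
define("port", default=8888, help="run on the given port", type=int)


class ChatHandler(WebSocketHandler):
    def open(self):
        # per-connection buffer used by talkGPTWithStream
        self.my_replay_content = ''
        self.currentIsSend = False

    def check_origin(self, origin):
        # allow cross-origin WebSocket connections (tighten for production)
        return True

    async def on_message(self, message):
        await talkGPTWithStream(self, message)


if __name__ == "__main__":
    tornado.options.parse_command_line()
    app = tornado.web.Application([(r"/chat", ChatHandler)])
    server = tornado.httpserver.HTTPServer(app)
    server.listen(options.port)
    tornado.ioloop.IOLoop.current().start()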