@@ -0,0 +1,392 @@
import os

import openai
import pandas as pd
import tiktoken
from scipy import spatial

from logger import logger

from flask import Flask, request, make_response, redirect, jsonify, url_for
from flask_cors import CORS, cross_origin

from openai.embeddings_utils import get_embedding, cosine_similarity

# Redis vector-search imports (not yet used in this module)
import redis
from redis.commands.search.indexDefinition import (
    IndexDefinition,
    IndexType
)
from redis.commands.search.query import Query
from redis.commands.search.field import (
    TextField,
    VectorField
)

app = Flask(__name__)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})

# Read the API key from the environment; API keys should never be hard-coded
# in source.
openai.api_key = os.getenv("OPENAI_API_KEY")

EMBEDDING_MODEL = "text-embedding-ada-002"  # OpenAI's best embeddings as of Apr 2023
GPT_MODEL = "gpt-3.5-turbo"

def get_embed_array(strings):
    """Embed each string and return a DataFrame with 'text' and 'embedding' columns."""
    BATCH_SIZE = 1000  # you can submit up to 2048 embedding inputs per request

    embeddings = []
    for batch_start in range(0, len(strings), BATCH_SIZE):
        batch_end = batch_start + BATCH_SIZE
        batch = strings[batch_start:batch_end]
        print(f"Batch {batch_start} to {batch_end-1}")
        response = openai.Embedding.create(model=EMBEDDING_MODEL, input=batch)
        for i, be in enumerate(response["data"]):
            assert i == be["index"]  # double check embeddings are in same order as input
        batch_embeddings = [e["embedding"] for e in response["data"]]
        embeddings.extend(batch_embeddings)

    df = pd.DataFrame({"text": strings, "embedding": embeddings})
    # res = search_reviews(df, '华大基因是什么', n=3)
    # print(res)
    return df
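
# Illustrative usage (not executed; assumes a valid API key). The returned
# DataFrame has one row per input string, and text-embedding-ada-002 vectors
# are 1536-dimensional:
#
#   df = get_embed_array(["hello", "world"])
#   list(df.columns)          # ['text', 'embedding']
#   len(df["embedding"][0])   # 1536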

class ChatCompletion:
    def __init__(self):
        self.role = {"role": "system", "content": "You are a helpful assistant."}
        self.history = []
        # Knowledge-base corpus about 华大基因 (BGI Genomics); each string is
        # embedded once when the class is instantiated.
        self.embedding_strings = ['华大基因公司主营业务为通过基因检测、质谱检测、生物信息分析等多组学大数据技术手段,',
                                  '华大基因公司为科研机构、企事业单位、医疗机构、社会卫生组织等提供研究服务和精准医学检测综合解决方案。',
                                  '华大基因以推动生命科学研究进展、生命大数据应用和提高全球医疗健康水平为出发点,',
                                  '基于基因领域研究成果及精准检测技术在民生健康方面的应用,',
                                  '致力于加速科技创新,减少出生缺陷,加强肿瘤防控,抑制重大疾病对人类的危害,实现精准治愈感染,全面助力精准医学。',
                                  '以华大基因举例:作为中国基因行业的奠基者,秉承“基因科技造福人类”的愿景,通过20多年的人才积聚、科研积累和产业积淀,',
                                  '华大基因公司已建成覆盖全球百余个国家和全国所有省市自治区的营销服务网络,成为屈指可数的覆盖本行业全产业链、全应用领域的科技公司,立足技术先进、配置齐全和规模领先的多组学产出平台,已成为全球屈指可数的科学技术服务提供商和精准医疗服务运营商。',
                                  '2017年,华大基因于深圳证券交易所创业板上市',
                                  '在2016年的自然指数排名(Nature Index Annual Tables)中,华大名列亚太地区生命科学产业机构(life science - corporate)第一位,并连续七年蝉联该排名第一',
                                  '人类基因组计划:1999年9月1日,于英国茵格斯顿举行的第五次人类基因组测序战略会议上,华大基因创始人之一杨焕明提出,中国愿承担人类3号染色体短臂端粒一侧约30厘摩尔(相当于约3,000万个碱基对)区域的测序和分析任务,占整体人类基因组测序工作的1%,成为人类基因组计划成员当中唯一一个发展中国家',
                                  '水稻基因组计划:水稻(籼稻)基因组工作框架图:华大基因于2000年启动水稻基因组计划,希望通过测序找出水稻产量相关基因,并于2002年在《科学》期刊以封面文章发布首个水稻(籼稻)基因组的工作框架序列图,文章先后被引用接近四千次。此次由中国科学家合作完成的水稻基因工作框架图,是继人类基因组之后完成测定的最大的基因组,也是当时测定的最大植物基因组。 水稻作为第一个完成基因组测序农作物,对解决全球粮食问题具有重要意义,建立了改善水稻品质、提高水稻产量的重要基础,并被当时《科学》杂志期刊编辑认为水稻基因组研究工作,对人类健康的重要性在接下来的二十年较人类基因组更大',
                                  '国际大熊猫基因组计划:从北京奥运会的吉祥物大熊猫“晶晶”抽取样本,利用短序列(short-reads)模式组装全球首个熊科动物、也是第二个肉食类动物的基因图谱,包含2.25千兆(Gb)个碱基对,并于2010年1月在《自然》科学期刊以封面文章发布。研究进一步支持大多数科学家所持的“大熊猫属于熊科动物”的观点,并为日后使用次世代基因测序技术,快速省时地组装大型真核生物(eukaryotic),如哺乳类动物基因图谱奠定了基础。研究指出,相比于自然环境,人类活动对其造成的影响更为严重']
        self.df = get_embed_array(self.embedding_strings)

# Instantiating ChatCompletion embeds the whole corpus once at startup,
# which requires a live connection to the OpenAI API.
chat = ChatCompletion()

# search function
def strings_ranked_by_relatedness(
        query: str,
        df: pd.DataFrame,
        relatedness_fn=lambda x, y: 1 - spatial.distance.cosine(x, y),
        top_n: int = 100
) -> tuple[list[str], list[float]]:
    """Returns a list of strings and relatednesses, sorted from most related to least."""
    query_embedding_response = openai.Embedding.create(
        model=EMBEDDING_MODEL,
        input=query,
    )
    query_embedding = query_embedding_response["data"][0]["embedding"]
    strings_and_relatednesses = [
        (row["text"], relatedness_fn(query_embedding, row["embedding"]))
        for i, row in df.iterrows()
    ]
    strings_and_relatednesses.sort(key=lambda x: x[1], reverse=True)
    strings, relatednesses = zip(*strings_and_relatednesses)
    return strings[:top_n], relatednesses[:top_n]
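
# Illustrative usage (assumes chat.df from above):
#
#   strings, scores = strings_ranked_by_relatedness('上市', chat.df, top_n=3)
#   for s, r in zip(strings, scores):
#       print(f"{r:.3f}  {s[:30]}")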

def num_tokens(text: str, model: str = GPT_MODEL) -> int:
    """Return the number of tokens in a string."""
    encoding = tiktoken.encoding_for_model(model)
    return len(encoding.encode(text))
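
# Illustrative example: gpt-3.5-turbo resolves to the cl100k_base encoding,
# under which a short phrase like "hello world" should count as 2 tokens:
#
#   num_tokens("hello world")  # -> 2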

def query_message(
        query: str,
        df: pd.DataFrame,
        model: str,
        token_budget: int
) -> str:
    """Return a message for GPT, with relevant source texts pulled from a dataframe."""
    strings, relatednesses = strings_ranked_by_relatedness(query, df)
    introduction = '你的身份是华大基因公司的工作人员,使用下面的文章来回答问题.如果没有找到答案, 写 "我没有找到答案.",如果我的问题中包含"你们",请将你视为华大公司的工作人员。'
    question = f"\n\nQuestion: {query}"
    message = introduction
    for string in strings:
        next_article = f'\n"""\n{string}\n"""'
        if (
            num_tokens(message + next_article + question, model=model)
            > token_budget
        ):
            break
        else:
            message += next_article
    return message + question
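
# Schematically, the assembled prompt looks like this (corpus strings are
# appended in relatedness order until the token budget would be exceeded):
#
#   <introduction>
#   """
#   <most related corpus string>
#   """
#   """
#   <next most related corpus string>
#   """
#
#   Question: <query>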

@app.route(rule='/ask', methods=['post'])
@cross_origin()
def request_ask():
    msg = request.json.get('msg1')
    data = {'msg': ask(query=msg, df=chat.df)}
    return data
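
# Example request, assuming the server runs on localhost:8081 as configured
# in the __main__ block below:
#
#   curl -X POST http://localhost:8081/ask \
#        -H 'Content-Type: application/json' \
#        -d '{"msg1": "华大基因上市了没"}'
#
# Response shape: {"msg": "<answer text>"}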

def ask(
        query: str,
        df: pd.DataFrame = chat.df,
        model: str = GPT_MODEL,
        token_budget: int = 4096 - 500,
        print_message: bool = False,
) -> str:
    """Answers a query using GPT and a dataframe of relevant texts and embeddings."""
    message = query_message(query, df, model=model, token_budget=token_budget)
    if print_message:
        print(message)
    messages = [
        {"role": "system", "content": "You answer questions about BGI Genomics (华大基因)."},
        {"role": "user", "content": message},
    ]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0.8
    )
    response_message = response["choices"][0]["message"]["content"]
    print(response_message)
    return response_message
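
# Illustrative usage (the commented-out call in the __main__ block below
# shows the same pattern):
#
#   answer = ask('华大基因上市了没', print_message=True)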

@app.route(rule='/add_skill', methods=['post'])
@cross_origin()
def add_skill_request():
    msg = request.json.get('msg')
    add_skill(msg)
    return '技能添加成功:{}'.format(msg)

def add_skill(msg):
    # Embed msg and append it to the shared corpus DataFrame.
    strings = [msg]
    df = get_embed_array(strings)
    concat_df = pd.concat([chat.df, df], sort=False)
    chat.df = concat_df
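
# Example request for the /add_skill route above:
#
#   curl -X POST http://localhost:8081/add_skill \
#        -H 'Content-Type: application/json' \
#        -d '{"msg": "<new corpus sentence>"}'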

# Legacy similarity search kept for reference; it ranks df rows by cosine
# similarity (openai.embeddings_utils.cosine_similarity) to a description.
# def search_reviews(df, product_description, n=10, pprint=True):
#     embedding = openai.Embedding.create(input=[product_description], engine='text-embedding-ada-002')["data"][0]["embedding"]
#     df['similarities'] = df.embedding.apply(lambda x: cosine_similarity(x, embedding))
#     res = df.sort_values('similarities', ascending=False).head(n)
#     return res

# The assistant persona used by the chat endpoints lives in chat.role and
# can be changed at runtime via set_role below.

def chat_completion(msg):
    print("Current persona: {}".format(chat.role['content']))
    input_message = [
        chat.role
    ]
    # Replay the stored history so the model keeps multi-turn context.
    for m in chat.history:
        input_message.append(m['question'])
        input_message.append(m['answer'])
    input_message.append({"role": "user", "content": msg})

    print(input_message)
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=input_message,
        temperature=0.8,
        frequency_penalty=0,
        max_tokens=1024,
        presence_penalty=0,
        top_p=1
    )
    print(completion)
    print(completion.choices[0].message['content'])
    temp_completion = {'question': {"role": "user", "content": msg},
                       'answer': {"role": "assistant", "content": completion.choices[0].message['content']}}

    chat.history.append(temp_completion)
    # Keep only the five most recent exchanges.
    if len(chat.history) > 5:
        chat.history.pop(0)
    print(chat.history)
    return completion.choices[0].message['content']
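
# chat.history keeps at most the five most recent exchanges, each stored as:
#
#   {'question': {'role': 'user', 'content': '...'},
#    'answer':   {'role': 'assistant', 'content': '...'}}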

def chat_completion_for_multi_turn(msg):
    print("Current persona: {}".format(chat.role['content']))
    input_message = [
        chat.role
    ]
    for i in range(len(msg)):
        # Alternate roles: even indices are user turns, odd indices assistant turns.
        if i % 2 == 0:
            input_message.append({"role": "user", "content": msg[i]})
        else:
            input_message.append({"role": "assistant", "content": msg[i]})
    print(input_message)
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=input_message,
        temperature=0.8,
        frequency_penalty=0,
        max_tokens=1024,
        presence_penalty=0,
        top_p=1,
        stream=True
    )
    # With stream=True the API returns a generator of chunks, so the reply
    # must be accumulated from the deltas; the generator has no .choices
    # attribute to read after the loop.
    full_reply = ''
    for chunk in completion:
        content = chunk['choices'][0]['delta'].get('content', '')
        print(content, end='')
        full_reply += content
    print()
    return full_reply
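
# chat_completion_for_multi_turn expects msg as an alternating list of turns:
#
#   ['user turn 1', 'assistant reply 1', 'user turn 2', ...]
#
# Even indices become "user" messages, odd indices "assistant" messages.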

def set_role(data):
    chat.role = {"role": "system", "content": data}
    return '更新完毕,现在助理的身份是:' + data
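
# Illustrative usage; subsequent chat_completion calls adopt the new persona:
#
#   set_role('你是一个幽默段子手.')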

@app.route(rule='/data', methods=['post'])
def getjson():
    try:
        msg = request.json.get('content')
        data = {'msg': chat_completion(msg)}
        return data
    except Exception as e:
        print("An exception occurred:", str(e))
        return {'msg': '真抱歉,我连接Chatgpt的好像出现问题了呀'}
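
# Example request for the /data route above:
#
#   curl -X POST http://localhost:8081/data \
#        -H 'Content-Type: application/json' \
#        -d '{"content": "<user message>"}'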

@app.route(rule='/chat_data', methods=['post'])
@cross_origin()
def chat_data():
    chat.role = {"role": "system", "content": '根据以下文章提出可能问到的10个问题,并给出对应的答案,回答的形式为"{问题}|{答案}",不要回复其他内容。'}
    try:
        msg = []
        m = request.json.get('corpus')
        # Only append the corpus text if it was provided.
        if m is not None:
            msg.append(m)
        data = {'msg': chat_completion_for_multi_turn(msg)}
        return data
    except Exception as e:
        print("An exception occurred:", str(e))
        return {'msg': '真抱歉,我连接Chatgpt的好像出现问题了呀'}

# GET endpoint: takes a `prompts` query parameter, calls the OpenAI image
# API, and returns the generated image URLs.
@app.route(rule='/getPic', methods=['get'])
def getPic():
    try:
        prompts = request.args['prompts']
        print(prompts)
        response = openai.Image.create(
            prompt=prompts,
            n=4,
            size="1024x1024"
        )
        print(response)
        return response
    except Exception as e:
        print("An exception occurred:", str(e))
        return {'msg': '真抱歉,我连接Chatgpt的好像出现问题了呀'}
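
# Example request (the raw openai.Image.create response, containing four
# image URLs, is returned as-is):
#
#   curl 'http://localhost:8081/getPic?prompts=a%20red%20fox'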

@app.route(rule='/wx_chat', methods=['get'])
def wx_chat():
    msg = request.args['question']
    if '@chat_fox' not in msg:
        # If the message does not @-mention the bot, do not answer.
        return ''
    print('msg=' + msg)
    logger.info('msg=' + msg)

    # The question arrives as two lines: contents[0] is the sender,
    # contents[1] is the dialog text.
    contents = msg.split('\n')
    msg = contents[1]

    # Strip the @chat_fox mention out of the dialog.
    contents = msg.split('@chat_fox')
    msg = ''
    for word in contents:
        msg = msg + word.strip()

    # If the message contains the change-assistant command, update the
    # persona instead of answering.
    if '\\assistant' in msg:
        # Split out the command and rebuild the dialog text.
        contents = msg.split('\\assistant')
        dialog = ''
        for word in contents:
            dialog = dialog + word.strip()
        return {
            'data': {'id': 'yinruoxi053115',
                     'content': set_role(dialog)
                     },
            'msg': 'ok',
            'ret': 0}

    # Normal dialog: call the OpenAI API.
    data = {
        'data': {'id': 'yinruoxi053115',
                 'content': chat_completion(msg)
                 },
        'msg': 'ok',
        'ret': 0}
    return data
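
# Expected `question` format: "<sender>\n@chat_fox <dialog>". A dialog
# containing "\assistant <persona>" switches the assistant persona via
# set_role instead of answering.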

if __name__ == '__main__':
    # ask('华大基因上市了没')
    print('hello')
    logger.info('hello')
    app.run(host='0.0.0.0', port=8081)