|
- """
- A simple wrapper for the official ChatGPT API
- https://github.com/acheong08/ChatGPT.git v3.py
- """
- import json
- import os
- import sys
- import platform
- from typing import NoReturn
-
- import requests
import tiktoken  # tokenizer used to count prompt tokens
-
class Chatbot_V3:
    """
    Wrapper around the official ChatGPT chat-completions API.

    Keeps one message history per ``convo_id``, truncates histories to fit
    the token budget, and streams responses from the OpenAI endpoint.
    """

    def __init__(
        self,
        api_key: str,
        engine: str = os.environ.get("GPT_ENGINE") or "gpt-3.5-turbo",
        proxy: str = None,
        max_tokens: int = 3000,
        temperature: float = 0.5,
        top_p: float = 1.0,
        presence_penalty: float = 0.0,
        frequency_penalty: float = 0.0,
        reply_count: int = 1,
        system_prompt: str = "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally",
        skills_path: str = "skills.csv",
    ) -> None:
        """
        Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys).

        :param api_key: OpenAI API key sent as a Bearer token.
        :param engine: chat model name; only gpt-3.5-turbo variants are supported.
        :param proxy: optional proxy URL applied to both http and https.
        :param max_tokens: conversation token budget (hard limit 4000).
        :param skills_path: optional "key|value" CSV of canned skills.
            NOTE(review): this used to be a hard-coded absolute developer path;
            it now defaults to a relative file next to the working directory.
        :raises Exception: if max_tokens > 4000 or the system prompt alone
            exceeds the budget.
        """
        self.engine = engine
        self.session = requests.Session()
        self.api_key = api_key
        self.system_prompt = system_prompt
        self.max_tokens = max_tokens
        self.temperature = temperature
        self.top_p = top_p
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.reply_count = reply_count

        if proxy:
            self.session.proxies = {
                "http": proxy,
                "https": proxy,
            }

        # Per-conversation message histories, each seeded with the system prompt.
        self.conversation: dict = {
            "default": [
                {
                    "role": "system",
                    "content": system_prompt,
                },
            ],
        }
        if max_tokens > 4000:
            raise Exception("Max tokens cannot be greater than 4000")

        if self.get_token_count("default") > self.max_tokens:
            raise Exception("System prompt is too long")

        # Optional canned skills: one "key|value" pair per line.
        self.skills = {}
        if os.path.exists(skills_path):
            with open(skills_path, "r", encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if "|" not in line:
                        continue  # skip blank or malformed lines instead of crashing
                    key, value = line.split("|", 1)
                    self.skills[key] = value

    def add_to_conversation(
        self,
        message: str,
        role: str,
        convo_id: str = "default",
    ) -> None:
        """
        Append a message with the given role to the conversation history.
        """
        self.conversation[convo_id].append({"role": role, "content": message})

    def get_conversation(
        self,
        convo_id: str = "default",
    ) -> list:
        """
        Return the message history for the given conversation.
        """
        return self.conversation[convo_id]

    def __truncate_conversation(self, convo_id: str = "default") -> None:
        """
        Drop oldest non-system messages until the history fits the token budget.
        """
        while True:
            if (
                self.get_token_count(convo_id) > self.max_tokens
                and len(self.conversation[convo_id]) > 1
            ):
                # Don't remove the first (system) message
                self.conversation[convo_id].pop(1)
            else:
                break

    # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
    def get_token_count(self, convo_id: str = "default") -> int:
        """
        Count the tokens the conversation will consume, per the OpenAI cookbook.

        :raises NotImplementedError: for engines other than gpt-3.5-turbo(-0301).
        """
        if self.engine not in ["gpt-3.5-turbo", "gpt-3.5-turbo-0301"]:
            # Fixed: original lacked the f prefix, so the engine name never appeared.
            raise NotImplementedError(f"Unsupported engine {self.engine}")

        encoding = tiktoken.encoding_for_model(self.engine)

        num_tokens = 0
        for message in self.conversation[convo_id]:
            # every message follows <im_start>{role/name}\n{content}<im_end>\n
            num_tokens += 4
            for key, value in message.items():
                num_tokens += len(encoding.encode(value))
                if key == "name":  # if there's a name, the role is omitted
                    num_tokens += 1  # role is always required and always 1 token
        num_tokens += 2  # every reply is primed with <im_start>assistant
        return num_tokens

    def get_max_tokens(self, convo_id: str) -> int:
        """
        Return the tokens still available for the completion.
        """
        return self.max_tokens - self.get_token_count(convo_id)

    def _request_stream(self, convo_id: str, role: str, kwargs: dict):
        """
        POST the current conversation to the chat-completions endpoint and
        return the streaming HTTP response.

        Per-call kwargs override the instance defaults (temperature, top_p,
        penalties, n, api_key).

        :raises Exception: on any non-200 HTTP status.
        """
        response = self.session.post(
            os.environ.get("API_URL") or "https://api.openai.com/v1/chat/completions",
            headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
            json={
                "model": self.engine,
                "messages": self.conversation[convo_id],
                "stream": True,
                # kwargs
                "temperature": kwargs.get("temperature", self.temperature),
                "top_p": kwargs.get("top_p", self.top_p),
                "presence_penalty": kwargs.get(
                    "presence_penalty",
                    self.presence_penalty,
                ),
                "frequency_penalty": kwargs.get(
                    "frequency_penalty",
                    self.frequency_penalty,
                ),
                "n": kwargs.get("n", self.reply_count),
                "user": role,
                "max_tokens": self.get_max_tokens(convo_id=convo_id),
            },
            stream=True,
        )
        if response.status_code != 200:
            raise Exception(
                f"Error: {response.status_code} {response.reason} {response.text}",
            )
        return response

    @staticmethod
    def _iter_deltas(response):
        """
        Yield (role_delta, content_delta) pairs from a streaming SSE response.

        Either element may be None when the chunk carries only the other field.
        Stops at the "[DONE]" sentinel.
        """
        for line in response.iter_lines():
            if not line:
                continue
            # Remove the "data: " prefix
            payload = line.decode("utf-8")[6:]
            if payload == "[DONE]":
                break
            resp: dict = json.loads(payload)
            choices = resp.get("choices")
            if not choices:
                continue
            delta = choices[0].get("delta")
            if not delta:
                continue
            yield delta.get("role"), delta.get("content")

    def ask_stream(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        **kwargs,
    ):
        """
        Ask a question; yield completion text fragments as they stream in.

        The full assistant reply is appended to the conversation afterwards.
        """
        # Make conversation if it doesn't exist
        if convo_id not in self.conversation:
            self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
        # Fixed: honor the caller-supplied role instead of hard-coding "user".
        self.add_to_conversation(prompt, role, convo_id=convo_id)
        self.__truncate_conversation(convo_id=convo_id)

        response = self._request_stream(convo_id, role, kwargs)
        response_role = None
        full_response = ""
        for role_delta, content in self._iter_deltas(response):
            if role_delta is not None:
                response_role = role_delta
            if content is not None:
                full_response += content
                yield content
        self.add_to_conversation(full_response, response_role, convo_id=convo_id)

    def ask_stream_text(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        **kwargs,
    ):
        """
        Ask a question; yield server-sent-event formatted text chunks.

        Emits one `data: {"content": ...}` event per fragment, a final
        `[DONE]` event, then clears the conversation (stateless mode).
        """
        # Make conversation if it doesn't exist
        if convo_id not in self.conversation:
            self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
        self.add_to_conversation(prompt, role, convo_id=convo_id)
        self.__truncate_conversation(convo_id=convo_id)

        response = self._request_stream(convo_id, role, kwargs)
        full_response = ""
        for _role_delta, content in self._iter_deltas(response):
            if content is not None:
                full_response += content
                yield 'data: {}\n\n'.format(json.dumps({'content': content}))  # push each fragment
        yield 'data: {}\n\n'.format(json.dumps({'content': '[DONE]'}))  # signal end of answer
        # Stateless mode: drop the context after every exchange.
        self.conversation[convo_id].clear()

    def ask(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        **kwargs,
    ) -> str:
        """
        Non-streaming ask: drain the stream and return the full reply.
        """
        response = self.ask_stream(
            prompt=prompt,
            role=role,
            convo_id=convo_id,
            **kwargs,
        )
        full_response: str = "".join(response)
        return full_response

    def rollback(self, n: int = 1, convo_id: str = "default") -> None:
        """
        Remove the last n messages from the conversation.
        """
        for _ in range(n):
            self.conversation[convo_id].pop()

    def reset(self, convo_id: str = "default", system_prompt: str = None) -> None:
        """
        Reset the conversation to just a system message.
        """
        self.conversation[convo_id] = [
            {"role": "system", "content": system_prompt or self.system_prompt},
        ]
-
if __name__ == "__main__":
    # Load API key / proxy settings from config.json next to this script.
    path = os.path.split(os.path.realpath(__file__))[0]
    with open(os.path.join(path, 'config.json'), 'r', encoding='utf-8') as f:
        config = json.load(f)
    # Proxy is only used on non-Linux (developer) machines.
    if platform.system().lower() == 'linux':
        proxys = None
    else:
        proxys = config['proxy']
    chat = Chatbot_V3(config['key'], "gpt-3.5-turbo", proxy=proxys)
    # Simple REPL: type a prompt, get the model's answer; "exit" quits.
    while True:
        prompt = input("请输入:")
        if prompt == 'exit':
            break
        print(chat.ask(prompt))
|