# Source code for llmtuner.llms.gpt

import openai 
import json
from llmtuner import config

# Configure the shared OpenAI client with the project-wide API key at import
# time, so every GPTChatter instance uses the same credentials.
openai.api_key = config.openai_api_key

class GPTChatter:
    """Thin wrapper around the OpenAI chat-completion API that keeps the
    conversation as a list of ``{"role": ..., "content": ...}`` dicts in
    ``self.history`` and can persist it to / restore it from JSON files."""

    def __init__(self, filename=""):
        """Create a chatter, optionally preloading history from a JSON file.

        * 'filename': path to a JSON history file; loaded when non-empty.
        """
        self.name = "plaingpt"  # backend identifier for this chatter
        self.history = []       # alternating user/assistant message dicts
        if filename:
            self.load_history(filename)

    def chat(self, prompt, usehisto=0, store=True, printstyle=False):
        """Get a response from the model.

        * 'usehisto': number of prior exchanges (user+assistant pairs) to
          prepend to the query as chat history
        * 'store': store query and result to history
        * 'printstyle': if True, the answer is printed line by line via
          _print_answer (which returns None) instead of being returned
        """
        if usehisto > 0:
            # Prepend at most the last `usehisto` exchanges (two history
            # entries per exchange) before the new prompt.
            startentry = max(0, len(self.history) - usehisto * 2)
            message = list(self.history[startentry:startentry + usehisto * 2])
            message.append({"role": "user", "content": prompt})
        else:
            message = [{"role": "user", "content": prompt}]
        if store:
            self.history.append({"role": "user", "content": prompt})
        chat = openai.ChatCompletion.create(model=config.openai_model,
                                            messages=message)
        answer = chat.choices[0].message.content
        if store:
            self.history.append({"role": "assistant", "content": answer})
        if printstyle:
            # _print_answer prints the lines and returns None.
            return self._print_answer(answer)
        return answer

    def reset_history(self):
        """Discard the stored conversation history."""
        self.history = []

    def get_last_answer(self):
        """Return the content of the most recent assistant message, or ""."""
        for entry in reversed(self.history):
            if entry["role"] == "assistant":
                return entry["content"]
        return ""

    def save_history(self, outfilename):
        """Write the conversation history to `outfilename` as JSON.

        Fix: the original referenced the misspelled name `outfiename`,
        which raised NameError on every call.
        """
        with open(outfilename, 'w') as file:
            json.dump(self.history, file)

    def load_history(self, infilename):
        """Replace the current history with the JSON content of `infilename`."""
        with open(infilename, 'r') as file:
            self.history = json.load(file)

    @staticmethod
    def _print_answer(answer):
        """Print the answer one line at a time (returns None)."""
        for line in answer.split("\n"):
            print(line)