Skip to content

Commit

Permalink
Merge pull request #13 from Adri6336/fix_character/model selection bug
Browse files Browse the repository at this point in the history
Due to the way models are named and selected in the set_model function, other functions would fall back to the default model instead of the currently selected one. Additionally, stay_in_character was erroneously hard-coded to use GPT-4 only; this update fixes both issues.
  • Loading branch information
Adri6336 authored Mar 25, 2023
2 parents 13a8865 + f724d93 commit d7ff444
Showing 1 changed file with 8 additions and 5 deletions.
13 changes: 8 additions & 5 deletions chatbot.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,13 @@
from general_functions import *


def stay_in_character(message: str, key: str) -> tuple:
def stay_in_character(message: str, key: str, model: str) -> tuple:
"""
If the AI says something too robotic, this will have it stay in character.
:param message: This is the message the AI gave you.
:param key: This is your OpenAI key.
:param model: desired GPT model
:return: (bool representing success status, message, tokens)
"""

Expand All @@ -32,7 +33,7 @@ def stay_in_character(message: str, key: str) -> tuple:

# Setup GPT
gpt = GPT3(key)
gpt.set_model('gpt-4')
gpt.set_model(model)

# Try to get rephrased version
try:
Expand Down Expand Up @@ -143,6 +144,7 @@ class Chatbot():
conversation_memories = ''
total_back_and_forth = [] # This will contain the entire conversation, preserved through recycling
gpt_model = 'text-davinci-003' # This determines the model you're using for completion. Edit with self.set_model()
model_selection = 'davinci' # This represents what went into the set_model function
max_tokens = 4000
tokens = 0 # This represents the current token consumption
full_conversation = ''
Expand Down Expand Up @@ -286,7 +288,7 @@ def say_to_chatbot(self, text: str, outloud: bool = True,
# Also manage token count here
if declares_self_ai(reply):
try:
new_response = stay_in_character(reply, self.api_key)
new_response = stay_in_character(reply, self.api_key, self.model_selection)

if new_response[0]: # If the attempt was successful
#self.tokens += new_response[2] # Add tokens to total
Expand Down Expand Up @@ -351,7 +353,7 @@ def recycle_tokens(self, chunk_by: int = 2, quiet=True):
ct = 0 # This will count until a specified termination threshold to protect againt infinite loops
terminate_value = len(chunks)
errorct = 0
gpt_model = self.gpt_model
gpt_model = self.model_selection

# 1. Collect mini summaries for entire conversation
info('Loading', 'topic')
Expand Down Expand Up @@ -449,7 +451,7 @@ def create_memories(self, chunk_by=2, quiet=True, restore=False):
ct = 0 # This will count until a specified termination threshold to protect againt infinite loops
terminate_value = len(chunks)
errorct = 0
model_placeholder = self.gpt_model
model_placeholder = self.model_selection

memory_directive = ("Create a new single memory text dict with the following format:\n\n" +
"{humans_job:[], humans_likes:[], humans_dislikes[], humans_personality:[], facts_about_human:[], things_discussed:[], humans_interests:[], things_to_remember:[]}\n\n" +
Expand Down Expand Up @@ -914,6 +916,7 @@ def set_model(self, desired_model: str, quiet=True):
# 1. Set model
self.gpt_model = models[desired_model][0]
self.max_tokens = models[desired_model][1]
self.model_selection = desired_model

# 2. Determine if max tokens are passed on new model
if self.tokens >= self.max_tokens:
Expand Down

0 comments on commit d7ff444

Please sign in to comment.