Commit
enable retrying request for extension if API call fails
LeonOstrez committed Oct 2, 2023
1 parent fec07eb commit d6a677c
Showing 4 changed files with 23 additions and 16 deletions.
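
The thrust of the change: the `project` object is now threaded from every call site through `create_gpt_chat_completion()` into `stream_gpt_completion()`, so that the `retry_on_exception` wrapper can ask the user whether to retry via the project-aware `styled_text()` helper (which, per `pilot/utils/questionary.py`, can also route the question over the IPC client used by the editor extension) instead of calling `questionary` directly. Below is a minimal sketch of that decorator pattern, simplified and not the repository code (`ApiError` and `ask_to_retry` are placeholder names):

import functools

class ApiError(Exception):
    """Stand-in for whatever exception the API client raises."""

def ask_to_retry(project, question):
    """Stand-in for styled_text(); the real helper can also forward the question over IPC."""
    return input(question + ' ')

def retry_on_exception(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        while True:
            try:
                return func(*args, **kwargs)
            except ApiError as err:
                # The wrapped function is called as func(data, req_type, project),
                # so the project object is the third positional argument.
                project = args[2]
                answer = ask_to_retry(
                    project,
                    "Do you want to try make the same request again? If yes, just press ENTER. Otherwise, type 'no'.")
                if answer.strip().lower() == 'no':
                    raise err
    return wrapper

@retry_on_exception
def stream_gpt_completion(data, req_type, project):
    ...  # the actual streaming API call lives here
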
2 changes: 1 addition & 1 deletion pilot/helpers/AgentConvo.py
@@ -70,7 +70,7 @@ def send_message(self, prompt_path=None, prompt_data=None, function_calls: Funct
         else:
             # if we don't, get the response from LLM
             try:
-                response = create_gpt_chat_completion(self.messages, self.high_level_step, function_calls=function_calls)
+                response = create_gpt_chat_completion(self.messages, self.high_level_step, self.agent.project, function_calls=function_calls)
             except TokenLimitError as e:
                 save_development_step(self.agent.project, prompt_path, prompt_data, self.messages, '', str(e))
                 raise e
8 changes: 5 additions & 3 deletions pilot/prompts/prompts.py
@@ -86,10 +86,10 @@ def get_additional_info_from_openai(project, messages):
     while not is_complete:
         # Obtain clarifications using the OpenAI API
         # { 'text': new_code }
-        response = create_gpt_chat_completion(messages, 'additional_info')
+        response = create_gpt_chat_completion(messages, 'additional_info', project)

         if response is not None:
-            if response['text'].strip() == END_RESPONSE:
+            if response['text'] and response['text'].strip() == END_RESPONSE:
                 # print(response['text'] + '\n')
                 return messages

@@ -132,7 +132,9 @@ def get_additional_info_from_user(project, messages, role):
                 break
             response = create_gpt_chat_completion(
                 generate_messages_from_custom_conversation(role, [get_prompt('utils/update.prompt'), message, answer], 'user'),
-                'additional_info')
+                'additional_info',
+                project
+            )

             message = response
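
The added `response['text'] and` guard in `get_additional_info_from_openai()` protects the `.strip()` call when a response comes back without usable text, presumably to cover the case where a failed request is declined rather than retried. A tiny illustration of the short-circuit (the sentinel value is a stand-in for this example, not the project's actual constant):

END_RESPONSE = 'EVERYTHING_CLEAR'  # stand-in sentinel for this example

def is_done(response):
    # Short-circuits on None/empty text instead of raising AttributeError on .strip().
    return bool(response['text'] and response['text'].strip() == END_RESPONSE)

print(is_done({'text': None}))                  # False
print(is_done({'text': ' EVERYTHING_CLEAR '}))  # True
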
25 changes: 15 additions & 10 deletions pilot/utils/llm_connection.py
@@ -5,7 +5,7 @@
 import time
 import json
 import tiktoken
-import questionary
+from prompt_toolkit.styles import Style

 from jsonschema import validate, ValidationError
 from utils.style import red
@@ -15,6 +15,7 @@
 from helpers.exceptions.TokenLimitError import TokenLimitError
 from utils.utils import fix_json, get_prompt
 from utils.function_calling import add_function_calls_to_request, FunctionCallSet, FunctionType
+from utils.questionary import styled_text


 def get_tokens_in_messages(messages: List[str]) -> int:
@@ -58,7 +59,7 @@ def num_tokens_from_functions(functions):
     return num_tokens


-def create_gpt_chat_completion(messages: List[dict], req_type, min_tokens=MIN_TOKENS_FOR_GPT_RESPONSE,
+def create_gpt_chat_completion(messages: List[dict], req_type, project,
                                function_calls: FunctionCallSet = None):
     """
     Called from:
@@ -69,7 +70,7 @@ def create_gpt_chat_completion(messages: List[dict], req_type, min_tokens=MIN_TO
         "Please check this message and say what needs to be changed... {message}"
     :param messages: [{ "role": "system"|"assistant"|"user", "content": string }, ... ]
     :param req_type: 'project_description' etc. See common.STEPS
-    :param min_tokens: defaults to 600
+    :param project: project
     :param function_calls: (optional) {'definitions': [{ 'name': str }, ...]}
         see `IMPLEMENT_CHANGES` etc. in `pilot/const/function_calls.py`
     :return: {'text': new_code}
@@ -99,7 +100,7 @@ def create_gpt_chat_completion(messages: List[dict], req_type, min_tokens=MIN_TO
     add_function_calls_to_request(gpt_data, function_calls)

     try:
-        response = stream_gpt_completion(gpt_data, req_type)
+        response = stream_gpt_completion(gpt_data, req_type, project)
         return response
     except TokenLimitError as e:
         raise e
@@ -186,12 +187,15 @@ def wrapper(*args, **kwargs):
             print(err_str)
             logger.error(f'There was a problem with request to openai API: {err_str}')

-            user_message = questionary.text(
+            project = args[2]
+            user_message = styled_text(
+                project,
                 "Do you want to try make the same request again? If yes, just press ENTER. Otherwise, type 'no'.",
-                style=questionary.Style([
-                    ('question', 'fg:red'),
-                    ('answer', 'fg:orange')
-                ])).ask()
+                style=Style.from_dict({
+                    'question': '#FF0000 bold',
+                    'answer': '#FF910A bold'
+                })
+            )

             # TODO: take user's input into consideration - send to LLM?
             # https://github.com/Pythagora-io/gpt-pilot/issues/122
@@ -202,11 +206,12 @@


 @retry_on_exception
-def stream_gpt_completion(data, req_type):
+def stream_gpt_completion(data, req_type, project):
     """
     Called from create_gpt_chat_completion()
     :param data:
     :param req_type: 'project_description' etc. See common.STEPS
+    :param project: NEEDED FOR WRAPPER FUNCTION retry_on_exception
     :return: {'text': str} or {'function_calls': {'name': str, arguments: '{...}'}}
     """
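
Note that `project = args[2]` in the wrapper relies on `stream_gpt_completion(data, req_type, project)` being invoked with three positional arguments, which is exactly how `create_gpt_chat_completion()` calls it above. A hedged sketch of a lookup that would also tolerate a keyword call (not part of the commit):

def get_project_arg(args, kwargs):
    # project is the third positional argument of stream_gpt_completion(data, req_type, project),
    # but fall back to a keyword argument if a caller ever passes it that way.
    if len(args) > 2:
        return args[2]
    return kwargs.get('project')
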
4 changes: 2 additions & 2 deletions pilot/utils/questionary.py
@@ -23,7 +23,7 @@ def styled_select(*args, **kwargs):
     return questionary.select(*args, **kwargs).unsafe_ask()  # .ask() is included here


-def styled_text(project, question, ignore_user_input_count=False):
+def styled_text(project, question, ignore_user_input_count=False, style=None):
     if not ignore_user_input_count:
         project.user_inputs_count += 1
     user_input = get_saved_user_input(project, question)
@@ -36,7 +36,7 @@

     if project.ipc_client_instance is None or project.ipc_client_instance.client is None:
         config = {
-            'style': custom_style,
+            'style': style if style is not None else custom_style,
         }
         question = remove_ansi_codes(question)  # Colorama and questionary are not compatible and styling doesn't work
         response = questionary.text(question, **config).unsafe_ask()  # .ask() is included here
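
The new optional `style` argument lets callers override the module's default `custom_style` with any prompt_toolkit `Style`; when omitted, behaviour is unchanged. A small self-contained sketch of the fallback logic, reusing the colour mapping the retry prompt passes in (the default style here is a stand-in, not the module's actual `custom_style`):

from prompt_toolkit.styles import Style

custom_style = Style.from_dict({'question': '#00FF00'})  # stand-in default

def pick_style(style=None):
    # Mirrors the change in styled_text(): prefer the caller's style, else the default.
    return style if style is not None else custom_style

retry_style = Style.from_dict({'question': '#FF0000 bold', 'answer': '#FF910A bold'})
assert pick_style(retry_style) is retry_style
assert pick_style() is custom_style
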
