-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathllm_connect.py
196 lines (162 loc) · 14.2 KB
/
llm_connect.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
from pyperplan.llm_to_pddl import llm_to_pddl
from openapillm import get_code_llm_openai
from hugchatapi import hugchatter
import sys
import logging
import time
# Default on-disk location for the most recently generated PDDL domain.
# NOTE(review): every function below rebinds its own local `file_path`, so this
# module-level value appears to act only as a fallback default — confirm it is
# still referenced anywhere before relying on it.
file_path = "testingfolder/sampledomain.pddl"
def save_text_to_file(text, file_path):
    """Write *text* to *file_path*, replacing any existing content.

    UTF-8 is forced explicitly: without an ``encoding`` argument, ``open``
    uses the platform locale encoding, which can corrupt or reject
    non-ASCII characters in LLM output (e.g. on Windows cp1252).

    Args:
        text: The string to persist (PDDL code or markdown conversation log).
        file_path: Destination path; the file is created or truncated.

    Returns:
        None.
    """
    with open(file_path, 'w', encoding='utf-8') as file:
        file.write(text)
def get_code_from_previous_generated(file_path):
    """Return the full text of a previously generated PDDL file.

    Reads with explicit UTF-8 to mirror ``save_text_to_file``, so code
    written on one platform reads back identically on another instead of
    depending on the locale default encoding.

    Args:
        file_path: Path of the file to read.

    Returns:
        The file's entire contents as a string.

    Raises:
        OSError: If the file does not exist or cannot be read.
    """
    with open(file_path, 'r', encoding='utf-8') as file:
        code = file.read()
    return code
def generate_domain(question, model_name, error_code):
    """Generate — and iteratively repair — a PDDL domain via an LLM.

    NOTE(review): the indentation in this block was reconstructed from a
    whitespace-mangled paste; the nesting below follows the control flow
    implied by the branch/return structure, but should be re-verified
    against the original repository file.

    Args:
        question: On the first run (``error_code`` False) the natural-language
            domain description; on repair runs (``error_code`` True) the
            planner error text fed back to the model.
        model_name: "CHAT_GPT" or "HUGGING_FACE". Only consulted on the first
            run; repair runs use the module-level global ``model_used``.
        error_code: False for the initial generation, True for a repair pass.

    Returns:
        The extracted PDDL code (or an error/conversation string) on some
        paths. NOTE(review): several branches fall through with an implicit
        ``None``, and some ``return`` statements sit after ``sys.exit`` and
        are unreachable — confirm which exits are intended.

    Side effects:
        Writes testingfolder/sampledomain.pddl and
        testingfolder/conversation.md, mutates module globals
        (``conversational_texts``, ``global_counter``, ``model_used``),
        sleeps between API calls, and may terminate the process via
        ``sys.exit``.
    """
    try:
        # --------------- First run when user inputs the domain text. -------------------
        if error_code == False:
            # Reset the cross-call conversation state held in module globals
            # so that subsequent repair runs (error_code True) can continue
            # this same conversation.
            global conversational_texts
            conversational_texts = []
            global global_counter
            global_counter = 0
            global model_used
            model_used = model_name
            # Shadows the module-level constant of the same name.
            file_path = "testingfolder/sampledomain.pddl"
            # Initialising the question with the user input for LLAMA3.
            # question = question+"\n \n Instruction: Generate PDDL domain code for the above text description in code blocks delimited only between ```pddl <CODE></CODE> ```. This code should be solved in a STRIPS Planner. Do not give any explanations."
            # Initialising the question with the user input for MISTRALAI.
            # question = question+"\n \nInstruction: Generate PDDL domain code for the above text description in code blocks delimited only between ```pddl <CODE></CODE> ```. This code should be solved in a STRIPS Planner. Do not give any explanations in the output."
            # question = question+"\n \n Requirement: Generate PDDL / STRIPS domain code for the above text description in code blocks delimited only between ```pddl <CODE></CODE> ```. This code should be solved in a STRIPS Planner. Do not give any explanations."
            # Initialising the question with the user input for ChatGPT.
            question = question + "\n \nRequirement: Generate PDDL domain code for the above text description in code blocks delimited only between ```pddl <CODE></CODE> ```. Do not give any explanations."
            # ------- Calling ChatGPT API for Generating PDDL Code. -------
            if model_name == "CHAT_GPT": # Calling chatgpt api model
                answer = get_code_llm_openai(question)
                time.sleep(3)
                # Failure path: log the conversation and return the model's
                # raw failure text to the caller.
                # NOTE(review): this branch keys on answer['success'] while
                # the repair branch below keys on answer['answer'] ==
                # "NO RESULT FOUND" — confirm the two APIs really differ.
                if answer['success']==False:
                    conversational_texts.append("**USER** :\n" + question)
                    conversational_texts.append("**LLM MODEL** ({}):".format(model_name) +"\n"+str(answer['answer'])+"\n")
                    paragraph = '\n'.join(conversational_texts)
                    save_text_to_file(paragraph, 'testingfolder/conversation.md')
                    time.sleep(3)
                    return conversational_texts[-1].split("LLM MODEL",1)[1]
            # ------- Calling HUGGING CHAT API for Generating PDDL Code. -------
            elif model_name == "HUGGING_FACE": # Calling huggingface api model
                answer = hugchatter(question)
                if answer['answer']=="NO RESULT FOUND":
                    conversational_texts.append("**USER** :" + question)
                    # NOTE(review): logs answer['text'] here but
                    # answer['answer'] elsewhere — verify the hugchatter
                    # result schema.
                    conversational_texts.append("**LLM MODEL** ({}):".format(model_used) +"\n"+str(answer['text'])+"\n")
                    paragraph = '\n'.join(conversational_texts)
                    save_text_to_file(paragraph, 'testingfolder/conversation.md')
                    time.sleep(3)
                    text_return = conversational_texts[-1].split("(HUGGING_FACE):",1)[1]
                    print(answer['answer'])
                    # print(text_return)
                    logging.error("ERROR : "+str(global_counter)+ " : "+text_return)
                    # Terminates the whole process on a failed Hugging Face call.
                    sys.exit("ERROR : "+str(global_counter)+ " : "+text_return)
            else:
                return "Invalid Model Name. Please provide a valid model name."
            # Success path for either model: record the exchange, persist the
            # generated code, and validate it with the planner.
            conversational_texts.append("**USER** :\n" + question) # Appending user input to conversational_texts
            conversational_texts.append("**LLM MODEL** ({}):".format(model_name)+"\n```pddl \n"+ answer['answer'] + "\n```") # Appending LLM model output to conversational_texts
            save_text_to_file(answer['answer'], file_path) # Saving the generated code to a file
            global_counter += 1 # Incrementing the global counter for the number of times the API is called.
            final = llm_to_pddl(True) # Calling Pyperplan to check the generated domain code.
            if final['success']:
                final['domain'] = conversational_texts[-1]
                paragraph = '\n'.join(conversational_texts)
                save_text_to_file(paragraph, 'testingfolder/conversation.md')
                logging.info(final)
                # Extract just the code between the ```pddl ... ``` fences.
                return final['domain'].split("pddl",1)[1].split('```')[0]
            # NOTE(review): if validation fails here the function falls
            # through and implicitly returns None — confirm the caller
            # handles that (presumably by re-invoking with error_code=True).
        # ---------------- Second run when user inputs the error code. -------------------
        elif error_code == True:
            logging.error("----- RUNNING FOR ERROR ---> {}".format(model_used)) # Logging the error for Model.
            code_with_error = get_code_from_previous_generated(file_path="testingfolder/sampledomain.pddl") # Getting the code from the file.
            file_path = "testingfolder/sampledomain.pddl"
            # ------- Calling ChatGPT API for Generating PDDL Code. -------
            if model_used == "CHAT_GPT": # Calling chatgpt api model
                # Cap ChatGPT repair attempts at 10 (counter starts at 1
                # after the first generation).
                if global_counter <= 9:
                    # `error` is the raw prompt sent to the API;
                    # `error_formatted` is the markdown-fenced copy kept in
                    # the conversation log.
                    error = code_with_error + "\n The above PDDL code produces the following error -> " + str(question) + "\n\n Requirement 1: Correct the error and give the entire PDDL code. \n Requirement 2: Do not create a problem file. Do not use conditional expressions. \n Requirement 3: Generate PDDL domain code in code blocks delimited only between ```pddl <CODE></CODE> ```. Do not give any explanations."
                    error_formatted = "\n```pddl\n"+code_with_error +"\n```"+"\n The above code produces the following error: " + str(question) + "\n\n Requirement 1: Correct the error and give the entire PDDL code. \n Requirement 2: Do not create a problem file. Do not use conditional expressions. \n Requirement 3: Generate PDDL domain code in code blocks delimited only between ```pddl <CODE></CODE> ```. Do not give any explanations."
                    conversational_texts.append("**USER** :" + error_formatted) # Appending user input to conversational_texts
                    logging.error("=========== CHAT COUNT ==========>> " + str(global_counter)) # Printing the global counter for the number of times the API is called.
                    answer = get_code_llm_openai(error) # Calling ChatGPT API for Generating PDDL Code for the error.
                    time.sleep(3)
                    if answer['answer']=="NO RESULT FOUND":
                        conversational_texts.append("**USER** :" + question)
                        conversational_texts.append("**LLM MODEL** ({}):".format(model_used) +"\n"+str(answer['text'])+"\n")
                        paragraph = '\n'.join(conversational_texts)
                        save_text_to_file(paragraph, 'testingfolder/conversation.md')
                        return conversational_texts[-1]
                    else:
                        conversational_texts.append("**LLM MODEL** ({}):".format(model_used)+"\n```pddl"+ answer['answer'] + "\n```") # Appending LLM model output to conversational_texts
                        save_text_to_file(answer['answer'], file_path) # Saving the generated code to a file.
                        time.sleep(3)
                        global_counter += 1 # Incrementing the global counter for the number of times the API is called.
                        final = llm_to_pddl(True) # Calling Pyperplan to check the generated domain code.
                        if final['success']: # If the domain code is correct.
                            final['domain'] = conversational_texts[-1]
                            paragraph = '\n'.join(conversational_texts)
                            save_text_to_file(paragraph, 'testingfolder/conversation.md') # Saving the conversation to a file.
                            logging.error("SUCCESS : "+str(global_counter))
                            sys.exit("SUCCESS : "+str(global_counter))
                            # NOTE(review): unreachable — sys.exit above
                            # raises SystemExit before this return runs.
                            return final['domain'] # Returning the domain code.
                else: # If the global counter exceeds 10.
                    paragraph = '\n'.join(conversational_texts)
                    save_text_to_file(paragraph, 'testingfolder/conversation.md')
                    logging.error("ERROR : "+str(global_counter))
                    sys.exit("ERROR : "+str(global_counter))
                    # NOTE(review): unreachable after sys.exit above.
                    return conversational_texts[-2] # Returning the last conversation.
            elif model_used == "HUGGING_FACE": # Calling huggingface api model
                # Hugging Face gets a higher retry budget (20 attempts).
                if global_counter <= 19:
                    # FOR LLAMA3
                    # error = code_with_error + "\n \n The above PDDL code produces the following error -> " + str(question) + "\n\nRequirement 1: Correct the error and only give the entire PDDL code for solving in a STRIPS Planner.\n Requirement 2: Do not create a problem file and do not use conditional expressions.\n Requirement 3: Give your output as the PDDL code in code blocks delimited only between ```pddl <CODE></CODE> ``` without any explanations."
                    # error_formatted = "\n```pddl\n"+code_with_error +"\n```"+"\n \n The above code produces the following error: " + str(question) + "\n\nRequirement 1: Correct the error and only give the entire PDDL code for solving in a STRIPS Planner.\n Requirement 2: Do not create a problem file and do not use conditional expressions.\n Requirement 3: Give your output as the PDDL code in code blocks delimited only between ```pddl <CODE></CODE> ``` without any explanations."
                    # FOR MISTRALAI
                    error = code_with_error + "\n\nThe above PDDL code produces the following error -> " + str(question) + "\n\nInstruction 1: Correct the error and give the entire PDDL code in code blocks and please do not give any explanations in the output.\nInstruction 2: Do not create a problem file and do not use conditional expressions. PLEASE DO NOT USE conditional expressions, functions or derived functions, equality or any syntax from PDDL2.1 and higher."
                    error_formatted = "\n```pddl\n"+code_with_error +"\n```"+"\n\nThe above code produces the following error -> " + str(question) + "\n\nInstruction 1: Correct the error and give the entire PDDL code in code blocks and please do not give any explanations in the output.\nInstruction 2: Do not create a problem file and do not use conditional expressions."
                    conversational_texts.append("**USER** :" + error_formatted) # Appending user input to conversational_texts
                    print("=========== CHAT COUNT ==========>> " + str(global_counter)) # Printing the global counter for the number of times the API is called.
                    logging.error("=========== CHAT COUNT ==========>> " + str(global_counter)) # Printing the global counter for the number of times the API is called.
                    answer = hugchatter(error)
                    time.sleep(3)
                    # flag = {'flag':False, 'chatID':answer['chatID'], 'cookies':answer['cookies']}
                    if answer['answer']=="NO RESULT FOUND":
                        conversational_texts.append("**LLM MODEL** ({}):".format(model_used) +"\n"+str(answer['text'])+"\n")
                        paragraph = '\n'.join(conversational_texts)
                        save_text_to_file(paragraph, 'testingfolder/conversation.md')
                        time.sleep(3)
                        text_return = conversational_texts[-1].split("(HUGGING_FACE):",1)[1]
                        print(answer['answer'])
                        # print(text_return)
                        logging.error("ERROR : "+str(global_counter)+ " : "+text_return)
                        sys.exit("ERROR : "+str(global_counter)+ " : "+text_return)
                        # NOTE(review): unreachable after sys.exit above.
                        return text_return
                    # Success: log the exchange, persist the repaired code,
                    # and re-validate with the planner.
                    conversational_texts.append("**LLM MODEL** ({}):".format(model_used) +"\n```pddl \n"+ answer['answer'] + "\n```") # Appending LLM model output to conversational_texts
                    save_text_to_file(answer["answer"], "testingfolder/sampledomain.pddl")
                    time.sleep(3)
                    global_counter += 1
                    final = llm_to_pddl(True)
                    if final['success']:
                        logging.error("SUCCESS : "+str(global_counter))
                        final['domain'] = conversational_texts[-1]
                        paragraph = '\n'.join(conversational_texts)
                        save_text_to_file(paragraph, 'testingfolder/conversation.md')
                        time.sleep(3)
                        logging.info(final)
                        sys.exit("SUCCESS : "+str(global_counter))
                        # NOTE(review): unreachable after sys.exit above.
                        return final['domain'].split("pddl",1)[1].split('```')[0]
                    # NOTE(review): if validation fails here the function
                    # falls through and implicitly returns None.
                else:
                    # Retry budget exhausted: dump the log and return the
                    # last code block the model produced, if extractable.
                    try:
                        paragraph = '\n'.join(conversational_texts)
                        save_text_to_file(paragraph, 'testingfolder/conversation.md')
                        text_return = conversational_texts[-1].split("pddl",1)[1].split('```')[0]
                        return text_return
                    except Exception as e:
                        logging.error("ERROR : "+str(global_counter))
                        logging.error(" ----- API COUNTER ENDED ----- > " + str(e))
                        return "--- API ERROR: Please try again later !!!!! ----"
    except Exception as e:
        # NOTE(review): the caught exception `e` is never logged here, so the
        # real failure cause is lost — consider logging.exception(e) before
        # exiting.
        logging.error(" < --------- Exited Code -------- > ")
        sys.exit(" < --------- Exited Code -------- > ")
        # return conversational_texts[-1].split("pddl",1)[1].split('```')[0]