-
Notifications
You must be signed in to change notification settings - Fork 0
/
gpt.py
229 lines (169 loc) · 7.17 KB
/
gpt.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
import logging
import openai
from openai import OpenAI
import streamlit as st
# NOTE(review): this triple-quoted string follows the imports, so it is a bare
# expression statement, NOT the module docstring (__doc__ stays None). Moving
# it above the imports would make it the real docstring.
"""
This is a GPT text generation module. It is used to generate text based on a prompt.
"""
# Configure the root logger: DEBUG level, timestamped "time - LEVEL - message" lines.
logging.basicConfig(
level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s"
)
# OpenAI API key pulled from Streamlit's secrets store
# (.streamlit/secrets.toml, section [openai], key "key").
API_KEY = st.secrets["openai"]["key"]
class GPTModelHandler:
    """Handles GPT model operations via the OpenAI chat-completions API."""

    def __init__(
        self,
        api_key: str = API_KEY,
        model: str = "gpt-4o",
        prompt: str = "This is a test",
    ) -> None:
        """
        Initialize the GPTModelHandler.

        Args:
            api_key (str): The API key for OpenAI. Defaults to the key read
                from Streamlit secrets at module import.
            model (str): The GPT model to use. Default is 'gpt-4o'.
            prompt (str): The prompt sent to the model. Default is 'This is a test'.

        Raises:
            TypeError: If any argument is not a string.
        """
        # Validate with explicit raises rather than assert: asserts are
        # stripped when Python runs with -O, which would silently disable
        # these checks.
        if not isinstance(api_key, str):
            raise TypeError("api_key must be a string.")
        if not isinstance(model, str):
            raise TypeError("model must be a string.")
        if not isinstance(prompt, str):
            raise TypeError("prompt must be a string.")
        self.api_key = api_key
        self.model = model
        self.client = OpenAI(api_key=self.api_key)
        self.prompt = prompt

    def generate_response(self, system_role: str = "system"):
        """
        Generate a response from the GPT model using self.prompt.

        Args:
            system_role (str): The chat role attached to the prompt message
                (e.g. 'system' or 'user'). Default is 'system'.

        Returns:
            str: The generated response text.
        """
        messages = [{"role": system_role, "content": self.prompt}]
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
        )
        logging.info("Generated GPT response")
        return response.choices[0].message.content
class PromptOptimizer(GPTModelHandler):
    """Rewrites an incoming prompt into an improved, enhanced version."""

    def __init__(self, prompt: str = "this is a test") -> None:
        """
        Wrap *prompt* in a prompt-engineering instruction template.

        Args:
            prompt (str): The raw prompt to be improved.

        Raises:
            TypeError: If prompt is not a string.
        """
        super().__init__()
        if not isinstance(prompt, str):
            raise TypeError("prompt must be a string.")
        # Typos fixed in the template ("prowness" -> "prowess",
        # "revied" -> "revised") so the model receives clean instructions.
        self.prompt = f""" You are a world class prompt engineer. Please improve and enhance this prompt: {prompt}. Guidelines:
- Use your analytics prowess and creative imagination to return a stellar prompt for me.
- Output should not include any explanation on the changes, just the revised prompts"""
        logging.info("Updated prompt for optimization")

    def response(self) -> str:
        """Generate response for the prompt optimizer."""
        # BUG FIX: previously called self.generate_response(self.prompt),
        # which bound the whole prompt text to the ``system_role`` parameter
        # (the chat "role" field). generate_response() already reads
        # self.prompt for the message content.
        return self.generate_response()
class TOCD(GPTModelHandler):
    """Prompt template: Task, Output, Context, Data."""

    def __init__(self, task: str, output: str, context: str, data: str) -> None:
        """
        Build a TOCD-structured prompt.

        Args:
            task (str): What the model should accomplish.
            output (str): Required format of the model's output.
            context (str): Background that keeps the response relevant.
            data (str): Supporting data the model may reference.

        Raises:
            TypeError: If any argument is not a string.
        """
        super().__init__()
        # Explicit raises instead of assert (asserts vanish under -O).
        for name, value in (
            ("task", task),
            ("output", output),
            ("context", context),
            ("data", data),
        ):
            if not isinstance(value, str):
                raise TypeError(f"{name} must be a string.")
        self.task = task
        self.output = output
        self.context = context
        self.data = data
        self.prompt = f"""
Complete this Task:{self.task}.
Format for Output: {self.output}
To Ensure Relevance Remember: {self.context}
Here is Data you Need for Your Response: {self.data}
"""
        # Lazy %-style args avoid formatting when INFO is disabled.
        logging.info("instance of TOCD class created using model: %s", self.model)

    def response(self) -> str:
        """Generate response for the TOCD template."""
        # BUG FIX: previously passed self.prompt as generate_response()'s
        # ``system_role`` argument (the chat "role" field), not the content.
        return self.generate_response()
class RTAO(GPTModelHandler):
    """Prompt template: Role, Task, Audience, Output."""

    def __init__(self, role: str, task: str, audience: str, output: str) -> None:
        """
        Build an RTAO-structured prompt.

        Args:
            role (str): Persona the model should act as.
            task (str): What the model should accomplish.
            audience (str): Target audience/demographic for the response.
            output (str): Required format of the model's output.

        Raises:
            TypeError: If any argument is not a string.
        """
        super().__init__()
        # Explicit raises instead of assert (asserts vanish under -O).
        for name, value in (
            ("role", role),
            ("task", task),
            ("audience", audience),
            ("output", output),
        ):
            if not isinstance(value, str):
                raise TypeError(f"{name} must be a string.")
        self.role = role
        self.task = task
        self.audience = audience
        self.output = output
        # Typo fixed ("confirm to" -> "conform to"); {task} -> {self.task}
        # for consistency with the other interpolations.
        self.prompt = f"""
Act as a {self.role}.
Accomplish this task: {self.task}.
The target audience/demographic for your response is: {self.audience}.
Your response should conform to this output: {self.output}
"""

    def response(self) -> str:
        """Generate response for the RTAO template."""
        # BUG FIX: previously passed self.prompt as generate_response()'s
        # ``system_role`` argument (the chat "role" field), not the content.
        return self.generate_response()
class Ultimate(GPTModelHandler):
    """The ultimate prompt template: role, behavior, task, structure, constraints, data."""

    def __init__(
        self,
        role: str,
        behavior: str,
        task: str,
        structure: str,
        constraints: str,
        data: str,
    ) -> None:
        """
        Build the full six-part prompt.

        Args:
            role (str): Persona the model should act as.
            behavior (str): Key traits of that persona.
            task (str): What the model should accomplish.
            structure (str): Required structure/format of the output.
            constraints (str): Constraints the response must respect.
            data (str): Supporting data the model may reference.

        Raises:
            TypeError: If any argument is not a string.
        """
        super().__init__()
        # Explicit raises instead of assert (asserts vanish under -O).
        for name, value in (
            ("role", role),
            ("behavior", behavior),
            ("task", task),
            ("structure", structure),
            ("constraints", constraints),
            ("data", data),
        ):
            if not isinstance(value, str):
                raise TypeError(f"{name} must be a string.")
        self.role = role
        self.task = task
        self.behavior = behavior
        self.structure = structure
        self.constraints = constraints
        self.data = data
        # Typo fixed ("constaints" -> "constraints").
        self.prompt = f"""
Act as a {self.role}. Your key traits are {self.behavior}.
Help me with this task: {self.task}.
Your response/output should be formatted like this: {self.structure}.
Note: here are the relevant constraints: {self.constraints}
You can reference this data to help you with your response: {self.data}
"""

    def response(self) -> str:
        """Generate response for the Ultimate template."""
        # BUG FIX: previously passed self.prompt as generate_response()'s
        # ``system_role`` argument (the chat "role" field), not the content.
        return self.generate_response()
if __name__ == "__main__":
    # Demo inputs for the three templates. Typos fixed so the model receives
    # clean text: "citatios" -> "citations", "auddience" -> "audience",
    # "colleage" -> "college", "$600" -> "$600K" (matching the $300K figure).
    TASK = (
        "give me ideas on how to write python to speed up my work as a data scientist"
    )
    STRUCTURE = "In the output use citations from Minto Pyramid in your response"
    OUTPUT = "use minto pyramid to respond, write it like a mckinsey consultant, Make the output in markdown"
    CONTEXT = "you are an independent contractor trying to get more business"
    DATA = "You made $300K last year you want to make $600K this year"
    AUDIENCE = (
        "Your audience is college students at UCS in the computer science department"
    )
    ROLE = "you are a very successful start up founder who does hands on coding"
    BEHAVIOR = "You have a coaching program for young programmers"
    CONSTRAINTS = (
        "you can't use any cloud technology, all must be locally written and deployed"
    )
    oTOCD = TOCD(
        task=TASK,
        output=OUTPUT,
        context=CONTEXT,
        data=DATA,
    )
    # logging.info(oTOCD.response())
    oRTAO = RTAO(role=ROLE, task=TASK, audience=AUDIENCE, output=OUTPUT)
    # logging.info(oRTAO.response())
    oUltimate = Ultimate(
        role=ROLE,
        behavior=BEHAVIOR,
        task=TASK,
        structure=STRUCTURE,
        constraints=CONSTRAINTS,
        data=DATA,
    )
    print(oUltimate.response())
    # oPromptOptimizer = PromptOptimizer(
    #     "Please write code to pull video game data in Python"
    # )
    # print(oPromptOptimizer.response())