-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path02-lang_basics.py
157 lines (127 loc) · 3.66 KB
/
02-lang_basics.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "ipython==8.32.0",
# "langchain==0.3.17",
# "langchain-community==0.3.16",
# "marimo",
# "openai==1.61.1",
# "python-dotenv==1.0.1",
# ]
# ///
import marimo

# Version of marimo that generated this notebook (written by the marimo exporter).
__generated_with = "0.11.0"
# Top-level marimo application; each @app.cell below registers one notebook cell.
app = marimo.App(width="medium")
@app.cell
def _():
    """Import every dependency used by the notebook and expose it to other cells."""
    # Fix: the original imported `marimo as mo` twice; the duplicate is removed.
    import marimo as mo
    import openai
    import os
    import IPython
    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate
    from langchain.chains import LLMChain
    from dotenv import load_dotenv, find_dotenv
    return (
        IPython,
        LLMChain,
        OpenAI,
        PromptTemplate,
        find_dotenv,
        load_dotenv,
        mo,
        openai,
        os,
    )
@app.cell
def _(os):
    # Route HTTP/HTTPS traffic through the campus proxy.
    # Delete if not using in UTSA
    proxy_url = "http://xa-proxy.utsarr.net:80"
    for proxy_var in ("http_proxy", "https_proxy"):
        os.environ[proxy_var] = proxy_url
    return
@app.cell
def _(find_dotenv, load_dotenv, openai, os):
    """Load the OpenAI API key from .env and build the OpenAI client."""
    # Ensure you have a .env file in the AgenticAISystems folder with your OPENAI_API_KEY.
    # find_dotenv raises if the file is missing, so a bad setup fails loudly here.
    working_dir = os.getcwd()
    status = load_dotenv(
        find_dotenv(
            filename=f'{working_dir}/AgenticAISystems/.env',
            raise_error_if_not_found=True
        )
    )

    # Read the key once and fail fast with a clear message. The original
    # assigned os.getenv(...) (possibly None) straight into os.environ, which
    # raises an opaque TypeError when the key is absent.
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise RuntimeError(
            "OPENAI_API_KEY is not set; add it to AgenticAISystems/.env"
        )

    # API configuration: pass the key at construction instead of creating the
    # client with an empty key and mutating it afterwards.
    client = openai.OpenAI(api_key=api_key)

    # LangChain picks the key up from the environment.
    os.environ["OPENAI_API_KEY"] = api_key
    return client, status, working_dir
@app.cell
def _(client):
    def set_open_params(
        model="gpt-3.5-turbo",
        temperature=0.7,
        max_tokens=256,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    ):
        """Return a dict of OpenAI chat-completion parameters.

        The keys mirror the OpenAI API arguments: model name, sampling
        temperature, max_tokens cap, nucleus-sampling top_p, and the two
        repetition penalties.
        """
        # One dict literal instead of six key-by-key assignments.
        return {
            'model': model,
            'temperature': temperature,
            'max_tokens': max_tokens,
            'top_p': top_p,
            'frequency_penalty': frequency_penalty,
            'presence_penalty': presence_penalty,
        }

    def get_completion(params, messages):
        """Call the OpenAI chat-completions API and return the raw response.

        `params` is a dict as produced by `set_open_params`; `messages` is
        the usual list of {"role": ..., "content": ...} dicts.
        """
        response = client.chat.completions.create(
            model=params['model'],
            messages=messages,
            temperature=params['temperature'],
            max_tokens=params['max_tokens'],
            top_p=params['top_p'],
            frequency_penalty=params['frequency_penalty'],
            presence_penalty=params['presence_penalty'],
        )
        return response

    return get_completion, set_open_params
@app.cell
def _(PromptTemplate):
    # Define a prompt template asking the LLM to explain a topic.
    # Fix: the template read "as a scientists" — corrected to "as a scientist".
    prompt = PromptTemplate(
        input_variables=["topic"],
        template="Explain the concept of {topic} as a scientist."
    )
    return (prompt,)
@app.cell
def _(OpenAI):
    # Initialize the OpenAI LLM with a moderate sampling temperature.
    sampling_temperature = 0.5
    llm = OpenAI(temperature=sampling_temperature)
    return (llm,)
@app.cell
def _(llm, prompt):
    # Pipe the prompt template into the LLM with the `|` (LCEL) operator.
    # The resulting RunnableSequence is the modern equivalent of LLMChain.
    runnable = prompt | llm
    chain = runnable
    return (chain,)
@app.cell
def _():
    # Chain input: a mapping for the template's single variable, "topic".
    input_data = dict(topic="quantum computing")
    return (input_data,)
@app.cell
def _(chain, input_data):
    # Execute the chain using the invoke() method — the standard Runnable
    # entry point, replacing the deprecated LLMChain.run().
    result = chain.invoke(input_data)
    return (result,)
@app.cell
def _(result):
    # Show the LLM's answer; one print with sep="\n" emits the same two lines.
    print("Response from the OpenAI LLM:", result, sep="\n")
    return
# Run the marimo app when this file is executed directly as a script.
if __name__ == "__main__":
    app.run()