-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathchroma_with_turbo_retreavel.py
45 lines (33 loc) · 1.22 KB
/
chroma_with_turbo_retreavel.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
import os

# SECURITY: the previous revision embedded a live OpenAI API key directly in
# source (anyone with repo access could bill against it — that key must be
# revoked). Read the key from the environment instead, and fail fast with a
# clear message if it is missing so the error surfaces here rather than deep
# inside a LangChain call.
if not os.environ.get("OPENAI_API_KEY"):
    raise RuntimeError(
        "OPENAI_API_KEY is not set. Export it in your shell before running, "
        "e.g.  export OPENAI_API_KEY=sk-..."
    )
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
# --- Retrieval pipeline wiring -------------------------------------------
# Reopen the persisted Chroma index from disk and expose it as a retriever
# that returns the top-2 most similar chunks per query.
persist_directory = 'db'
embedding = OpenAIEmbeddings()
vectordb2 = Chroma(persist_directory=persist_directory, embedding_function=embedding)
retriever = vectordb2.as_retriever(search_kwargs={"k": 2})

# Deterministic (temperature=0) chat model used to synthesize the answer.
turbo_llm = ChatOpenAI(temperature=0, model_name='gpt-3.5-turbo')

# "stuff" chain type: all retrieved chunks are stuffed into a single prompt.
# return_source_documents=True lets the caller cite where the answer came from.
qa_chain = RetrievalQA.from_chain_type(
    llm=turbo_llm,
    chain_type="stuff",
    retriever=retriever,
    return_source_documents=True,
)
# Cite sources
def process_llm_response(llm_response):
    """Display the chain's answer, then list the source path of each cited document.

    Expects *llm_response* to be a mapping with a 'result' string and a
    'source_documents' sequence of documents whose ``.metadata`` dict carries
    a 'source' entry (the shape RetrievalQA returns when
    return_source_documents=True).
    """
    answer = llm_response['result']
    print(answer)
    print('\n\nSources:')
    for doc in llm_response["source_documents"]:
        print(doc.metadata['source'])
# full example
# Interactive loop: keep answering queries until the user interrupts.
while True:
    try:
        # input() already returns str — the original str(...) cast was redundant.
        query = input("Enter Query: ")
    except (KeyboardInterrupt, EOFError):
        # Exit cleanly on Ctrl-C / Ctrl-D instead of dumping a traceback.
        print()
        break
    llm_response = qa_chain(query)
    process_llm_response(llm_response)