forked from KillSwitch140/Gforce
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathGforce.py
184 lines (152 loc) · 7.56 KB
/
Gforce.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
import streamlit as st
import datetime
import os
from os import environ
import PyPDF2
from langchain.vectorstores import DeepLake
from langchain.agents import initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
# from langchain.agents.agent_toolkits import ZapierToolkit
# from langchain.utilities.zapier import ZapierNLAWrapper
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
import pysqlite3
from langchain.chat_models import ChatOpenAI
import sys
sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
import qdrant_client
from qdrant_client import QdrantClient,models
from qdrant_client.http.models import PointStruct
from langchain.agents import initialize_agent
from langchain.vectorstores import Qdrant
# from zap import schedule_interview
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
openai_api_key = st.secrets["OPENAI_API_KEY"]
deeplake_key = st.secrets["ACTIVELOOP_TOKEN"]
# QDRANT_COLLECTION ="resume"
def generate_response(doc_texts, openai_api_key, query_text):
    """Answer *query_text* with a RetrievalQA chain over the remote DeepLake codebase index.

    Parameters
    ----------
    doc_texts : Any
        Unused. Kept only for backward compatibility with existing callers —
        the original implementation immediately overwrote it with ``None``,
        so retrieval has always come from the DeepLake dataset, never from
        uploaded documents.
    openai_api_key : str
        OpenAI API key used for both the chat model and the embeddings.
    query_text : str
        The user's question, passed to the chain as the ``query`` input.

    Returns
    -------
    str
        The ``"result"`` field of the RetrievalQA chain's response.
    """
    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.4,
                     openai_api_key=openai_api_key)
    embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
    # Prompt wording is part of runtime behavior — kept byte-identical.
    custom_prompt_template = """
You are the scrum master/product manager of the codebase given to you. Consider each task/feature in the question and determine the possibility of implementing it on the current codebase.
If the possibility is less, then you say that such a feature cannot be implemented.
If the feature can be implemented, then consider the feature to be a user story in a JIRA board and plan out the tickets for the story using the codebase.
Ensure that you include the files and modules which are to be changed for each ticket.
Task: Prepare in 3 paragraphs
Topic: Project Planning
Style: Technical
Tone: Professional
Audience: Project Manager
Context: {context}
Question: {question}
Only return the helpful answer below and nothing else.
Helpful answer:
"""
    prompt = PromptTemplate(template=custom_prompt_template,
                            input_variables=['context', 'question'])
    # Read-only handle on the pre-built codebase index hosted on Activeloop.
    db = DeepLake(dataset_path="hub://arjunsridhar9720/sweep-codebase",
                  token=deeplake_key, read_only=True,
                  embedding_function=embeddings)
    retriever = db.as_retriever()
    # MMR over a cosine-distance candidate pool of 100, returning 10 chunks.
    retriever.search_kwargs['distance_metric'] = 'cos'
    retriever.search_kwargs['fetch_k'] = 100
    retriever.search_kwargs['maximal_marginal_relevance'] = True
    retriever.search_kwargs['k'] = 10
    # NOTE(review): the original also built an unused gpt-4 ChatOpenAI and an
    # unused ConversationBufferMemory here; both were dead and are removed.
    qa = RetrievalQA.from_chain_type(llm=llm,
                                     chain_type='stuff',
                                     retriever=retriever,
                                     return_source_documents=False,
                                     chain_type_kwargs={'prompt': prompt})
    response = qa({'query': query_text})
    return response["result"]
# Seed the assistant's system-style greeting once per session.
# (Membership test on st.session_state directly — .keys() is redundant.)
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "You are a Q&A chatbot that answers questions based on uploaded files"}]

# Page title
st.set_page_config(page_title='Gforce Resume Assistant', layout='wide')
st.title('Gforce Resume Assistant')

# File upload is currently disabled; retrieval uses the remote DeepLake index.
# Query text
query_text = st.text_input('Enter your question:', placeholder='Select candidates based on experience and skills')

# Chat transcript rendered on each rerun lives in session state.
if "chat_placeholder" not in st.session_state:
    st.session_state.chat_placeholder = []

# Stand-in for the disabled uploader so the gating condition below still works.
uploaded_files = True
if openai_api_key.startswith('sk-'):
    if uploaded_files and query_text:
        with st.spinner('Chatbot is typing...'):
            # doc_texts argument is ignored by generate_response; pass None.
            response = generate_response(None, openai_api_key, query_text)
        st.session_state.chat_placeholder.append({"role": "user", "content": query_text})
        st.session_state.chat_placeholder.append({"role": "assistant", "content": response})
        # Update chat display
        for message in st.session_state.chat_placeholder:
            with st.chat_message(message["role"]):
                st.write(message["content"])
    else:
        st.warning("Please upload one or more PDF files and enter a question to start the conversation.")
def clear_chat_history():
    """Reset both session-state message stores and blank the chat display.

    Runs as the on_click callback of the 'Clear Chat History' button; the
    page reruns afterwards, so emptying session state is what actually
    clears the rendered transcript.
    """
    st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
    st.session_state.chat_placeholder = []
    # NOTE(review): the original also rebound a local `query_text = ""` here,
    # which could never affect the text_input widget — removed as dead code.
    st.empty()  # Clear the chat display

st.button('Clear Chat History', on_click=clear_chat_history)
# Create a sidebar with text input boxes and a button
# st.sidebar.header("Schedule Interview")
# person_name = st.sidebar.text_input("Enter Person's Name", "")
# person_email = st.sidebar.text_input("Enter Person's Email Address", "")
# date = st.sidebar.date_input("Select Date for Interview")
# time = st.sidebar.time_input("Select Time for Interview")
# schedule_button = st.sidebar.button("Schedule Interview")
# if schedule_button:
# if not person_name:
# st.sidebar.error("Please enter the person's name.")
# elif not person_email:
# st.sidebar.error("Please enter the person's email address.")
# elif not date:
# st.sidebar.error("Please select the date for the interview.")
# elif not time:
# st.sidebar.error("Please select the time for the interview.")
# else:
# # Call the schedule_interview function from the zap.py file
# success = schedule_interview(person_name, person_email, date, time)
# if success:
# st.sidebar.success("Interview Scheduled Successfully!")
# else:
# st.sidebar.error("Failed to Schedule Interview")