-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathrecruitment_lib.py
198 lines (168 loc) · 6.71 KB
/
recruitment_lib.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
import os
from langchain.llms.bedrock import Bedrock
from langchain.chains import ConversationChain
from langchain.agents import load_tools,initialize_agent, Tool, load_tools
from langchain_community.tools.google_jobs import GoogleJobsQueryRun
from langchain_community.utilities.google_jobs import GoogleJobsAPIWrapper
from langchain.agents import initialize_agent, Tool, AgentType
from dotenv import load_dotenv
load_dotenv()
def init_llm():
    """Create a non-streaming Bedrock Claude v2 client.

    Connection settings are read from the BWB_* environment variables
    (profile, region, endpoint). Sampling is deterministic (temperature 0),
    which suits the agent/tool use cases below.
    """
    sampling = {"temperature": 0, "top_p": 0.5, "max_tokens_to_sample": 2000}
    client = Bedrock(
        credentials_profile_name=os.environ.get("BWB_PROFILE_NAME"),
        region_name=os.environ.get("BWB_REGION_NAME"),
        endpoint_url=os.environ.get("BWB_ENDPOINT_URL"),
        model_id="anthropic.claude-v2",
        model_kwargs=sampling,
    )
    return client
def get_llm(streaming_callback):
    """Create a streaming Bedrock Claude v2 client.

    Generated tokens are pushed to *streaming_callback* as they arrive.
    Connection settings are read from the BWB_* environment variables.
    Sampling uses temperature 1 for more varied conversational output.
    """
    sampling = {"temperature": 1, "top_p": 0.5, "max_tokens_to_sample": 2000}
    client = Bedrock(
        credentials_profile_name=os.environ.get("BWB_PROFILE_NAME"),
        region_name=os.environ.get("BWB_REGION_NAME"),
        endpoint_url=os.environ.get("BWB_ENDPOINT_URL"),
        model_id="anthropic.claude-v2",
        model_kwargs=sampling,
        streaming=True,
        callbacks=[streaming_callback],
    )
    return client
def get_rag_chat_response(input_text, streaming_callback):
    """Send *input_text* to the streaming LLM and return its full reply.

    Tokens also stream to *streaming_callback* while the call runs.
    """
    return get_llm(streaming_callback).invoke(input=input_text)
def rewrite_resume(input_text, streaming_callback):
    """Rewrite the given resume text in a better format.

    Parameters:
        input_text: the raw resume content (coerced to str).
        streaming_callback: LangChain callback handler that receives
            generated tokens as they stream in.

    Returns the rewritten resume text from Claude v2.

    Uses a dedicated Bedrock client rather than get_llm() because the
    sampling parameters differ (top_p 1 here vs 0.5 there).
    """
    model_parameter = {"temperature": 1, "top_p": 1, "max_tokens_to_sample": 2000}
    llm = Bedrock(
        credentials_profile_name=os.environ.get("BWB_PROFILE_NAME"),
        region_name=os.environ.get("BWB_REGION_NAME"),
        endpoint_url=os.environ.get("BWB_ENDPOINT_URL"),
        model_id="anthropic.claude-v2",
        model_kwargs=model_parameter,
        # Bug fix: streaming must be enabled for the registered callback to
        # receive tokens incrementally (mirrors get_llm); previously the
        # callback was attached but never fed.
        streaming=True,
        callbacks=[streaming_callback],
    )
    prompt = """Your name is Head Hunter. You are the best recruitment consultant expert, you will rewrite the input resume in better format
. You only need to rewrite content and do not give instruction:
\n\nHuman: here is the resume content
<text>""" + str(input_text) + """</text>
\n\nAssistant: """
    return llm.invoke(input=prompt)
def summary_resume_stream(input_text, streaming_callback):
    """Summarize a resume into Contact / Experience / Skills / Certificates /
    Suggested jobs sections, streaming the result through *streaming_callback*.

    Parameters:
        input_text: the raw resume content (coerced to str).
        streaming_callback: LangChain callback handler for streamed tokens.

    Returns the summary text from Claude v2.
    """
    llm = get_llm(streaming_callback)
    # Fixed typos in the model instruction ("concide" -> "concise",
    # "informantion" -> "information", missing space after "Skills,") so the
    # prompt the model sees is clean.
    prompt = """You are the best recruitment consultant expert, you will scan the resume and output concise content for the following information:
Contact, Experience, Skills, Certificates and suggested jobs based on the resume in a bulleted list in <response> tag.
Output all content same as input language with the following format
Contact:
Experience:
Skills:
Certificates:
Suggested jobs:
\n\nHuman: here is the resume content
<text>""" + str(input_text) + """</text>
\n\nAssistant: """
    return llm.invoke(prompt)
def suggested_jobs(input_text):
    """Return job suggestions for a resume, wrapped in <response> tags.

    Parameters:
        input_text: the resume content as a string (this function is also
            used as an agent Tool, which always passes a str).

    Returns the model's suggested-jobs text.

    Uses the deterministic (temperature 0) client from init_llm().
    """
    llm = init_llm()
    # Fixed "informantion" typo in the instruction, and removed the leftover
    # debug print(input_text), which dumped the candidate's entire resume
    # (PII) to stdout on every call.
    prompt = """You are the best recruitment consultant expert, you will scan the resume and output concise content for the following information:
suggest jobs based on the resume.
Put your response in <response></response> tags
\n\nHuman: here is the resume content
<text>""" + input_text + """</text>
\n\nAssistant: """
    return llm.invoke(prompt)
def get_jobs(jobs):
    """Query Google Jobs for *jobs* (a search string) and return the raw
    result text from the API wrapper.

    Used as the backing function for the "search jobs" agent Tool.
    """
    tool = GoogleJobsQueryRun(api_wrapper=GoogleJobsAPIWrapper())
    # Leftover debug print(res) removed: the agent runs with verbose=True
    # and already echoes tool observations, so printing here duplicated
    # large result blobs on stdout.
    return tool.run(jobs)
def search_jobs(input_text, st_callback):
    """Run a zero-shot ReAct agent that searches Google Jobs based on
    *input_text* (a resume) and summarizes each job found.

    Parameters:
        input_text: resume text used as the agent's question (coerced to str).
        st_callback: callback handler attached to the agent run (e.g. a
            Streamlit callback) so intermediate steps surface in the UI.

    Returns the agent's response dict (contains the "output" answer).
    """
    # Single tool available to the agent: get_jobs (Google Jobs search).
    tools=[
        Tool(
            name="search jobs",
            func=get_jobs,
            description="Use this to search jobs"
        ),
    ]
    # ReAct agent over the deterministic (temperature 0) Bedrock client.
    agent = initialize_agent(tools=tools,llm=init_llm(),agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
    # Custom ReAct template; {input} and {agent_scratchpad} are substituted
    # by the agent's LLMChain at run time.
    prompt="""\n\nHuman: You are a jobs advisor. Give a job summary with the format in a bulleted list
<format>
- Job Title
- Salary
- Location
- Company name
- Job description
</format>
<steps>
1) Use "search jobs" tool to search jobs. Output- List of Jobs
2) Provide summary for the job with Job Title, Salary, Location, Company name, Job description
</steps>
Use the following format:
Question: the input resume you must scan to suggest jobs
Thought: you should always think about what to do, Also try to follow All steps mentioned above
Action: the action to take, should be one of [search jobs]
Action Input: the input to the action
Observation: the result of the action
Thought: I now know the final answer
Final Answer: the final answer to the original input question
{input}
\n\nAssistant:
{agent_scratchpad}
"""
    # NOTE(review): reaching into agent.agent.llm_chain is a private-API
    # override of the prompt template; fragile across langchain versions.
    agent.agent.llm_chain.prompt.template=prompt
    response = agent({
        "input": str(input_text),
        "output":"output",  # NOTE(review): this key looks unused by the chain — confirm it can be dropped
        "chat_history": [],
        },
        callbacks=[st_callback])
    return response
def initializeAgent():
    """Build a zero-shot ReAct agent whose single tool suggests jobs for a
    resume, with a custom ReAct prompt template installed.

    Returns the configured agent executor; run it with
    agent({"input": resume_text}). Intermediate steps are returned and
    parsing errors are handled rather than raised.
    """
    tools=[
        Tool(
            name="get suggested jobs",
            func=suggested_jobs,
            description="use this tool get suggested jobs for resume")
    ]
    agent=initialize_agent(
        llm=init_llm(),
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        tools=tools,
        verbose=True,
        # Bug fix: the AgentExecutor kwarg is "max_iterations" (plural);
        # the original "max_iteration" was never recognized, so the
        # one-iteration cap silently did not apply.
        max_iterations=1,
        return_intermediate_steps=True,
        handle_parsing_errors=True,
        output_key="output",
    )
    # Custom ReAct template; {input} and {agent_scratchpad} are substituted
    # by the agent's LLMChain at run time.
    prompt="""You are a jobs advisor. Give jobs recommendations for given resume based on following instructions.
<instructions>
Answer the following questions as best as you can. You have access to the following tools:
get suggested jobs: use this tool get suggested jobs for a input resume.
</instructions>
<steps>
1) Use "get suggested jobs" tool get suggested jobs for a input resume. Output- job title
</steps>
Use the following format:
Question: the input resume you must scan to suggest jobs
Thought: you should always think about what to do, Also try to follow steps mentioned above
Action: the action to take, should be one of [get suggested jobs]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
\n\nHuman:
Here is the user question: {input}
\n\nAssistant:
{agent_scratchpad}
"""
    # NOTE(review): private-API override of the agent's prompt template;
    # fragile across langchain versions.
    agent.agent.llm_chain.prompt.template=prompt
    return agent
def query_resume(question, resume, streaming_callback):
    """Answer *question* about the given *resume*, streaming the reply
    through *streaming_callback*.
    """
    model = get_llm(streaming_callback)
    # Assemble the Human/Assistant prompt around the resume and question.
    segments = [
        """Human: here is the resume content:
<text>""",
        str(resume),
        """</text>
Question: """,
        question,
        """
\n\nAssistant: """,
    ]
    return model.invoke("".join(segments))