From 454ae9d1ffc9d7074cc3b7220da493c9f3abc83a Mon Sep 17 00:00:00 2001
From: takline
Date: Mon, 7 Oct 2024 19:52:26 +0000
Subject: [PATCH] simplify resume improver and fix langchain issue

---
 models/job_post.py                  |  21 ++-
 prompts/prompts.py                  |   2 +-
 prompts/prompts.yaml                |  12 +-
 services/__init__.py                |   1 -
 services/extractor.py               |  25 ----
 services/langchain_helpers.py       | 126 ++++++++++++++++++++-
 services/resume_improver.py         | 170 ++++++----------------
 tests/test_data/John_Doe_resume.pdf | Bin 79173 -> 80038 bytes
 tests/test_data/test_output.yaml    |   1 +
 9 files changed, 178 insertions(+), 180 deletions(-)
 delete mode 100644 services/extractor.py
 create mode 100644 tests/test_data/test_output.yaml

diff --git a/models/job_post.py b/models/job_post.py
index 18bbf2b..fb6b5b4 100644
--- a/models/job_post.py
+++ b/models/job_post.py
@@ -1,10 +1,12 @@
 from langchain_core.pydantic_v1 import BaseModel, Field
 from typing import List, Optional
 from ..prompts.prompts import Prompts
-from ..services.extractor import ExtractorLLM
+from .. import config
+from .. import services
 
 Prompts.initialize()
 
+
 class JobDescription(BaseModel):
     """Description of a job posting."""
 
@@ -39,20 +41,25 @@ class JobDescription(BaseModel):
         None, description=Prompts.descriptions["JOB_DESCRIPTION"]["technical_skills"]
     )
     non_technical_skills: Optional[List[str]] = Field(
-        None, description=Prompts.descriptions["JOB_DESCRIPTION"]["non_technical_skills"]
+        None,
+        description=Prompts.descriptions["JOB_DESCRIPTION"]["non_technical_skills"],
     )
 
 
-class JobPost(ExtractorLLM):
+class JobPost:
     def __init__(self, posting: str):
         """Initialize JobPost with the job posting string."""
-        super().__init__()
         self.posting = posting
+        self.extractor_llm = services.langchain_helpers.create_llm(
+            chat_model=config.CHAT_MODEL,
+            model_name=config.MODEL_NAME,
+            temperature=config.TEMPERATURE,
+            cache=True,
+        )
         self.parsed_job = None
 
     def parse_job_post(self, **chain_kwargs) -> dict:
         """Parse the job posting to extract job description and skills."""
-        self.parsed_job = self.extract_from_input(
-            pydantic_object=JobDescription, input=self.posting, **chain_kwargs
-        )
+        model = self.extractor_llm.with_structured_output(JobDescription)
+        self.parsed_job = model.invoke(self.posting).dict()
         return self.parsed_job
diff --git a/prompts/prompts.py b/prompts/prompts.py
index f008cb7..9024579 100644
--- a/prompts/prompts.py
+++ b/prompts/prompts.py
@@ -39,7 +39,7 @@ def _load_prompts(yaml_path: str) -> dict:
                         sub_data["job_posting_template"]
                     ),
                     HumanMessagePromptTemplate.from_template(
-                        sub_data.get("master_resume_template", "")
+                        sub_data.get("resume_template", "")
                     ),
                     HumanMessage(content=sub_data["instruction_message"]),
                     HumanMessage(content=sub_data["criteria_message"]),
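
The models/job_post.py hunk above is the heart of this patch: JobPost no longer inherits from ExtractorLLM and instead binds the JobDescription schema straight to the model with with_structured_output. A minimal sketch of the resulting call flow, assuming the repo root is on sys.path (the posting text and printed field are illustrative, not from the repo):

    from models.job_post import JobPost

    posting = "Senior Python Engineer. Duties: build LLM pipelines. Requires Python and LangChain."
    job = JobPost(posting)
    parsed = job.parse_job_post()      # the LLM fills the JobDescription schema
    print(parsed["technical_skills"])  # e.g. ["Python", "LangChain"]
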
diff --git a/prompts/prompts.yaml b/prompts/prompts.yaml
index d518a47..b6c3fc4 100644
--- a/prompts/prompts.yaml
+++ b/prompts/prompts.yaml
@@ -9,14 +9,14 @@ SECTION_HIGHLIGHTER:
     Keywords that may be triggered by Applicant Tracking Systems (ATS) that should be added (if applicable): {ats_keywords}
     The ideal candidate has the following skills:{technical_skills} {non_technical_skills}
-  master_resume_template: |
-    <Master Resume>
-    {section}
+  resume_template: |
+    <Resume>
+    {section}
   instruction_message: |
-    <Instruction> Identify the relevant portions from the <Master Resume> that match the <Job Posting>, rephrase these relevant portions into highlights, and rate the relevance of each highlight to the <Job Posting> on a scale of 1-5.
+    <Instruction> Identify the relevant portions from the <Resume> that match the <Job Posting>, rephrase these relevant portions into highlights, and rate the relevance of each highlight to the <Job Posting> on a scale of 1-5.
   criteria_message: |
     <Criteria>
-    - Each highlight must be based on what is mentioned in the <Master Resume>.
-    - In each highlight, include how that experience in the <Master Resume> demonstrates an ability to perform duties mentioned in the <Job Posting>.
+    - Each highlight must be based on what is mentioned in the <Resume>.
+    - In each highlight, include how that experience in the <Resume> demonstrates an ability to perform duties mentioned in the <Job Posting>.
     - In each highlight, try to include action verbs, give tangible and concrete examples, and include success metrics when available.
     - Grammar, spellings, and sentence structure must be correct.
   steps_message: |
@@ -24,7 +24,7 @@ SECTION_HIGHLIGHTER:
     - Create a <Plan> for following the <Instruction> while meeting all the <Criteria>.
    - What <Steps> are needed to follow the <Plan>?
     - Follow all steps one by one and show your <Work>.
-    - Verify that highlights are reflective of the <Master Resume> and not the <Job Posting>. Update if necessary.
+    - Verify that highlights are reflective of the <Resume> and not the <Job Posting>. Update if necessary.
     - Verify that all <Criteria> are met, and update if necessary.
     - Provide the answer to the <Instruction> with prefix <Final Answer>.
diff --git a/services/__init__.py b/services/__init__.py
index c847586..769f9bf 100644
--- a/services/__init__.py
+++ b/services/__init__.py
@@ -1,4 +1,3 @@
-from .extractor import *
 from .resume_improver import *
 from .langchain_helpers import *
 from .background_runner import *
diff --git a/services/extractor.py b/services/extractor.py
deleted file mode 100644
index d6be940..0000000
--- a/services/extractor.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from typing import Optional
-from langchain_core.pydantic_v1 import BaseModel
-from .. import config
-from . import langchain_helpers
-
-class ExtractorLLM:
-    def __init__(self):
-        """Initialize the Extractor LLM with a default model."""
-        self.extractor_llm = langchain_helpers.create_llm(
-            chat_model=config.CHAT_MODEL,
-            model_name=config.MODEL_NAME,
-            temperature=config.TEMPERATURE,
-            cache=True,
-        )
-
-    def extract_from_input(self, pydantic_object, input: str, **chain_kwargs) -> dict:
-        """Extract structured data from input using the specified pydantic object."""
-        try:
-            model_with_structure = self.extractor_llm.with_structured_output(
-                pydantic_object
-            )
-            return model_with_structure.invoke(input).dict()
-        except Exception as e:
-            print("Encountered exception during parsing input. See below:")
-            print(e)
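
One behavioral note on the deletion above: ExtractorLLM.extract_from_input wrapped parsing in a broad try/except and returned None on failure, while the inlined with_structured_output calls that replace it will raise instead. If that safety net is still wanted at the new call sites, a small helper along these lines could restore it (a sketch, not part of this patch; safe_extract is a hypothetical name):

    from typing import Optional, Type
    from langchain_core.pydantic_v1 import BaseModel

    def safe_extract(llm, pydantic_object: Type[BaseModel], text: str) -> Optional[dict]:
        # Mirrors the deleted extract_from_input, including its broad exception catch.
        try:
            return llm.with_structured_output(pydantic_object).invoke(text).dict()
        except Exception as e:
            print("Encountered exception during parsing input. See below:")
            print(e)
            return None
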
diff --git a/services/langchain_helpers.py b/services/langchain_helpers.py
index 3481bf2..d16f7e1 100644
--- a/services/langchain_helpers.py
+++ b/services/langchain_helpers.py
@@ -6,6 +6,7 @@ import langchain
 from langchain_community.cache import InMemoryCache
 from .. import config
+from .. import utils
 
 # Set up LLM cache
 langchain.llm_cache = InMemoryCache()
@@ -26,6 +27,7 @@ def format_list_as_string(lst: list, list_sep: str = "\n- ") -> str:
     return str(lst)
 
 
+
 def format_prompt_inputs_as_strings(prompt_inputs: list[str], **kwargs):
     """Convert values to string for all keys in kwargs matching list in prompt inputs."""
     return {
@@ -45,12 +47,12 @@ def parse_date(date_str: str) -> datetime:
 
 
 def datediff_years(start_date: str, end_date: str) -> float:
-    """Get difference between arbitrarily formatted dates in fractional years to the floor month.
-
+    """Calculate the difference between two dates in fractional years.
+
     Args:
         start_date (str): The start date in string format.
         end_date (str): The end date in string format. Can be "Present" to use the current date.
-
+
     Returns:
         float: The difference in years, including fractional years.
     """
@@ -58,3 +60,121 @@ def datediff_years(start_date: str, end_date: str) -> float:
         end_date = datetime.today().strftime("%Y-%m-%d")
     datediff = relativedelta(parse_date(end_date), parse_date(start_date))
     return datediff.years + datediff.months / 12.0
+
+
+def chain_formatter(format_type: str, input_data) -> str:
+    """Format resume/job inputs for inclusion in a runnable sequence.
+
+    Args:
+        format_type (str): The type of data to format (e.g., 'experience', 'projects', 'skills', 'education').
+        input_data: The data to be formatted.
+
+    Returns:
+        str: The formatted data as a string.
+    """
+    if format_type == 'experience':
+        as_list = format_experiences_for_prompt(input_data)
+        return format_list_as_string(as_list)
+    elif format_type == 'projects':
+        as_list = format_projects_for_prompt(input_data)
+        return format_list_as_string(as_list)
+    elif format_type == 'skills':
+        as_list = format_skills_for_prompt(input_data)
+        return format_list_as_string(as_list)
+    elif format_type == 'education':
+        return format_education_for_resume(input_data)
+    else:
+        return input_data
+
+
+def format_education_for_resume(education_list: list[dict]) -> str:
+    """Format education entries for inclusion in a resume.
+
+    Args:
+        education_list (list[dict]): A list of dictionaries containing education details.
+
+    Returns:
+        str: A formatted string of education entries.
+    """
+    formatted_education = []
+    for entry in education_list:
+        school = entry.get('school', '')
+        degrees = ', '.join(degree.get('names', ['Degree'])[0] for degree in entry.get('degrees', []))
+        formatted_education.append(f"{school}: {degrees}")
+    return '\n'.join(formatted_education)
+
+
+def format_skills_for_prompt(input_data) -> list:
+    """Format skills for inclusion in a prompt.
+
+    Args:
+        input_data (list): The list of skill categories.
+
+    Returns:
+        list: A formatted list of skills.
+    """
+    result = []
+    for cat in input_data:
+        curr = ""
+        if cat.get("category", ""):
+            curr += f"{cat['category']}: "
+        if "skills" in cat:
+            curr += "Proficient in "
+            curr += ", ".join(cat["skills"])
+        result.append(curr)
+    return result
+
+
+def get_cumulative_time_from_titles(titles) -> int:
+    """Calculate the cumulative time from job titles.
+
+    Args:
+        titles (list): A list of job titles with start and end dates.
+
+    Returns:
+        int: The cumulative time in years.
+    """
+    result = 0.0
+    for t in titles:
+        if "startdate" in t and "enddate" in t:
+            if t["enddate"] == "current":
+                last_date = datetime.today().strftime("%Y-%m-%d")
+            else:
+                last_date = t["enddate"]
+            result += datediff_years(start_date=t["startdate"], end_date=last_date)
+    return round(result)
+
+
+def format_experiences_for_prompt(input_data) -> list:
+    """Format experiences for inclusion in a prompt.
+
+    Args:
+        input_data (list): The list of experience entries.
+
+    Returns:
+        list: A formatted list of experiences.
+    """
+    result = []
+    for exp in input_data:
+        curr = ""
+        if "titles" in exp:
+            exp_time = get_cumulative_time_from_titles(exp["titles"])
+            curr += f"{exp_time} years experience in:"
+        if "highlights" in exp:
+            curr += format_list_as_string(exp["highlights"], list_sep="\n - ")
+        curr += "\n"
+        result.append(curr)
+    return result
+
+
+def format_projects_for_prompt(input_data) -> list:
+    """Format projects for inclusion in a prompt.
+
+    Args:
+        input_data (list): The list of project entries.
+
+    Returns:
+        list: A formatted list of projects.
+    """
+    result = []
+    for exp in input_data:
+        curr = ""
+        if "name" in exp:
+            name = exp["name"]
+            curr += f"Side Project: {name}"
+        if "highlights" in exp:
+            curr += format_list_as_string(exp["highlights"], list_sep="\n - ")
+        curr += "\n"
+        result.append(curr)
+    return result
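
A quick sanity check of the new module-level helpers with made-up data (the field names follow the shapes these functions index into; "Present"/"current" end dates resolve to today):

    from services.langchain_helpers import chain_formatter, datediff_years

    print(datediff_years("2020-01-01", "2022-07-01"))  # -> 2.5

    experiences = [{
        "titles": [{"name": "Engineer", "startdate": "2020-01-01", "enddate": "current"}],
        "highlights": ["Built a resume-tailoring pipeline", "Cut inference cost 40%"],
    }]
    print(chain_formatter("experience", experiences))  # one bullet block per experience
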
+ """ + result = [] + for exp in input_data: + curr = "" + if "name" in exp: + name = exp["name"] + curr += f"Side Project: {name}" + if "highlights" in exp: + curr += format_list_as_string(exp["highlights"], list_sep="\n - ") + curr += "\n" + result.append(curr) + return result diff --git a/services/resume_improver.py b/services/resume_improver.py index 1a820a2..75cc143 100644 --- a/services/resume_improver.py +++ b/services/resume_improver.py @@ -18,7 +18,6 @@ from .. import utils from .. import config from .langchain_helpers import * -from .extractor import ExtractorLLM from ..prompts import Prompts from ..models.job_post import JobPost from ..pdf_generation import ResumePDFGenerator @@ -29,7 +28,7 @@ from .background_runner import BackgroundRunner -class ResumeImprover(ExtractorLLM): +class ResumeImprover: def __init__(self, url, resume_location=None, llm_kwargs: dict = None): """Initialize ResumeImprover with the job post URL and optional resume location. @@ -48,7 +47,6 @@ def __init__(self, url, resume_location=None, llm_kwargs: dict = None): self.parsed_job = None self.llm_kwargs = llm_kwargs or {} self.editing = False - self.text_area = None self.clean_url = None self.job_data_location = None self.yaml_loc = None @@ -67,9 +65,7 @@ def _update_resume_fields(self): self.experiences = utils.get_dict_field( field="experiences", data_dict=self.resume ) - self.projects = utils.get_dict_field( - field="projects", data_dict=self.resume - ) + self.projects = utils.get_dict_field(field="projects", data_dict=self.resume) self.skills = utils.get_dict_field(field="skills", data_dict=self.resume) self.objective = utils.get_dict_field(field="objective", data_dict=self.resume) @@ -316,32 +312,20 @@ def run_config(background_config, resume_improver): return output def _get_formatted_chain_inputs(self, chain, section=None): + output_dict = {} + raw_self_data = self.__dict__ if section is not None: - formatted_prompt = format_prompt_inputs_as_strings( - prompt_inputs=chain.input_schema().dict(), - **self.parsed_job, - degrees=self.degrees, - experiences=self._format_experiences_for_prompt(), - projects=self._format_projects_for_prompt(), - education=utils.dict_to_yaml_string(dict(Education=self.education)), - skills=self._format_skills_for_prompt(), - objective=self.objective, - section=section - ) - else: - formatted_prompt = format_prompt_inputs_as_strings( - prompt_inputs=chain.input_schema().dict(), - **self.parsed_job, - degrees=self.degrees, - experiences=self._format_experiences_for_prompt(), - projects=self._format_projects_for_prompt(), - education=utils.dict_to_yaml_string(dict(Education=self.education)), - skills=self._format_skills_for_prompt(), - objective=self.objective, + raw_self_data = raw_self_data.copy() + raw_self_data["section"] = section + for key in chain.get_input_schema().schema()["required"]: + output_dict[key] = chain_formatter( + key, raw_self_data.get(key) or self.parsed_job.get(key) ) - return formatted_prompt + return output_dict - def _chain_updater(self, prompt_msgs, **chain_kwargs) -> RunnableSequence: + def _chain_updater( + self, prompt_msgs, pydantic_object, **chain_kwargs + ) -> RunnableSequence: """Create a chain based on the prompt messages. 
 
     def _get_degrees(self, resume: dict):
         """Extract degrees from the resume.
@@ -369,81 +354,6 @@ def _get_degrees(self, resume: dict):
             result.append(degree["names"])
         return result
 
-    def _format_skills_for_prompt(self) -> list:
-        """Format skills for inclusion in a prompt.
-
-        Args:
-            skills (list): The list of skills.
-
-        Returns:
-            list: A formatted list of skills.
-        """
-        result = []
-        for cat in self.skills:
-            curr = ""
-            if cat.get("category", ""):
-                curr += f"{cat['category']}: "
-            if "skills" in cat:
-                curr += "Proficient in "
-                curr += ", ".join(cat["skills"])
-            result.append(curr)
-        return result
-
-    def _get_cumulative_time_from_titles(self, titles) -> int:
-        """Calculate the cumulative time from job titles.
-
-        Args:
-            titles (list): A list of job titles with start and end dates.
-
-        Returns:
-            int: The cumulative time in years.
-        """
-        result = 0.0
-        for t in titles:
-            if "startdate" in t and "enddate" in t:
-                if t["enddate"] == "current":
-                    last_date = datetime.today().strftime("%Y-%m-%d")
-                else:
-                    last_date = t["enddate"]
-                result += datediff_years(start_date=t["startdate"], end_date=last_date)
-        return round(result)
-
-    def _format_experiences_for_prompt(self) -> list:
-        """Format experiences for inclusion in a prompt.
-
-        Returns:
-            list: A formatted list of experiences.
-        """
-        result = []
-        for exp in self.experiences:
-            curr = ""
-            if "titles" in exp:
-                exp_time = self._get_cumulative_time_from_titles(exp["titles"])
-                curr += f"{exp_time} years experience in:"
-            if "highlights" in exp:
-                curr += format_list_as_string(exp["highlights"], list_sep="\n - ")
-            curr += "\n"
-            result.append(curr)
-        return result
-
-    def _format_projects_for_prompt(self) -> list:
-        """Format projects for inclusion in a prompt.
-
-        Returns:
-            list: A formatted list of projects.
-        """
-        result = []
-        for exp in self.projects:
-            curr = ""
-            if "name" in exp:
-                name = exp["name"]
-                curr += f"Side Project: {name}"
-            if "highlights" in exp:
-                curr += format_list_as_string(exp["highlights"], list_sep="\n - ")
-            curr += "\n"
-            result.append(curr)
-        return result
-
     def _combine_skills_in_category(self, l1: list[str], l2: list[str]):
         """Combine two lists of skills without duplicating lowercase entries.
@@ -473,16 +383,6 @@ def _combine_skill_lists(self, l1: list[dict], l2: list[dict]):
         else:
             l1.append(s)
 
-    def _print_debug_message(self, chain_kwargs: dict, chain_output_unformatted: str):
-        """Print a debug message.
-
-        Args:
-            chain_kwargs (dict): The keyword arguments for the chain.
-            chain_output_unformatted (str): The unformatted output from the chain.
-        """
-        message = "Final answer is missing from the chain output."
-        return
-
     def rewrite_section(self, section: list | str, **chain_kwargs) -> dict:
         """Rewrite a section of the resume.
 
         Args:
             section (list | str): The section to rewrite.
 
         Returns:
             dict: The rewritten section.
         """
-        chain = self._chain_updater(Prompts.lookup["SECTION_HIGHLIGHTER"], **chain_kwargs)
-        chain_inputs = self._get_formatted_chain_inputs(chain=chain, section=section)
-        section_revised_unformatted = chain.invoke(chain_inputs)
-        section_revised = self.extract_from_input(
-            pydantic_object=ResumeSectionHighlighterOutput,
-            input=section_revised_unformatted,
+        chain = self._chain_updater(
+            Prompts.lookup["SECTION_HIGHLIGHTER"],
+            ResumeSectionHighlighterOutput,
+            **chain_kwargs,
         )
+        chain_inputs = self._get_formatted_chain_inputs(chain=chain, section=section)
+        section_revised = chain.invoke(chain_inputs).dict()
         section_revised = sorted(
             section_revised["final_answer"], key=lambda d: d["relevance"] * -1
         )
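
One detail worth calling out in rewrite_section: the sort key negates relevance, so highlights come back highest-relevance first. The same logic in miniature:

    highlights = [
        {"highlight": "Built X", "relevance": 3},
        {"highlight": "Led Y", "relevance": 5},
    ]
    ordered = sorted(highlights, key=lambda d: d["relevance"] * -1)
    assert [h["relevance"] for h in ordered] == [5, 3]  # same effect as reverse=True
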
""" - chain = self._chain_updater(Prompts.lookup["SECTION_HIGHLIGHTER"], **chain_kwargs) - chain_inputs = self._get_formatted_chain_inputs(chain=chain, section=section) - section_revised_unformatted = chain.invoke(chain_inputs) - section_revised = self.extract_from_input( - pydantic_object=ResumeSectionHighlighterOutput, - input=section_revised_unformatted, + chain = self._chain_updater( + Prompts.lookup["SECTION_HIGHLIGHTER"], + ResumeSectionHighlighterOutput, + **chain_kwargs, ) + chain_inputs = self._get_formatted_chain_inputs(chain=chain, section=section) + section_revised = chain.invoke(chain_inputs).dict() section_revised = sorted( section_revised["final_answer"], key=lambda d: d["relevance"] * -1 ) @@ -547,13 +447,11 @@ def extract_matched_skills(self, **chain_kwargs) -> dict: dict: The extracted skills. """ - chain = self._chain_updater(Prompts.lookup["SKILLS_MATCHER"], **chain_kwargs) - chain_inputs = self._get_formatted_chain_inputs(chain=chain) - extracted_skills_unformatted = chain.invoke(chain_inputs) - extracted_skills = self.extract_from_input( - pydantic_object=ResumeSkillsMatcherOutput, - input=extracted_skills_unformatted, + chain = self._chain_updater( + Prompts.lookup["SKILLS_MATCHER"], ResumeSkillsMatcherOutput, **chain_kwargs ) + chain_inputs = self._get_formatted_chain_inputs(chain=chain) + extracted_skills = chain.invoke(chain_inputs).dict() if not extracted_skills or "final_answer" not in extracted_skills: return None extracted_skills = extracted_skills["final_answer"] @@ -581,13 +479,12 @@ def write_objective(self, **chain_kwargs) -> dict: Returns: dict: The written objective. """ - chain = self._chain_updater(Prompts.lookup["OBJECTIVE_WRITER"], **chain_kwargs) + chain = self._chain_updater( + Prompts.lookup["OBJECTIVE_WRITER"], ResumeSummarizerOutput, **chain_kwargs + ) chain_inputs = self._get_formatted_chain_inputs(chain=chain) - objective_unformatted = chain.invoke(chain_inputs) - objective = self.extract_from_input( - pydantic_object=ResumeSummarizerOutput, input=objective_unformatted - ) + objective = chain.invoke(chain_inputs).dict() if not objective or "final_answer" not in objective: return None return objective["final_answer"] @@ -601,12 +498,11 @@ def suggest_improvements(self, **chain_kwargs) -> dict: Returns: dict: The suggested improvements. 
""" - chain = self._chain_updater(Prompts.lookup["IMPROVER"], **chain_kwargs) - chain_inputs = self._get_formatted_chain_inputs(chain=chain) - improvements_unformatted = chain.invoke(chain_inputs) - improvements = self.extract_from_input( - pydantic_object=ResumeImproverOutput, input=improvements_unformatted + chain = self._chain_updater( + Prompts.lookup["IMPROVER"], ResumeImproverOutput, **chain_kwargs ) + chain_inputs = self._get_formatted_chain_inputs(chain=chain) + improvements = chain.invoke(chain_inputs).dict() if not improvements or "final_answer" not in improvements: return None return improvements["final_answer"] diff --git a/tests/test_data/John_Doe_resume.pdf b/tests/test_data/John_Doe_resume.pdf index 7d7bbbf0a09fa9d4d95e5f5d7206519f962a2390..fa72dcab06ac9843277d9e9ffd03d4e3646d2081 100644 GIT binary patch delta 4167 zcmai2OYG{}UJl!x+?;>5<_t_U=UxrvT`16&KA})3uL9*!3bYgolu{_KwiL*@*MY== z8V4Fr82l3_&hFI2J@6G1cl3I5CypH4sJZIQIJ+~W4&=Z0@v-+AxH@Qm-}|N8p-KYjhXt8Y_&*x&c3YncS>N6@bW>?I(+y8Xtj)0Qln02|h=WIIt<(FTkHrtDJU8W4@n7*D(&(@D-+Vz~BXh`8mq+ub`;WRuj9^#_ru`WlNnsRC`B9QiQ55P|GX$0*UXVwwbX{KF3lO&U{(r>S zwsU!qKbiK*S?->&2k*=KMRw_>?);$7Jiz`7McJ`0rv1QB`2M*M_0jjTZ~FO@|H0mPpc_RXvBzq1f;64A$L-_ARec_*{-_O?9i(S?)`oY;e|5MEOi_iZU z^+lf$7$y-Khf(tR^O&#nf8$Ba_tkR@^u2lhx0vs9ANKqi==ABvsTixypwl@-%w2M|5!&0`PEX1`Dc7BwLRk-HXQ*&6tgsVn>HG5DB|?kr&1_x! zbp>mCbXhN9iTWJiiABCRS@aAPTs7g(@C3?jM9cqYQrxD{6$(GbmKw~XO z=qT~(eqBb6IO}$t$|?y8Ab)itvI8EvU8z1DdjpmX2&>5W6oF=d-P*U&m{F1$e!a8f%vtCe`-CPuDT^>iX|Oc?7cHrrTe zT1r@aHXj{%%Aylfr>#|_S#P(o1+PnVDYcKLF|nFS7u`&*wXXr38Bg`Ed{=G8FYc~* zTZJG>k&{4950uWavYd})kRe_tQk}&3nA*BcTOT!`ff~-86y2cli?Cki7HBG`O6#t3 zP)d0Up60>One5g^NIMncD)H$~Ttll37={9-gYUl1FYKkR8YEC1$PVqcL-mb@t!|Y6_dX9HF z!M@rTeBT*G=z=j2^EvaZE3HiXEN9q3#|_6=-0XuI)Cul`t1B4FtGRtb(M)h$S7}Gb ziiS(ygoDPZ_0$3e$2lUFF~xMKVufM_2Fh1aTA+_|#RA#@6mt0%5e?-w0NPkLU}Cw<`fslS_HKzg(aoLDw z0z$i9zUjeW&}4?`K}^q2Kw79^VC5R^lo#y-SMDX@3ymNT zs9p2bY@dx|+=1u}&~#`1RDg=;3pJGw3Wd^Sp zz(%~Z0xvJFF}m&tgJwR&3@l^`@x_wWOmW9TJq}z8F0GslX35Wc-Ssq8SgXb3II$YX z&dbH5fD`*BeA_rBFmqvI$#@A9WvE19jja_?}QgnLYc75RJ|7NEefApLd=8$WUjQTVu3!p44@nY&`3<))f;=Q<&_L5+=VIRI$`ac zvpd%yzOUH_HlY#Qateh)VPTWNFMER<4)aMoO)d=cG^z@EuAGZrsYXmWv%B7CAyUu33+l)xb6?8w5u|qq(}{hGKZ7Oq5t|TRt!#m=7<@L2}{1lCfGa zl4#KzcWjjTSgr>smo`izSP|SZ9W>Jz(ZN)2gj8onuD)47S4=#$@*p_hY2(pY%Qu*^ zCP#ZZaxF9h(Z*`0-SX}!h{u5~o1T)9PAlS#+(a6X1H6L6C+-Xx!^yN%7WxTolb=be zU0m)tU9mG5kmu%*a|iq=5Imtu&^yR|6+Z`c-i=X4WeNxQIJOx%b1uHLC&9HPv@(GU zIT-TZ@o}(9!?-<~1e_b%r4#VvoLbJ9RChpOU?ZH%P>XS6lc=ZmM{it2Cr&Hx$gxT- ze+o7E6>sJB;0W27XH-I}BaxRIevj5Kd9>%t;msE1-Tw zfrO%~`4O?3vb!=rluNtmtuhLAWsw1w!+kxlrOvdC+mT~6zl`?1t`U*I#d2*6le`S< zwx~;9%~thz$_6H&)5ewds0X2=6c;78OSX||iW9nhU=TVGCY2+nETrAKzW}D%1`wgr z_=Id)>xh^QNpQa&R@{OlwQf?Fh;D<)Q4;8-_aw+iwTqCRn^V982h}#!Y>b5l5Zr7A zB5SA%5-E)wTZFT8gzC#DJ-=*q0D{dI$jN0-zMLWqmew(7P5s>5m$^8eTsIi ze9`g{qZO%V=uUK+U7&wq})`XgU)CZt3R}J5Zmu(?5ox%ox_WEEkIf|Q7H4uy$PSia&jLeC^yD77Hp@fyr5`eN z4-|Qfvp{=YraFY!G=*Jt($IGU{vaunXC)(CA=%;M3VjBN5Cp6JgDDih#*3 z;?kkEyc*0z_qNM=F&t0t3%PW_J87L8U0W9yGS!@WSJ;{K7$M71!aC*&^HtRihG$8m z4IWpcycMjb4(1`1RWz=0I40-pP(9bi7LpQ)W<_K>%%Y@*lT>?OwkNh21LAgmyoC7RAkKy@Zzj)xyJ&eSOpaV9lQY2K9aR@H9V0edAk(^usU3Z~LF#f50$7JUrpY z7=pnMfBF%IX0Xhsd1!{9@K5t_IP=t=R)+W_5u-``;e|hHj3bZo z4UGDRXc9xQPsNZJ4u90P@}MShiuiPNBu;0(fe|S3uw);JA&G}Wex75#?{I5+wOXMW z3V|~eok0jRL(>MT!!W)iaSTNY%>S41{O6cY!r%W0cuMyj=i)~mtK;uKeE;@WOh1Zz YFm1Yb`L1WKpU8(q>M0zqc56@n1rtKA>i_@% delta 3281 zcmai%%ggiFeaF$m3`3tzh89v%I!SnJOSWZ8*8SqEY|EBq`F^)-S+;CRzQ~q*g=wI5 zn?xb75w|{s*UVZZJzxQr`_3rz( zzaM`GU~XU2J_5Lp|MpLA-kT3@G3~wEr`k6G_PG@RKD_;x1_AV|4<40r5M?cYE(m8z8=fp#c6R$=LfzEPkTU+|_ 