diff --git a/.env.example b/.env.example index c9abdff..8d9964b 100644 --- a/.env.example +++ b/.env.example @@ -1,2 +1,11 @@ HRFLOW_API_KEY="___FILL_ME___" -HRFLOW_USER_EMAIL="___FILL_ME___" \ No newline at end of file +HRFLOW_API_KEY_READ="___FILL_ME___" +HRFLOW_USER_EMAIL="___FILL_ME___" +HRFLOW_ALGORITHM_KEY="___FILL_ME___" +HRFLOW_BOARD_KEY="___FILL_ME___" +HRFLOW_JOB_KEY="___JOB_KEY_IN_BOARD___" +HRFLOW_PROFILE_KEY="___PROFILE_KEY_IN_SOURCE_QUICKSILVER_SYNC___" +HRFLOW_SOURCE_KEY_HAWK_SYNC="___FILL_ME___" +HRFLOW_SOURCE_KEY_QUICKSILVER_SYNC="___FILL_ME___" +HRFLOW_SOURCE_KEY_QUICKSILVER_ASYNC="___FILL_ME___" +HRFLOW_SOURCE_KEY_MOZART_ASYNC="___FILL_ME___" \ No newline at end of file diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..5dfadc1 --- /dev/null +++ b/.flake8 @@ -0,0 +1,6 @@ +[flake8] +max-line-length = 88 +exclude = .pytest_cache, __pycache__, .env, .venv +black-config = pyproject.toml +per-file-ignores = __init__.py:F401 +ignore = E731, W503, E203 \ No newline at end of file diff --git a/.gitignore b/.gitignore index af36b32..9280e48 100755 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ credentials credentials_seg test/* test_assets/* +tests/assets .htpasswd docker/dependencies/libs/* diff --git a/Makefile b/Makefile index 6f63b13..bfdf052 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,10 @@ ARGS := clean: rm -rf build dist *.egg-info +clean_cache: + find . -type d \( -name '__pycache__' -o -name '.pytest_cache' \) -exec rm -rf {} + + rm -rf tests/assets + build: poetry build @@ -14,4 +18,13 @@ deploy-test: poetry publish -r test-pypi --build deploy: - poetry publish --build \ No newline at end of file + poetry publish --build + +flake8: + poetry run flake8 --config=./.flake8 + +style: + poetry run isort . && poetry run black --config=./pyproject.toml . + +check: + bash ./check.sh \ No newline at end of file diff --git a/README.md b/README.md index 5c5168f..e08ed9e 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Install using `pip install -U hrflow` or `conda install hrflow -c conda-forge`. 
```py from hrflow import Hrflow -client = Hrflow(api_secret="YOUR_API_KEY"; api_user="YOU_USER_EMAIL") +client = Hrflow(api_secret="YOUR_API_KEY", api_user="YOUR_USER_EMAIL") # read file from directory (in binary mode) with open("path_to_file.pdf", "rb") as f: diff --git a/check.sh b/check.sh new file mode 100755 index 0000000..676b7b7 --- /dev/null +++ b/check.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +PYTEST_RUN="poetry run pytest" +PYTEST_OPTIONS=(--verbose --tb=long --strict-markers --durations=0 --datefmt "%Y-%m-%d %H:%M:%S.%f%z") +PYTEST_DIR=tests/ + +if [ "$#" -gt 0 ]; then + for marker in "$@"; do + $PYTEST_RUN "${PYTEST_OPTIONS[@]}" "$PYTEST_DIR" -m "$marker" + done +else + $PYTEST_RUN "${PYTEST_OPTIONS[@]}" "$PYTEST_DIR" +fi diff --git a/hrflow/__init__.py b/hrflow/__init__.py index 71edadc..82e9fc4 100755 --- a/hrflow/__init__.py +++ b/hrflow/__init__.py @@ -1,4 +1,10 @@ +from .__version__ import ( + __author__, + __author_email__, + __description__, + __license__, + __title__, + __url__, + __version__, +) from .hrflow.hrflow import Hrflow - -from .__version__ import __title__, __description__, __url__, __version__ -from .__version__ import __author__, __author_email__, __license__ diff --git a/hrflow/__version__.py b/hrflow/__version__.py index 708b907..08f50a7 100644 --- a/hrflow/__version__.py +++ b/hrflow/__version__.py @@ -6,4 +6,4 @@ __version__ = importlib.metadata.version("hrflow") __author__ = "HrFlow.ai" __author_email__ = "contact@hrflow.ai" -__license__ = "MIT" \ No newline at end of file +__license__ = "MIT" diff --git a/hrflow/hrflow/auth/__init__.py b/hrflow/hrflow/auth/__init__.py new file mode 100644 index 0000000..03e3d1a --- /dev/null +++ b/hrflow/hrflow/auth/__init__.py @@ -0,0 +1,35 @@ +import typing as t + +from ..utils import validate_key, validate_response + +API_SECRET_REGEX = r"^ask[rw]?_[0-9a-f]{32}$" + + +class Auth: + def __init__(self, api): + self.client = api + + def get(self) -> t.Dict[str, t.Any]: + """ + Check your API keys. This endpoint verifies that the credentials configured + on the client are valid, so you can make authenticated API calls. + + It relies on the client's: + api_user: + Your HrFlow.ai account's email. + api_secret: + Your API Key. + + Returns: + `/auth` response + """ + + validate_key( + "api_secret", + self.client.auth_header.get("X-API-KEY"), + regex=API_SECRET_REGEX, + ) + + response = self.client.get("auth") + + return validate_response(response) diff --git a/hrflow/hrflow/board/__init__.py b/hrflow/hrflow/board/__init__.py index cdc7e8a..089f9ec 100644 --- a/hrflow/hrflow/board/__init__.py +++ b/hrflow/hrflow/board/__init__.py @@ -1,31 +1,35 @@ -from ..utils import validate_key, validate_page, validate_limit, validate_value, validate_response - -from ..utils import ORDER_BY_VALUES +from ..utils import ( + ORDER_BY_VALUES, + validate_key, + validate_limit, + validate_page, + validate_response, + validate_value, +) class Board(object): - def __init__(self, client): self.client = client - def list(self, name=None, page=1, limit=30, sort_by='date', order_by='desc'): + def list(self, name=None, page=1, limit=30, sort_by="date", order_by="desc"): """ - Search boards for given filters. - - Args: - name: - name - page: - page - limit: - limit - sort_by: - sort_by - order_by: - order_by - - Returns - Result of source's search + Search boards for given filters. 
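With the new `Auth` wrapper registered on the client (see `hrflow.py` further down), a credentials check becomes a one-liner. A minimal sketch, assuming a `.env` file populated from the `.env.example` above and the optional `python-dotenv` helper (not a dependency of this PR):

```python
# Sketch: verify the API key / user email pair before making other calls.
import os

from dotenv import load_dotenv  # assumed convenience helper
from hrflow import Hrflow

load_dotenv()  # loads HRFLOW_API_KEY and HRFLOW_USER_EMAIL from .env

client = Hrflow(
    api_secret=os.environ["HRFLOW_API_KEY"],
    api_user=os.environ["HRFLOW_USER_EMAIL"],
)

# GET /auth with the credentials configured on the client.
print(client.auth.get())
```

Note that `Auth.get` also validates the key format locally against `API_SECRET_REGEX` before any network call is made.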
+ + Args: + name: + name + page: + page + limit: + limit + sort_by: + sort_by + order_by: + order_by + + Returns + Result of board's search """ query_params = {} @@ -40,15 +44,15 @@ def list(self, name=None, page=1, limit=30, sort_by='date', order_by='desc'): def get(self, key=None): """ - Get source given a board key. + Get a board given a board key. - Args: - key: - board_key - Returns - Board if exists + Args: + key: + board_key + Returns + Board if exists """ query_params = {"key": validate_key("Board", key)} - response = self.client.get('board', query_params) + response = self.client.get("board", query_params) return validate_response(response) diff --git a/hrflow/hrflow/hrflow.py b/hrflow/hrflow/hrflow.py index 73067d4..723a3b9 100755 --- a/hrflow/hrflow/hrflow.py +++ b/hrflow/hrflow/hrflow.py @@ -1,15 +1,16 @@ -import requests as req import json +import requests as req + +from .auth import Auth from .board import Board from .job import Job -from .text import Text from .profile import Profile -from .webhook import Webhook +from .rating import Rating from .source import Source +from .text import Text from .tracking import Tracking -from .rating import Rating - +from .webhook import Webhook CLIENT_API_URL = "https://api.hrflow.ai/v1/" @@ -32,10 +33,12 @@ def __init__( The API URL. Defaults to https://api.hrflow.ai/v1/ api_secret: - The API secret key. You can find it in your Hrflow.ai account. + The API secret key. You can find it in your + Hrflow.ai account. api_user: - The API user email. You can find it in your Hrflow.ai account. + The API user email. You can find it in your + Hrflow.ai account. webhook_secret: @@ -45,6 +48,7 @@ def __init__( self.api_url = api_url self.auth_header = {"X-API-KEY": api_secret, "X-USER-EMAIL": api_user} self.webhook_secret = webhook_secret + self.auth = Auth(self) self.job = Job(self) self.profile = Profile(self) self.text = Text(self) @@ -81,10 +85,12 @@ def get(self, resource_endpoint, query_params={}): The resource endpoint. For example: "job/indexing" query_params: - The query parameters to be sent to the API. It must be a dictionary. + The query parameters to be sent to the API. It + must be a dictionary. Returns - Make the corresponding GET request to the Hrflow API and returns the response object. + Makes the corresponding GET request to the Hrflow API and returns the + response object. """ url = self._create_request_url(resource_endpoint) if query_params: @@ -104,16 +110,21 @@ def post(self, resource_endpoint, data={}, json={}, files=None): The resource endpoint. For example: "job/indexing" data: - The data payload (for multipart/formdata) to be sent to the API. It must be a dictionary. + The data payload (for multipart/formdata) to be + sent to the API. It must be a dictionary. json: - The json payload to be sent to the API. It must be a dictionary. + The json payload to be sent to the API. It must + be a dictionary. files: - The files payload to be sent to the API. It must be a dictionary. (ie. {"file": open("file.pdf", "rb")} + The files payload to be sent to the API. It must + be a dictionary. (i.e. {"file": open("file.pdf", + "rb")}) Returns: - Makes the corresponding POST request to the Hrflow API and returns the response object. + Makes the corresponding POST request to the Hrflow API and returns the + response object. """ url = self._create_request_url(resource_endpoint) if files: @@ -133,11 +144,12 @@ def patch(self, resource_endpoint, json={}): The resource endpoint. 
For example: "job/indexing" json: - The json payload to be sent to the API. It must be a dictionary. + The json payload to be sent to the API. It must + be a dictionary. Returns: - Makes the corresponding PATCH request to the Hrflow API and returns the response object. - + Makes the corresponding PATCH request to the Hrflow API and returns the + response object. """ url = self._create_request_url(resource_endpoint) data = self._validate_args(json) @@ -154,11 +166,12 @@ def put(self, resource_endpoint, json={}): The resource endpoint. For example: "job/indexing" json: - The json payload to be sent to the API. It must be a dictionary. + The json payload to be sent to the API. It must + be a dictionary. Returns: - Makes the corresponding PUT request to the Hrflow API and returns the response object. - + Makes the corresponding PUT request to the Hrflow API and returns the + response object. """ url = self._create_request_url(resource_endpoint) return req.put(url, headers=self.auth_header, json=json) diff --git a/hrflow/hrflow/job/__init__.py b/hrflow/hrflow/job/__init__.py index 8dd09cd..b2f80d3 100644 --- a/hrflow/hrflow/job/__init__.py +++ b/hrflow/hrflow/job/__init__.py @@ -1,14 +1,16 @@ -from .parsing import JobParsing +from .asking import JobAsking from .embedding import JobEmbedding -from .searching import JobSearching -from .scoring import JobScoring +from .parsing import JobParsing from .reasoning import JobReasoning +from .scoring import JobScoring +from .searching import JobSearching from .storing import JobStoring class Job: def __init__(self, client): self.client = client + self.asking = JobAsking(self.client) self.parsing = JobParsing(self.client) self.embedding = JobEmbedding(self.client) self.searching = JobSearching(self.client) diff --git a/hrflow/hrflow/job/asking.py b/hrflow/hrflow/job/asking.py new file mode 100644 index 0000000..583397c --- /dev/null +++ b/hrflow/hrflow/job/asking.py @@ -0,0 +1,44 @@ +import typing as t + +from ..utils import KEY_REGEX, validate_key, validate_reference, validate_response + + +class JobAsking: + def __init__(self, api): + self.client = api + + def get( + self, + board_key: str, + questions: t.List[str], + reference: t.Optional[str] = None, + key: t.Optional[str] = None, + ) -> t.Dict[str, t.Any]: + """ + Ask a question to a Job indexed in a Board. This endpoint allows asking a + question based on a Job object. 
+ + Args: + board_key: + The key of the Board associated to the job + questions: + Questions based on the queried job + reference: + The Job reference chosen by the customer + key: + The Job unique identifier + + Returns: + `/job/asking` response + """ + + params = dict( + board_key=validate_key("Board", board_key, regex=KEY_REGEX), + reference=validate_reference(reference), + key=validate_key("Job", key, regex=KEY_REGEX), + questions=questions, + ) + + response = self.client.get("job/asking", query_params=params) + + return validate_response(response) diff --git a/hrflow/hrflow/job/reasoning.py b/hrflow/hrflow/job/reasoning.py index 33ddac5..14933d2 100644 --- a/hrflow/hrflow/job/reasoning.py +++ b/hrflow/hrflow/job/reasoning.py @@ -1,4 +1,4 @@ -class JobReasoning(): +class JobReasoning: """Manage embedding related profile calls.""" def __init__(self, api): diff --git a/hrflow/hrflow/job/scoring.py b/hrflow/hrflow/job/scoring.py index 264b090..dcdb2a0 100644 --- a/hrflow/hrflow/job/scoring.py +++ b/hrflow/hrflow/job/scoring.py @@ -1,18 +1,39 @@ import json -from ..utils import validate_key, validate_provider_keys, validate_limit, validate_page, validate_value, validate_response -from ..utils import ORDER_BY_VALUES, SORT_BY_VALUES, STAGE_VALUES +from ..utils import ( + ORDER_BY_VALUES, + SORT_BY_VALUES, + STAGE_VALUES, + validate_key, + validate_limit, + validate_page, + validate_provider_keys, + validate_response, + validate_value, +) -class JobScoring(): +class JobScoring: """Manage job related profile calls.""" def __init__(self, api): """Init.""" self.client = api - def list(self, board_keys=None, source_key=None, profile_key=None, use_agent=None, agent_key=None, stage=None, - page=1, limit=30, sort_by='created_at', order_by=None, **kwargs): + def list( + self, + board_keys=None, + source_key=None, + profile_key=None, + use_agent=None, + agent_key=None, + stage=None, + page=1, + limit=30, + sort_by="created_at", + order_by=None, + **kwargs, + ): """ Retrieve the scoring information. 
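The new `JobAsking.get` above wires the `questions` list straight into the query params; a usage sketch (the board key, job key, and question are illustrative placeholders):

```python
# Sketch: ask a question about a Job indexed in a Board (placeholder keys).
from hrflow import Hrflow

client = Hrflow(api_secret="YOUR_API_KEY", api_user="YOUR_USER_EMAIL")

response = client.job.asking.get(
    board_key="YOUR_BOARD_KEY",  # Board the job is indexed in
    key="YOUR_JOB_KEY",          # alternatively, pass reference=... instead
    questions=["What are the main responsibilities of this position?"],
)
print(response)
```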
@@ -41,18 +62,19 @@ def list(self, board_keys=None, source_key=None, profile_key=None, use_agent=Non """ - query_params = {'board_keys': json.dumps(validate_provider_keys(board_keys)), - 'source_key': validate_key('Source', source_key), - 'profile_key': validate_key('Profile', profile_key), - 'use_agent': use_agent, - 'agent_key': validate_key('Agent', agent_key), - 'stage': validate_value(stage, STAGE_VALUES, "stage"), - 'limit': validate_limit(limit), - 'page': validate_page(page), - 'sort_by': validate_value(sort_by, SORT_BY_VALUES, "sort by"), - 'order_by': validate_value(order_by, ORDER_BY_VALUES, "order by") - } + query_params = { + "board_keys": json.dumps(validate_provider_keys(board_keys)), + "source_key": validate_key("Source", source_key), + "profile_key": validate_key("Profile", profile_key), + "use_agent": use_agent, + "agent_key": validate_key("Agent", agent_key), + "stage": validate_value(stage, STAGE_VALUES, "stage"), + "limit": validate_limit(limit), + "page": validate_page(page), + "sort_by": validate_value(sort_by, SORT_BY_VALUES, "sort by"), + "order_by": validate_value(order_by, ORDER_BY_VALUES, "order by"), + } params = {**query_params, **kwargs} - response = self.client.get('jobs/scoring', params) + response = self.client.get("jobs/scoring", params) return validate_response(response) diff --git a/hrflow/hrflow/job/searching.py b/hrflow/hrflow/job/searching.py index c80279b..3f56df1 100644 --- a/hrflow/hrflow/job/searching.py +++ b/hrflow/hrflow/job/searching.py @@ -1,15 +1,16 @@ import json from ..utils import ( - validate_provider_keys, + ORDER_BY_VALUES, + SORT_BY_VALUES, + STAGE_VALUES, validate_limit, validate_page, - validate_value, + validate_provider_keys, validate_response, + validate_value, ) -from ..utils import ORDER_BY_VALUES, SORT_BY_VALUES, STAGE_VALUES - class JobSearching: """Manage stage related profile calls.""" @@ -26,7 +27,7 @@ def list( limit=30, sort_by="created_at", order_by=None, - **kwargs + **kwargs, ): """ Retrieve the scoring information. diff --git a/hrflow/hrflow/job/storing.py b/hrflow/hrflow/job/storing.py index 480f9f5..272ae62 100644 --- a/hrflow/hrflow/job/storing.py +++ b/hrflow/hrflow/job/storing.py @@ -30,14 +30,16 @@ def __init__(self, api): def add_json(self, board_key, job_json): """This endpoint allows you to Index a Job object. - Note: If your Job is an unstructured text, make sure to parse it first before indexing it. + Note: If your Job is an unstructured text, make sure to parse it first before + indexing it. See how in 🧠 Parse a raw Text at: https://developers.hrflow.ai/ . Parameters ---------- board_key : string [required] Identification key of the Board attached to the Job. job_json : dict [required] - A dictionary representing the HrFlow.ai Job object. The dictionary should have the following fields: + A dictionary representing the HrFlow.ai Job object. The dictionary should + have the following fields: - key (str): Identification key of the Job. - reference (str): Custom identifier of the Job. @@ -53,20 +55,25 @@ def add_json(self, board_key, job_json): - description (str): Section description. - url (str): Job post original URL. - summary (str): Brief summary of the Job. - - created_at (str): Creation date of the Job in ISO 8601 format (YYYY-MM-DDTHH:MM:SSZ). + - created_at (str): Creation date of the Job in ISO 8601 format + (YYYY-MM-DDTHH:MM:SSZ). - skills (list[dict]): List of skills required for the Job. Each skill is represented by a dictionary with the following fields: - name (str): Skill name. 
- type (str): Skill type: `hard` or `soft`. - - value (any): Skill value. The value attached to the Skill. Example: 90/100 + - value (any): Skill value. The value attached to the Skill. + Example: 90/100 - languages (list[dict]): List of languages required for the Job. Each language is represented by a dictionary with the following fields: - name (str): Language name. - - value (any): Language value. The value attached to the Language. Example: fluent. + - value (any): Language value. The value attached to the Language. + Example: fluent. - cetifications (list[dict]): List of certifications required for the Job. - Each certification is represented by a dictionary with the following fields: + Each certification is represented by a dictionary with the following + fields: - name (str): Certification name. - - value (any): Certification value. The value attached to the Certification. Example: 4.5/5. + - value (any): Certification value. The value attached to the + Certification. Example: 4.5/5. - courses (list[dict]): List of courses required for the Job. Each course is represented by a dictionary with the following fields: - name (str): Course name. @@ -75,14 +82,16 @@ def add_json(self, board_key, job_json): Each task is represented by a dictionary with the following fields: - name (str): Task name. - value (any): Task value. The value attached to the Task. - - tags (list[dict]): List of tags added to the Job. Tags are a way we can extend the Job object with custom information. + - tags (list[dict]): List of tags added to the Job. Tags are a way we can + extend the Job object with custom information. Each tag is represented by a dictionary with the following fields: - name (str): The name of the Tag. Example: `is_active`. - value (any): The value of the Tag. Example: `True`. - metadata (list[dict]): Custom metadata added to the Job. Each metadata is represented by a dictionary with the following fields: - name (str): The name of the metadata. Example: interview-note - - value (any): The value of the metadata. Example: `The candidate was very good ...`. + - value (any): The value of the metadata. Example: `The candidate was + very good ...`. - ranges_float (list[dict]): List of float ranges added to the Job. Each range is represented by a dictionary with the following fields: - name (str): The name of the range. Example: salary. @@ -92,11 +101,14 @@ def add_json(self, board_key, job_json): - ranges_date (list[dict]): List of date ranges added to the Job. Each range is represented by a dictionary with the following fields: - name (str): The name of the range. Example: availability. - - value_min (str): The minimum value of the range in ISO 8601 format (YYYY-MM-DDTHH:MM:SSZ). Example: 2020-01-01. - - value_max (str): The maximum value of the range in ISO 8601 format (YYYY-MM-DDTHH:MM:SSZ). Example: 2020-03-01. + - value_min (str): The minimum value of the range in ISO 8601 format + (YYYY-MM-DDTHH:MM:SSZ). Example: 2020-01-01. + - value_max (str): The maximum value of the range in ISO 8601 format + (YYYY-MM-DDTHH:MM:SSZ). Example: 2020-03-01. - culture (str): The company culture description in the Job. - benefits (str): The job opening benefits description in the Job. - - responsibilities (str): The job opening responsibilities description in the Job. + - responsibilities (str): The job opening responsibilities description in + the Job. - requirements (str): The job opening requirements description in the Job. - interviews (str): The job opening interviews. 
Returns @@ -115,18 +127,21 @@ def edit(self, board_key, job_json, key=None): It requires : - source_key : The key of the source where the job is stored - job_json : The job data to update - The job object must meet the criteria of the HrFlow.ai job Object + The job object must meet the criteria of the HrFlow.ai + job Object Otherwise the Put request will return an error. - A key or a reference must be provided in the job object `job_json`, to identify the job to update. - The method will update the object already stored by the fields provided in the job_json. + A key or a reference must be provided in the job object + `job_json`, to identify the job to update. + The method will update the object already stored by the fields provided in + the job_json. """ if job_json is None: job_json = {} job_json["board_key"] = validate_key("Board", board_key) - # The argument key is kept for backward compatibility with previous versions of the SDK - # It should be removed in the future after a Major release + # The argument key is kept for backward compatibility with previous versions + # of the SDK. It should be removed in the future after a Major release. if key: job_json["key"] = validate_key("Job", key) @@ -202,7 +217,8 @@ def list( Args: board_keys: - The list of the keys of the Boards containing the targeted Jobs. Example : ["xxx", "yyy", "zzz"] + The list of the keys of the Boards containing the + targeted Jobs. Example : ["xxx", "yyy", "zzz"] name: The name of the targeted Jobs. key: @@ -214,23 +230,31 @@ def list( location_lon: The longitude of the targeted Jobs. location_dist: - The distance of the targeted Jobs. (Set a radius around the Jobs'' location address (in Km).) + The distance of the targeted Jobs. (Set a radius + around the Jobs'' location address (in Km).) return_job: - If set to true, the full JSON of each job in the array response will be returned, otherwise only the dates, the reference and the keys. + If set to true, the full JSON of each job in the + array response will be returned, otherwise only the + dates, the reference and the keys. page: The page number of the targeted Jobs. limit: The number of Jobs to return per page. order_by: - The order of the Jobs to return. Possible values are "asc" and "desc". + The order of the Jobs to return. Possible values are + "asc" and "desc". sort_by: - The field on which the Jobs will be sorted. Possible values are "created_at" or "updated_at". + The field on which the Jobs will be sorted. Possible + values are "created_at" or "updated_at". created_at_min: - The minimum date of creation of the targeted Jobs. Format : "YYYY-MM-DD". + The minimum date of creation of the targeted Jobs. + Format : "YYYY-MM-DD". created_at_max: - The maximum date of creation of the targeted Jobs. Format : "YYYY-MM-DD". + The maximum date of creation of the targeted Jobs. + Format : "YYYY-MM-DD". Returns: - Applies the params to filter on Jobs in the targeted Boards and returns the response from the endpoint. + Applies the params to filter on Jobs in the targeted Boards and returns + the response from the endpoint. 
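To make the storing surface documented above concrete, a short sketch of indexing and then listing Jobs (the board key and job fields are illustrative, and only a few of the optional fields are shown):

```python
# Sketch: index a minimal Job object, then list jobs in the same Board.
from hrflow import Hrflow

client = Hrflow(api_secret="YOUR_API_KEY", api_user="YOUR_USER_EMAIL")

job = {
    "name": "Data Engineer",
    "reference": "job-2024-001",
    "location": {"text": "Paris, France", "lat": 48.8566, "lng": 2.3522},
    "sections": [
        {
            "name": "description",
            "title": "Description",
            "description": "Build and maintain data pipelines.",
        }
    ],
}

client.job.storing.add_json(board_key="YOUR_BOARD_KEY", job_json=job)

# Filter the stored jobs with the list() parameters documented above.
jobs = client.job.storing.list(board_keys=["YOUR_BOARD_KEY"], limit=10)
```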
Response examples : - Success response : { diff --git a/hrflow/hrflow/profile/__init__.py b/hrflow/hrflow/profile/__init__.py index 4d0ba89..44a8dec 100644 --- a/hrflow/hrflow/profile/__init__.py +++ b/hrflow/hrflow/profile/__init__.py @@ -1,12 +1,15 @@ """Profile related calls.""" + +from .asking import ProfileAsking from .attachment import ProfileAttachments +from .embedding import ProfileEmbedding from .parsing import ProfileParsing -from .storing import ProfileStoring +from .reasoning import ProfileReasoning from .revealing import ProfileRevealing -from .embedding import ProfileEmbedding -from .searching import ProfileSearching from .scoring import ProfileScoring -from .reasoning import ProfileReasoning +from .searching import ProfileSearching +from .storing import ProfileStoring +from .unfolding import ProfileUnfolding class Profile(object): @@ -22,6 +25,7 @@ def __init__(self, client): """ self.client = client + self.asking = ProfileAsking(self.client) self.attachment = ProfileAttachments(self.client) self.parsing = ProfileParsing(self.client) self.storing = ProfileStoring(self.client) @@ -30,3 +34,4 @@ def __init__(self, client): self.scoring = ProfileScoring(self.client) self.searching = ProfileSearching(self.client) self.reasoning = ProfileReasoning(self.client) + self.unfolding = ProfileUnfolding(self.client) diff --git a/hrflow/hrflow/profile/asking.py b/hrflow/hrflow/profile/asking.py new file mode 100644 index 0000000..3d59966 --- /dev/null +++ b/hrflow/hrflow/profile/asking.py @@ -0,0 +1,44 @@ +import typing as t + +from ..utils import KEY_REGEX, validate_key, validate_reference, validate_response + + +class ProfileAsking: + def __init__(self, api): + self.client = api + + def get( + self, + source_key: str, + questions: t.List[str], + reference: t.Optional[str] = None, + key: t.Optional[str] = None, + ) -> t.Dict[str, t.Any]: + """ + Ask a question to a Profile indexed in a Source. This endpoint allows asking a + question based on a Profile object. + + Args: + source_key: + The key of the Source associated to the profile. + questions: + Question based on the queried profile. + reference: str + The Profile reference chosen by the customer. 
+ key: str + The Profile unique identifier + + Returns: + `/profile/asking` response + """ + + params = dict( + source_key=validate_key("Source", source_key, regex=KEY_REGEX), + reference=validate_reference(reference), + key=validate_key("Profile", key, regex=KEY_REGEX), + questions=questions, + ) + + response = self.client.get("profile/asking", query_params=params) + + return validate_response(response) diff --git a/hrflow/hrflow/profile/attachment.py b/hrflow/hrflow/profile/attachment.py index 623201b..ce34009 100644 --- a/hrflow/hrflow/profile/attachment.py +++ b/hrflow/hrflow/profile/attachment.py @@ -1,7 +1,7 @@ from ..utils import format_item_payload, validate_response -class ProfileAttachments(): +class ProfileAttachments: """Manage documents related profile calls.""" def __init__(self, api): @@ -27,5 +27,5 @@ def list(self, source_key, key=None, reference=None, email=None): """ query_params = format_item_payload("profile", source_key, key, reference, email) - response = self.client.get('profile/indexing/attachments', query_params) + response = self.client.get("profile/indexing/attachments", query_params) return validate_response(response) diff --git a/hrflow/hrflow/profile/embedding.py b/hrflow/hrflow/profile/embedding.py index f724430..b658a39 100644 --- a/hrflow/hrflow/profile/embedding.py +++ b/hrflow/hrflow/profile/embedding.py @@ -3,7 +3,7 @@ from ..utils import format_item_payload, validate_response -class ProfileEmbedding(): +class ProfileEmbedding: """Manage embedding related profile calls.""" def __init__(self, api): @@ -33,5 +33,5 @@ def get(self, source_key, key=None, reference=None, email=None, fields={}): query_params = format_item_payload("profile", source_key, key, reference, email) if fields: query_params["fields"] = json.dumps(fields) - response = self.client.get('profile/embedding', query_params) + response = self.client.get("profile/embedding", query_params) return validate_response(response) diff --git a/hrflow/hrflow/profile/exporter.py b/hrflow/hrflow/profile/exporter.py deleted file mode 100644 index 485d15d..0000000 --- a/hrflow/hrflow/profile/exporter.py +++ /dev/null @@ -1,27 +0,0 @@ -from libs.exporter.exporter.supervisor import Supervisor - - -class ProfileExporter(object): - """ - Class that interacts with hrflow API profiles endpoint. - """ - - def __init__(self, client): - """ - Initialize Exporter object with hrflow client. - - Args: - client: hrflow client instance - - Returns - Exporter instance object. - - """ - self.client = client - - def download(self, source_ids, target, v_level=None, n_worker=3, logfile=None): - """Use the api to add a new profile using profile_data.""" - export_supervisor = Supervisor(client=self.client, source_ids=source_ids, target=target, v_level=v_level, - n_worker=n_worker, logfile=logfile) - export_supervisor.start() - return True diff --git a/hrflow/hrflow/profile/importer.py b/hrflow/hrflow/profile/importer.py deleted file mode 100644 index 52c4d83..0000000 --- a/hrflow/hrflow/profile/importer.py +++ /dev/null @@ -1,32 +0,0 @@ -from libs.importer.importer.supervisor import Supervisor - - -class ProfileImporter(object): - """ - Class that interacts with hrflow API profiles endpoint. - - """ - - def __init__(self, client): - """ - Initialize Profile object with hrflow client. - - Args: - client: hrflow client instance - - Returns - Profile instance object. 
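`ProfileAsking` added above mirrors the job-side `/job/asking` wrapper; a sketch with placeholder identifiers:

```python
# Sketch: ask a question about a Profile indexed in a Source (placeholder keys).
from hrflow import Hrflow

client = Hrflow(api_secret="YOUR_API_KEY", api_user="YOUR_USER_EMAIL")

response = client.profile.asking.get(
    source_key="YOUR_SOURCE_KEY",
    reference="profile-2024-042",  # alternatively, pass key=... instead
    questions=["How many years of Python experience does this candidate have?"],
)
print(response)
```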
- - """ - self.client = client - - def upload(self, source_id, target, timestamp_reception=None, is_recurcive=True, silent=False, verbose=True, sleep=1, - n_worker=3, logfile=None): - """ - Use the api to add a new profile using profile_data. - """ - import_supervisor = Supervisor(client=self.client, source_id=source_id, target=target, is_recurcive=is_recurcive, - sleep=sleep, silent=silent, verbose=verbose, n_worker=n_worker, - timestamp_reception=timestamp_reception, logfile=logfile) - import_supervisor.start() - return True diff --git a/hrflow/hrflow/profile/parsing.py b/hrflow/hrflow/profile/parsing.py index 503caf9..cb6b59e 100644 --- a/hrflow/hrflow/profile/parsing.py +++ b/hrflow/hrflow/profile/parsing.py @@ -1,18 +1,37 @@ -import os import json +import os -from ..utils import format_item_payload, validate_key, validate_reference, get_files_from_dir, validate_response +from ..utils import ( + format_item_payload, + get_files_from_dir, + validate_key, + validate_reference, + validate_response, +) -class ProfileParsing(): +class ProfileParsing: """Manage parsing related profile calls.""" def __init__(self, api): """Init.""" self.client = api - def add_file(self, source_key, key=None, profile_file=None, profile_content_type=None, reference=None, created_at=None, - labels=[], tags=[], metadatas=[], sync_parsing=0, sync_parsing_indexing=1, webhook_parsing_sending=0): + def add_file( + self, + source_key, + key=None, + profile_file=None, + profile_content_type=None, + reference=None, + created_at=None, + labels=[], + tags=[], + metadatas=[], + sync_parsing=0, + sync_parsing_indexing=1, + webhook_parsing_sending=0, + ): """ Add a profile resume to a sourced key. @@ -28,7 +47,8 @@ def add_file(self, source_key, key=None, profile_file=None, profile_content_type reference: (default to None) reference to assign to the profile created_at: - original date of the application of the profile as ISO format + original date of the application of the + profile as ISO format labels: profile's label tags: @@ -48,25 +68,29 @@ def add_file(self, source_key, key=None, profile_file=None, profile_content_type """ payload = { - 'source_key': validate_key("Source", source_key), - 'key': validate_key("profile", key), - 'profile_content_type': profile_content_type, - 'reference': validate_reference(reference), - 'created_at': created_at, - 'labels': json.dumps(labels), - 'tags': json.dumps(tags), - 'metadatas': json.dumps(metadatas), - 'sync_parsing': sync_parsing, - 'sync_parsing_indexing': sync_parsing_indexing, - 'webhook_parsing_sending': webhook_parsing_sending + "source_key": validate_key("Source", source_key), + "key": validate_key("profile", key), + "profile_content_type": profile_content_type, + "reference": validate_reference(reference), + "created_at": created_at, + "labels": json.dumps(labels), + "tags": json.dumps(tags), + "metadatas": json.dumps(metadatas), + "sync_parsing": sync_parsing, + "sync_parsing_indexing": sync_parsing_indexing, + "webhook_parsing_sending": webhook_parsing_sending, } - response = self.client.post("profile/parsing/file", data=payload, files={"file": profile_file}) + response = self.client.post( + "profile/parsing/file", data=payload, files={"file": profile_file} + ) return validate_response(response) - def add_folder(self, source_key, dir_path, is_recurcive=False, created_at=None, sync_parsing=0): + def add_folder( + self, source_key, dir_path, is_recurcive=False, created_at=None, sync_parsing=0 + ): """Add all profile from a given directory.""" if not 
os.path.isdir(dir_path): - raise ValueError(dir_path + ' is not a directory') + raise ValueError(dir_path + " is not a directory") files_to_send = get_files_from_dir(dir_path, is_recurcive) succeed_upload = {} failed_upload = {} @@ -74,18 +98,21 @@ def add_folder(self, source_key, dir_path, is_recurcive=False, created_at=None, try: with open(file_path) as f: profile_file = f.read() - resp = self.add_file(source_key=source_key, profile_file=profile_file, created_at=created_at, - sync_parsing=sync_parsing) - if resp['code'] != 200 and resp['code'] != 201: - failed_upload[file_path] = ValueError('Invalid response: ' + str(resp)) + resp = self.add_file( + source_key=source_key, + profile_file=profile_file, + created_at=created_at, + sync_parsing=sync_parsing, + ) + if resp["code"] != 200 and resp["code"] != 201: + failed_upload[file_path] = ValueError( + "Invalid response: " + str(resp) + ) else: succeed_upload[file_path] = resp except BaseException as e: failed_upload[file_path] = e - result = { - 'success': succeed_upload, - 'fail': failed_upload - } + result = {"success": succeed_upload, "fail": failed_upload} return result def get(self, source_key=None, key=None, reference=None, email=None): @@ -107,5 +134,5 @@ def get(self, source_key=None, key=None, reference=None, email=None): """ query_params = format_item_payload("profile", source_key, key, reference, email) - response = self.client.get('profile/parsing', query_params) + response = self.client.get("profile/parsing", query_params) return validate_response(response) diff --git a/hrflow/hrflow/profile/reasoning.py b/hrflow/hrflow/profile/reasoning.py index 7a2b30a..2a15fbe 100644 --- a/hrflow/hrflow/profile/reasoning.py +++ b/hrflow/hrflow/profile/reasoning.py @@ -1,4 +1,4 @@ -class ProfileReasoning(): +class ProfileReasoning: """Manage embedding related profile calls.""" def __init__(self, api): diff --git a/hrflow/hrflow/profile/revealing.py b/hrflow/hrflow/profile/revealing.py index 151d8c2..04d206f 100644 --- a/hrflow/hrflow/profile/revealing.py +++ b/hrflow/hrflow/profile/revealing.py @@ -1,7 +1,7 @@ from ..utils import format_item_payload, validate_response -class ProfileRevealing(): +class ProfileRevealing: """Manage revealing related profile calls.""" def __init__(self, api): @@ -27,5 +27,5 @@ def get(self, source_key=None, key=None, reference=None, email=None): """ query_params = format_item_payload("profile", source_key, key, reference, email) - response = self.client.get('profile/revealing', query_params) + response = self.client.get("profile/revealing", query_params) return validate_response(response) diff --git a/hrflow/hrflow/profile/scoring.py b/hrflow/hrflow/profile/scoring.py index 215b952..60d02a8 100644 --- a/hrflow/hrflow/profile/scoring.py +++ b/hrflow/hrflow/profile/scoring.py @@ -1,19 +1,39 @@ import json -from ..utils import validate_key, validate_limit, validate_page, validate_provider_keys, validate_value, validate_response +from ..utils import ( + ORDER_BY_VALUES, + SORT_BY_VALUES, + STAGE_VALUES, + validate_key, + validate_limit, + validate_page, + validate_provider_keys, + validate_response, + validate_value, +) -from ..utils import ORDER_BY_VALUES, SORT_BY_VALUES, STAGE_VALUES - -class ProfileScoring(): +class ProfileScoring: """Manage stage related profile calls.""" def __init__(self, api): """Init.""" self.client = api - def list(self, source_keys=None, board_key=None, job_key=None, use_agent=1, agent_key=None, stage=None, page=1, - limit=30, sort_by='created_at', order_by=None, **kwargs): + def list( + 
self, + source_keys=None, + board_key=None, + job_key=None, + use_agent=1, + agent_key=None, + stage=None, + page=1, + limit=30, + sort_by="created_at", + order_by=None, + **kwargs, + ): """ Retrieve the scoring information. @@ -42,18 +62,19 @@ def list(self, source_keys=None, board_key=None, job_key=None, use_agent=1, agen """ - query_params = {'source_keys': json.dumps(validate_provider_keys(source_keys)), - 'board_key': validate_key('Board', board_key), - 'job_key': validate_key('Job', job_key), - 'use_agent': use_agent, - 'agent_key': validate_key('Agent', agent_key), - 'stage': validate_value(stage, STAGE_VALUES, "stage"), - 'limit': validate_limit(limit), - 'page': validate_page(page), - 'sort_by': validate_value(sort_by, SORT_BY_VALUES, "sort by"), - 'order_by': validate_value(order_by, ORDER_BY_VALUES, "order by") - } + query_params = { + "source_keys": json.dumps(validate_provider_keys(source_keys)), + "board_key": validate_key("Board", board_key), + "job_key": validate_key("Job", job_key), + "use_agent": use_agent, + "agent_key": validate_key("Agent", agent_key), + "stage": validate_value(stage, STAGE_VALUES, "stage"), + "limit": validate_limit(limit), + "page": validate_page(page), + "sort_by": validate_value(sort_by, SORT_BY_VALUES, "sort by"), + "order_by": validate_value(order_by, ORDER_BY_VALUES, "order by"), + } params = {**query_params, **kwargs} - response = self.client.get('profiles/scoring', params) + response = self.client.get("profiles/scoring", params) return validate_response(response) diff --git a/hrflow/hrflow/profile/searching.py b/hrflow/hrflow/profile/searching.py index dad73a7..5b00a19 100644 --- a/hrflow/hrflow/profile/searching.py +++ b/hrflow/hrflow/profile/searching.py @@ -1,18 +1,34 @@ import json -from ..utils import validate_provider_keys, validate_limit, validate_page, validate_value, validate_response - -from ..utils import ORDER_BY_VALUES, SORT_BY_VALUES, STAGE_VALUES - - -class ProfileSearching(): +from ..utils import ( + ORDER_BY_VALUES, + SORT_BY_VALUES, + STAGE_VALUES, + validate_limit, + validate_page, + validate_provider_keys, + validate_response, + validate_value, +) + + +class ProfileSearching: """Manage stage related profile calls.""" def __init__(self, api): """Init.""" self.client = api - def list(self, source_keys=None, stage=None, page=1, limit=30, sort_by='created_at', order_by=None, **kwargs): + def list( + self, + source_keys=None, + stage=None, + page=1, + limit=30, + sort_by="created_at", + order_by=None, + **kwargs, + ): """ Retrieve the scoring information. 
@@ -33,14 +49,15 @@ def list(self, source_keys=None, stage=None, page=1, limit=30, sort_by='created_ """ - query_params = {'source_keys': json.dumps(validate_provider_keys(source_keys)), - 'stage': validate_value(stage, STAGE_VALUES, "stage"), - 'limit': validate_limit(limit), - 'page': validate_page(page), - 'sort_by': validate_value(sort_by, SORT_BY_VALUES, "sort by"), - 'order_by': validate_value(order_by, ORDER_BY_VALUES, "oder by") - } + query_params = { + "source_keys": json.dumps(validate_provider_keys(source_keys)), + "stage": validate_value(stage, STAGE_VALUES, "stage"), + "limit": validate_limit(limit), + "page": validate_page(page), + "sort_by": validate_value(sort_by, SORT_BY_VALUES, "sort by"), + "order_by": validate_value(order_by, ORDER_BY_VALUES, "order by"), + } params = {**query_params, **kwargs} - response = self.client.get('profiles/searching', params) + response = self.client.get("profiles/searching", params) return validate_response(response) diff --git a/hrflow/hrflow/profile/storing.py b/hrflow/hrflow/profile/storing.py index 1cd9923..ca9f405 100644 --- a/hrflow/hrflow/profile/storing.py +++ b/hrflow/hrflow/profile/storing.py @@ -30,11 +30,13 @@ def add_json(self, source_key, profile_json): source_key : string [required] Identification key of the Source attached to the Profile. profile_json : dict [required] - A dictionary representing the HrFlow.ai Profile object. The dictionary should have the following fields: + A dictionary representing the HrFlow.ai Profile object. The dictionary + should have the following fields: - key (str): Identification key of the Profile. - reference (str): Custom identifier of the Profile. - - text_language (str): Code language of the Profile. Example : `en` for English. + - text_language (str): Code language of the Profile. Example : `en` for + English. - text (str): Full text of the content of the Profile. - consent_algorithmic (dict) : Algorithmic consent status of the Profile. - owner (dict) : Owner of the Profile. @@ -44,7 +46,9 @@ def add_json(self, source_key, profile_json): - searching (bool) - scoring (bool) - upskilling (bool) - - created_at (str): Creation date of the Profile in ISO 8601 format (YYYY-MM-DDTHH:MM:SSZ). This could be the date of the creation of the Profile in your ATS. + - created_at (str): Creation date of the Profile in ISO 8601 format + (YYYY-MM-DDTHH:MM:SSZ). This could be the date of the creation of the + Profile in your ATS. ------------------- Profile's info ------------------- - info (dict): Object containing the Profile's info. @@ -53,7 +57,8 @@ def add_json(self, source_key, profile_json): - last_name (str): Last name of the Profile. - email (str): Email of the Profile. - phone (str): Phone number of the Profile. - - date_birth (str): Date of birth of the Profile in ISO 8601 format (YYYY-MM-DD). + - date_birth (str): Date of birth of the Profile in ISO 8601 format + (YYYY-MM-DD). - location (dict): Main location of the Profile. - text (str): Location text. - lat (float): Latitude coordinate. @@ -63,20 +68,26 @@ def add_json(self, source_key, profile_json): - picture (str): Url of the Profile's picture. - gender (str): `male`, `female` or `undefined`. - summary (str): Summary of the Profile. - ------------------- Profile's sections : skills, languages, interests ... 
+ ------------------- + - skills (list[dict]): List of skills details in the main skills sections + of a resume or the Profile. Each skill is represented by a dictionary with the following fields: - name (str): Skill name. - type (str): Skill type: `hard` or `soft`. - - value (any): Skill value. The value attached to the Skill. Example: 90/100 + - value (any): Skill value. The value attached to the Skill. Example: + 90/100 - languages (list[dict]): List of languages of the Profile. Each language is represented by a dictionary with the following fields: - name (str): Language name. - - value (any): Language value. The value attached to the Language. Example: fluent. + - value (any): Language value. The value attached to the Language. + Example: fluent. - cetifications (list[dict]): List of certifications of the Profile. - Each certification is represented by a dictionary with the following fields: + Each certification is represented by a dictionary with the following + fields: - name (str): Certification name. - - value (any): Certification value. The value attached to the Certification. Example: 4.5/5. + - value (any): Certification value. The value attached to the + Certification. Example: 4.5/5. - courses (list[dict]): List of courses of the Profile. Each course is represented by a dictionary with the following fields: - name (str): Course name. @@ -88,44 +99,67 @@ def add_json(self, source_key, profile_json): - interests (list[dict]): List of interests of the Profile. Each interest is represented by a dictionary with the following fields: - name (str): Interest name. Example : `music`. - - value (any): Interest value. The value attached to the Interest. Example: beginner. - ------------------- Profile's experiences and educations ------------------- - - experiences_duration (float): Total duration of the Profile's experiences in years. Example : 2.5 for 2 years and 6 months. - - educations_duration (float): Total duration of the Profile's educations in years. Example : 2.5 for 2 years and 6 months. + - value (any): Interest value. The value attached to the Interest. + Example: beginner. + ------------------- Profile's experiences and educations + ------------------- + - experiences_duration (float): Total duration of the Profile's + experiences in years. Example : 2.5 for 2 years and 6 months. + - educations_duration (float): Total duration of the Profile's educations + in years. Example : 2.5 for 2 years and 6 months. - experiences (list[dict]): List of the Profile's experiences. - Each experience is represented by a dictionary with the following fields: + Each experience is represented by a dictionary with the following + fields: - company (str): Name of the company. - title (str): Title of the experience. - description (str): Description of the experience. - location (dict): Same location object as in the Profile's info. - - date_start (str): Start date of the experience in ISO 8601 format (YYYY-MM-DD). - - date_end (str): End date of the experience in ISO 8601 format (YYYY-MM-DD). - - skills (list[str]): List of skills used in the experience. Same format as the Profile's skills. - - tasks (list[str]): List of tasks performed in the experience. Same format as the Profile's tasks. - - certifications (list[str]): List of certifications obtained in the experience. Same format as the Profile's certifications. - - courses (list[str]): List of courses followed in the experience. Same format as the Profile's courses. 
+ - date_start (str): Start date of the experience in ISO 8601 format + (YYYY-MM-DD). + - date_end (str): End date of the experience in ISO 8601 format + (YYYY-MM-DD). + - skills (list[str]): List of skills used in the experience. Same + format as the Profile's skills. + - tasks (list[str]): List of tasks performed in the experience. Same + format as the Profile's tasks. + - certifications (list[str]): List of certifications obtained in the + experience. Same format as the Profile's certifications. + - courses (list[str]): List of courses followed in the experience. + Same format as the Profile's courses. - educations (list[dict]): List of the Profile's educations. - Each education is represented by a dictionary with the following fields: + Each education is represented by a dictionary with the following + fields: - school (str): Name of the school. - title (str): Title of the education. - description (str): Description of the education. - location (dict): Same location object as in the Profile's info. - - date_start (str): Start date of the education in ISO 8601 format (YYYY-MM-DD). - - date_end (str): End date of the education in ISO 8601 format (YYYY-MM-DD). - - skills (list[str]): List of skills used in the education. Same format as the Profile's skills. - - tasks (list[str]): List of tasks performed in the education. Same format as the Profile's tasks. - - certifications (list[str]): List of certifications obtained in the education. Same format as the Profile's certifications. - - courses (list[str]): List of courses followed in the education. Same format as the Profile's courses. - ------------------- Profile's attachments, tags and metadatas ------------------- - - attachments (list[dict]): List of the Profile's attachments. This field currently is internally handeled by HrFlow.ai. - - tags (list[str]): List of the Profile's tags. Tags are used to extend the Profile's information. For example, a tag could be `salary_expectation`. + - date_start (str): Start date of the education in ISO 8601 format + (YYYY-MM-DD). + - date_end (str): End date of the education in ISO 8601 format + (YYYY-MM-DD). + - skills (list[str]): List of skills used in the education. Same + format as the Profile's skills. + - tasks (list[str]): List of tasks performed in the education. Same + format as the Profile's tasks. + - certifications (list[str]): List of certifications obtained in the + education. Same format as the Profile's certifications. + - courses (list[str]): List of courses followed in the education. Same + format as the Profile's courses. + ------------------- Profile's attachments, tags and metadatas + ------------------- + - attachments (list[dict]): List of the Profile's attachments. This field + is currently handled internally by HrFlow.ai. + - tags (list[str]): List of the Profile's tags. Tags are used to extend + the Profile's information. For example, a tag could be `salary_expectation`. Each tag is represented by a dictionary with the following fields: - name (str): The name of the Tag. Example: `is_active`. - value (any): The value of the Tag. Example: `True`. - - metadata (list[dict]): Custom metadata added to the Job. They are similar to tags, but used for non indexable/searchable information. + - metadata (list[dict]): Custom metadata added to the Profile. They are + similar to tags, but used for non-indexable/searchable information. Each metadata is represented by a dictionary with the following fields: - name (str): The name of the metadata. Example: `cover_letter`. 
- - value (any): The value of the metadata. Example: `I am applying for this job because...`. + - value (any): The value of the metadata. Example: `I am applying for + this job because...`. Returns ------- @@ -143,14 +177,17 @@ def edit(self, source_key, profile_json, key=None): It requires : - source_key : The key of the source where the profile is stored - profile_json : The profile data to update - The profile object must meet the criteria of the HrFlow.ai Profile Object + The profile object must meet the criteria of the HrFlow.ai + Profile Object Otherwise the Put request will return an error. - A key or a reference must be provided in the profile object `profile_json`, to identify the profile to update. - The method will update the object already stored by the fields provided in the profile_json. + A key or a reference must be provided in the profile + object `profile_json`, to identify the profile to update. + The method will update the object already stored by the fields provided in the + profile_json. """ profile_json["source_key"] = validate_key("Source", source_key) - # The argument key is kept for backward compatibility with previous versions of the SDK - # It should be removed in the future after a Major release + # The argument key is kept for backward compatibility with previous versions of + # the SDK. It should be removed in the future after a Major release if key: profile_json["key"] = validate_key("Profile", key) @@ -159,8 +196,10 @@ def edit(self, source_key, profile_json, key=None): def get(self, source_key, key=None, reference=None): """ - 💾 Get a Profile indexed in a Source (https://api.hrflow.ai/v1/profile/indexing). - Profiles can either be retrieved using this method by their key or their reference. + 💾 Get a Profile indexed in a Source + (https://api.hrflow.ai/v1/profile/indexing). + Profiles can either be retrieved using this method by their key or their + reference. One of the two values must be provided. Args: @@ -181,8 +220,8 @@ def get(self, source_key, key=None, reference=None): def archive(self, source_key, key=None, reference=None, is_archive=1, email=None): """ - This method allows to archive (is_archive=1) or unarchive (is_archive=0)a profile - in HrFlow.ai. + This method allows to archive (is_archive=1) or unarchive (is_archive=0) a + profile in HrFlow.ai. The profile is identified by either its key or its reference, at least one of the two values must be provided. @@ -231,11 +270,13 @@ def list( Args: source_keys: - The list of the keys of the Sources containing the targeted Profiles. Example : ["xxx", "yyy", "zzz"] + The list of the keys of the Sources containing the + targeted Profiles. Example : ["xxx", "yyy", "zzz"] name: The name of the targeted Profiles. key: - The key (profile's unique identifier) of the targeted Profiles. + The key (profile's unique identifier) of the targeted + Profiles. reference: The reference of the targeted Profiles. location_lat: @@ -243,23 +284,31 @@ def list( location_lon: The longitude of the targeted Profiles. location_dist: - The distance of the targeted Profiles. (Set a radius around the Profiles'' location address (in Km).) + The distance of the targeted Profiles. (Set a radius + around the Profiles'' location address (in Km).) return_profile: - If set to true, the full JSON of each profile in the array response will be returned, otherwise only the dates, the reference and the keys. 
+ If set to true, the full JSON of each profile in the + array response will be returned, otherwise only the + dates, the reference and the keys. page: The page number of the targeted Profiles. limit: The number of Profiles to return per page. order_by: - The order of the Profiles to return. Possible values are "asc" and "desc". + The order of the Profiles to return. Possible values + are "asc" and "desc". sort_by: - The field on which the Profiles will be sorted. Possible values are "created_at" or "updated_at". + The field on which the Profiles will be sorted. + Possible values are "created_at" or "updated_at". created_at_min: - The minimum date of creation of the targeted Profiles. Format : "YYYY-MM-DD". + The minimum date of creation of the targeted Profiles. + Format : "YYYY-MM-DD". created_at_max: - The maximum date of creation of the targeted Profiles. Format : "YYYY-MM-DD". + The maximum date of creation of the targeted Profiles. + Format : "YYYY-MM-DD". Returns: - Applies the params to filter on Profiles in the targeted Sources and returns the response from the endpoint. + Applies the params to filter on Profiles in the targeted Sources and + returns the response from the endpoint. Response examples : - Success response : { diff --git a/hrflow/hrflow/profile/unfolding.py b/hrflow/hrflow/profile/unfolding.py new file mode 100644 index 0000000..7e82ea7 --- /dev/null +++ b/hrflow/hrflow/profile/unfolding.py @@ -0,0 +1,49 @@ +import typing as t + +from ..utils import KEY_REGEX, validate_key, validate_reference, validate_response + + +class ProfileUnfolding: + def __init__(self, api): + self.client = api + + def get( + self, + source_key: str, + reference: t.Optional[str] = None, + key: t.Optional[str] = None, + max_steps: int = 1, + job_text: t.Optional[str] = None, + ) -> t.Dict[str, t.Any]: + """ + Unfold the career path of a Profile. This endpoint allows predicting the + future experiences and educations of a profile. + + Args: + source_key: + The key of the Source associated to the profile. + key: + The Profile unique identifier. + reference: + The Profile reference chosen by the customer. + max_steps: + Number of predicted experiences to reach the target + job position. + job_text: + Target job description. + + Returns: + `/profile/unfolding` response + """ + + params = dict( + source_key=validate_key("Source", source_key, regex=KEY_REGEX), + reference=validate_reference(reference), + key=validate_key("Profile", key, regex=KEY_REGEX), + max_steps=max_steps, + job_text=job_text, + ) + + response = self.client.get("profile/unfolding", query_params=params) + + return validate_response(response) diff --git a/hrflow/hrflow/rating/__init__.py b/hrflow/hrflow/rating/__init__.py index 6034523..2859658 100644 --- a/hrflow/hrflow/rating/__init__.py +++ b/hrflow/hrflow/rating/__init__.py @@ -79,20 +79,25 @@ def post( created_at=None, ): """ - This endpoint allows you to rate a Profile (resp. a Job) for Job (resp. a Profile) + This endpoint allows you to rate a Profile (resp. a Job) for a Job (resp. a + Profile) as a recruiter (resp. a candidate) with a score between 0 and 1. Visit : https://developers.hrflow.ai/reference for more information. - The job_key and the job_reference cannot be null at the same time in the Request Parameters. + The job_key and the job_reference cannot be null at the same time in the + Request Parameters. The same for the profile_key and profile_reference. Args: score: The score is an evaluation fit between 0 to 1 . 
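A usage sketch for the `ProfileUnfolding` wrapper defined above (the keys, step count, and target description are illustrative placeholders):

```python
# Sketch: predict the next career steps of an indexed Profile.
from hrflow import Hrflow

client = Hrflow(api_secret="YOUR_API_KEY", api_user="YOUR_USER_EMAIL")

response = client.profile.unfolding.get(
    source_key="YOUR_SOURCE_KEY",
    key="YOUR_PROFILE_KEY",  # alternatively, pass reference=... instead
    max_steps=2,             # predict two future experiences
    job_text="Senior Machine Learning Engineer ...",  # optional target job
)
print(response)
```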
- If you're using stars in your system please use the following conversion: - 5 stars = 1.0 , 4 stars=0.8 , 3 stars=0.6, 2 stars=0.4, 1 star= 0.2 . + If you're using stars in your system please use + the following conversion: + 5 stars = 1.0, 4 stars = 0.8, + 3 stars = 0.6, 2 stars = 0.4, 1 star = 0.2. role: - Role of the user rating the job role in {recruiter, candidate, employee, manager}. + Role of the user rating the job. One of + {recruiter, candidate, employee, manager}. board_key: The key of Board attached to the given Job. source_key: @@ -100,22 +105,30 @@ job_key: job identifier (key) job_reference: - The Job reference chosen by the customer or an external system. - If you use the job_key you do not need to specify the job_reference and vice versa. + The Job reference chosen by the customer or an + external system. + If you use the job_key you do not need to specify + the job_reference and vice versa. profile_key: profile identifier (key) profile_reference: - The Profile reference chosen by the customer or an external system. - If you use the profile_key you do not need to specify the profile_reference and vice versa. + The Profile reference chosen by the customer or an + external system. + If you use the profile_key you do not need to + specify the profile_reference and vice versa. author_email: author email comment: comment created_at: ISO Date of the rating. - Format : yyyy-MM-dd'T'HH:mm:ss.SSSXXX — for example, "2000-10-31T01:30:00.000-05:00" - It associates a creation date to the profile (ie: this can be for example the original date of the application of the profile). - If not provided the creation date will be now by default. + Format : yyyy-MM-dd'T'HH:mm:ss.SSSXXX — for + example, "2000-10-31T01:30:00.000-05:00" + It associates a creation date to the profile (ie: + this can be for example the original date of the + application of the profile). + If not provided the creation date will be now by + default. """ args = locals() diff --git a/hrflow/hrflow/source/__init__.py b/hrflow/hrflow/source/__init__.py index be7a342..95b9a0c 100644 --- a/hrflow/hrflow/source/__init__.py +++ b/hrflow/hrflow/source/__init__.py @@ -1,31 +1,35 @@ -from ..utils import validate_key, validate_page, validate_limit, validate_value, validate_response - -from ..utils import ORDER_BY_VALUES +from ..utils import ( + ORDER_BY_VALUES, + validate_key, + validate_limit, + validate_page, + validate_response, + validate_value, +) class Source(object): - def __init__(self, client): self.client = client - def list(self, name=None, page=1, limit=30, sort_by='date', order_by='desc'): + def list(self, name=None, page=1, limit=30, sort_by="date", order_by="desc"): """ - Search sources for given filters. - - Args: - name: - name - page: - page - limit: - limit - sort_by: - sort_by - order_by: - order_by - - Returns - Result of source's search + Search sources for given filters. + + Args: + name: + name + page: + page + limit: + limit + sort_by: + sort_by + order_by: + order_by + + Returns + Result of source's search """ query_params = {} @@ -40,15 +44,15 @@ def list(self, name=None, page=1, limit=30, sort_by='date', order_by='desc'): def get(self, key=None): """ - Get source given a source id. + Get a source given a source key. 
-        Args:
-            source_key:
-                source_key
-        Returns
-            Source if exists
+        Args:
+            source_key:
+                source_key
+        Returns
+            Source if exists
         """
         query_params = {"key": validate_key("Source", key)}
-        response = self.client.get('source', query_params)
+        response = self.client.get("source", query_params)
         return validate_response(response)
diff --git a/hrflow/hrflow/text/__init__.py b/hrflow/hrflow/text/__init__.py
index c3225bc..2e242bd 100644
--- a/hrflow/hrflow/text/__init__.py
+++ b/hrflow/hrflow/text/__init__.py
@@ -1,11 +1,11 @@
 """Profile related calls."""
-from .parsing import TextParsing
-from .linking import TextLinking
-from .revealing import TextRevealing
+
 from .embedding import TextEmbedding
-from .tagging import TextTagging
-from .ocr import TextOCR
 from .imaging import TextImaging
+from .linking import TextLinking
+from .ocr import TextOCR
+from .parsing import TextParsing
+from .tagging import TextTagging


 class Text(object):
@@ -26,7 +26,6 @@ def __init__(self, client):
         self.parsing = TextParsing(self.client)
         self.linking = TextLinking(self.client)
         self.embedding = TextEmbedding(self.client)
-        self.revealing = TextRevealing(self.client)
         self.tagging = TextTagging(self.client)
         self.ocr = TextOCR(self.client)
         self.imaging = TextImaging(self.client)
diff --git a/hrflow/hrflow/text/imaging.py b/hrflow/hrflow/text/imaging.py
index 8957b41..c1b90ec 100644
--- a/hrflow/hrflow/text/imaging.py
+++ b/hrflow/hrflow/text/imaging.py
@@ -14,9 +14,12 @@ def post(self, text, width=256):

         Args:
             text:
-                Job text that describes the image to be generated. Ideally it should includes a "Job title".
+                Job text that describes the image to be generated.
+                Ideally it should include a "Job title".
             width:
-                Width of the image to be generated. Default is 256. (The width and height of the image should be among the following pixel values : [256, 512, 1024 ])
+                Width of the image to be generated. Default is 256.
+                (The width and height of the image should be among
+                the following pixel values: [256, 512, 1024])

         Returns
             A public url to the generated image.
diff --git a/hrflow/hrflow/text/ocr.py b/hrflow/hrflow/text/ocr.py
index a804843..90ae40b 100644
--- a/hrflow/hrflow/text/ocr.py
+++ b/hrflow/hrflow/text/ocr.py
@@ -2,7 +2,7 @@


 class TextOCR:
-    """Manage Text extraction from documents using in-house advance OCR related calls."""
+    "Manage Text extraction from documents using in-house advanced OCR related calls."

     def __init__(self, api):
         """Init."""
@@ -10,8 +10,10 @@ def __init__(self, api):

     def post(self, file):
         """
-        This endpoint allows you to extract a the text from a document across all formats (pdf, docx, png, and more).
-        Supported extensions by the Profile Parsing API are .pdf, .png, .jpg, .jpeg, .bmp, .doc, .docx, .odt, .rtf, .odp, ppt, and .pptx .
+        This endpoint allows you to extract the text from a document across all
+        formats (pdf, docx, png, and more).
+        Supported extensions by the Profile Parsing API are .pdf, .png, .jpg, .jpeg,
+        .bmp, .doc, .docx, .odt, .rtf, .odp, .ppt, and .pptx.

         Args:
             file:
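
# --- Reviewer note: a minimal sketch of the OCR call above. Not part of the
# patch; "resume.pdf" is a hypothetical local document.
from hrflow import Hrflow

client = Hrflow(api_secret="YOUR_API_KEY", api_user="YOUR_USER_EMAIL")

with open("resume.pdf", "rb") as f:  # the file is sent in binary mode
    response = client.text.ocr.post(file=f)
print(response)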
diff --git a/hrflow/hrflow/text/parsing.py b/hrflow/hrflow/text/parsing.py
index ba96afe..1fa215b 100644
--- a/hrflow/hrflow/text/parsing.py
+++ b/hrflow/hrflow/text/parsing.py
@@ -1,3 +1,5 @@
+import typing as t
+
 from ..utils import validate_response


@@ -8,17 +10,21 @@ def __init__(self, api):
         """Init."""
         self.client = api

-    def post(self, text):
+    def post(self, texts: t.List[str]) -> t.Dict[str, t.Any]:
         """
-        Extract over 50 data point from any raw input text.
+        Parse a raw Text.
+        Extract over 50 data points from any raw input text.

         Args:
-            text:
-                text
-        Returns
-            Parsed entities from the text.
+            texts:
+                Parse a list of texts. Each text can be the full text
+                of a Job, a Resume, a Profile, an experience, and more.
+        Returns:
+            `/text/parsing` response
         """
-        payload = {"text": text}
+
+        payload = dict(texts=texts)
+
         response = self.client.post("text/parsing", json=payload)
+
         return validate_response(response)
diff --git a/hrflow/hrflow/text/revealing.py b/hrflow/hrflow/text/revealing.py
deleted file mode 100644
index bf6e170..0000000
--- a/hrflow/hrflow/text/revealing.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from ..utils import validate_response
-
-
-class TextRevealing:
-    """Manage revealing related calls."""
-
-    def __init__(self, api):
-        """Init."""
-        self.client = api
-
-    def post(self, text):
-        """
-        Predict missing & likely hard-skills and soft-skills.
-
-        Args:
-            text:
-                text
-        Returns
-            Revealing
-
-        """
-        payload = {"text": text}
-        response = self.client.post("text/revealing", json=payload)
-        return validate_response(response)
diff --git a/hrflow/hrflow/text/tagging.py b/hrflow/hrflow/text/tagging.py
index 1167565..95ed2c1 100644
--- a/hrflow/hrflow/text/tagging.py
+++ b/hrflow/hrflow/text/tagging.py
@@ -1,3 +1,5 @@
+import typing as t
+
 from ..utils import validate_response


@@ -8,37 +10,75 @@ def __init__(self, api):
         """Init."""
         self.client = api

-    def post(self, text, algorithm_key, top_n=1, output_lang="en"):
+    def post(
+        self,
+        algorithm_key: str,
+        text: t.Optional[str] = None,
+        texts: t.Optional[t.List[str]] = None,
+        context: t.Optional[str] = None,
+        labels: t.Optional[t.List[str]] = None,
+        top_n: t.Optional[int] = 1,
+        output_lang: t.Optional[str] = "en",
+    ) -> t.Dict[str, t.Any]:
         """
-        Predict most likely tags for a text with our library of AI algorithms.
-
-        Args:
-            text:
-                Target text input. Example: the full text of a Job, a Resume , a Profile, an experience, a Job and more
-            algorithm_key:
-                AI tagging algorithm you want to apply to the input text.
-                Five taggers have been released through the Tagging API. We are actively working on bringing out more taggers.
-                Here is a list of all the currently available taggers: (beaware that the list is subject to change refer to developers.hrflow.ai for the latest list)
-                - tagger-rome-family : Grand domaines of job the French ROME
-                - tagger-rome-subfamily : Domaines of job the French ROME
-                - tagger-rome-category : Metiers of job the French ROME
-                - tagger-rome-jobtitle : Appellations of job the French ROME
-                - revealing : Skills referential defined by HrFlow.ai
-
-            top_n:
-                Number of predicted tags that will be returned. Default is 1.
-            output_lang:
-                Language of the predicted tags. Default is "en" English.
-        Returns
-            Predictions tags with probabilities
+        Tag a Text. Predict most likely tags for a text with our library of AI
+        algorithms.
+
+        Args:
+            algorithm_key:
+                AI tagging algorithm you want to apply to
+                the input text. Six taggers have been released
+                through the Tagging API. We are actively working
+                on bringing out more taggers.
+                Here is a list of all the currently available
+                taggers (be aware that the list is subject to
+                change; refer to developers.hrflow.ai for the
+                latest list):
+                - tagger-rome-family: Grand domaines of job the French ROME
+                - tagger-rome-subfamily: Domaines of job the French ROME
+                - tagger-rome-category: Metiers of job the French ROME
+                - tagger-rome-jobtitle: Appellations of job the French ROME
+                - tagger-hrflow-skills: Skills referential defined by HrFlow.ai
+                - tagger-hrflow-labels: User defined labels, if any
+
+            text:
+                A single text to tag. Use either text or texts,
+                not both.
+            texts:
+                Tag a list of texts. Each text can be the
+                full text of a Job, a Resume, a Profile, an
+                experience, and more.
+
+            context:
+                A context for given labels if
+                algorithm_key="tagger-hrflow-labels".
+            labels:
+                List of output tags if
+                algorithm_key="tagger-hrflow-labels".
+
+            top_n:
+                Number of predicted tags that will be returned.
+
+            output_lang:
+                Language of the returned tags.
+
+        Returns:
+            `/text/tagging` response
         """
-        payload = {
-            "text": text,
-            "algorithm_key": algorithm_key,
-            "top_n": top_n,
-            "output_lang": output_lang,
-        }
+        payload = dict(
+            algorithm_key=algorithm_key,
+            context=context,
+            labels=labels,
+            output_lang=output_lang,
+            top_n=top_n,
+        )
+
+        if texts is None and text is not None:
+            payload["text"] = text
+        elif text is None and texts is not None:
+            payload["texts"] = texts
+        elif text is None and texts is None:
+            raise ValueError("Either text or texts must be provided.")
+        else:
+            raise ValueError("Only one of text or texts must be provided.")

         response = self.client.post("text/tagging", json=payload)
         return validate_response(response)
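
# --- Reviewer note: a sketch of the reworked text endpoints. Not part of the
# patch; sample inputs are made up. Parsing now takes a list of texts, and
# tagging accepts either `text` or `texts` (never both).
from hrflow import Hrflow

client = Hrflow(api_secret="YOUR_API_KEY", api_user="YOUR_USER_EMAIL")

# /text/parsing on a batch of raw texts
parsed = client.text.parsing.post(texts=["Data Scientist, 5 years of Python."])

# /text/tagging with a ROME job-title tagger, keeping the 3 best tags per text
tagged = client.text.tagging.post(
    algorithm_key="tagger-rome-jobtitle",
    texts=["data scientist", "machine learning engineer"],
    top_n=3,
)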
diff --git a/hrflow/hrflow/tracking/__init__.py b/hrflow/hrflow/tracking/__init__.py
index 15fc1a8..9635ad4 100644
--- a/hrflow/hrflow/tracking/__init__.py
+++ b/hrflow/hrflow/tracking/__init__.py
@@ -80,22 +80,28 @@ def post(
         created_at=None,
     ):
         """
-        This endpoint allows you to track a Profile (resp. a Job) for Job (resp. a Profile) as a recruiter
+        This endpoint allows you to track a Profile (resp. a Job) for Job (resp. a
+        Profile) as a recruiter
         (resp. a candidate) with a specific action
         Visit : https://developers.hrflow.ai/reference for more information.
-        Note : The job_key and the job_reference cannot be null at the same time in the Request Parameters.
+        Note : The job_key and the job_reference cannot be null at the same time in
+        the Request Parameters.
         The same for the profile_key and profile_reference .

         Args:
             action:
-                The 'action' refers to a unique identifier for a profile or job stage.
-                This can be a specific stage ID within a CRM, ATS, or Job site.
-                Examples of such stages include "view," "apply," "hire," or any other stage relevant to your system.
+                The 'action' refers to a unique identifier for a
+                profile or job stage.
+                This can be a specific stage ID within a CRM,
+                ATS, or Job site.
+                Examples of such stages include "view," "apply,"
+                "hire," or any other stage relevant to your system.
             role:
-                Role of the user rating the job (role: recruiter, candidate, employee, manager).
+                Role of the user rating the job (role: recruiter,
+                candidate, employee, manager).
             board_key:
                 The key of Board attached to the given Job.
             source_key:
@@ -103,22 +109,31 @@ def post(
             job_key:
                 The Job's unique identifier.
             job_reference:
-                The Job's reference chosen by the customer / external system.
-                If you use the job_key you do not need to specify the job_reference and vice versa.
+                The Job's reference chosen by the customer /
+                external system.
+                If you use the job_key you do not need to specify
+                the job_reference and vice versa.
             profile_key:
                 The Profile's unique identifier.
             profile_reference:
-                The Profile's reference chosen by the customer / external system.
-                If you use the profile_key you do not need to specify the profile_reference and vice versa.
+                The Profile's reference chosen by the customer /
+                external system.
+                If you use the profile_key you do not need to
+                specify the profile_reference and vice versa.
             author_email:
-                Email of the HrFlow.ai user who rated the profile for the job.
+                Email of the HrFlow.ai user who rated the profile
+                for the job.
             comment:
                 Comment explaining the reason behind the score.
             created_at:
                 ISO Date of the rating.
-                Format : yyyy-MM-dd'T'HH:mm:ss.SSSXXX — for example, "2000-10-31T01:30:00.000-05:00"
-                It associates a creation date to the profile (ie: this can be for example the original date of the application of the profile).
-                If not provided the creation date will be now by default.
+                Format : yyyy-MM-dd'T'HH:mm:ss.SSSXXX — for
+                example, "2000-10-31T01:30:00.000-05:00"
+                It associates a creation date to the profile (i.e.
+                this can be, for example, the original date of the
+                application of the profile).
+                If not provided the creation date will be now by
+                default.
         """
         args = locals()
diff --git a/hrflow/hrflow/utils/__init__.py b/hrflow/hrflow/utils/__init__.py
index d82e58f..06866b8 100644
--- a/hrflow/hrflow/utils/__init__.py
+++ b/hrflow/hrflow/utils/__init__.py
@@ -1,6 +1,8 @@
 import os
-import json
+import re
+import typing as t

+KEY_REGEX = r"^[0-9a-f]{40}$"
 STAGE_VALUES = [None, "new", "yes", "later", "no"]
 SORT_BY_VALUES = [
     "created_at",
@@ -50,7 +52,8 @@ def format_item_payload(item, provider_key, key, reference=None, email=None):

 def validate_boolean(name, value):
     """
-    This function validates the fact that the value is a boolean. If not, it raises a TypeError.
+    This function validates the fact that the value is a boolean. If not, it raises a
+    TypeError.
     If the given value is a string that can be converted to a boolean, it converts it.
     :param name: The name of the variable to validate
     :param value: The value to validate
@@ -66,10 +69,13 @@ def validate_boolean(name, value):
     return value if isinstance(value, bool) else bool(int(value))


-def validate_key(obj, value):
+def validate_key(obj, value, regex=None):
     if not isinstance(value, str) and value is not None:
         raise TypeError(obj + " key must be string")

+    if regex and value is not None and not bool(re.match(regex, value)):
+        raise ValueError(f"{obj} key must match {regex}")
+
     return value
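
# --- Reviewer note: a small sketch of the new `regex` parameter on
# validate_key. Not part of the patch; None is passed through unchanged so
# that optional keys (e.g. `key` in profile unfolding) stay optional.
from hrflow.hrflow.utils import KEY_REGEX, validate_key

validate_key("Source", "ab" * 20, regex=KEY_REGEX)  # ok: 40 lowercase hex chars
validate_key("Profile", None, regex=KEY_REGEX)  # ok: returns None unchanged
try:
    validate_key("Source", "not-a-key", regex=KEY_REGEX)
except ValueError as err:
    print(err)  # Source key must match ^[0-9a-f]{40}$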
diff --git a/hrflow/hrflow/webhook/__init__.py b/hrflow/hrflow/webhook/__init__.py
index c7d806e..4be1c24 100644
--- a/hrflow/hrflow/webhook/__init__.py
+++ b/hrflow/hrflow/webhook/__init__.py
@@ -1,31 +1,31 @@
 """Webhook support."""
-import hmac
+
 import hashlib
-import json
+import hmac
 import inspect
+import json
 import sys

 from . import base64Wrapper as base64W
-from . import bytesutils
-from . import hmacutils
+from . import bytesutils, hmacutils

-EVENT_PROFILE_PARSE_SUCCESS = 'profile.parse.success'
-EVENT_PROFILE_PARSE_ERROR = 'profile.parse.error'
-EVENT_PROFILE_SCORE_SUCCESS = 'profile.score.success'
-EVENT_PROFILE_SCORE_ERROR = 'profile.score.error'
-EVENT_JOB_TRAIN_SUCCESS = 'job.train.success'
-EVENT_JOB_TRAIN_ERROR = 'job.train.error'
-EVENT_JOB_TRAIN_START = 'job.train.start'
-EVENT_JOB_SCORE_SUCCESS = 'job.score.success'
-EVENT_JOB_SCORE_ERROR = 'job.score.error'
-EVENT_JOB_SCORE_START = 'job.score.start'
-ACTION_STAGE_SUCCESS = 'action.stage.success'
-ACTION_STAGE_ERROR = 'action.stage.error'
-ACTION_RATING_SUCCESS = 'action.rating.success'
-ACTION_RATING_ERROR = 'action.rating.error'
+EVENT_PROFILE_PARSE_SUCCESS = "profile.parse.success"
+EVENT_PROFILE_PARSE_ERROR = "profile.parse.error"
+EVENT_PROFILE_SCORE_SUCCESS = "profile.score.success"
+EVENT_PROFILE_SCORE_ERROR = "profile.score.error"
+EVENT_JOB_TRAIN_SUCCESS = "job.train.success"
+EVENT_JOB_TRAIN_ERROR = "job.train.error"
+EVENT_JOB_TRAIN_START = "job.train.start"
+EVENT_JOB_SCORE_SUCCESS = "job.score.success"
+EVENT_JOB_SCORE_ERROR = "job.score.error"
+EVENT_JOB_SCORE_START = "job.score.start"
+ACTION_STAGE_SUCCESS = "action.stage.success"
+ACTION_STAGE_ERROR = "action.stage.error"
+ACTION_RATING_SUCCESS = "action.rating.success"
+ACTION_RATING_ERROR = "action.rating.error"

-SIGNATURE_HEADER = 'HTTP-HRFLOW-SIGNATURE'
+SIGNATURE_HEADER = "HTTP-HRFLOW-SIGNATURE"


 class Webhook(object):
@@ -53,21 +53,21 @@ def __init__(self, client):

     def check(self, url, type):
         """
-        Get response from api for POST webhook/check.
+        Get response from api for POST webhook/check.

-        Args:
-            url:
-                url id
-            type:
-                profile id
+        Args:
+            url:
+                The webhook callback url.
+            type:
+                The webhook event type.

-        Returns
-            Webhook information
+        Returns
+            Webhook information
         """
         data = {}
-        data['url'] = url
-        data['type'] = type
+        data["url"] = url
+        data["type"] = type

         response = self.client.post("webhook/check", json=data)
         return response.json()
@@ -79,25 +79,25 @@ def test(self):

     def setHandler(self, event_name, callback):
         """Set an handler for given event."""
         if event_name not in self.handlers:
-            raise ValueError('{} is not a valid event'.format(event_name))
+            raise ValueError("{} is not a valid event".format(event_name))
         if callable(event_name):
-            raise TypeError('{} is not callable'.format(callback))
+            raise TypeError("{} is not callable".format(callback))
         self.handlers[event_name] = callback

     def isHandlerPresent(self, event_name):
         """Check if an event has an handler."""
         if event_name not in self.handlers:
-            raise ValueError('{} is not a valid event'.format(event_name))
+            raise ValueError("{} is not a valid event".format(event_name))
         return self.handlers[event_name] is not None

     def removeHandler(self, event_name):
         """Remove handler for given event."""
         if event_name not in self.handlers:
-            raise ValueError('{} is not a valid event'.format(event_name))
+            raise ValueError("{} is not a valid event".format(event_name))
         self.handlers[event_name] = None

     def _strtr(self, inp, fr, to):
-        res = ''
+        res = ""
         for c in inp:
             for idx, c_to_replace in enumerate(fr):
                 if c == c_to_replace and idx < len(to):
@@ -110,7 +110,7 @@ def _get_signature_header(self, signature_header, request_headers):
             return signature_header
         if SIGNATURE_HEADER in request_headers:
             return request_headers[SIGNATURE_HEADER]
-        raise ValueError('Error: No {} given'.format(SIGNATURE_HEADER))
+        raise ValueError("Error: No {} given".format(SIGNATURE_HEADER))

     def _get_fct_number_of_arg(self, fct):
         """Get the number of argument of a fuction."""
@@ -122,36 +122,39 @@ def _get_fct_number_of_arg(self, fct): def handle(self, request_headers={}, signature_header=None): """Handle request.""" if self.client.webhook_secret is None: - raise ValueError('Error: no webhook secret.') + raise ValueError("Error: no webhook secret.") encoded_header = self._get_signature_header(signature_header, request_headers) decoded_request = self._decode_request(encoded_header) - if 'type' not in decoded_request: + if "type" not in decoded_request: raise ValueError("Error invalid request: no type field found.") - handler = self._getHandlerForEvent(decoded_request['type']) + handler = self._getHandlerForEvent(decoded_request["type"]) if handler is None: return - if (self._get_fct_number_of_arg(handler) == 1): + if self._get_fct_number_of_arg(handler) == 1: handler(decoded_request) return - handler(decoded_request, decoded_request['type']) + handler(decoded_request, decoded_request["type"]) def _base64Urldecode(self, inp): - inp = self._strtr(inp, '-_', '+/') - byte_inp = base64W.decodebytes(bytesutils.strtobytes(inp, 'ascii')) - return byte_inp.decode('ascii') + inp = self._strtr(inp, "-_", "+/") + byte_inp = base64W.decodebytes(bytesutils.strtobytes(inp, "ascii")) + return byte_inp.decode("ascii") def _is_signature_valid(self, signature, payload): - utf8_payload = bytesutils.strtobytes(payload, 'utf8') - utf8_wb_secret = bytesutils.strtobytes(self.client.webhook_secret, 'utf8') + utf8_payload = bytesutils.strtobytes(payload, "utf8") + utf8_wb_secret = bytesutils.strtobytes(self.client.webhook_secret, "utf8") hasher = hmac.new(utf8_wb_secret, utf8_payload, hashlib.sha256) exp_sign_digest = hasher.hexdigest() return hmacutils.compare_digest(exp_sign_digest, signature) def _decode_request(self, encoded_request): - tmp = encoded_request.split('.', 2) + tmp = encoded_request.split(".", 2) if len(tmp) < 2: - raise ValueError("Error invalid request. Maybe it's not the 'HTTP-HRFLOW-SIGNATURE' field") + raise ValueError( + "Error invalid request. Maybe it's not the 'HTTP-HRFLOW-SIGNATURE'" + " field" + ) encoded_sign = tmp[0] payload = tmp[1] sign = self._base64Urldecode(encoded_sign) @@ -162,6 +165,6 @@ def _decode_request(self, encoded_request): def _getHandlerForEvent(self, event_name): if event_name not in self.handlers: - raise ValueError('{} is not a valid event'.format(event_name)) + raise ValueError("{} is not a valid event".format(event_name)) handler = self.handlers[event_name] return handler diff --git a/hrflow/hrflow/webhook/base64Wrapper.py b/hrflow/hrflow/webhook/base64Wrapper.py index e5b9813..0b1ebd8 100755 --- a/hrflow/hrflow/webhook/base64Wrapper.py +++ b/hrflow/hrflow/webhook/base64Wrapper.py @@ -1,6 +1,7 @@ """A wrapper for base64 (python2 compatibility).""" -import sys + import base64 +import sys def _decodebytes_py3(input): diff --git a/hrflow/hrflow/webhook/hmacutils.py b/hrflow/hrflow/webhook/hmacutils.py index abcc95d..15f2858 100755 --- a/hrflow/hrflow/webhook/hmacutils.py +++ b/hrflow/hrflow/webhook/hmacutils.py @@ -1,7 +1,7 @@ """Some utils for hash(py2 compatibility).""" -import sys import hmac +import sys def _compare_digest_py2(a, b): diff --git a/poetry.lock b/poetry.lock index 2099ec2..67e2aec 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,5 +1,65 @@ # This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. 
+[[package]] +name = "annotated-types" +version = "0.6.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, + {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + +[[package]] +name = "black" +version = "23.12.0" +description = "The uncompromising code formatter." +optional = false +python-versions = ">=3.8" +files = [ + {file = "black-23.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67f19562d367468ab59bd6c36a72b2c84bc2f16b59788690e02bbcb140a77175"}, + {file = "black-23.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bbd75d9f28a7283b7426160ca21c5bd640ca7cd8ef6630b4754b6df9e2da8462"}, + {file = "black-23.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:593596f699ca2dcbbbdfa59fcda7d8ad6604370c10228223cd6cf6ce1ce7ed7e"}, + {file = "black-23.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:12d5f10cce8dc27202e9a252acd1c9a426c83f95496c959406c96b785a92bb7d"}, + {file = "black-23.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e73c5e3d37e5a3513d16b33305713237a234396ae56769b839d7c40759b8a41c"}, + {file = "black-23.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ba09cae1657c4f8a8c9ff6cfd4a6baaf915bb4ef7d03acffe6a2f6585fa1bd01"}, + {file = "black-23.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ace64c1a349c162d6da3cef91e3b0e78c4fc596ffde9413efa0525456148873d"}, + {file = "black-23.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:72db37a2266b16d256b3ea88b9affcdd5c41a74db551ec3dd4609a59c17d25bf"}, + {file = "black-23.12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fdf6f23c83078a6c8da2442f4d4eeb19c28ac2a6416da7671b72f0295c4a697b"}, + {file = "black-23.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39dda060b9b395a6b7bf9c5db28ac87b3c3f48d4fdff470fa8a94ab8271da47e"}, + {file = "black-23.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7231670266ca5191a76cb838185d9be59cfa4f5dd401b7c1c70b993c58f6b1b5"}, + {file = "black-23.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:193946e634e80bfb3aec41830f5d7431f8dd5b20d11d89be14b84a97c6b8bc75"}, + {file = "black-23.12.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bcf91b01ddd91a2fed9a8006d7baa94ccefe7e518556470cf40213bd3d44bbbc"}, + {file = "black-23.12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:996650a89fe5892714ea4ea87bc45e41a59a1e01675c42c433a35b490e5aa3f0"}, + {file = "black-23.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdbff34c487239a63d86db0c9385b27cdd68b1bfa4e706aa74bb94a435403672"}, + {file = "black-23.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:97af22278043a6a1272daca10a6f4d36c04dfa77e61cbaaf4482e08f3640e9f0"}, + {file = "black-23.12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ead25c273adfad1095a8ad32afdb8304933efba56e3c1d31b0fee4143a1e424a"}, + {file = "black-23.12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c71048345bdbced456cddf1622832276d98a710196b842407840ae8055ade6ee"}, + {file = "black-23.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a832b6e00eef2c13b3239d514ea3b7d5cc3eaa03d0474eedcbbda59441ba5d"}, + {file = 
"black-23.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:6a82a711d13e61840fb11a6dfecc7287f2424f1ca34765e70c909a35ffa7fb95"}, + {file = "black-23.12.0-py3-none-any.whl", hash = "sha256:a7c07db8200b5315dc07e331dda4d889a56f6bf4db6a9c2a526fa3166a81614f"}, + {file = "black-23.12.0.tar.gz", hash = "sha256:330a327b422aca0634ecd115985c1c7fd7bdb5b5a2ef8aa9888a82e2ebe9437a"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + [[package]] name = "certifi" version = "2023.11.17" @@ -110,6 +170,20 @@ files = [ {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, ] +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + [[package]] name = "colorama" version = "0.4.6" @@ -135,6 +209,22 @@ files = [ [package.extras] test = ["pytest (>=6)"] +[[package]] +name = "flake8" +version = "6.1.0" +description = "the modular source code checker: pep8 pyflakes and co" +optional = false +python-versions = ">=3.8.1" +files = [ + {file = "flake8-6.1.0-py2.py3-none-any.whl", hash = "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5"}, + {file = "flake8-6.1.0.tar.gz", hash = "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23"}, +] + +[package.dependencies] +mccabe = ">=0.7.0,<0.8.0" +pycodestyle = ">=2.11.0,<2.12.0" +pyflakes = ">=3.1.0,<3.2.0" + [[package]] name = "idna" version = "3.6" @@ -157,6 +247,42 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + [[package]] name = "packaging" version = "23.2" @@ -168,6 +294,32 @@ files = [ {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, ] +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "platformdirs" +version = "4.1.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.1.0-py3-none-any.whl", hash = "sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380"}, + {file = "platformdirs-4.1.0.tar.gz", hash = "sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420"}, +] + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] + [[package]] name = "pluggy" version = "1.3.0" @@ -183,6 +335,164 @@ files = [ dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "pycodestyle" +version = "2.11.1" +description = "Python style guide checker" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pycodestyle-2.11.1-py2.py3-none-any.whl", hash = "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"}, + {file = "pycodestyle-2.11.1.tar.gz", hash = "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f"}, +] + +[[package]] +name = "pydantic" +version = "2.5.3" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic-2.5.3-py3-none-any.whl", hash = "sha256:d0caf5954bee831b6bfe7e338c32b9e30c85dfe080c843680783ac2b631673b4"}, + {file = "pydantic-2.5.3.tar.gz", hash = "sha256:b3ef57c62535b0941697cce638c08900d87fcb67e29cfa99e8a68f747f393f7a"}, +] + +[package.dependencies] +annotated-types = ">=0.4.0" +pydantic-core = "2.14.6" +typing-extensions = ">=4.6.1" + +[package.extras] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.14.6" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic_core-2.14.6-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:72f9a942d739f09cd42fffe5dc759928217649f070056f03c70df14f5770acf9"}, + {file = "pydantic_core-2.14.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6a31d98c0d69776c2576dda4b77b8e0c69ad08e8b539c25c7d0ca0dc19a50d6c"}, + {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5aa90562bc079c6c290f0512b21768967f9968e4cfea84ea4ff5af5d917016e4"}, + {file = 
"pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:370ffecb5316ed23b667d99ce4debe53ea664b99cc37bfa2af47bc769056d534"}, + {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f85f3843bdb1fe80e8c206fe6eed7a1caeae897e496542cee499c374a85c6e08"}, + {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9862bf828112e19685b76ca499b379338fd4c5c269d897e218b2ae8fcb80139d"}, + {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:036137b5ad0cb0004c75b579445a1efccd072387a36c7f217bb8efd1afbe5245"}, + {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:92879bce89f91f4b2416eba4429c7b5ca22c45ef4a499c39f0c5c69257522c7c"}, + {file = "pydantic_core-2.14.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0c08de15d50fa190d577e8591f0329a643eeaed696d7771760295998aca6bc66"}, + {file = "pydantic_core-2.14.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:36099c69f6b14fc2c49d7996cbf4f87ec4f0e66d1c74aa05228583225a07b590"}, + {file = "pydantic_core-2.14.6-cp310-none-win32.whl", hash = "sha256:7be719e4d2ae6c314f72844ba9d69e38dff342bc360379f7c8537c48e23034b7"}, + {file = "pydantic_core-2.14.6-cp310-none-win_amd64.whl", hash = "sha256:36fa402dcdc8ea7f1b0ddcf0df4254cc6b2e08f8cd80e7010d4c4ae6e86b2a87"}, + {file = "pydantic_core-2.14.6-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:dea7fcd62915fb150cdc373212141a30037e11b761fbced340e9db3379b892d4"}, + {file = "pydantic_core-2.14.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ffff855100bc066ff2cd3aa4a60bc9534661816b110f0243e59503ec2df38421"}, + {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b027c86c66b8627eb90e57aee1f526df77dc6d8b354ec498be9a757d513b92b"}, + {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:00b1087dabcee0b0ffd104f9f53d7d3eaddfaa314cdd6726143af6bc713aa27e"}, + {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:75ec284328b60a4e91010c1acade0c30584f28a1f345bc8f72fe8b9e46ec6a96"}, + {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e1f4744eea1501404b20b0ac059ff7e3f96a97d3e3f48ce27a139e053bb370b"}, + {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2602177668f89b38b9f84b7b3435d0a72511ddef45dc14446811759b82235a1"}, + {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6c8edaea3089bf908dd27da8f5d9e395c5b4dc092dbcce9b65e7156099b4b937"}, + {file = "pydantic_core-2.14.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:478e9e7b360dfec451daafe286998d4a1eeaecf6d69c427b834ae771cad4b622"}, + {file = "pydantic_core-2.14.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b6ca36c12a5120bad343eef193cc0122928c5c7466121da7c20f41160ba00ba2"}, + {file = "pydantic_core-2.14.6-cp311-none-win32.whl", hash = "sha256:2b8719037e570639e6b665a4050add43134d80b687288ba3ade18b22bbb29dd2"}, + {file = "pydantic_core-2.14.6-cp311-none-win_amd64.whl", hash = "sha256:78ee52ecc088c61cce32b2d30a826f929e1708f7b9247dc3b921aec367dc1b23"}, + {file = "pydantic_core-2.14.6-cp311-none-win_arm64.whl", hash = "sha256:a19b794f8fe6569472ff77602437ec4430f9b2b9ec7a1105cfd2232f9ba355e6"}, + {file = 
"pydantic_core-2.14.6-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:667aa2eac9cd0700af1ddb38b7b1ef246d8cf94c85637cbb03d7757ca4c3fdec"}, + {file = "pydantic_core-2.14.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cdee837710ef6b56ebd20245b83799fce40b265b3b406e51e8ccc5b85b9099b7"}, + {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c5bcf3414367e29f83fd66f7de64509a8fd2368b1edf4351e862910727d3e51"}, + {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:26a92ae76f75d1915806b77cf459811e772d8f71fd1e4339c99750f0e7f6324f"}, + {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a983cca5ed1dd9a35e9e42ebf9f278d344603bfcb174ff99a5815f953925140a"}, + {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cb92f9061657287eded380d7dc455bbf115430b3aa4741bdc662d02977e7d0af"}, + {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4ace1e220b078c8e48e82c081e35002038657e4b37d403ce940fa679e57113b"}, + {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ef633add81832f4b56d3b4c9408b43d530dfca29e68fb1b797dcb861a2c734cd"}, + {file = "pydantic_core-2.14.6-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7e90d6cc4aad2cc1f5e16ed56e46cebf4877c62403a311af20459c15da76fd91"}, + {file = "pydantic_core-2.14.6-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e8a5ac97ea521d7bde7621d86c30e86b798cdecd985723c4ed737a2aa9e77d0c"}, + {file = "pydantic_core-2.14.6-cp312-none-win32.whl", hash = "sha256:f27207e8ca3e5e021e2402ba942e5b4c629718e665c81b8b306f3c8b1ddbb786"}, + {file = "pydantic_core-2.14.6-cp312-none-win_amd64.whl", hash = "sha256:b3e5fe4538001bb82e2295b8d2a39356a84694c97cb73a566dc36328b9f83b40"}, + {file = "pydantic_core-2.14.6-cp312-none-win_arm64.whl", hash = "sha256:64634ccf9d671c6be242a664a33c4acf12882670b09b3f163cd00a24cffbd74e"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:24368e31be2c88bd69340fbfe741b405302993242ccb476c5c3ff48aeee1afe0"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:e33b0834f1cf779aa839975f9d8755a7c2420510c0fa1e9fa0497de77cd35d2c"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6af4b3f52cc65f8a0bc8b1cd9676f8c21ef3e9132f21fed250f6958bd7223bed"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d15687d7d7f40333bd8266f3814c591c2e2cd263fa2116e314f60d82086e353a"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:095b707bb287bfd534044166ab767bec70a9bba3175dcdc3371782175c14e43c"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94fc0e6621e07d1e91c44e016cc0b189b48db053061cc22d6298a611de8071bb"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce830e480f6774608dedfd4a90c42aac4a7af0a711f1b52f807130c2e434c06"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a306cdd2ad3a7d795d8e617a58c3a2ed0f76c8496fb7621b6cd514eb1532cae8"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = 
"sha256:2f5fa187bde8524b1e37ba894db13aadd64faa884657473b03a019f625cee9a8"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:438027a975cc213a47c5d70672e0d29776082155cfae540c4e225716586be75e"}, + {file = "pydantic_core-2.14.6-cp37-none-win32.whl", hash = "sha256:f96ae96a060a8072ceff4cfde89d261837b4294a4f28b84a28765470d502ccc6"}, + {file = "pydantic_core-2.14.6-cp37-none-win_amd64.whl", hash = "sha256:e646c0e282e960345314f42f2cea5e0b5f56938c093541ea6dbf11aec2862391"}, + {file = "pydantic_core-2.14.6-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:db453f2da3f59a348f514cfbfeb042393b68720787bbef2b4c6068ea362c8149"}, + {file = "pydantic_core-2.14.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3860c62057acd95cc84044e758e47b18dcd8871a328ebc8ccdefd18b0d26a21b"}, + {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36026d8f99c58d7044413e1b819a67ca0e0b8ebe0f25e775e6c3d1fabb3c38fb"}, + {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8ed1af8692bd8d2a29d702f1a2e6065416d76897d726e45a1775b1444f5928a7"}, + {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:314ccc4264ce7d854941231cf71b592e30d8d368a71e50197c905874feacc8a8"}, + {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:982487f8931067a32e72d40ab6b47b1628a9c5d344be7f1a4e668fb462d2da42"}, + {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dbe357bc4ddda078f79d2a36fc1dd0494a7f2fad83a0a684465b6f24b46fe80"}, + {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2f6ffc6701a0eb28648c845f4945a194dc7ab3c651f535b81793251e1185ac3d"}, + {file = "pydantic_core-2.14.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7f5025db12fc6de7bc1104d826d5aee1d172f9ba6ca936bf6474c2148ac336c1"}, + {file = "pydantic_core-2.14.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dab03ed811ed1c71d700ed08bde8431cf429bbe59e423394f0f4055f1ca0ea60"}, + {file = "pydantic_core-2.14.6-cp38-none-win32.whl", hash = "sha256:dfcbebdb3c4b6f739a91769aea5ed615023f3c88cb70df812849aef634c25fbe"}, + {file = "pydantic_core-2.14.6-cp38-none-win_amd64.whl", hash = "sha256:99b14dbea2fdb563d8b5a57c9badfcd72083f6006caf8e126b491519c7d64ca8"}, + {file = "pydantic_core-2.14.6-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:4ce8299b481bcb68e5c82002b96e411796b844d72b3e92a3fbedfe8e19813eab"}, + {file = "pydantic_core-2.14.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b9a9d92f10772d2a181b5ca339dee066ab7d1c9a34ae2421b2a52556e719756f"}, + {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd9e98b408384989ea4ab60206b8e100d8687da18b5c813c11e92fd8212a98e0"}, + {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4f86f1f318e56f5cbb282fe61eb84767aee743ebe32c7c0834690ebea50c0a6b"}, + {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86ce5fcfc3accf3a07a729779d0b86c5d0309a4764c897d86c11089be61da160"}, + {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dcf1978be02153c6a31692d4fbcc2a3f1db9da36039ead23173bc256ee3b91b"}, + {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:eedf97be7bc3dbc8addcef4142f4b4164066df0c6f36397ae4aaed3eb187d8ab"}, + {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d5f916acf8afbcab6bacbb376ba7dc61f845367901ecd5e328fc4d4aef2fcab0"}, + {file = "pydantic_core-2.14.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8a14c192c1d724c3acbfb3f10a958c55a2638391319ce8078cb36c02283959b9"}, + {file = "pydantic_core-2.14.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0348b1dc6b76041516e8a854ff95b21c55f5a411c3297d2ca52f5528e49d8411"}, + {file = "pydantic_core-2.14.6-cp39-none-win32.whl", hash = "sha256:de2a0645a923ba57c5527497daf8ec5df69c6eadf869e9cd46e86349146e5975"}, + {file = "pydantic_core-2.14.6-cp39-none-win_amd64.whl", hash = "sha256:aca48506a9c20f68ee61c87f2008f81f8ee99f8d7f0104bff3c47e2d148f89d9"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d5c28525c19f5bb1e09511669bb57353d22b94cf8b65f3a8d141c389a55dec95"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:78d0768ee59baa3de0f4adac9e3748b4b1fffc52143caebddfd5ea2961595277"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b93785eadaef932e4fe9c6e12ba67beb1b3f1e5495631419c784ab87e975670"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a874f21f87c485310944b2b2734cd6d318765bcbb7515eead33af9641816506e"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89f4477d915ea43b4ceea6756f63f0288941b6443a2b28c69004fe07fde0d0d"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:172de779e2a153d36ee690dbc49c6db568d7b33b18dc56b69a7514aecbcf380d"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:dfcebb950aa7e667ec226a442722134539e77c575f6cfaa423f24371bb8d2e94"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:55a23dcd98c858c0db44fc5c04fc7ed81c4b4d33c653a7c45ddaebf6563a2f66"}, + {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:4241204e4b36ab5ae466ecec5c4c16527a054c69f99bba20f6f75232a6a534e2"}, + {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e574de99d735b3fc8364cba9912c2bec2da78775eba95cbb225ef7dda6acea24"}, + {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1302a54f87b5cd8528e4d6d1bf2133b6aa7c6122ff8e9dc5220fbc1e07bffebd"}, + {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f8e81e4b55930e5ffab4a68db1af431629cf2e4066dbdbfef65348b8ab804ea8"}, + {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c99462ffc538717b3e60151dfaf91125f637e801f5ab008f81c402f1dff0cd0f"}, + {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e4cf2d5829f6963a5483ec01578ee76d329eb5caf330ecd05b3edd697e7d768a"}, + {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:cf10b7d58ae4a1f07fccbf4a0a956d705356fea05fb4c70608bb6fa81d103cda"}, + {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:399ac0891c284fa8eb998bcfa323f2234858f5d2efca3950ae58c8f88830f145"}, + {file = 
"pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c6a5c79b28003543db3ba67d1df336f253a87d3112dac3a51b94f7d48e4c0e1"}, + {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:599c87d79cab2a6a2a9df4aefe0455e61e7d2aeede2f8577c1b7c0aec643ee8e"}, + {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43e166ad47ba900f2542a80d83f9fc65fe99eb63ceec4debec160ae729824052"}, + {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3a0b5db001b98e1c649dd55afa928e75aa4087e587b9524a4992316fa23c9fba"}, + {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:747265448cb57a9f37572a488a57d873fd96bf51e5bb7edb52cfb37124516da4"}, + {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7ebe3416785f65c28f4f9441e916bfc8a54179c8dea73c23023f7086fa601c5d"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:86c963186ca5e50d5c8287b1d1c9d3f8f024cbe343d048c5bd282aec2d8641f2"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e0641b506486f0b4cd1500a2a65740243e8670a2549bb02bc4556a83af84ae03"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71d72ca5eaaa8d38c8df16b7deb1a2da4f650c41b58bb142f3fb75d5ad4a611f"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27e524624eace5c59af499cd97dc18bb201dc6a7a2da24bfc66ef151c69a5f2a"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3dde6cac75e0b0902778978d3b1646ca9f438654395a362cb21d9ad34b24acf"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:00646784f6cd993b1e1c0e7b0fdcbccc375d539db95555477771c27555e3c556"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:23598acb8ccaa3d1d875ef3b35cb6376535095e9405d91a3d57a8c7db5d29341"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7f41533d7e3cf9520065f610b41ac1c76bc2161415955fbcead4981b22c7611e"}, + {file = "pydantic_core-2.14.6.tar.gz", hash = "sha256:1fd0c1d395372843fba13a51c28e3bb9d59bd7aebfeb17358ffaaa1e4dbbe948"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pyflakes" +version = "3.1.0" +description = "passive checker of Python programs" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyflakes-3.1.0-py2.py3-none-any.whl", hash = "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774"}, + {file = "pyflakes-3.1.0.tar.gz", hash = "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc"}, +] + [[package]] name = "pytest" version = "7.4.3" @@ -205,6 +515,17 @@ tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +[[package]] +name = "pytest-timestamper" +version = "0.0.9" +description = "Pytest plugin to add a timestamp prefix to the pytest output" +optional = false +python-versions = "*" +files = [ + {file = "pytest-timestamper-0.0.9.tar.gz", hash = "sha256:1e02f14d20cb8444a24de356d8c818a180559f0cb61119935fc6d64c6e6f963d"}, + {file 
= "pytest_timestamper-0.0.9-py3-none-any.whl", hash = "sha256:229ba49f4dce27207bf9c5dc8531c983bc1c6a92e1de740567f914d6ff4b0b17"}, +] + [[package]] name = "python-dotenv" version = "1.0.0" @@ -262,6 +583,17 @@ files = [ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] +[[package]] +name = "typing-extensions" +version = "4.9.0" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"}, + {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"}, +] + [[package]] name = "urllib3" version = "2.1.0" @@ -280,5 +612,5 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" -python-versions = "^3.8" -content-hash = "4674d5e86f4c93df1d756ec9250c575dbc4ebdfa3fb873cdeea4fb863b3d446b" +python-versions = "^3.8.1" +content-hash = "8d2c15fd6e91baf356c2cfad7421e5f135c500c84b9c8de030c38db497e91266" diff --git a/pyproject.toml b/pyproject.toml index 0358b7a..fea4185 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "hrflow" -version = "3.1.2" +version = "3.2.0" description = "Python hrflow.ai API package" authors = ["HrFlow.ai "] license = "MIT" @@ -15,13 +15,53 @@ packages = [{include = "hrflow"}] "Changelog" = "https://github.com/Riminder/python-hrflow-api/releases" [tool.poetry.dependencies] -python = "^3.8" +python = "^3.8.1" requests = "^2.31.0" python-magic = "^0.4.27" [tool.poetry.group.dev.dependencies] pytest = "^7.4.3" python-dotenv = "^1.0.0" +pydantic = "^2.5.2" +isort = "^5.13.1" +black = "^23.12.0" +flake8 = "^6.1.0" +pytest-timestamper = "^0.0.9" + +[tool.black] +line-length = 88 +target-version = ["py37", "py38", "py39", "py310"] +preview = true + +[tool.isort] +profile = "black" + +[tool.pytest.ini_options] +markers = [ + "archive", + "asking", + "auth", + "editing", + "embedding", + "geocoding", + "hawk", + "imaging", + "indexing", + "job", + "linking", + "mozart", + "ocr", + "parsing", + "parsing_file_async", + "parsing_file_sync", + "profile", + "quicksilver", + "scoring", + "searching", + "tagging", + "text", + "unfolding" +] [build-system] requires = ["poetry-core"] diff --git a/tests/test_auth.py b/tests/test_auth.py new file mode 100644 index 0000000..7244c01 --- /dev/null +++ b/tests/test_auth.py @@ -0,0 +1,119 @@ +import pytest +from requests import codes as http_codes + +from hrflow import Hrflow + +from .utils.schemas import AuthResponse +from .utils.tools import _var_from_env_get + + +@pytest.mark.auth +def test_valid_all(): + model = AuthResponse.model_validate( + Hrflow( + api_secret=_var_from_env_get("HRFLOW_API_KEY"), + api_user=_var_from_env_get("HRFLOW_USER_EMAIL"), + ).auth.get() + ) + assert model.code == http_codes.ok + + +@pytest.mark.auth +def test_valid_read(): + model = AuthResponse.model_validate( + Hrflow( + api_secret=_var_from_env_get("HRFLOW_API_KEY_READ"), + api_user=_var_from_env_get("HRFLOW_USER_EMAIL"), + ).auth.get() + ) + assert model.code == http_codes.ok + + +@pytest.mark.skip( + reason="write permission key, for now, can not be used for a GET request" +) +@pytest.mark.auth +def test_valid_write(): + model = AuthResponse.model_validate( + Hrflow( + api_secret=_var_from_env_get("HRFLOW_API_KEY_WRITE"), + api_user=_var_from_env_get("HRFLOW_USER_EMAIL"), + ).auth.get() + ) + 
assert model.code == http_codes.ok + + +# All the keys below, in raw text, are mock + + +@pytest.mark.auth +def test_invalid_valid_askw(): + model = AuthResponse.model_validate( + Hrflow( + api_secret="askw_d86bb249fff3ac66765f04d43c611675", + api_user=_var_from_env_get("HRFLOW_USER_EMAIL"), + ).auth.get() + ) + assert model.code == http_codes.unauthorized + + +@pytest.mark.auth +def test_api_secret_regex_42(): + with pytest.raises(ValueError): + Hrflow( + api_secret="42", api_user=_var_from_env_get("HRFLOW_USER_EMAIL") + ).auth.get() + + +@pytest.mark.auth +def test_api_secret_regex_not_hex(): + with pytest.raises(ValueError): + Hrflow( + api_secret="ask_xa62249b693f2b4cc29524624abfc659", + api_user=_var_from_env_get("HRFLOW_USER_EMAIL"), + ).auth.get() + + +@pytest.mark.auth +def test_api_secret_regex_basic_key(): + with pytest.raises(ValueError): + Hrflow( + api_secret="b2631028fab36393d8bf05ca143b75e3424ea78e", + api_user=_var_from_env_get("HRFLOW_USER_EMAIL"), + ).auth.get() + + +@pytest.mark.auth +def test_api_secret_regex_valid_with_padding_start(): + with pytest.raises(ValueError): + Hrflow( + api_secret=" ask_d89a3523b8b5c34b24e8831239bb6ba0", + api_user=_var_from_env_get("HRFLOW_USER_EMAIL"), + ).auth.get() + + +@pytest.mark.auth +def test_api_secret_regex_valid_with_padding_end(): + with pytest.raises(ValueError): + Hrflow( + api_secret="ask_7f24675fbaadfaeb1e9ea57201b1b92c ", + api_user=_var_from_env_get("HRFLOW_USER_EMAIL"), + ).auth.get() + + +@pytest.mark.auth +def test_api_secret_too_short(): + with pytest.raises(ValueError): + Hrflow( + api_secret=_var_from_env_get("HRFLOW_API_KEY")[:-1], + api_user=_var_from_env_get("HRFLOW_USER_EMAIL"), + ).auth.get() + + +@pytest.mark.auth +def test_api_secret_too_long(): + with pytest.raises(ValueError): + Hrflow( + api_secret=_var_from_env_get("HRFLOW_API_KEY") + "f", + api_user=_var_from_env_get("HRFLOW_USER_EMAIL"), + ).auth.get() diff --git a/tests/test_hello.py b/tests/test_hello.py deleted file mode 100644 index c670c52..0000000 --- a/tests/test_hello.py +++ /dev/null @@ -1,5 +0,0 @@ -from dotenv import load_dotenv -load_dotenv() - -def test_hello(): - pass \ No newline at end of file diff --git a/tests/test_job.py b/tests/test_job.py new file mode 100644 index 0000000..23ce1d6 --- /dev/null +++ b/tests/test_job.py @@ -0,0 +1,241 @@ +import typing as t +from uuid import uuid4 + +import pytest +from requests import codes as http_codes + +from hrflow import Hrflow + +from .utils.schemas import ( + JobArchiveResponse, + JobAskingResponse, + JobIndexingResponse, + JobsScoringResponse, + JobsSearchingResponse, +) +from .utils.tools import ( + _check_same_keys_equality, + _indexed_response_get, + _now_iso8601_get, + _var_from_env_get, +) + + +@pytest.fixture(scope="module") +def hrflow_client(): + return Hrflow( + api_secret=_var_from_env_get("HRFLOW_API_KEY"), + api_user=_var_from_env_get("HRFLOW_USER_EMAIL"), + ) + + +def _job_get() -> t.Dict[str, t.Any]: + return dict( + reference=str(uuid4()), + name="r&d engineer", + location=dict(text="7 rue 4 septembre paris", lat=48.869179, lng=2.33814), + sections=[ + dict( + name="Description", + title="Description", + description=( + "As an AI Researcher Intern at HrFlow.ai, you'll play a vital role" + " in driving the next phase of our exciting expansion. Your role" + " involves developing innovative AI models and algorithms to tackle" + " intricate HR challenges. 
Collaborating with fellow researchers" + " and engineers, you'll help guide the technical direction and" + " architecture of our AI solutions." + ), + ) + ], + url="https://www.linkedin.com/jobs/search/?currentJobId=3718625295", + summary=( + "As an AI Researcher Intern at HrFlow.ai, you'll play a vital role in" + " driving the next phase of our exciting expansion. Your role involves" + " developing innovative AI models and algorithms to tackle intricate HR" + " challenges. Collaborating with fellow researchers and engineers, you'll" + " help guide the technical direction and architecture of our AI solutions." + ), + created_at=_now_iso8601_get(), + skills=[dict(name="Deep Learning", type="hard", value="95/100")], + languages=[dict(name="French", value="Fluent")], + certifications=[dict(name="ISO 27001", value="Individual")], + courses=[dict(name="Statistical Learning", value="On campus")], + tasks=[dict(name="Developing innovative AI models", value="Innovating")], + tags=[dict(name="Curios", value="1")], + metadatas=[ + dict(name="Interview note", value="Today, I met an amazing candidate...") + ], + ranges_float=[ + dict(name="salary", value_min=1234.56, value_max=6543.21, unit="euros") + ], + ranges_date=[ + dict( + name="dates", + value_min="2023-06-01T23:00:00.000Z", + value_max="2023-09-01T23:00:00.000Z", + ) + ], + culture="We love AI engineering, problem-solving, and business.", + responsibilities=( + "Designing, implementing, and optimizing AI models and algorithms that" + " solve complex HR challenges. Analyzing and evaluating the performance of" + " AI models and algorithms. Collaborating with other researchers and" + " engineers to improve the overall performance and accuracy of our AI" + " solutions. Staying up-to-date with the latest developments in AI research" + " and technology. Communicating and presenting research findings to" + " internal and external stakeholders." + ), + requirements=( + "Enrolled in an advanced degree in Computer Science, Artificial" + " Intelligence, or a related field. Proficiency in developing and" + " implementing AI models and algorithms. Strong programming skills in" + " Python. Experience with deep learning frameworks like TensorFlow," + " PyTorch, or Keras. Solid grasp of machine learning fundamentals and" + " statistical analysis. Exceptional problem-solving and analytical" + " abilities. Effective communication and collaboration skills." + ), + interviews=( + "Interview with one of our lead AI Researcher to discuss your experience" + " and qualifications in more detail. Interview with our Chief Executive" + " Officer to discuss your fit within our organization and your career" + " goals." + ), + benefits=( + "Go fast and learn a lot. High-impact position and responsibilities without" + " any day being the same. Competitive salary and variable compensation. Gym" + " club & public transportation. Fun & smart colleagues. Latest hardware." 
+        ),
+    )
+
+
+@pytest.mark.job
+@pytest.mark.indexing
+def test_job_indexing_basic(hrflow_client):
+    job = _job_get()
+    model = JobIndexingResponse.model_validate(
+        hrflow_client.job.storing.add_json(
+            board_key=_var_from_env_get("HRFLOW_BOARD_KEY"),
+            job_json=job,
+        )
+    )
+    assert model.code == http_codes.created
+    assert model.data is not None
+    _check_same_keys_equality(job, model.data)
+
+
+@pytest.mark.job
+@pytest.mark.searching
+def test_job_searching_basic(hrflow_client):
+    model = JobsSearchingResponse.model_validate(
+        hrflow_client.job.searching.list(
+            board_keys=[_var_from_env_get("HRFLOW_BOARD_KEY")],
+            limit=5,  # a small limit works around the bug with archived jobs
+        )
+    )
+    assert model.code == http_codes.ok
+    assert len(model.data.jobs) == model.meta.count
+
+
+@pytest.mark.job
+@pytest.mark.scoring
+def test_job_scoring_basic(hrflow_client):
+    model = JobsScoringResponse.model_validate(
+        hrflow_client.job.scoring.list(
+            algorithm_key=_var_from_env_get("HRFLOW_ALGORITHM_KEY"),
+            board_keys=[_var_from_env_get("HRFLOW_BOARD_KEY")],
+            profile_key=_var_from_env_get("HRFLOW_PROFILE_KEY"),
+            source_key=_var_from_env_get("HRFLOW_SOURCE_KEY_QUICKSILVER_SYNC"),
+            limit=5,  # a small limit works around the bug with archived jobs
+        )
+    )
+    assert model.code == http_codes.ok
+    assert len(model.data.jobs) == len(model.data.predictions) == model.meta.count
+
+
+@pytest.mark.job
+@pytest.mark.asking
+def test_job_asking_basic(hrflow_client):
+    BOARD_KEY = _var_from_env_get("HRFLOW_BOARD_KEY")
+    model = JobAskingResponse.model_validate(
+        hrflow_client.job.asking.get(
+            board_key=BOARD_KEY,
+            key=_indexed_response_get(hrflow_client, BOARD_KEY, _job_get()).data.key,
+            questions=[
+                "What is the company proposing this job offer?",
+            ],
+        )
+    )
+    assert model.code == http_codes.ok
+    assert len(model.data) == 1
+    assert "hrflow.ai" in model.data[0].lower()
+
+
+@pytest.mark.skip(reason="backend: multiple questions are not correctly handled yet")
+@pytest.mark.job
+@pytest.mark.asking
+def test_job_asking_multiple_questions(hrflow_client):
+    BOARD_KEY = _var_from_env_get("HRFLOW_BOARD_KEY")
+    questions = [
+        "What is the job title?",
+        "What is the company proposing this job offer?",
+        "What is the job location address?",
+        "What are the expected skills for this job?",
+    ]
+    model = JobAskingResponse.model_validate(
+        hrflow_client.job.asking.get(
+            board_key=BOARD_KEY,
+            key=_indexed_response_get(hrflow_client, BOARD_KEY, _job_get()).data.key,
+            questions=questions,
+        )
+    )
+    assert model.code == http_codes.ok
+    assert len(model.data) == len(questions)
+    assert "r&d engineer" in model.data[0].lower()
+    assert "hrflow.ai" in model.data[1].lower()
+    assert "7 rue 4 septembre" in model.data[2].lower()
+    assert "deep learning" in model.data[3].lower()
+
+
+@pytest.mark.job
+@pytest.mark.asking
+def test_job_asking_no_questions(hrflow_client):
+    BOARD_KEY = _var_from_env_get("HRFLOW_BOARD_KEY")
+    model = JobAskingResponse.model_validate(
+        hrflow_client.job.asking.get(
+            board_key=BOARD_KEY,
+            key=_indexed_response_get(hrflow_client, BOARD_KEY, _job_get()).data.key,
+            questions=None,
+        )
+    )
+    assert model.code == http_codes.bad_request
+
+
+@pytest.mark.job
+@pytest.mark.archive
+def test_job_archive_basic(hrflow_client):
+    BOARD_KEY = _var_from_env_get("HRFLOW_BOARD_KEY")
+    mock_key = _indexed_response_get(hrflow_client, BOARD_KEY, _job_get()).data.key
+    model = JobArchiveResponse.model_validate(
+        hrflow_client.job.storing.archive(board_key=BOARD_KEY, key=mock_key)
+    )
+    assert model.code == 
http_codes.ok + assert model.data.key == mock_key + + +@pytest.mark.job +@pytest.mark.editing +def test_job_editing_basic(hrflow_client): + BOARD_KEY = _var_from_env_get("HRFLOW_BOARD_KEY") + mock_job = _indexed_response_get(hrflow_client, BOARD_KEY, _job_get()).data + mock_job.interviews = ( + f"To access the interview call you must use the token {uuid4()}." + ) + model = JobIndexingResponse.model_validate( + hrflow_client.job.storing.edit( + board_key=BOARD_KEY, + job_json=mock_job.model_dump(), + ) + ) + assert model.code == http_codes.ok + assert model.data.interviews == mock_job.interviews diff --git a/tests/test_profile.py b/tests/test_profile.py new file mode 100644 index 0000000..c1cf4d2 --- /dev/null +++ b/tests/test_profile.py @@ -0,0 +1,643 @@ +import json +import typing as t +from time import sleep +from uuid import uuid4 + +import pytest +from requests import codes as http_codes + +from hrflow import Hrflow + +from .utils.schemas import ( + ProfileArchiveResponse, + ProfileAskingResponse, + ProfileIndexingResponse, + ProfileParsingFileResponse, + ProfilesScoringResponse, + ProfilesSearchingResponse, + ProfileUnfoldingResponse, +) +from .utils.tools import ( + _check_same_keys_equality, + _file_get, + _indexed_response_get, + _now_iso8601_get, + _var_from_env_get, +) + +_MAX_RETRIES = 5 +_ASYNC_RETRY_INTERVAL_SECONDS = 5 +_ASYNC_TIMEOUT_SECONDS = 60 + + +@pytest.fixture(scope="module") +def hrflow_client(): + return Hrflow( + api_secret=_var_from_env_get("HRFLOW_API_KEY"), + api_user=_var_from_env_get("HRFLOW_USER_EMAIL"), + ) + + +def _profile_get() -> t.Dict[str, t.Any]: + return dict( + reference=str(uuid4()), + text_language="en", + text=( + "Harry James PotterSorcerer Apprenticedate of birth: June 26th 1997Number" + " 4, Privet Drive, Little Whingingemail: harry.potter@hogwarts.netphone:" + " 0747532699ExperiencesApril 2002 - July 2002 Hogwarts School of Witchcraft" + " and WizardryMagic InvestigatorSolving mysteries about the Sorcerer's" + " stone.teamplayer, empathy.EducationsDecember 2001 - December" + " 2002 Hogwarts" + " School of Witchcraft and WizardrySorcerer ApprenticeFirst year of" + " study.witchcraft, levitation, lycanthropy.Skillswitchcraftdark" + " artsperseveranceempathyInterestsquidditchwizard chess" + ), + created_at=_now_iso8601_get(), + info=dict( + full_name="Harry James Potter", + first_name="Harry James", + last_name="Potter", + email="harry.potter@hogwarts.net", + phone="0747532699", + driving_license="Class B", + date_birth="1997-06-26T00:00:00+0000", + location=dict(text="Hogwarts", lat=12.345678, lng=-87.654321), + urls=[dict(type="github", url="https://github.com/Riminder")], + picture="https://path.to/picture", + summary="Sorcerer Apprentice", + gender="male", + ), + experiences_duration=0.2493150684931507, + educations_duration=1.0, + experiences=[ + dict( + company="Hogwarts School of Witchcraft and Wizardry", + title="Magic Investigator", + description="Solving mysteries about the Sorcerer's stone.", + location=dict(text="Hogwarts", lat=12.345678, lng=-87.654321), + logo="https://path.to/logo", + # The experience date string must not end with +0000 + # Ideally, it should follow the format yyyy-mm-ddTHH:MM:SS + date_start="2002-04-01T00:00:00", + date_end="2002-07-01T00:00:00", + skills=[dict(name="Teamplayer", type="hard", value="90/100")], + certifications=[dict(name="Wizardry", value="Individual")], + courses=[dict(name="Advanced Potion-Making", value="On campus")], + tasks=[dict(name="Defeat the Basilisk", value="Bravery")], + 
languages=[dict(name="English", value="Fluent")], + interests=[dict(name="Levitation", value="Amateur")], + ) + ], + educations=[ + dict( + school="Hogwarts School of Witchcraft and Wizardry", + title="Sorcerer Apprentice", + description="First year of study.", + location=dict(text="Hogwarts", lat=12.345678, lng=-87.654321), + logo="https://path.to/logo", + date_start="2001-12-01T00:00:00+0000", + date_end="2002-12-01T00:00:00+0000", + skills=[dict(name="Levitation", type="hard", value="88/100")], + certifications=[dict(name="Wizardry", value="Individual")], + courses=[dict(name="Advanced Potion-Making", value="On campus")], + tasks=[dict(name="Defeat the Basilisk", value="Bravery")], + interests=[dict(name="Levitation", value="Amateur")], + languages=[dict(name="English", value="Fluent")], + ) + ], + skills=[dict(name="Lycanthropy", type="hard", value="93/100")], + languages=[dict(name="English", value="Native")], + certifications=[dict(name="Wizardry", value="Individual")], + courses=[dict(name="Advanced Potion-Making", value="On campus")], + tasks=[dict(name="Defeat the Basilisk", value="Bravery")], + interests=[dict(name="Levitation", value="Amateur")], + tags=[dict(name="Brave", value="1")], + metadatas=[ + dict( + name="Defeat the Basilisk", + value=( + "To defeat the Basilisk, a few key elements: a weapon, courage and" + " determination, quick reflexes and support." + ), + ) + ], + ) + + +@pytest.mark.profile +@pytest.mark.parsing_file_sync +@pytest.mark.quicksilver +def test_profile_parsing_file_quicksilver_sync_basic(hrflow_client): + s3_url = """https://riminder-documents-eu-2019-12.s3-eu-west-1.amazonaws.com/\ +teams/fc9d40fd60e679119130ea74ae1d34a3e22174f2/sources/07065e555609a231752a586afd6\ +495c951bbae6b/profiles/1fed6e15b2df4465b1e406adabd0075d3214bc18/parsing/resume.pdf""" + file = _file_get(s3_url, "profile_sync") + model = ProfileParsingFileResponse.model_validate( + hrflow_client.profile.parsing.add_file( + source_key=_var_from_env_get("HRFLOW_SOURCE_KEY_QUICKSILVER_SYNC"), + profile_file=file, + ) + ) + assert model.code == http_codes.created + assert model.data.profile + profile = model.data.profile.model_dump() + if profile.get("info"): + info = profile["info"] + full_name_lower = info["full_name"].lower() + assert "nico" in full_name_lower and "durant" in full_name_lower + assert "nico" in info["first_name"].lower() + assert "durant" in info["last_name"].lower() + assert info["phone"] == "+33631245722" + assert info["driving_license"] == "B" + assert info["email"] == "exempledecv@cvmaker.com" + if profile.get("languages"): + languages_str_lower = json.dumps(profile["languages"]).lower() + assert ( + "espagnol" in languages_str_lower + or "allemand" in languages_str_lower + or "anglais" in languages_str_lower + ) + if profile.get("skills"): + skills_str_lower = json.dumps(profile["skills"]).lower() + assert ( + "word" in skills_str_lower + or "excel" in skills_str_lower + or "power point" in skills_str_lower + or "photoshop" in skills_str_lower + ) + if profile.get("educations"): + educations_str_lower = json.dumps( + profile["educations"], ensure_ascii=False + ).lower() + assert "ecole de commerce" in educations_str_lower + assert ( + "comptabilité" in educations_str_lower or "gestion" in educations_str_lower + ) + assert ( + "audit" in educations_str_lower + or "droit des affaires" in educations_str_lower + ) + assert "baccalauréat général économique" in educations_str_lower + assert "lycée" in educations_str_lower + assert "paris" in educations_str_lower + assert 
"bureau des étudiants" in educations_str_lower + if profile.get("experiences"): + experiences_str_lower = json.dumps( + profile["experiences"], ensure_ascii=False + ).lower() + assert "paris" in experiences_str_lower + assert ( + "vendeur" in experiences_str_lower + or "sport magasin" in experiences_str_lower + ) + assert ( + "accueil des clients" in experiences_str_lower + or "gestion du stock" in experiences_str_lower + or "gestion de la caisse" in experiences_str_lower + or "rangement du stock" in experiences_str_lower + ) + assert "animateur" in experiences_str_lower + assert ( + "accueil des vacanciers" in experiences_str_lower + or "animation d'ateliers pour les jeunes de 8 à 10 ans" + in experiences_str_lower + or "soutien administratif" in experiences_str_lower + ) + assert ( + "camping sable & me" in experiences_str_lower + or "juan-les-bains" in experiences_str_lower + ) + assert "baby-sitting" in experiences_str_lower + assert ( + "garde d'enfants âgés de 5 ans et 7 ans" in experiences_str_lower + or "sortie d'école" in experiences_str_lower + or "aide aux devoirs" in experiences_str_lower + or "préparation de repas" in experiences_str_lower + or "jeux éducatifs" in experiences_str_lower + ) + + +@pytest.mark.profile +@pytest.mark.parsing_file_sync +@pytest.mark.hawk +def test_profile_parsing_file_hawk_sync_basic(hrflow_client): + s3_url = """https://riminder-documents-eu-2019-12.s3-eu-west-1.amazonaws.com/\ +teams/fc9d40fd60e679119130ea74ae1d34a3e22174f2/sources/07065e555609a231752a586afd6\ +495c951bbae6b/profiles/1fed6e15b2df4465b1e406adabd0075d3214bc18/parsing/resume.pdf""" + file = _file_get(s3_url, "profile_sync") + model = ProfileParsingFileResponse.model_validate( + hrflow_client.profile.parsing.add_file( + source_key=_var_from_env_get("HRFLOW_SOURCE_KEY_HAWK_SYNC"), + profile_file=file, + ) + ) + assert model.code == http_codes.created + assert model.data.profile + profile = model.data.profile.model_dump() + if profile.get("info"): + info = profile["info"] + full_name_lower = info["full_name"].lower() + assert "nico" in full_name_lower and "durant" in full_name_lower + assert "nico" in info["first_name"].lower() + assert "durant" in info["last_name"].lower() + assert info["phone"] == "+33631245722" + assert info["driving_license"] == "B" + assert info["email"] == "exempledecv@cvmaker.com" + if profile.get("languages"): + languages_str_lower = json.dumps(profile["languages"]).lower() + assert ( + "espagnol" in languages_str_lower + or "allemand" in languages_str_lower + or "anglais" in languages_str_lower + ) + if profile.get("skills"): + skills_str_lower = json.dumps(profile["skills"]).lower() + assert ( + "word" in skills_str_lower + or "excel" in skills_str_lower + or "power point" in skills_str_lower + or "photoshop" in skills_str_lower + ) + if profile.get("educations"): + educations_str_lower = json.dumps( + profile["educations"], ensure_ascii=False + ).lower() + assert "ecole de commerce" in educations_str_lower + assert ( + "comptabilité" in educations_str_lower or "gestion" in educations_str_lower + ) + assert ( + "audit" in educations_str_lower + or "droit des affaires" in educations_str_lower + ) + assert "baccalauréat général économique" in educations_str_lower + assert "lycée" in educations_str_lower + assert "paris" in educations_str_lower + assert "bureau des étudiants" in educations_str_lower + if profile.get("experiences"): + experiences_str_lower = json.dumps( + profile["experiences"], ensure_ascii=False + ).lower() + assert "paris" in 
experiences_str_lower + assert ( + "vendeur" in experiences_str_lower + or "sport magasin" in experiences_str_lower + ) + assert ( + "accueil des clients" in experiences_str_lower + or "gestion du stock" in experiences_str_lower + or "gestion de la caisse" in experiences_str_lower + or "rangement du stock" in experiences_str_lower + ) + assert "animateur" in experiences_str_lower + assert ( + "accueil des vacanciers" in experiences_str_lower + or "animation d'ateliers pour les jeunes de 8 à 10 ans" + in experiences_str_lower + or "soutien administratif" in experiences_str_lower + ) + assert ( + "camping sable & me" in experiences_str_lower + or "juan-les-bains" in experiences_str_lower + ) + assert "baby-sitting" in experiences_str_lower + assert ( + "garde d'enfants âgés de 5 ans et 7 ans" in experiences_str_lower + or "sortie d'école" in experiences_str_lower + or "aide aux devoirs" in experiences_str_lower + or "préparation de repas" in experiences_str_lower + or "jeux éducatifs" in experiences_str_lower + ) + + +@pytest.mark.profile +@pytest.mark.parsing_file_async +@pytest.mark.quicksilver +def test_profile_parsing_file_quicksilver_async_basic(hrflow_client): + SOURCE_KEY = _var_from_env_get("HRFLOW_SOURCE_KEY_QUICKSILVER_ASYNC") + s3_url = """https://riminder-documents-eu-2019-12.s3-eu-west-1.amazonaws.com/\ +teams/fc9d40fd60e679119130ea74ae1d34a3e22174f2/sources/06d96aab2661b16eaf4d34d385d\ +3c2b0cf00c0eb/profiles/d79768fb63013a8bdd04e7e8742cc84afd428a87/parsing/resume.pdf""" + file = _file_get(s3_url, "profile_async") + reference = str(uuid4()) + model = ProfileParsingFileResponse.model_validate( + hrflow_client.profile.parsing.add_file( + source_key=SOURCE_KEY, + profile_file=file, + reference=reference, + ) + ) + assert model.code == http_codes.accepted + assert _ASYNC_RETRY_INTERVAL_SECONDS > 0 + for _ in range(max(0, _ASYNC_TIMEOUT_SECONDS // _ASYNC_RETRY_INTERVAL_SECONDS)): + model = ProfileIndexingResponse.model_validate( + hrflow_client.profile.storing.get( + source_key=SOURCE_KEY, reference=reference + ) + ) + if model.code == http_codes.ok: + break + sleep(_ASYNC_RETRY_INTERVAL_SECONDS) + assert model.code == http_codes.ok or pytest.fail( + "failed to retrieve an asynchronously parsed profile with" + f" timeout={_ASYNC_TIMEOUT_SECONDS} and" + f" interval={_ASYNC_RETRY_INTERVAL_SECONDS}" + ) + assert model.data is not None + profile = model.data.model_dump() + assert "john" in profile["info"]["full_name"].lower() + assert "john@smith.com" in profile["info"]["email"].lower() + assert profile["info"]["phone"].count("5") >= 9 + location_text_lower = profile["info"]["location"]["text"].lower() + assert ( + "141 highway street road" in location_text_lower + or "scottsdale" in location_text_lower + or "hawaii" in location_text_lower + ) + skills_str_lower = json.dumps(profile["skills"]).lower() + assert ( + "web development" in skills_str_lower + or "adobe photoshop" in skills_str_lower + or "adobe dreamweaver" in skills_str_lower + or "indesign" in skills_str_lower + or "illustrator" in skills_str_lower + or "after effects" in skills_str_lower + or "css" in skills_str_lower + or "javascript" in skills_str_lower + or "responsive web design" in skills_str_lower + or "php" in skills_str_lower + or "jquery" in skills_str_lower + or "wordpress" in skills_str_lower + or "cmd/sharepoint" in skills_str_lower + or "animated gifs" in skills_str_lower + or "web banners" in skills_str_lower + or "project management" in skills_str_lower + or "technical writing" in skills_str_lower + or "seo" 
in skills_str_lower + ) + educations_str_lower = json.dumps(profile["educations"]).lower() + assert ( + "masters of information systems" in educations_str_lower + or "bachelors of science" in educations_str_lower + ) + experiences_str_lower = json.dumps(profile["experiences"]).lower() + assert "web designer intern" in experiences_str_lower + assert "scottsdale, hawaii" in experiences_str_lower + assert ( + "html" in experiences_str_lower + or "css" in experiences_str_lower + or "jquery" in experiences_str_lower + ) + + +@pytest.mark.profile +@pytest.mark.parsing_file_async +@pytest.mark.mozart +def test_profile_parsing_file_mozart_async_basic(hrflow_client): + SOURCE_KEY = _var_from_env_get("HRFLOW_SOURCE_KEY_MOZART_ASYNC") + s3_url = """https://riminder-documents-eu-2019-12.s3-eu-west-1.amazonaws.com/\ +teams/fc9d40fd60e679119130ea74ae1d34a3e22174f2/sources/06d96aab2661b16eaf4d34d385d\ +3c2b0cf00c0eb/profiles/d79768fb63013a8bdd04e7e8742cc84afd428a87/parsing/resume.pdf""" + file = _file_get(s3_url, "profile_async") + reference = str(uuid4()) + model = ProfileParsingFileResponse.model_validate( + hrflow_client.profile.parsing.add_file( + source_key=SOURCE_KEY, + profile_file=file, + reference=reference, + ) + ) + assert model.code == http_codes.accepted + assert _ASYNC_RETRY_INTERVAL_SECONDS > 0 + for _ in range(max(0, _ASYNC_TIMEOUT_SECONDS // _ASYNC_RETRY_INTERVAL_SECONDS)): + model = ProfileIndexingResponse.model_validate( + hrflow_client.profile.storing.get( + source_key=SOURCE_KEY, reference=reference + ) + ) + if model.code == http_codes.ok: + break + sleep(_ASYNC_RETRY_INTERVAL_SECONDS) + assert model.code == http_codes.ok or pytest.fail( + "failed to retrieve an asynchronously parsed profile with" + f" timeout={_ASYNC_TIMEOUT_SECONDS} and" + f" interval={_ASYNC_RETRY_INTERVAL_SECONDS}" + ) + assert model.data is not None + profile = model.data.model_dump() + assert "john" in profile["info"]["full_name"].lower() + assert "john@smith.com" in profile["info"]["email"].lower() + assert profile["info"]["phone"].count("5") >= 9 + location_text_lower = profile["info"]["location"]["text"].lower() + assert ( + "141 highway street road" in location_text_lower + or "scottsdale" in location_text_lower + or "hawaii" in location_text_lower + ) + skills_str_lower = json.dumps(profile["skills"]).lower() + assert ( + "web development" in skills_str_lower + or "adobe photoshop" in skills_str_lower + or "adobe dreamweaver" in skills_str_lower + or "indesign" in skills_str_lower + or "illustrator" in skills_str_lower + or "after effects" in skills_str_lower + or "css" in skills_str_lower + or "javascript" in skills_str_lower + or "responsive web design" in skills_str_lower + or "php" in skills_str_lower + or "jquery" in skills_str_lower + or "wordpress" in skills_str_lower + or "cmd/sharepoint" in skills_str_lower + or "animated gifs" in skills_str_lower + or "web banners" in skills_str_lower + or "project management" in skills_str_lower + or "technical writing" in skills_str_lower + or "seo" in skills_str_lower + ) + educations_str_lower = json.dumps(profile["educations"]).lower() + assert ( + "masters of information systems" in educations_str_lower + or "bachelors of science" in educations_str_lower + ) + experiences_str_lower = json.dumps(profile["experiences"]).lower() + assert "web designer intern" in experiences_str_lower + assert "scottsdale, hawaii" in experiences_str_lower + assert ( + "html" in experiences_str_lower + or "css" in experiences_str_lower + or "jquery" in experiences_str_lower + ) 
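+
+
+# NOTE (editor's sketch): the two asynchronous parsing tests above duplicate the
+# same poll-until-indexed loop. A helper along the lines below could factor it
+# out; `_async_parsed_profile_wait` is a hypothetical name, not part of the SDK
+# or of the original suite.
+def _async_parsed_profile_wait(
+    hrflow_client: Hrflow, source_key: str, reference: str
+) -> ProfileIndexingResponse:
+    # poll the storing endpoint until the asynchronously parsed profile becomes
+    # retrievable, then return the validated response
+    for _ in range(max(0, _ASYNC_TIMEOUT_SECONDS // _ASYNC_RETRY_INTERVAL_SECONDS)):
+        model = ProfileIndexingResponse.model_validate(
+            hrflow_client.profile.storing.get(
+                source_key=source_key, reference=reference
+            )
+        )
+        if model.code == http_codes.ok:
+            return model
+        sleep(_ASYNC_RETRY_INTERVAL_SECONDS)
+    pytest.fail(
+        "failed to retrieve an asynchronously parsed profile with"
+        f" timeout={_ASYNC_TIMEOUT_SECONDS} and"
+        f" interval={_ASYNC_RETRY_INTERVAL_SECONDS}"
+    )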
+
+
+@pytest.mark.profile
+@pytest.mark.indexing
+def test_profile_indexing_basic(hrflow_client):
+    profile = _profile_get()
+    model = ProfileIndexingResponse.model_validate(
+        hrflow_client.profile.storing.add_json(
+            source_key=_var_from_env_get("HRFLOW_SOURCE_KEY_QUICKSILVER_SYNC"),
+            profile_json=profile,
+        )
+    )
+    assert model.code == http_codes.created
+    assert model.data is not None
+    _check_same_keys_equality(profile, model.data)
+
+
+@pytest.mark.profile
+@pytest.mark.searching
+def test_profiles_searching_basic(hrflow_client):
+    model = ProfilesSearchingResponse.model_validate(
+        hrflow_client.profile.searching.list(
+            source_keys=[_var_from_env_get("HRFLOW_SOURCE_KEY_QUICKSILVER_SYNC")],
+            limit=5,  # a small limit works around the bug with archived profiles
+        )
+    )
+    assert model.code == http_codes.ok
+
+
+@pytest.mark.profile
+@pytest.mark.scoring
+def test_profiles_scoring_basic(hrflow_client):
+    model = ProfilesScoringResponse.model_validate(
+        hrflow_client.profile.scoring.list(
+            algorithm_key=_var_from_env_get("HRFLOW_ALGORITHM_KEY"),
+            board_key=_var_from_env_get("HRFLOW_BOARD_KEY"),
+            source_keys=[_var_from_env_get("HRFLOW_SOURCE_KEY_QUICKSILVER_SYNC")],
+            job_key=_var_from_env_get("HRFLOW_JOB_KEY"),
+            limit=5,  # a small limit works around the bug with archived profiles
+        )
+    )
+    assert model.code == http_codes.ok
+
+
+@pytest.mark.profile
+@pytest.mark.asking
+def test_profile_asking_basic(hrflow_client):
+    SOURCE_KEY = _var_from_env_get("HRFLOW_SOURCE_KEY_QUICKSILVER_SYNC")
+    model = ProfileAskingResponse.model_validate(
+        hrflow_client.profile.asking.get(
+            source_key=SOURCE_KEY,
+            key=_indexed_response_get(
+                hrflow_client, SOURCE_KEY, _profile_get()
+            ).data.key,
+            questions=[
+                "What is the full name of the profile?",
+            ],
+        )
+    )
+    assert model.code == http_codes.ok
+    assert len(model.data) == 1
+    assert "harry james potter" in model.data[0].lower()
+
+
+@pytest.mark.skip(reason="backend: multiple questions are not correctly handled yet")
+@pytest.mark.profile
+@pytest.mark.asking
+def test_profile_asking_multiple_questions(hrflow_client):
+    SOURCE_KEY = _var_from_env_get("HRFLOW_SOURCE_KEY_QUICKSILVER_SYNC")
+    questions = [
+        "What is the full name of the profile?",
+        "Does the applicant have a driver's licence?",
+        "What year did the profile finish school?",
+    ]
+    model = ProfileAskingResponse.model_validate(
+        hrflow_client.profile.asking.get(
+            source_key=SOURCE_KEY,
+            questions=questions,
+            key=_indexed_response_get(
+                hrflow_client, SOURCE_KEY, _profile_get()
+            ).data.key,
+        )
+    )
+    assert model.code == http_codes.ok
+    assert len(model.data) == len(questions)
+    assert "harry james potter" in model.data[0].lower()
+    assert "yes" in model.data[1].lower()
+    assert "2002" in model.data[2]
+
+
+@pytest.mark.profile
+@pytest.mark.asking
+def test_profile_asking_no_question(hrflow_client):
+    SOURCE_KEY = _var_from_env_get("HRFLOW_SOURCE_KEY_QUICKSILVER_SYNC")
+    model = ProfileAskingResponse.model_validate(
+        hrflow_client.profile.asking.get(
+            source_key=SOURCE_KEY,
+            key=_indexed_response_get(
+                hrflow_client, SOURCE_KEY, _profile_get()
+            ).data.key,
+            questions=None,
+        )
+    )
+    assert model.code == http_codes.bad_request
+
+
+@pytest.mark.profile
+@pytest.mark.unfolding
+def test_profile_unfolding_basic(hrflow_client):
+    SOURCE_KEY = _var_from_env_get("HRFLOW_SOURCE_KEY_QUICKSILVER_SYNC")
+    profile = _profile_get()
+    if profile.get("experiences") and len(profile["experiences"]) == 1:
+        profile["experiences"].append(profile["experiences"][0].copy())  # shallow copy
+        
last_experience = profile["experiences"][-1] + for dkey in ["date_start", "date_end"]: + datestr = last_experience.get(dkey) + if datestr is not None: # +1 year + last_experience[dkey] = str(int(datestr[:4]) + 1) + datestr[4:] + for _ in range(_MAX_RETRIES): + model = ProfileUnfoldingResponse.model_validate( + hrflow_client.profile.unfolding.get( + source_key=SOURCE_KEY, + key=_indexed_response_get(hrflow_client, SOURCE_KEY, profile).data.key, + ) + ) + if model.code != http_codes.server_error: + break + assert model.code == http_codes.ok + assert len(model.data.experiences) == 1 + + +@pytest.mark.profile +@pytest.mark.unfolding +def test_profile_unfolding_no_experience(hrflow_client): + SOURCE_KEY = _var_from_env_get("HRFLOW_SOURCE_KEY_QUICKSILVER_SYNC") + profile = _profile_get() + profile["experiences"] = list() + model = ProfileUnfoldingResponse.model_validate( + hrflow_client.profile.unfolding.get( + source_key=SOURCE_KEY, + key=_indexed_response_get(hrflow_client, SOURCE_KEY, profile).data.key, + ) + ) + assert model.code == http_codes.bad_request + + +@pytest.mark.profile +@pytest.mark.archive +def test_profile_archive_basic(hrflow_client): + SOURCE_KEY = _var_from_env_get("HRFLOW_SOURCE_KEY_QUICKSILVER_SYNC") + mock_key = _indexed_response_get(hrflow_client, SOURCE_KEY, _profile_get()).data.key + model = ProfileArchiveResponse.model_validate( + hrflow_client.profile.storing.archive(source_key=SOURCE_KEY, key=mock_key) + ) + assert model.code == http_codes.ok + assert model.data.key == mock_key + + +@pytest.mark.profile +@pytest.mark.editing +def test_profile_editing_basic(hrflow_client): + SOURCE_KEY = _var_from_env_get("HRFLOW_SOURCE_KEY_QUICKSILVER_SYNC") + mock_profile = _indexed_response_get(hrflow_client, SOURCE_KEY, _profile_get()).data + mock_profile.text = f"The password of my bitcoin wallet is {uuid4()}." + model = ProfileIndexingResponse.model_validate( + hrflow_client.profile.storing.edit( + source_key=SOURCE_KEY, + profile_json=mock_profile.model_dump(), + ) + ) + assert model.code == http_codes.ok + assert model.data.text == mock_profile.text diff --git a/tests/test_text.py b/tests/test_text.py new file mode 100644 index 0000000..bcb98ee --- /dev/null +++ b/tests/test_text.py @@ -0,0 +1,381 @@ +import typing as t + +import pytest +import requests + +from hrflow import Hrflow + +from .utils.enums import TAGGING_ALGORITHM +from .utils.schemas import ( + TextEmbeddingResponse, + TextImagingResponse, + TextLinkingResponse, + TextOCRResponse, + TextParsingResponse, + TextTaggingDataItem, + TextTaggingReponse, +) +from .utils.tools import _file_get, _var_from_env_get + +TAGGING_TEXTS = [ + ( + "Data Insights Corp. is seeking a Senior Data Scientist for a" + " contract-to-direct position. You will be responsible for designing and" + " implementing advanced machine learning algorithms and playing a key role in" + " shaping our data science initiatives. The CDI arrangement offers a pathway to" + " a full-time role" + ), + ( + "DataTech Solutions is hiring a Data Scientist for a fixed-term contract of 12" + " months. 
You will work on various data analysis and modeling projects and"
+        " assist in short-term projects, with the possibility of extension or"
+        " permanent roles"
+    ),
+]
+
+
+@pytest.fixture(scope="module")
+def hrflow_client():
+    return Hrflow(
+        api_secret=_var_from_env_get("HRFLOW_API_KEY"),
+        api_user=_var_from_env_get("HRFLOW_USER_EMAIL"),
+    )
+
+
+@pytest.mark.text
+@pytest.mark.embedding
+def test_embedding_basic(hrflow_client):
+    text = "I love using embeddings in order to do transfer learning with my AI algorithms"
+    model = TextEmbeddingResponse.model_validate(
+        hrflow_client.text.embedding.post(text=text)
+    )
+    assert model.code == requests.codes.ok
+    assert len(model.data) > 0
+
+
+@pytest.mark.text
+@pytest.mark.embedding
+def test_embedding_no_text(hrflow_client):
+    model = TextEmbeddingResponse.model_validate(
+        hrflow_client.text.embedding.post(text=None)
+    )
+    assert model.code == requests.codes.bad_request
+    assert "null" in model.message.lower()
+
+
+def _image_sizes_get(content: bytes) -> t.Tuple[int, int]:
+    # a PNG stores width and height as big-endian 32-bit integers at byte
+    # offsets 16-20 and 20-24 (inside the IHDR chunk)
+    w = int.from_bytes(content[16:20], byteorder="big")
+    h = int.from_bytes(content[20:24], byteorder="big")
+    return w, h
+
+
+def _content_is_png(content: bytes) -> bool:
+    return content.startswith(b"\x89PNG\r\n\x1a\n")
+
+
+def _imaging_test_valid_size(width: t.Literal[256, 512]):
+    model = TextImagingResponse.model_validate(
+        Hrflow(
+            api_secret=_var_from_env_get("HRFLOW_API_KEY"),
+            api_user=_var_from_env_get("HRFLOW_USER_EMAIL"),
+        ).text.imaging.post(text="plumber", width=width)
+    )
+    assert model.code == requests.codes.ok
+    response = requests.get(model.data.image_url)
+    assert response.status_code == requests.codes.ok
+    assert _content_is_png(response.content)
+    assert _image_sizes_get(response.content) == (width, width)
+
+
+@pytest.mark.text
+@pytest.mark.imaging
+def test_imaging_basic_256():
+    _imaging_test_valid_size(256)
+
+
+@pytest.mark.text
+@pytest.mark.imaging
+def test_imaging_basic_512():
+    _imaging_test_valid_size(512)
+
+
+@pytest.mark.text
+@pytest.mark.imaging
+def test_imaging_unsupported_size(hrflow_client):
+    model = TextImagingResponse.model_validate(
+        hrflow_client.text.imaging.post(text="mechanic", width=111)
+    )
+    assert model.code == requests.codes.bad_request
+    assert "111" in model.message
+
+
+@pytest.mark.text
+@pytest.mark.imaging
+def test_imaging_no_text(hrflow_client):
+    model = TextImagingResponse.model_validate(
+        hrflow_client.text.imaging.post(text=None, width=256)
+    )
+    assert model.code == requests.codes.bad_request
+    assert "null" in model.message.lower()
+
+
+@pytest.mark.text
+@pytest.mark.linking
+def test_linking_basic(hrflow_client):
+    top_n = 7
+    model = TextLinkingResponse.model_validate(
+        hrflow_client.text.linking.post(word="ai", top_n=top_n)
+    )
+    assert model.code == requests.codes.ok
+    assert len(model.data) == top_n
+
+
+@pytest.mark.text
+@pytest.mark.linking
+def test_linking_no_text(hrflow_client):
+    model = TextLinkingResponse.model_validate(
+        hrflow_client.text.linking.post(word=None, top_n=1)
+    )
+    assert model.code == requests.codes.bad_request
+    assert "null" in model.message.lower()
+
+
+@pytest.mark.text
+@pytest.mark.linking
+def test_linking_zero(hrflow_client):
+    model = TextLinkingResponse.model_validate(
+        hrflow_client.text.linking.post(word="ai", top_n=0)
+    )
+    assert model.code == requests.codes.ok
+    assert len(model.data) == 0
+
+
+@pytest.mark.text
+@pytest.mark.linking
+@pytest.mark.skip(reason="backend: negative top_n not correctly handled yet")
+def test_linking_negative_amount(hrflow_client):
+ model = TextLinkingResponse.model_validate( + hrflow_client.text.linking.post(word="ai", top_n=-42) + ) + assert model.code == requests.codes.bad_request + + +@pytest.mark.text +@pytest.mark.tagging +def test_tagger_rome_family_with_text_param(hrflow_client): + model = TextTaggingReponse.model_validate( + hrflow_client.text.tagging.post( + algorithm_key=TAGGING_ALGORITHM.TAGGER_ROME_FAMILY, + text=TAGGING_TEXTS[0], + top_n=2, + ) + ) + assert model.code == requests.codes.ok + assert isinstance(model.data, TextTaggingDataItem) + + +@pytest.mark.text +@pytest.mark.tagging +def test_tagger_rome_family_with_texts_param(hrflow_client): + model = TextTaggingReponse.model_validate( + hrflow_client.text.tagging.post( + algorithm_key=TAGGING_ALGORITHM.TAGGER_ROME_FAMILY, + texts=TAGGING_TEXTS, + top_n=2, + ) + ) + assert model.code == requests.codes.ok + assert isinstance(model.data, list) + assert len(model.data) == len(TAGGING_TEXTS) + + +@pytest.mark.text +@pytest.mark.tagging +def test_tagger_rome_family_with_text_and_texts_param(hrflow_client): + try: + TextTaggingReponse.model_validate( + hrflow_client.text.tagging.post( + algorithm_key=TAGGING_ALGORITHM.TAGGER_ROME_FAMILY, + text=TAGGING_TEXTS[0], + texts=TAGGING_TEXTS, + top_n=2, + ) + ) + pytest.fail("Should have raised a ValueError") + except ValueError: + pass + + +@pytest.mark.text +@pytest.mark.tagging +def test_tagger_rome_family_without_text_or_texts_param(hrflow_client): + try: + TextTaggingReponse.model_validate( + hrflow_client.text.tagging.post( + algorithm_key=TAGGING_ALGORITHM.TAGGER_ROME_FAMILY, + top_n=2, + ) + ) + pytest.fail("Should have raised a ValueError") + except ValueError: + pass + + +def _tagging_test( + hrflow_client: Hrflow, + algorithm_key: TAGGING_ALGORITHM, + texts: t.List[str], + context: t.Optional[str] = None, + labels: t.Optional[t.List[str]] = None, + top_n: t.Optional[int] = 1, +) -> TextTaggingReponse: + model = TextTaggingReponse.model_validate( + hrflow_client.text.tagging.post( + algorithm_key=algorithm_key, + texts=texts, + context=context, + labels=labels, + top_n=top_n, + ) + ) + assert model.code == requests.codes.ok + assert len(model.data) == len(texts) + if algorithm_key == TAGGING_ALGORITHM.TAGGER_HRFLOW_LABELS: + assert all( + all( + tag in labels or pytest.fail(f"{tag} not in {labels}") + for tag in item.tags + ) + and ( + all( + id.isnumeric() or pytest.fail(f"{id} is not numerical") + for id in item.ids + ) + ) + for item in model.data + ) + return model + + +@pytest.mark.text +@pytest.mark.tagging +def test_tagger_rome_family_basic(hrflow_client): + _tagging_test( + hrflow_client=hrflow_client, + algorithm_key=TAGGING_ALGORITHM.TAGGER_ROME_FAMILY, + texts=TAGGING_TEXTS, + top_n=2, + ) + + +@pytest.mark.text +@pytest.mark.tagging +def test_tagger_rome_subfamily_basic(hrflow_client): + _tagging_test( + hrflow_client=hrflow_client, + algorithm_key=TAGGING_ALGORITHM.TAGGER_ROME_SUBFAMILY, + texts=TAGGING_TEXTS, + top_n=3, + ) + + +@pytest.mark.text +@pytest.mark.tagging +def test_tagger_rome_category_basic(hrflow_client): + _tagging_test( + hrflow_client=hrflow_client, + algorithm_key=TAGGING_ALGORITHM.TAGGER_ROME_CATEGORY, + texts=TAGGING_TEXTS, + top_n=4, + ) + + +@pytest.mark.text +@pytest.mark.tagging +def test_tagger_rome_jobtitle_basic(hrflow_client): + _tagging_test( + hrflow_client=hrflow_client, + algorithm_key=TAGGING_ALGORITHM.TAGGER_ROME_JOBTITLE, + texts=TAGGING_TEXTS, + top_n=5, + ) + + +@pytest.mark.text +@pytest.mark.tagging +def 
test_tagger_hrflow_skills_basic(hrflow_client): + _tagging_test( + hrflow_client=hrflow_client, + algorithm_key=TAGGING_ALGORITHM.TAGGER_HRFLOW_SKILLS, + texts=TAGGING_TEXTS, + top_n=6, + ) + + +@pytest.mark.text +@pytest.mark.tagging +def test_tagger_hrflow_labels_basic(hrflow_client): + model = _tagging_test( + hrflow_client=hrflow_client, + algorithm_key=TAGGING_ALGORITHM.TAGGER_HRFLOW_LABELS, + texts=TAGGING_TEXTS, + context=( + "The CDI is a Contrat à Durée Indeterminée - essentially an open-ended or" + " permanent employment contract. The CDD is a Contrat à Durée Determinée -" + " a fixed-term or temporary employment contract. These are the two most" + " common types but by no means the only form of French employment contract." + " The contracts have to be drawn up by the employer, who must ensure that" + " it's legally the correct type for the circumstances." + ), + labels=["CDI", "CDD"], + ) + assert model.data[0].tags[0] == "CDI" + assert model.data[1].tags[0] == "CDD" + + +@pytest.mark.text +@pytest.mark.tagging +def test_tagger_hrflow_labels_no_context(hrflow_client): + model = _tagging_test( + hrflow_client=hrflow_client, + algorithm_key=TAGGING_ALGORITHM.TAGGER_HRFLOW_LABELS, + texts=[ + ( + "In the quantum gardens of knowledge, she cultivates algorithms," + " weaving threads of brilliance through the binary blooms, a sorceress" + " of AI enchantment." + ), + ( + "In the neural realms of innovation, he navigates the data currents," + " sculpting insights from the digital ether, a virtuoso of AI" + " exploration." + ), + ], + labels=["male", "female"], + ) + assert model.data[0].tags[0] == "female" + assert model.data[1].tags[0] == "male" + + +@pytest.mark.text +@pytest.mark.ocr +def test_ocr_basic(hrflow_client): + s3_url = """https://riminder-documents-eu-2019-12.s3-eu-west-1.amazonaws.com/\ +teams/fc9d40fd60e679119130ea74ae1d34a3e22174f2/sources/07065e555609a231752a586afd6\ +495c951bbae6b/profiles/52e3c23a5f21190c59f53c41b5630ecb5d414f94/parsing/resume.pdf""" + file = _file_get(s3_url, "ocr") + assert file is not None + model = TextOCRResponse.model_validate(hrflow_client.text.ocr.post(file=file)) + assert model.code == requests.codes.ok + assert "ocr" in model.message.lower() + + +@pytest.mark.text +@pytest.mark.parsing +def test_parsing_basic(hrflow_client): + texts = ["John Doe can be contacted on john.doe@hrflow.ai"] + model = TextParsingResponse.model_validate( + hrflow_client.text.parsing.post(texts=texts) + ) + assert model.code == requests.codes.ok + assert len(model.data) == len(texts) diff --git a/tests/utils/enums.py b/tests/utils/enums.py new file mode 100644 index 0000000..48a1bb4 --- /dev/null +++ b/tests/utils/enums.py @@ -0,0 +1,16 @@ +from enum import Enum + + +class TAGGING_ALGORITHM(str, Enum): + TAGGER_ROME_FAMILY = "tagger-rome-family" + TAGGER_ROME_SUBFAMILY = "tagger-rome-subfamily" + TAGGER_ROME_CATEGORY = "tagger-rome-category" + TAGGER_ROME_JOBTITLE = "tagger-rome-jobtitle" + TAGGER_HRFLOW_SKILLS = "tagger-hrflow-skills" + TAGGER_HRFLOW_LABELS = "tagger-hrflow-labels" + + +class PERMISSION(str, Enum): + ALL = "all" + WRITE = "write" + READ = "read" diff --git a/tests/utils/schemas.py b/tests/utils/schemas.py new file mode 100644 index 0000000..e63a76d --- /dev/null +++ b/tests/utils/schemas.py @@ -0,0 +1,641 @@ +import typing as t +from math import ceil + +from pydantic import ( + BaseModel, + confloat, + conint, + conlist, + constr, + field_validator, + model_validator, +) +from pytest import fail + +from hrflow.hrflow.utils import KEY_REGEX + 
+from .enums import PERMISSION + + +class HrFlowAPIResponse(BaseModel): + code: conint(ge=100, le=599) + message: str + model_config: t.Dict = dict(validate_assignment=True) + + +class Pagination(BaseModel): + page: conint(ge=0) + maxPage: conint(ge=0) + count: conint(ge=0) + total: conint(ge=0) + + @model_validator(mode="before") + @classmethod + def _check(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: + page = values.get("page") + max_page = values.get("maxPage") + count = values.get("count") + total = values.get("total") + per_page = total / max_page + + assert page <= max_page + + if page == max_page: + assert count <= per_page + else: + assert ceil(total / count) == max_page + assert count >= per_page + + return values + + +class HrFlowAPIResponseWithPagination(HrFlowAPIResponse): + meta: Pagination + + +# Text API + + +class TextImagingData(BaseModel): + image_url: str + + +class TextImagingResponse(HrFlowAPIResponse): + data: t.Optional[TextImagingData] = None + + +class TextEmbeddingDataItem(BaseModel): + embedding: conlist(float, min_length=2048, max_length=2048) + + +class TextEmbeddingResponse(HrFlowAPIResponse): + data: t.Optional[t.List[TextEmbeddingDataItem]] = None + + +_LINKING_DATA_ITEM_TYPE = conlist(t.Any, min_length=2, max_length=2) +_LINKING_DATA_TYPE = t.List[_LINKING_DATA_ITEM_TYPE] + + +class TextLinkingResponse(HrFlowAPIResponse): + data: t.Optional[_LINKING_DATA_TYPE] = None + + @field_validator("data") + @classmethod + def _check_data(cls, data: _LINKING_DATA_TYPE) -> _LINKING_DATA_TYPE: + assert all( + isinstance(item[0], str) + and isinstance(item[1], (int, float)) + and item[1] >= 0 + and item[1] <= 1 + for item in data + ) + return data + + +class TextTaggingDataItem(BaseModel): + ids: t.List[str] + predictions: t.List[float] + tags: t.List[str] + + @model_validator(mode="before") + @classmethod + def _check(cls, values: t.Dict[str, t.List[t.Any]]) -> t.Dict[str, t.List[t.Any]]: + if isinstance(values, list): + return [cls._check(item) for item in values] + li = len(values.get("ids")) + lp = len(values.get("predictions")) + lt = len(values.get("tags")) + assert li == lp == lt or fail( + f"len(ids)={li} is expected to be same as len(predictions)={lp} and same as" + f" len(tags)={lt}" + ) + return values + + +class TextTaggingReponse(HrFlowAPIResponse): + data: t.Optional[t.Union[t.List[TextTaggingDataItem], TextTaggingDataItem]] = None + + +class TextParsingDataItemEntity(BaseModel): + start: conint(ge=0) + end: conint(ge=0) + label: str + + @model_validator(mode="before") + @classmethod + def _check(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: + start = values.get("start") + end = values.get("end") + assert start <= end or fail( + f"start={start} is expected to be smaller than end={end}" + ) + return values + + +class TextParsingDataItemParsing(BaseModel): + certifications: t.List[str] + companies: t.List[str] + courses: t.List[str] + dates: t.List[str] + durations: t.List[str] + education_titles: t.List[str] + emails: t.List[str] + first_names: t.List[str] + interests: t.List[str] + job_titles: t.List[str] + languages: t.List[str] + last_names: t.List[str] + locations: t.List[str] + phones: t.List[str] + schools: t.List[str] + skills_hard: t.List[str] + skills_soft: t.List[str] + tasks: t.List[str] + + +class TextParsingDataItem(BaseModel): + entities: t.List[TextParsingDataItemEntity] + parsing: TextParsingDataItemParsing + text: str + + @model_validator(mode="before") + @classmethod + def _check(cls, values: t.Dict[str, t.Any]) -> 
t.Dict[str, t.Any]:
+        text = values.get("text")
+        entities = values.get("entities")
+        parsing = values.get("parsing")
+
+        for entity in entities:
+            parsing_key_name = entity["label"]
+
+            # entity label to parsing object key name
+            if parsing_key_name.startswith("skill"):
+                # e.g. "skill_hard" -> "skills_hard"
+                parsing_key_name = "skills" + parsing_key_name[5:]
+            elif parsing_key_name == "company":
+                parsing_key_name = "companies"
+            else:  # most of them
+                parsing_key_name += "s"
+
+            parsed = text[entity["start"] : entity["end"]]
+            holder = parsing[parsing_key_name]
+
+            assert parsed in holder or fail(
+                f"parsed='{parsed}' is expected to be in holder='{holder}'"
+            )
+
+        return values
+
+
+class TextParsingResponse(HrFlowAPIResponse):
+    data: t.Optional[t.List[TextParsingDataItem]] = None
+
+
+class TextOCRDataItemPage(BaseModel):
+    page_number: conint(ge=0)
+    sections: t.List[str]
+
+
+_BASE64_PDF_TYPE = constr(pattern=r"^[A-Za-z0-9+/]*={0,2}$", strict=True)
+
+
+class TextOCRDataItem(BaseModel):
+    text_language: str
+    text: str
+    pages: t.List[TextOCRDataItemPage]
+    base64_pdf: _BASE64_PDF_TYPE
+
+
+class TextOCRResponse(HrFlowAPIResponse):
+    data: t.Optional[TextOCRDataItem] = None
+
+
+# Auth API
+
+
+class AuthResponseData(BaseModel):
+    team_name: str
+    team_subdomain: str
+    request_origin: t.Optional[str] = None
+    api_key_permission: PERMISSION
+
+
+class AuthResponse(HrFlowAPIResponse):
+    data: t.Optional[AuthResponseData] = None
+
+
+# HrFlow.ai object definitions
+
+
+class Board(BaseModel):
+    key: constr(pattern=KEY_REGEX)
+    name: str
+    type: str
+    subtype: str
+    environment: str
+
+
+class Fields(BaseModel):
+    category: t.Optional[str] = None
+    city: t.Optional[str] = None
+    city_district: t.Optional[str] = None
+    country: t.Optional[str] = None
+    country_region: t.Optional[str] = None
+    entrance: t.Optional[str] = None
+    house: t.Optional[str] = None
+    house_number: t.Optional[str] = None
+    island: t.Optional[str] = None
+    level: t.Optional[str] = None
+    near: t.Optional[str] = None
+    po_box: t.Optional[str] = None
+    postcode: t.Optional[str] = None
+    road: t.Optional[str] = None
+    staircase: t.Optional[str] = None
+    state: t.Optional[str] = None
+    state_district: t.Optional[str] = None
+    suburb: t.Optional[str] = None
+    text: t.Optional[str] = None
+    unit: t.Optional[str] = None
+    world_region: t.Optional[str] = None
+
+
+class Location(BaseModel):
+    text: t.Optional[str] = None
+    lat: t.Optional[confloat(ge=-90, le=90)] = None
+    lng: t.Optional[confloat(ge=-180, le=180)] = None
+    gmaps: t.Optional[str] = None
+    fields: t.Optional[
+        t.Union[
+            Fields,
+            conlist(
+                t.Any,
+                min_length=0,
+                max_length=0,
+            ),
+        ]
+    ] = None
+
+
+class Section(BaseModel):
+    name: t.Optional[str] = None
+    title: t.Optional[str] = None
+    description: t.Optional[str] = None
+
+
+class GeneralEntity(BaseModel):
+    name: t.Optional[str] = None
+    value: t.Optional[str] = None
+
+
+class Skill(GeneralEntity):
+    type: t.Optional[str] = None
+
+
+class Language(GeneralEntity):
+    pass
+
+
+class Certification(GeneralEntity):
+    pass
+
+
+class Course(GeneralEntity):
+    pass
+
+
+class Task(GeneralEntity):
+    pass
+
+
+class Tag(GeneralEntity):
+    pass
+
+
+class Metadata(GeneralEntity):
+    pass
+
+
+class Interest(GeneralEntity):
+    pass
+
+
+class RangeFloat(BaseModel):
+    name: t.Optional[str] = None
+    value_min: t.Optional[float] = None
+    value_max: t.Optional[float] = None
+    unit: t.Optional[str] = None
+
+
+class RangeDate(BaseModel):
+    name: t.Optional[str] = None
+    value_min: t.Optional[str] = None
+    value_max: t.Optional[str] = None
+
+
+class 
Job(BaseModel): + id: conint(ge=0) + key: t.Optional[constr(pattern=KEY_REGEX)] = None + reference: t.Optional[str] = None + board_key: str + board: Board + name: t.Optional[str] = None + url: t.Optional[str] = None + summary: t.Optional[str] = None + location: t.Optional[Location] = None + updated_at: t.Optional[str] = None + created_at: t.Optional[str] = None + sections: t.Optional[t.List[Section]] = None + skills: t.Optional[t.List[Skill]] = None + languages: t.Optional[t.List[Language]] = None + certifications: t.Optional[t.List[Certification]] = None + courses: t.Optional[t.List[Course]] = None + tasks: t.Optional[t.List[Task]] = None + tags: t.Optional[t.List[Tag]] = None + metadatas: t.Optional[t.List[Metadata]] = None + ranges_float: t.Optional[t.List[RangeFloat]] = None + ranges_date: t.Optional[t.List[RangeDate]] = None + culture: t.Optional[str] = None + benefits: t.Optional[str] = None + responsibilities: t.Optional[str] = None + requirements: t.Optional[str] = None + interviews: t.Optional[str] = None + + @model_validator(mode="before") + @classmethod + def _check(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: + board_key = values.get("board_key") + board = values.get("board") + assert board_key == board["key"] or fail( + f"job.board_key='{board_key}' is expected to be the same as" + f" job.board.key='{board['key']}'" + ) + return values + + +class Source(Board): # same fields + pass + + +class Url(BaseModel): + type: t.Optional[str] = None + url: t.Optional[str] = None + + +class Info(BaseModel): + full_name: t.Optional[str] = None + first_name: t.Optional[str] = None + last_name: t.Optional[str] = None + email: t.Optional[str] = None + phone: t.Optional[str] = None + driving_license: t.Optional[str] = None + date_birth: t.Optional[str] = None + location: t.Optional[Location] = None + urls: t.Optional[t.List[Url]] = None + picture: t.Optional[str] = None + gender: t.Optional[str] = None + summary: t.Optional[str] = None + + +class E(BaseModel): + key: t.Optional[constr(pattern=KEY_REGEX)] = None + logo: t.Optional[str] = None + title: t.Optional[str] = None + description: t.Optional[str] = None + location: t.Optional[Location] = None + date_start: t.Optional[str] = None + date_end: t.Optional[str] = None + skills: t.Optional[t.List[Skill]] = None + certifications: t.Optional[t.List[Certification]] = None + courses: t.Optional[t.List[Course]] = None + tasks: t.Optional[t.List[Task]] = None + languages: t.Optional[t.List[Language]] = None + interests: t.Optional[t.List[Interest]] = None + + +class Experience(E): + company: t.Optional[str] = None + + +class Education(E): + school: t.Optional[str] = None + + +class Attachment(BaseModel): + type: t.Optional[str] = None + alt: t.Optional[constr(pattern=KEY_REGEX)] + file_size: t.Optional[conint(ge=0)] = None + file_name: t.Optional[str] = None + original_file_name: t.Optional[str] = None + extension: t.Optional[str] = None + public_url: t.Optional[str] = None + updated_at: t.Optional[str] = None + created_at: t.Optional[str] = None + + +class Profile(BaseModel): + id: t.Optional[conint(ge=0)] = None + key: t.Optional[constr(pattern=KEY_REGEX)] = None + reference: t.Optional[str] = None + source_key: str + source: Source + updated_at: t.Optional[str] = None + created_at: t.Optional[str] = None + info: t.Optional[Info] = None + text_language: t.Optional[str] = None + text: t.Optional[str] = None + experiences_duration: t.Optional[confloat(ge=0)] = None + educations_duration: t.Optional[confloat(ge=0)] = None + experiences: 
t.Optional[t.List[Experience]] = None + educations: t.Optional[t.List[Education]] = None + skills: t.Optional[t.List[Skill]] = None + languages: t.Optional[t.List[Language]] = None + certifications: t.Optional[t.List[Certification]] = None + courses: t.Optional[t.List[Course]] = None + tasks: t.Optional[t.List[Task]] = None + interests: t.Optional[t.List[Interest]] = None + tags: t.Optional[t.List[Tag]] = None + metadatas: t.Optional[t.List[Metadata]] = None + + @model_validator(mode="before") + @classmethod + def _check(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: + source_key = values["source_key"] + source = values["source"] + assert source_key == source["key"] or fail( + f"profile.source_key='{source_key}' is expected to be the same as" + f" profile.source.key='{source['key']}'" + ) + return values + + +# Utils to factorise searching (or scoring) jobs (or profiles) responses + + +def _validate_searching_result(values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: + """ + Verifies if the returned number of jobs (or profiles) matches the value declared + in the `meta.count` field. + + Args: + values (dict): The dumped jobs (or profiles) searching response Pydantic + object. + + Returns: + The `values` if the response passes the test; otherwise, the test will be + skipped. + """ + + expected = values["meta"]["count"] + objects_key = "jobs" if "jobs" in values["data"] else "profiles" + actual = len(values["data"][objects_key]) + + assert actual == expected or fail( + f"len(data.{objects_key})={actual} is expected to be same as" + f" model.count={expected}" + ) + + return values + + +def _validate_scoring_result(values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: + """ + Verifies if the returned number of jobs (or profiles) AND predictions matches the + value declared in the `meta.count` field. + + Args: + values (dict): The dumped jobs (or profiles) scoring response Pydantic + object. + + Returns: + The `values` if the response passes the test; otherwise, the test will be + skipped. 
+ """ + + expected = values["meta"]["count"] + predictions_amount = len(values["data"]["predictions"]) + objects_key = "jobs" if "jobs" in values["data"] else "profiles" + objects_amount = len(values["data"][objects_key]) + + assert objects_amount == predictions_amount == expected or fail( + f"len(data.predictions)={predictions_amount} and" + f" len(data.{objects_key})={objects_amount} are expected to be the same as" + f" meta.count={expected}" + ) + + return values + + +# Job API + + +class JobIndexingResponse(HrFlowAPIResponse): + data: t.Optional[Job] + + +class JobsSearchingData(BaseModel): + jobs: t.List[Job] + + +class JobsSearchingResponse(HrFlowAPIResponseWithPagination): + data: JobsSearchingData + + @model_validator(mode="before") + @classmethod + def _check(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: + return _validate_searching_result(values) + + +class JobsScoringData(BaseModel): + predictions: t.List[conlist(confloat(ge=0, le=1), min_length=2, max_length=2)] + jobs: t.List[Job] + + +class JobsScoringResponse(HrFlowAPIResponseWithPagination): + data: JobsScoringData + + @model_validator(mode="before") + @classmethod + def _check(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: + return _validate_scoring_result(values) + + +class JobAskingResponse(HrFlowAPIResponse): + data: t.Optional[t.List[str]] = None + + +class JobArchiveData(BaseModel): + key: constr(pattern=KEY_REGEX) + + +class JobArchiveResponse(HrFlowAPIResponse): + data: t.Optional[JobArchiveData] = None + + +# Profile API + + +class ProfileIndexingResponse(HrFlowAPIResponse): + data: t.Optional[Profile] = None + + +class ProfileParsingFileDataItem(BaseModel): + profile: t.Optional[Profile] = None + + +class ProfileParsingFileResponse(HrFlowAPIResponse): + data: t.Optional[ + t.Union[ + ProfileParsingFileDataItem, + conlist( # for async + t.Any, + min_length=0, + max_length=0, + ), + ] + ] = None + + +class ProfilesSearchingData(BaseModel): + profiles: t.List[Profile] + + +class ProfilesSearchingResponse(HrFlowAPIResponseWithPagination): + data: ProfilesSearchingData + + @model_validator(mode="before") + @classmethod + def _check(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: + return _validate_searching_result(values) + + +class ProfilesScoringData(BaseModel): + predictions: t.List[conlist(confloat(ge=0, le=1), min_length=2, max_length=2)] + profiles: t.List[Profile] + + +class ProfilesScoringResponse(HrFlowAPIResponseWithPagination): + data: ProfilesScoringData + + @model_validator(mode="before") + @classmethod + def _check(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: + return _validate_scoring_result(values) + + +class ProfileAskingResponse(HrFlowAPIResponse): + data: t.Optional[t.List[str]] = None + + +class ProfileUnfoldingData(BaseModel): + experiences: t.List[Experience] + + +class ProfileUnfoldingResponse(HrFlowAPIResponse): + data: t.Optional[ProfileUnfoldingData] = None + + +class ProfileArchieveData(BaseModel): + key: constr(pattern=KEY_REGEX) + + +class ProfileArchiveResponse(HrFlowAPIResponse): + data: t.Optional[ProfileArchieveData] = None diff --git a/tests/utils/tools.py b/tests/utils/tools.py new file mode 100644 index 0000000..cff0ce0 --- /dev/null +++ b/tests/utils/tools.py @@ -0,0 +1,200 @@ +import os +import typing as t +from datetime import datetime, timezone + +import requests +from dotenv import load_dotenv +from pydantic import BaseModel +from pytest import fail, skip + +from hrflow import Hrflow + +from .schemas import JobIndexingResponse, 
ProfileIndexingResponse + +_env_loaded = False + + +def _var_from_env_get(varname: str) -> str: + """ + Gets the value of the specified variable (`varname`) from the environment. + + Args: + varname (str): The name of the variable to retrieve. + + Returns: + The value corresponding to `varname` in the environment if found; otherwise, + the test calling this function will be skipped. + """ + + # this allows to load the environment once + global _env_loaded + if not _env_loaded: + load_dotenv() + _env_loaded = True + + value = os.environ.get(varname) + if value is None: + skip(f"{varname} was not found in the environment") + + return value + + +def _now_iso8601_get() -> str: + return datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S%z") + + +def _iso8601_to_datetime(datestr: str) -> t.Optional[datetime]: + try: + return datetime.fromisoformat(datestr) + except Exception: + pass + + +def _file_get(url: str, file_name: t.Optional[str] = None) -> t.Optional[bytes]: + """ + Gets the file corresponding to the specified `url`. If tests/assets/`file_name` + does not exist, it will be downloaded from `url` and stored for reuse, basically, + it will be locally cached. + + Args: + url (str): The download URL of the file. + file_name (optional[str]): The name to assign to the file. + + Returns: + The content of the file if it exists; otherwise, returns `None`. + """ + + if file_name is None: # deduce from the url + file_name = url[url.rfind("/") + 1 :] + + # look up for its cached version + dir_path = "tests/assets" + file_path = os.path.join(dir_path, file_name) + if os.path.isfile(file_path): + with open(file_path, "rb") as file: + return file.read() + + response = requests.get(url) + + if response.status_code != requests.codes.ok: + return + + file_data = response.content + + if not os.path.isdir(dir_path): + os.mkdir(dir_path) + + # cache the content + with open(file_path, "wb+") as file: + file.write(file_data) + + return file_data + + +def _check_same_keys_equality(source: t.Dict[str, t.Any], target: BaseModel): + """ + Performs a shallow equality check between the keys at the same levels in the + dictionaries `source` and `target`. + + Args: + source (dict): The dictionary from which `target` was derived, using indexing + or other methods. + target (BaseModel): The Pydantic response object corresponding to `source`. 
+
+    Returns:
+        None
+    """
+
+    dumped = target.model_dump()  # easier to compare dict vs dict
+
+    def _fail_message_get(key, source_value, target_value, is_complex=False):
+        return (
+            f"{'complex' if is_complex else 'primitive'} comparison failed: '{key}' is"
+            f" expected to be '{source_value}', got '{target_value}'"
+        )
+
+    def _compare(source: t.Dict[str, t.Any], target: t.Dict[str, t.Any]):
+        for key in source:
+
+            # compare only the keys that are present in both AND at the same level
+            if key not in target:
+                continue
+
+            source_value = source[key]
+            target_value = target[key]
+            source_value_t = type(source_value)
+            target_value_t = type(target_value)
+
+            # type comparison
+            if source_value_t != target_value_t:
+                fail(
+                    f"'{key}' expected to be of type '{source_value_t}', got"
+                    f" '{target_value_t}'"
+                )
+
+            # list vs list comparison
+            # lists are expected to be homogeneous
+            # if list type is dict, each item will be compared with a recursive call
+            # otherwise, perform basic python comparison ==
+            elif isinstance(source_value, list):
+
+                # both lists must be of same size
+                assert len(source_value) == len(target_value) or fail(
+                    f"'{key}' is expected to be of length {len(source_value)}, but it"
+                    f" is {len(target_value)}"
+                )
+
+                if len(source_value) == 0:
+                    continue
+
+                if isinstance(source_value[0], dict):
+                    for ii in range(len(source_value)):
+                        _compare(source_value[ii], target_value[ii])
+                else:  # basic python comparison should be enough
+                    assert source_value == target_value or fail(
+                        _fail_message_get(
+                            key, source_value, target_value, is_complex=True
+                        )
+                    )
+
+            # recursive call
+            elif isinstance(source_value, dict):
+                _compare(source_value, target_value)
+
+            # basic python comparison for primitive types
+            else:
+                assert source_value == target_value or fail(
+                    _fail_message_get(key, source_value, target_value, is_complex=False)
+                )
+
+    _compare(source, dumped)
+
+
+def _indexed_response_get(
+    hf: Hrflow, holder_key: str, json: t.Dict[str, t.Any]
+) -> t.Union[JobIndexingResponse, ProfileIndexingResponse]:
+    """
+    Indexes a throwaway job (or profile) and returns the validated response. This
+    helper avoids depending on pre-existing object keys when testing operations
+    such as archiving or editing.
+
+    Args:
+        hf (Hrflow): The Hrflow client.
+        holder_key (str): The key of the board (or source).
+        json (dict): The JSON object of the job (or profile).
+
+    Returns:
+        The validated response Pydantic model.
+    """
+
+    # profiles carry an "info" object, jobs do not
+    is_job = "info" not in json
+
+    model = (JobIndexingResponse if is_job else ProfileIndexingResponse).model_validate(
+        getattr(hf, "job" if is_job else "profile").storing.add_json(holder_key, json)
+    )
+
+    assert (
+        model.code == requests.codes.created
+    ), f"{model.code=} != {requests.codes.created=}, {model.message=}"
+
+    return model
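+
+
+def _index_and_check(hf: Hrflow, holder_key: str, json: t.Dict[str, t.Any]) -> str:
+    """
+    NOTE (editor's sketch): illustrative convenience wrapper, not used by the
+    suite above. It shows how `_indexed_response_get` and
+    `_check_same_keys_equality` are meant to combine: index a throwaway object,
+    verify the stored copy field by field, and hand back its generated key.
+    """
+
+    model = _indexed_response_get(hf, holder_key, json)
+    _check_same_keys_equality(json, model.data)
+    return model.data.key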