From 7caea073204dd5935ea226c8fd35a31cddf416c8 Mon Sep 17 00:00:00 2001
From: Tiko
Date: Mon, 12 Feb 2024 00:18:05 +0100
Subject: [PATCH 1/2] fix artifacts assignment lifecycle

---
 README.md                 |  4 ++--
 analyzer/analyzer.py      | 14 +++++++++-----
 analyzer/testItem.py      |  2 +-
 analyzer/testRunConfig.py |  2 +-
 4 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/README.md b/README.md
index 9224dd0..aea8838 100644
--- a/README.md
+++ b/README.md
@@ -110,7 +110,7 @@ def page(context, request):
         artifact_url = pytest.s3_connector.upload_file(screenshot_path, filename)
         # or
         # artifact_url = pytest.s3_connector.upload_file_object(file_bytes, key, bucket_name)
-        request.node.testomatio = {"artifacts": [artifact_url]}
+        request.node.stash["artifact_urls"] = [artifact_url]
     page.close()
 ```

@@ -121,7 +121,7 @@ If you prefer to use pytest hooks - add `pytest_runtest_makereport` hook in you
 ```python
 def pytest_runtest_makereport(item, call):
     artifact_url = pytest.s3_connector.upload_file(screenshot_path, filename)
-    item.testomatio = {"artifacts": [artifact_url]}
+    item.stash["artifact_urls"] = [artifact_url]
 ```

 Eny environments used in test run. Should be placed in comma separated list, NO SPACES ALLOWED.
diff --git a/analyzer/analyzer.py b/analyzer/analyzer.py
index 65d0a7b..0eda4fa 100644
--- a/analyzer/analyzer.py
+++ b/analyzer/analyzer.py
@@ -150,7 +150,7 @@ def pytest_runtest_makereport(item: Item, call: CallInfo):
         'message': None,
         'stack': None,
         'example': None,
-        'artifacts': None,
+        'artifacts': test_item.artifacts,
         'steps': None,
         'code': None,
     }
@@ -179,8 +179,12 @@ def pytest_runtest_makereport(item: Item, call: CallInfo):
     else:
         request['example'] = 'object'  # to avoid json serialization error

-    if request['status']:
-        pytest.analyzer_test_run_config.status_request.append(request)
+    if item.nodeid not in pytest.analyzer_test_run_config.status_request:
+        pytest.analyzer_test_run_config.status_request[item.nodeid] = request
+    else:
+        for key, value in request.items():
+            if value is not None:
+                pytest.analyzer_test_run_config.status_request[item.nodeid][key] = value


 def pytest_runtest_logfinish(nodeid, location):
@@ -189,11 +193,11 @@ def pytest_runtest_logfinish(nodeid, location):
     elif not pytest.analyzer_test_run_config.test_run_id:
         return

-    for request in pytest.analyzer_test_run_config.status_request:
+    for nodeid, request in pytest.analyzer_test_run_config.status_request.items():
         if request['status']:
             connector = pytest.connector
             connector.update_test_status(run_id=pytest.analyzer_test_run_config.test_run_id, **request)
-    pytest.analyzer_test_run_config.status_request = []
+    pytest.analyzer_test_run_config.status_request = {}


 def pytest_sessionfinish(session: Session, exitstatus: int):
diff --git a/analyzer/testItem.py b/analyzer/testItem.py
index f59c673..d3a7d15 100644
--- a/analyzer/testItem.py
+++ b/analyzer/testItem.py
@@ -20,7 +20,7 @@ def __init__(self, item: Item):
         # straitforward way, does not work with test packages
         # self.source_code = getsource(item.function)
         self.class_name = item.cls.__name__ if item.cls else None
-        self.artifacts = item.testomatio.get('artifacts', []) if hasattr(item, 'testomatio') else []
+        self.artifacts = item.stash.get("artifact_urls", [])

     def to_dict(self) -> dict:
         result = dict()
diff --git a/analyzer/testRunConfig.py b/analyzer/testRunConfig.py
index 09d8aac..0d37d5d 100644
--- a/analyzer/testRunConfig.py
+++ b/analyzer/testRunConfig.py
@@ -11,7 +11,7 @@ def __init__(self, title: str = None,
         self.environment = environment
         self.group_title = group_title
         self.parallel = parallel
-        self.status_request = []
+        self.status_request = {}

     def to_dict(self) -> dict:
         result = dict()

From 3c7bbde984a6d7874db16600cf37b40ca37b60f5 Mon Sep 17 00:00:00 2001
From: Tiko
Date: Mon, 12 Feb 2024 08:22:10 +0100
Subject: [PATCH 2/2] update readme

---
 README.md | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index aea8838..bcc521b 100644
--- a/README.md
+++ b/README.md
@@ -90,14 +90,29 @@ Artifacts.


 You would need to decide when and where you want to upload your test artifacts to cloud storage

-Using pytest fixtures might be a good choice, ex.:
+Upload a page screenshot when a test fails, using fixtures ([reference](https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures))

 ```python
+# content of conftest.py
+import pytest
+from typing import Dict
+from pytest import StashKey, CollectReport
+from playwright.sync_api import Page
+
+phase_report_key = StashKey[Dict[str, CollectReport]]()
+
+@pytest.hookimpl(wrapper=True, tryfirst=True)
+def pytest_runtest_makereport(item, call):
+    rep = yield
+    item.stash.setdefault(phase_report_key, {})[rep.when] = rep
+    return rep
+
+
 @pytest.fixture(scope="function")
-def page(context, request):
-    page = context.new_page()
+def handle_artifacts(page: Page, request):
     yield
-    if request.node.rep_call.failed:
+    report = request.node.stash[phase_report_key]
+    if ("call" not in report) or report["setup"].failed or report["call"].failed:
         random_string = ''.join(random.choices(string.ascii_letters + string.digits, k=8))
         filename = f"{random_string}.png"
@@ -168,6 +183,9 @@ def test_example():

 ## Change log

+### 1.5.0 - Fixes artifact lifecycle in fixtures
+- Earlier, artifacts added in pytest fixtures were skipped by the analyzer
+
 ### 1.4.0 - Fixes artifacts and test sync with Testomatio
 - Fixes artifacts uploads
 - Fixes test id resolution when syncing local test with Testomatio