From 52d4f4a762822031ddbecf82773283d4c112c98f Mon Sep 17 00:00:00 2001
From: Davide Canton
Date: Sat, 24 Aug 2024 15:59:31 +0200
Subject: [PATCH 1/2] Report correct outcome in the replay file (#64)

---
 src/pytest_replay/__init__.py | 26 ++++++++-------
 tests/test_replay.py          | 60 +++++++++++++++++++++++++++++++++++
 2 files changed, 75 insertions(+), 11 deletions(-)

diff --git a/src/pytest_replay/__init__.py b/src/pytest_replay/__init__.py
index 93e4638..742fcfd 100644
--- a/src/pytest_replay/__init__.py
+++ b/src/pytest_replay/__init__.py
@@ -53,7 +53,8 @@ def __init__(self, config):
         skip_cleanup = config.getoption("skip_cleanup", False)
         if not skip_cleanup:
             self.cleanup_scripts()
-        self.node_start_time = dict()
+        self.node_start_time = {}
+        self.node_outcome = {}
         self.session_start_time = config.replay_start_time
 
     def cleanup_scripts(self):
@@ -88,16 +89,19 @@ def pytest_runtest_logstart(self, nodeid):
     def pytest_runtest_makereport(self, item):
         report = yield
         result = report.get_result()
-        if self.dir and result.when == "teardown":
-            json_content = json.dumps(
-                {
-                    "nodeid": item.nodeid,
-                    "start": self.node_start_time[item.nodeid],
-                    "finish": time.perf_counter() - self.session_start_time,
-                    "outcome": result.outcome,
-                }
-            )
-            self.append_test_to_script(item.nodeid, json_content)
+        if self.dir:
+            if result.when == "call":
+                self.node_outcome[item.nodeid] = result.outcome
+            elif result.when == "teardown":
+                json_content = json.dumps(
+                    {
+                        "nodeid": item.nodeid,
+                        "start": self.node_start_time[item.nodeid],
+                        "finish": time.perf_counter() - self.session_start_time,
+                        "outcome": self.node_outcome.pop(item.nodeid),
+                    }
+                )
+                self.append_test_to_script(item.nodeid, json_content)
 
     def pytest_collection_modifyitems(self, items, config):
         replay_file = config.getoption("replay_file")
diff --git a/tests/test_replay.py b/tests/test_replay.py
index 6bb18b2..3ff3d39 100644
--- a/tests/test_replay.py
+++ b/tests/test_replay.py
@@ -1,5 +1,6 @@
 import itertools as it
 import json
+import re
 
 import pytest
 
@@ -270,3 +271,62 @@ def test_filter_out_tests_not_in_file(testdir):
         ],
         consecutive=True,
     )
+
+
+def test_replay_file_outcome_is_correct(testdir):
+    """Tests that the outcomes in the replay file are correct."""
+    testdir.makepyfile(
+        test_module="""
+        def test_success():
+            pass
+
+        def test_failure():
+            assert False
+        """
+    )
+    dir = testdir.tmpdir / "replay"
+    result = testdir.runpytest_subprocess(f"--replay-record-dir={dir}")
+    assert result.ret != 0
+
+    contents = [json.loads(s) for s in (dir / ".pytest-replay.txt").read().splitlines()]
+    assert len(contents) == 4
+
+    assert "test_success" in contents[1]["nodeid"]
+    assert contents[1]["outcome"] == "passed"
+
+    assert "test_failure" in contents[3]["nodeid"]
+    assert contents[3]["outcome"] == "failed"
+
+
+def test_replay_file_outcome_is_correct_xdist(testdir):
+    """Tests that the outcomes in the replay file are correct when running in parallel."""
+    testdir.makepyfile(
+        test_module="""
+        import pytest
+
+        @pytest.mark.parametrize('i', range(10))
+        def test_val(i):
+            assert i < 5
+        """
+    )
+    dir = testdir.tmpdir / "replay"
+    procs = 2
+    result = testdir.runpytest_subprocess(f"--replay-record-dir={dir}", f"-n {procs}")
+    assert result.ret != 0
+
+    contents = [
+        s
+        for n in range(procs)
+        for s in (dir / f".pytest-replay-gw{n}.txt").read().splitlines()
+    ]
+    pattern = re.compile(r"test_val\[(\d+)\]")
+    for content in contents:
+        parsed = json.loads(content)
+        if "outcome" not in parsed:
+            continue
+
+        i = int(pattern.search(parsed["nodeid"]).group(1))
+        if i < 5:
+            assert parsed["outcome"] == "passed", i
+        else:
+            assert parsed["outcome"] == "failed", i

From 9ed915ab5bc19adeb21e39aaef0acbd8e20947b6 Mon Sep 17 00:00:00 2001
From: Davide Canton
Date: Tue, 27 Aug 2024 09:58:58 +0200
Subject: [PATCH 2/2] Applied suggestions from review and added test for outcomes (#64)

---
 src/pytest_replay/__init__.py |  7 ++-
 tests/test_replay.py          | 87 ++++++++++++++++++++++++++++++++---
 2 files changed, 85 insertions(+), 9 deletions(-)

diff --git a/src/pytest_replay/__init__.py b/src/pytest_replay/__init__.py
index 742fcfd..4b350ba 100644
--- a/src/pytest_replay/__init__.py
+++ b/src/pytest_replay/__init__.py
@@ -90,9 +90,12 @@ def pytest_runtest_makereport(self, item):
         report = yield
         result = report.get_result()
         if self.dir:
-            if result.when == "call":
+            current = self.node_outcome.setdefault(item.nodeid, result.outcome)
+            if not result.passed and current != "failed":
+                # do not overwrite a failed outcome with a skipped one
                 self.node_outcome[item.nodeid] = result.outcome
-            elif result.when == "teardown":
+
+            if result.when == "teardown":
                 json_content = json.dumps(
                     {
                         "nodeid": item.nodeid,
diff --git a/tests/test_replay.py b/tests/test_replay.py
index 3ff3d39..63c17cc 100644
--- a/tests/test_replay.py
+++ b/tests/test_replay.py
@@ -277,11 +277,28 @@ def test_replay_file_outcome_is_correct(testdir):
     """Tests that the outcomes in the replay file are correct."""
     testdir.makepyfile(
         test_module="""
+        import pytest
+
        def test_success():
             pass
 
         def test_failure():
             assert False
+
+        @pytest.fixture
+        def failing_teardown_fixture():
+            yield
+            assert False
+
+        def test_failure_fixture_teardown(failing_teardown_fixture):
+            assert True
+
+        @pytest.fixture
+        def failing_setup_fixture():
+            assert False
+
+        def test_failure_fixture_setup(failing_setup_fixture):
+            assert True
         """
     )
     dir = testdir.tmpdir / "replay"
@@ -289,13 +306,13 @@ def test_failure():
     assert result.ret != 0
 
     contents = [json.loads(s) for s in (dir / ".pytest-replay.txt").read().splitlines()]
-    assert len(contents) == 4
-
-    assert "test_success" in contents[1]["nodeid"]
-    assert contents[1]["outcome"] == "passed"
-
-    assert "test_failure" in contents[3]["nodeid"]
-    assert contents[3]["outcome"] == "failed"
+    outcomes = {r["nodeid"]: r["outcome"] for r in contents if "outcome" in r}
+    assert outcomes == {
+        "test_module.py::test_success": "passed",
+        "test_module.py::test_failure": "failed",
+        "test_module.py::test_failure_fixture_teardown": "failed",
+        "test_module.py::test_failure_fixture_setup": "failed",
+    }
 
 
 def test_replay_file_outcome_is_correct_xdist(testdir):
@@ -330,3 +347,59 @@ def test_val(i):
             assert parsed["outcome"] == "passed", i
         else:
             assert parsed["outcome"] == "failed", i
+
+
+def test_outcomes_in_replay_file(testdir):
+    """Test that checks how the outcomes are handled in the report hook when
+    the various phases fail or are skipped."""
+    testdir.makepyfile(
+        test_module="""
+        import pytest
+
+        @pytest.fixture()
+        def skip_setup():
+            pytest.skip("skipping")
+            yield
+
+        @pytest.fixture()
+        def skip_teardown():
+            yield
+            pytest.skip("skipping")
+
+        @pytest.fixture()
+        def fail_setup():
+            assert False
+
+        @pytest.fixture()
+        def fail_teardown():
+            yield
+            assert False
+
+        def test_skip_fail(skip_setup, fail_teardown):
+            pass
+
+        def test_fail_skip(fail_setup, skip_teardown):
+            pass
+
+        def test_skip_setup(skip_setup):
+            pass
+
+        def test_skip_teardown(skip_teardown):
+            pass
+
+        def test_test_fail_skip_teardown(skip_teardown):
+            assert False
+        """
+    )
+    dir = testdir.tmpdir / "replay"
+    testdir.runpytest_subprocess(f"--replay-record-dir={dir}")
+
+    contents = [json.loads(s) for s in (dir / ".pytest-replay.txt").read().splitlines()]
+    outcomes = {r["nodeid"]: r["outcome"] for r in contents if "outcome" in r}
+    assert outcomes == {
+        "test_module.py::test_skip_fail": "skipped",
+        "test_module.py::test_fail_skip": "failed",
+        "test_module.py::test_skip_setup": "skipped",
+        "test_module.py::test_skip_teardown": "skipped",
+        "test_module.py::test_test_fail_skip_teardown": "failed",
+    }
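
Note (not part of the patches above): the sketch below is a minimal, standalone illustration of the outcome-merging rule that PATCH 2/2 introduces in pytest_runtest_makereport -- the first phase reported for a node seeds its outcome, a later non-passed phase may overwrite it, and a "failed" outcome is never downgraded to "skipped". The helper names (merge_outcome, final_outcome) and the sample phase sequences are hypothetical, for illustration only; they are not part of pytest-replay.

# Illustrative sketch only; mirrors the merge rule added in PATCH 2/2.
# merge_outcome/final_outcome are hypothetical helpers, not pytest-replay API.


def merge_outcome(stored, phase_outcome):
    """Return the outcome to keep after seeing one phase report."""
    if stored is None:
        # First phase seen for this node seeds the outcome (setdefault).
        return phase_outcome
    if phase_outcome != "passed" and stored != "failed":
        # A non-passed phase overwrites, but a failure is never downgraded.
        return phase_outcome
    return stored


def final_outcome(phase_outcomes):
    """Fold setup/call/teardown outcomes into the single recorded outcome."""
    stored = None
    for outcome in phase_outcomes:
        stored = merge_outcome(stored, outcome)
    return stored


if __name__ == "__main__":
    # setup failed, teardown passed -> stays "failed"
    assert final_outcome(["failed", "passed"]) == "failed"
    # setup skipped, teardown passed -> stays "skipped"
    assert final_outcome(["skipped", "passed"]) == "skipped"
    # call failed, teardown skipped -> the failure wins
    assert final_outcome(["passed", "failed", "skipped"]) == "failed"
    print("outcome-merge sketch: all assertions passed")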