diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..859d250 --- /dev/null +++ b/.env.example @@ -0,0 +1,2 @@ +TESTOMATIO_URL=https://beta.testomat.io +TESTOMATIO= \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index a564dbd..10497e9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,27 @@ +## 2.9.0 (2024-12-30) + +### Fix +- support test parameters that comes from the fixtures +- Fix shared runs +- pytestomatio plugin usage with xdist, add tests, sync tests +- Parallel run must be True all the time so that testomatio doesn't create new test runs when update test status +- enforce artifacts to be returning inline when requested +- add_artifacts depends on the pytest node +- Fix uploading artifacts to the bucket with user defined path +- read S3 creads from env acc to the testomatio docs + +### Feat +- upload artifacts in bulk +- resolve content type for uploaded artifacts +- support private and public artifact configuration +- Support --test-id parameters that accepts testomatio test id to filter tests +- send labels and tags on the test run update call +- support HTTP_PROXY, HTTPS_PROXY + +### Refactor +- Smoke tests +- Use system temp folder when resolving concurrent test run with xdist + ## 2.8.1 (2024-08-14) ## 2.8.1rc2 (2024-08-12) diff --git a/README.md b/README.md index 00bc20d..ad5b42d 100644 --- a/README.md +++ b/README.md @@ -200,7 +200,12 @@ def test_example(): - Fix test duration ## Contribution -1. `pip install -e .` -2. `cz commit` -3. `cz bump` -4. `git push remoteName branchName --tags` \ No newline at end of file +Use python 3.12 + +1. `pip install ".[dev]"` +1. `python ./smoke.py` +1. Test things manually +1. Verify no regression bugs +1. `cz commit` +1. `cz bump` +1. `git push remoteName branchName --tags` \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index d6ebce7..572a0ce 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,10 +10,10 @@ name = "cz_conventional_commits" tag_format = "$version" version_scheme = "pep440" version_provider = "pep621" -update_changelog_on_bump = true +update_changelog_on_bump = false [project] name = "pytestomatio" -version = "2.8.1" +version = "2.9.0" dependencies = [ "requests>=2.29.0", @@ -21,8 +21,7 @@ dependencies = [ "boto3>=1.28.28", "libcst==1.1.0", "commitizen>=3.18.1", - "autopep8>=2.1.0", - "pytest-xdist>=3.6.1" + "autopep8>=2.1.0" ] authors = [ @@ -45,4 +44,19 @@ classifiers = [ "Bug Tracker" = "https://github.com/testomatio/pytestomatio/issues" [project.entry-points.pytest11] -pytestomatio = "pytestomatio.main" \ No newline at end of file +pytestomatio = "pytestomatio.main" + +[project.optional-dependencies] +dev = [ + "pytest>=7.2.0", + "pytest-testdox>=2.0.0", + "pytest-xdist==3.6.1", + "python-dotenv==1.0.1", + "toml==0.10.2" +] + +[tool.pytest.ini_options] +testpaths = ["tests"] +markers = [ + "smoke: indicates smoke tests" +] diff --git a/pytestomatio/connect/connector.py b/pytestomatio/connect/connector.py index 781d156..608353d 100644 --- a/pytestomatio/connect/connector.py +++ b/pytestomatio/connect/connector.py @@ -10,7 +10,7 @@ class Connector: - def __init__(self, base_url: str = 'https://app.testomat.io', api_key: str = None): + def __init__(self, base_url: str = '', api_key: str = None): self.base_url = base_url self.session = requests.Session() self.session.verify = True diff --git a/pytestomatio/main.py b/pytestomatio/main.py index 4aaf169..e41c9e5 100644 --- a/pytestomatio/main.py +++ b/pytestomatio/main.py @@ -1,30 +1,42 @@ -import os, pytest, logging, json -import time -from pytest import Parser, Session, Config, Item, CallInfo, hookimpl +import os, pytest, logging, json, time + +from pytest import Parser, Session, Config, Item, CallInfo from pytestomatio.connect.connector import Connector -from pytestomatio.decor.decorator_updater import update_tests -from pytestomatio.testomatio.testRunConfig import TestRunConfig -from pytestomatio.testing.testItem import TestItem from pytestomatio.connect.s3_connector import S3Connector -from .testomatio.testomatio import Testomatio -from pytestomatio.utils.helper import add_and_enrich_tests, get_test_mapping, collect_tests +from pytestomatio.testing.testItem import TestItem +from pytestomatio.decor.decorator_updater import update_tests + +from pytestomatio.utils.helper import add_and_enrich_tests, get_test_mapping, collect_tests, read_env_s3_keys from pytestomatio.utils.parser_setup import parser_options -from pytestomatio.utils import helper from pytestomatio.utils import validations from xdist.plugin import is_xdist_controller, get_xdist_worker_id +from pytestomatio.testomatio.testRunConfig import TestRunConfig +from pytestomatio.testomatio.testomatio import Testomatio +from pytestomatio.testomatio.filter_plugin import TestomatioFilterPlugin + +import pdb + log = logging.getLogger(__name__) log.setLevel('INFO') metadata_file = 'metadata.json' decorator_name = 'testomatio' testomatio = 'testomatio' +TESTOMATIO_URL = 'https://app.testomat.io' def pytest_addoption(parser: Parser) -> None: parser_options(parser, testomatio) +def pytest_collection(session): + """Capture original collected items before any filters are applied.""" + # This hook is called after initial test collection, before other filters. + # We'll store the items in a session attribute for later use. + session._pytestomatio_original_collected_items = [] + + def pytest_configure(config: Config): config.addinivalue_line( "markers", "testomatio(arg): built in marker to connect test case with testomat.io by unique id" @@ -34,11 +46,9 @@ def pytest_configure(config: Config): if option == 'debug': return - is_parallel = config.getoption('numprocesses') is not None - - pytest.testomatio = Testomatio(TestRunConfig(is_parallel)) + pytest.testomatio = Testomatio(TestRunConfig()) - url = config.getini('testomatio_url') + url = os.environ.get('TESTOMATIO_URL') or config.getini('testomatio_url') or TESTOMATIO_URL project = os.environ.get('TESTOMATIO') pytest.testomatio.connector = Connector(url, project) @@ -54,40 +64,33 @@ def pytest_configure(config: Config): run_id = pytest.testomatio.test_run_config.test_run_id if not run_id: run_details = pytest.testomatio.connector.create_test_run(**run.to_dict()) - run_id = run_details.get('uid') - run.save_run_id(run_id) - else: - # for xdist - worker process - do nothing - pass - - + if run_details: + run_id = run_details.get('uid') + run.save_run_id(run_id) + else: + log.error("Failed to create testrun on Testomat.io") + # Mark our pytest_collection_modifyitems hook to run last, + # so that it sees the effect of all built-in and other filters first. + # This ensures we only apply our OR logic after other filters have done their job. + config.pluginmanager.register(TestomatioFilterPlugin(), "testomatio_filter_plugin") +@pytest.hookimpl(tryfirst=True) def pytest_collection_modifyitems(session: Session, config: Config, items: list[Item]) -> None: if config.getoption(testomatio) is None: return - - # Filter by --test-ids if provided - test_ids_option = config.getoption("test_id") - if test_ids_option: - test_ids = test_ids_option.split("|") - # Remove "@" from the start of test IDs if present - test_ids = [test_id.lstrip("@") for test_id in test_ids] - selected_items = [] - deselected_items = [] - - for item in items: - # Check if the test has the marker with the ID we are looking for - for marker in item.iter_markers(name="testomatio"): - marker_id = marker.args[0].strip("@") # Strip "@" from the marker argument - if marker_id in test_ids: - selected_items.append(item) - break - else: - deselected_items.append(item) - items[:] = selected_items - config.hook.pytest_deselected(items=deselected_items) + # Store a copy of all initially collected items (the first time this hook runs) + # The first call to this hook happens before built-in filters like -k, -m fully apply. + # By the time this runs, items might still be unfiltered or only partially filtered. + # To ensure we get the full original list, we use pytest_collection hook above. + if not session._pytestomatio_original_collected_items: + # The initial call here gives us the full collected list of tests + session._pytestomatio_original_collected_items = items[:] + + # At this point, if other plugins or internal filters like -m and -k run, + # they may modify `items` (removing some tests). We run after them by using a hook wrapper + # or a trylast marker to ensure our logic runs after most filters. meta, test_files, test_names = collect_tests(items) match config.getoption(testomatio): @@ -119,15 +122,12 @@ def pytest_collection_modifyitems(session: Session, config: Config, items: list[ if run_details is None: raise Exception('Test run failed to create. Reporting skipped') - artifact = run_details.get('artifacts') - if artifact: - s3_details = helper.read_env_s3_keys(artifact) + s3_details = read_env_s3_keys(run_details) - if all(s3_details): - pytest.testomatio.s3_connector = S3Connector(*s3_details) - pytest.testomatio.s3_connector.login() - else: - pytest.testomatio.s3_connector = S3Connector() + if all(s3_details): + pytest.testomatio.s3_connector = S3Connector(*s3_details) + pytest.testomatio.s3_connector.login() + case 'debug': with open(metadata_file, 'w') as file: data = json.dumps([i.to_dict() for i in meta], indent=4) @@ -136,7 +136,6 @@ def pytest_collection_modifyitems(session: Session, config: Config, items: list[ case _: raise Exception('Unknown pytestomatio parameter. Use one of: add, remove, sync, debug') - def pytest_runtest_makereport(item: Item, call: CallInfo): pytest.testomatio_config_option = item.config.getoption(testomatio) if pytest.testomatio_config_option is None or pytest.testomatio_config_option != 'report': @@ -165,6 +164,7 @@ def pytest_runtest_makereport(item: Item, call: CallInfo): 'code': None, } + # TODO: refactor it and use TestItem setter to upate those attributes if call.when in ['setup', 'call']: if call.excinfo is not None: if call.excinfo.typename == 'Skipped': @@ -178,7 +178,7 @@ def pytest_runtest_makereport(item: Item, call: CallInfo): request['status'] = 'passed' if call.when == 'call' else request['status'] if hasattr(item, 'callspec'): - request['example'] = item.callspec.params + request['example'] = test_item.safe_params(item.callspec.params) if item.nodeid not in pytest.testomatio.test_run_config.status_request: pytest.testomatio.test_run_config.status_request[item.nodeid] = request diff --git a/pytestomatio/testing/testItem.py b/pytestomatio/testing/testItem.py index b9655cc..15ce31e 100644 --- a/pytestomatio/testing/testItem.py +++ b/pytestomatio/testing/testItem.py @@ -95,15 +95,33 @@ def _get_resync_test_title(self, name: str) -> str: else: return name - def _get_test_parameter_key(self, item: Item) -> bool: - params = [] + def _get_test_parameter_key(self, item: Item): + """Return a list of parameter names for a given test item.""" + param_names = set() + + # 1) Look for @pytest.mark.parametrize for mark in item.iter_markers('parametrize'): - is_list = mark.args[0].find(',') > -1 - if is_list: - params.extend([p.strip() for p in mark.args[0].split(',')]) - else: - params.append(mark.args[0]) - return params + # mark.args[0] is often a string like "param1,param2" + # or just "param1" if there's only one. + if len(mark.args) > 0 and isinstance(mark.args[0], str): + arg_string = mark.args[0] + # If the string has commas, split it into multiple names + if ',' in arg_string: + param_names.update(name.strip() for name in arg_string.split(',')) + else: + param_names.add(arg_string.strip()) + + # 2) Look for fixture parameterization (including dynamically generated) + # via callspec, which holds *all* final parameters for an item. + callspec = getattr(item, 'callspec', None) + if callspec: + # callspec.params is a dict: fixture_name -> parameter_value + # We only want fixture names, not the values. + param_names.update(callspec.params.keys()) + + # Return them as a list, or keep it as a set—whatever you prefer. + return list(param_names) + def _resolve_parameter_key_in_test_name(self, item: Item, test_name: str) -> str: test_params = self._get_test_parameter_key(item) @@ -120,25 +138,36 @@ def _resolve_parameter_key_in_test_name(self, item: Item, test_name: str) -> str def _resolve_parameter_value_in_test_name(self, item: Item, test_name: str) -> str: param_keys = self._get_test_parameter_key(item) sync_title = self._get_sync_test_title(item) + if not param_keys: return test_name if not item.callspec: return test_name - + pattern = r'\$\{(.*?)\}' def repl(match): key = match.group(1) - value = item.callspec.params.get(key, '') - if type(value) is bytes: - string_value = value.decode('utf-8') - elif isinstance(value, (str, int, float, bool)): - string_value = str(value) - else: - string_value = 'Unsupported type' + + string_value = self._to_string_value(value) # TODO: handle "value with space" on testomatio BE https://github.com/testomatio/check-tests/issues/147 - return sub(r"[\.\s]", "_", string_value) # Temporary fix for spaces in parameter values + return sub(r"[\.\s]", "_", string_value) # Temporary fix for spaces in parameter values test_name = sub(pattern, repl, sync_title) - return test_name \ No newline at end of file + return test_name + + def _to_string_value(self, value): + if callable(value): + return value.__name__ if hasattr(value, "__name__") else "anonymous_function" + elif isinstance(value, bytes): + return value.decode('utf-8') + elif isinstance(value, (str, int, float, bool)) or value is None: + return str(value) + else: + return str(value) # Fallback to a string representation + + # TODO: leverage as an attribute setter + def safe_params(self, params): + return {key: self._to_string_value(value) for key, value in params.items()} + diff --git a/pytestomatio/testomatio/filter_plugin.py b/pytestomatio/testomatio/filter_plugin.py new file mode 100644 index 0000000..6797a3b --- /dev/null +++ b/pytestomatio/testomatio/filter_plugin.py @@ -0,0 +1,61 @@ +import pytest + +class TestomatioFilterPlugin: + @pytest.hookimpl(trylast=True) + def pytest_collection_modifyitems(self, session, config, items): + # By now all other filters (like -m, -k, name-based) have been applied + # and `items` is the filtered set after all their conditions. + test_ids_str = config.getoption("test_id") + if not test_ids_str: + # No custom IDs specified, nothing to do + return + + test_ids = test_ids_str.split("|") + # Remove "@" from the start of test IDs if present + test_ids = [test_id.lstrip("@T") for test_id in test_ids] + if not test_ids: + return + + # Now let's find all tests that match these test IDs from the original full list. + # We use the originally collected tests to avoid losing tests filtered out by others. + original_items = session._pytestomatio_original_collected_items + testomatio_matched = [] + + for item in original_items: + # Check for testomatio marker + for marker in item.iter_markers(name="testomatio"): + + marker_id = marker.args[0].lstrip("@T") # Strip "@" from the marker argument + if marker_id in test_ids: + testomatio_matched.append(item) + break + + # We'll check common filters: -k, -m and a few others. + # If they are empty or None, they are not active. + + other_filters_active = bool( + config.option.keyword or # -k + config.option.markexpr or # -m + getattr(config.option, 'last_failed', False) or + getattr(config.option, 'ff', False) or + getattr(config.option, 'lf', False) or + False + ) + + if other_filters_active and "not" in config.option.keyword: + # If a "not" keyword filter exist - it means we have exclusion filter applied. + # In such scenario we respect the exclusion filters in a way + # that we accept tests with requested test ids as long as such tests do not fall into exclusion filter + items[:] = [item for item in testomatio_matched if item in items] + return + + if other_filters_active: + # If other filters are applied, use OR logic: + # the final set is all items that passed previous filters plus those matched by test-ids + # preserving original order of test + items[:] = items + [item for item in testomatio_matched if item not in items] + return + + # If no other filters are applied, test-ids filter acts as an exclusive filter: + # only run tests that match the given test IDs + items[:] = testomatio_matched diff --git a/pytestomatio/testomatio/testomatio.py b/pytestomatio/testomatio/testomatio.py index 551cc84..c929e87 100644 --- a/pytestomatio/testomatio/testomatio.py +++ b/pytestomatio/testomatio/testomatio.py @@ -7,9 +7,9 @@ class Testomatio: def __init__(self, test_run_config: TestRunConfig = None, s3_connector: S3Connector = None) -> None: - self.s3_connector: S3Connector or None = s3_connector - self.test_run_config: TestRunConfig or None = test_run_config - self.connector: Connector or None = None + self.s3_connector: S3Connector = s3_connector + self.test_run_config: TestRunConfig = test_run_config + self.connector: Connector = None def upload_file(self, file_path: str, key: str = None, bucket_name: str = None) -> str: if self.test_run_config.test_run_id is None: diff --git a/pytestomatio/utils/parser_setup.py b/pytestomatio/utils/parser_setup.py index dc77d08..6a64821 100644 --- a/pytestomatio/utils/parser_setup.py +++ b/pytestomatio/utils/parser_setup.py @@ -74,4 +74,4 @@ def parser_options(parser: Parser, testomatio='testomatio') -> None: help="Filter tests by Test IDs (e.g., single test id 'T00C73028' or multiply 'T00C73028|T00C73029') """ ) - parser.addini('testomatio_url', 'testomat.io base url', default='https://app.testomat.io') + parser.addini('testomatio_url', 'testomat.io base url') diff --git a/pytestomatio/utils/validations.py b/pytestomatio/utils/validations.py index fe11618..32ce0c7 100644 --- a/pytestomatio/utils/validations.py +++ b/pytestomatio/utils/validations.py @@ -1,6 +1,8 @@ import os from typing import Literal from pytest import Config +from _pytest.config.exceptions import UsageError + def validate_option(config: Config) -> Literal['sync', 'report', 'remove', 'debug', None]: @@ -10,7 +12,12 @@ def validate_option(config: Config) -> Literal['sync', 'report', 'remove', 'debu if os.getenv('TESTOMATIO') is None: raise ValueError('TESTOMATIO env variable is not set') - if config.getoption('numprocesses') and option in ('sync', 'debug', 'remove'): - raise ValueError('Testomatio does not support parallel sync, remove or report. Remove --numprocesses option') + xdist_plugin = config.pluginmanager.getplugin('xdist') + if xdist_plugin and option in ('sync', 'debug', 'remove'): + if config.option.numprocesses == 0: + return + + raise UsageError("The 'sync' mode does not support parallel execution! " + "In order to synchronise test run command sync as '--testomatio sync -n 0'") return option diff --git a/smoke.py b/smoke.py new file mode 100644 index 0000000..8208a26 --- /dev/null +++ b/smoke.py @@ -0,0 +1,57 @@ +import subprocess +import os +import toml + +def get_version_from_pyproject(): + try: + # Load the pyproject.toml file + pyproject_data = toml.load("pyproject.toml") + # Extract the version from the project metadata + return pyproject_data.get("project", {}).get("version", "unknown") + except FileNotFoundError: + print("pyproject.toml not found. Using default version.") + return "unknown" + except Exception as e: + print(f"An error occurred while reading pyproject.toml: {e}") + return "unknown" + +def run_pytest(): + # Get version from pyproject.toml + version = get_version_from_pyproject() + + # Set environment variables + env = os.environ.copy() + env["TESTOMATIO_SHARED_RUN"] = "1" + env["TESTOMATIO_TITLE"] = f"smoke-{version}" + + # Pytest command + pytest_command = [ + "pytest", + "-p", "pytester", # Load the pytester plugin + "-m", "smoke", # Run only tests with the "smoke" marker + "-vv" # Verbose output + ] + + try: + # Run the pytest command, streaming output to the console + process = subprocess.Popen( + pytest_command, + env=env, + stdout=None, # Allow real-time streaming of stdout + stderr=None, # Allow real-time streaming of stderr + ) + + # Wait for the process to complete + process.wait() + + # Check the exit code + if process.returncode == 0: + print("All tests passed successfully!") + else: + print(f"Some tests failed with exit code: {process.returncode}") + + except Exception as e: + print(f"An error occurred while running pytest: {e}") + +if __name__ == "__main__": + run_pytest() diff --git a/tests/conftest.py b/tests/conftest.py index e946a02..29f5b36 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,15 +1,5 @@ from pytest import mark from pytest import fixture +from dotenv import load_dotenv - -@fixture -def dummy_fixture(): - print('before') - yield - print('after') - - -@mark.testomatio('@T6a2dfc31') -def test_one_pass_new(): - x = 'this' - assert 'h' in x +load_dotenv() diff --git a/tests/sub/sub_mob/sub_sub_class_test.py b/tests/sub/sub_mob/sub_sub_class_test.py index df6a9d0..98d68ae 100644 --- a/tests/sub/sub_mob/sub_sub_class_test.py +++ b/tests/sub/sub_mob/sub_sub_class_test.py @@ -1,16 +1,20 @@ from pytest import mark +import pytest class TestClassSubSub: + @pytest.mark.testomatio("@T7e1cf6d3") def test_one_pass_sub(self): x = 'this' assert 'h' in x + @pytest.mark.testomatio("@T64c0abec") def test_two_fail_sub(self): x = 'hello' assert hasattr(x, 'check') + @pytest.mark.testomatio("@Ta488bdcb") @mark.skip def test_three_skip_sub(self, dummy_fixture): x = 'hello' diff --git a/tests/sub/sub_mob/sub_sub_test.py b/tests/sub/sub_mob/sub_sub_test.py index 8b44b16..cdc6307 100644 --- a/tests/sub/sub_mob/sub_sub_test.py +++ b/tests/sub/sub_mob/sub_sub_test.py @@ -1,23 +1,28 @@ from pytest import mark +import pytest - +@pytest.mark.testomatio("@T761fa328") def test_pass_sub_sub(): assert 2 + 2 == 4 +@pytest.mark.testomatio("@T327cdc55") def test_pass_fix_sub_sub(dummy_fixture): assert 3 + 3 == 6 +@pytest.mark.testomatio("@T0c63a54a") def test_fail_sub_sub(): assert 2 + 2 == 11 +@pytest.mark.testomatio("@T3dd906d1") @mark.parametrize('data', [1, 2, 3, 4, 5, 'a']) def test_ddt_parametrized_sub_sub(data): assert str(data).isnumeric() +@pytest.mark.testomatio("@T1aec685a") @mark.skip def test_skip_sub_sub(): n = 3 diff --git a/tests/sub/test_class_sub.py b/tests/sub/test_class_sub.py index 706dbad..b1ee566 100644 --- a/tests/sub/test_class_sub.py +++ b/tests/sub/test_class_sub.py @@ -3,14 +3,17 @@ class TestClassSub: + @mark.testomatio("@T7e1cf6d3") def test_one_pass_sub(self): x = 'this' assert 'h' in x + @mark.testomatio("@T64c0abec") def test_two_fail_sub(self): x = 'hello' assert hasattr(x, 'check') + @mark.testomatio("@Ta488bdcb") @mark.skip def test_three_skip_sub(self): x = 'hello' diff --git a/tests/sub/test_sub.py b/tests/sub/test_sub.py index 1215e6d..51bd0d3 100644 --- a/tests/sub/test_sub.py +++ b/tests/sub/test_sub.py @@ -1,23 +1,28 @@ from pytest import mark +@mark.testomatio("@T9c322c95") def test_pass_sub(): assert 2 + 2 == 4 +@mark.testomatio("@T4e6f250b") def test_pass_fix_sub(dummy_fixture): assert 3 + 3 == 6 +@mark.testomatio("@T0bf7108d") def test_fail_sub(): assert 2 + 2 == 11 +@mark.testomatio("@T7e069711") @mark.parametrize('data', [1, 2, 3, 4, 5]) def test_ddt_parametrized_sub(data): assert str(data).isnumeric() +@mark.testomatio("@Tad0d98ed") @mark.skip def test_skip_sub(): n = 3 diff --git a/tests/test_cli_param_test_id.py b/tests/test_cli_param_test_id.py new file mode 100644 index 0000000..9eaec1b --- /dev/null +++ b/tests/test_cli_param_test_id.py @@ -0,0 +1,68 @@ +import pytest +pytestmark = pytest.mark.smoke + +test_file = """ + import pytest + + def test_smoke(): + pass + + @pytest.mark.testomatio("@T123") + def test_testomatio_only(): + pass + + @pytest.mark.testomatio("@T456") + def test_smoke_and_testomatio(): + pass + + def test_neither_marker(): + pass +""" + +@pytest.mark.testomatio("@T7b058966") +def test_cli_param_test_id_without_filters(pytester): + pytester.makepyfile(test_file) + + result = pytester.runpytest_subprocess("--testomatio", "report", "-vv") + result.assert_outcomes(passed=4, failed=0, skipped=0) + result.stdout.fnmatch_lines([ + "*::test_smoke*", + "*::test_testomatio_only*", + "*::test_smoke_and_testomatio*", + "*::test_neither_marker*", + ]) + +@pytest.mark.testomatio("@T3cf626ca") +def test_cli_param_test_id_with_k_filter(pytester): + pytester.makepyfile(test_file) + + result = pytester.runpytest_subprocess("--testomatio" ,"report", "-vv", "-k", "test_neither_marker") + result.assert_outcomes(passed=1, failed=0, skipped=0) + result.stdout.fnmatch_lines([ + "*::test_neither_marker*", + ]) + +@pytest.mark.testomatio("@T709adc8a") +def test_cli_param_test_id_without_k_filter_matching_2_tests(pytester): + pytester.makepyfile(test_file) + + result = pytester.runpytest_subprocess("--testomatio", "report", "-vv", "-k", "test_smoke") + result.assert_outcomes(passed=2, failed=0, skipped=0) + result.stdout.fnmatch_lines([ + "*::test_smoke*", + "*::test_smoke_and_testomatio*", + ]) + +# TODO: troubleshoot pytester env +# The testomatio and test-id parameters are lost in the pytester env. +# Please test it in a semiautomated way with "test_cli_params.py" test +@pytest.mark.testomatio("@T5a965adf") +def test_cli_param_test_id_with_test_id_filter(pytester): + pytest.skip() + pytester.makepyfile(test_file) + + result = pytester.runpytest_subprocess("--testomatio", "report", '--test-id="@T123"', "-vv") + result.assert_outcomes(passed=1, failed=0, skipped=0) + result.stdout.fnmatch_lines([ + "*::test_testomatio_only*", + ]) \ No newline at end of file diff --git a/tests/test_cli_params.py b/tests/test_cli_params.py new file mode 100644 index 0000000..e37fba7 --- /dev/null +++ b/tests/test_cli_params.py @@ -0,0 +1,25 @@ +# pytest --testomatio report tests --test-id="@T123" -k test_smoke +# verify 3 test passes +# tests/test_cli_params.py::test_smoke PASSED [ 33%] +# tests/test_cli_params.py::test_smoke_and_testomatio PASSED [ 66%] +# tests/test_cli_params.py::test_testomatio_only PASSED [100%] +# +# ======================================= 3 passed, 50 deselected in 0.89s ======================================= + +import pytest + +@pytest.mark.testomatio("@T55ecbca9") +def test_smoke(): + pass + +@pytest.mark.testomatio("@T123") +def test_testomatio_only(): + pass + +@pytest.mark.testomatio("@T456") +def test_smoke_and_testomatio(): + pass + +@pytest.mark.testomatio("@T06f3da52") +def test_neither_marker(): + pass \ No newline at end of file diff --git a/tests/test_decorators.py b/tests/test_decorators.py index 6b6494d..3ea17fb 100644 --- a/tests/test_decorators.py +++ b/tests/test_decorators.py @@ -7,10 +7,12 @@ def test_something(): assert 1 == 1 +@mark.testomatio("@T81850b4b") def test_no_decorator(): assert 1 == 1 +@mark.testomatio("@T9c91e8e7") def test_some_test(): x = os.getenv('TESTOMATIO_CODE_STYLE') assert x == 'pep8' diff --git a/tests/test_parameters.py b/tests/test_parameters.py new file mode 100644 index 0000000..847af42 --- /dev/null +++ b/tests/test_parameters.py @@ -0,0 +1,67 @@ +import pytest +pytestmark = pytest.mark.smoke + +test_file = """ + import pytest + + # Define some dummy callables + def add(a, b): + return a + b + + def multiply(a, b): + return a * b + + @pytest.mark.testomatio("@Tbca18714") + @pytest.mark.parametrize( + "operation, a, b, expected", + [ + (add, 2, 3, 5), # Test add function + (multiply, 2, 3, 6), # Test multiply function + ], + ) + def test_operations(operation, a, b, expected): + # Call the provided operation + result = operation(a, b) + assert result == expected, f"Expected {expected}, got {result}" +""" + +@pytest.mark.testomatio("@Tb8930394") +def test_callable_in_params(pytester): + pytester.makepyfile(test_file) + + pytester.runpytest("--testomatio", "sync", "-n", "0", "--no-detach") + result = pytester.runpytest("--testomatio", "report", "-vv") + result.assert_outcomes(passed=2, failed=0, skipped=0) + cleaned_lines = [line.strip() for line in result.stdout.lines if line.strip()] + + assert any("test_callable_in_params.py::test_operations[add-2-3-5]" in line for line in cleaned_lines) + assert any("test_callable_in_params.py::test_operations[multiply-2-3-6]" in line for line in cleaned_lines) + +session_fixture_file = """ + import pytest + + @pytest.fixture(scope="session", params=["db_connection_1", "db_connection_2"]) + def session_fixture(request): + # Simulate setting up a database connection + db_connection = request.param + yield db_connection + # Simulate tearing down the database connection + + def test_session_fixture_usage(session_fixture): + assert session_fixture in ["db_connection_1", "db_connection_2"], ( + f"Unexpected session fixture value: {session_fixture}" + ) +""" + +def test_session_fixture_with_param(pytester): + pytester.makepyfile(session_fixture_file) + + pytester.runpytest("--testomatio", "sync", "-n", "0", "--no-detach") + result = pytester.runpytest("--testomatio", "report", "-vv", "--full-trace") + result.assert_outcomes(passed=2, failed=0, skipped=0) + + cleaned_lines = [line.strip() for line in result.stdout.lines if line.strip()] + + assert any("test_session_fixture_usage[db_connection_1]" in line for line in cleaned_lines) + assert any("test_session_fixture_usage[db_connection_2]" in line for line in cleaned_lines) + diff --git a/tests/test_sync.py b/tests/test_sync.py new file mode 100644 index 0000000..8e5645a --- /dev/null +++ b/tests/test_sync.py @@ -0,0 +1,42 @@ +import pytest +#TODO verify requests to testomatio + +@pytest.mark.testomatio("@Tfaf4da53") +@pytest.mark.smoke +def test_sync_stop_when_xdist_in_use(pytester): + pytester.makepyfile(""" + def test_example(): + assert True + """) + + # Ensure that your plugin code raises UsageError for this scenario instead of a generic Exception. + # Something like: + # if option == 'sync' and parallel_set: + # raise UsageError("The 'sync' mode does not support parallel execution! In order to synchronise test run command sync as '--testomatio sync -n 0'") + + result = pytester.runpytest('-p', 'xdist', '--testomatio', 'sync', '-vv') + + # Match the entire error line as it appears in stderr + result.stderr.fnmatch_lines([ + "ERROR: The 'sync' mode does not support parallel execution! In order to synchronise test run command sync as '--testomatio sync -n 0'" + ]) + + # Now that it's a usage error, pytest should produce a summary line that we can assert on + assert result.ret != 0 + +@pytest.mark.smoke +def test_sync_works_with_xdist_set_to_0(pytester): + pytester.makepyfile(""" + def test_example(): + assert True + """) + + result = pytester.runpytest_subprocess('-p', 'xdist', '--testomatio', 'sync', '-n', '0', '-vv') + + # Assert that the special exit message is printed to stderr + result.stdout.fnmatch_lines([ + "*Sync completed without test execution*" + ]) + + # Optional: Verify the process exited successfully (0 means no error) + assert result.ret == 2 \ No newline at end of file