From 730c8a55114adc98fb1af0b88d1731188d6fcb17 Mon Sep 17 00:00:00 2001 From: Lucian Petrut Date: Mon, 15 Jul 2024 08:24:45 +0000 Subject: [PATCH] Update tests We're updating the tests, following the same pattern used by the other rocks. The integration tests ensure that every supported version can be deployed using helm charts, while the sanity tests verify the image filesystem and executables. --- .github/workflows/pull_request.yaml | 6 + .github/workflows/tests.yaml | 39 --- tests/integration/conftest.py | 136 ++++++++++ tests/integration/test_multus_v3_8.py | 55 ++++ tests/integration/test_multus_v4_0_2.py | 41 +++ tests/integration/test_util/config.py | 63 +++++ .../integration/test_util/harness/__init__.py | 18 ++ tests/integration/test_util/harness/base.py | 106 ++++++++ tests/integration/test_util/harness/juju.py | 203 ++++++++++++++ tests/integration/test_util/harness/local.py | 77 ++++++ tests/integration/test_util/harness/lxd.py | 179 ++++++++++++ .../test_util/harness/multipass.py | 134 +++++++++ tests/integration/test_util/util.py | 254 ++++++++++++++++++ tests/lxd-profile.yaml | 105 ++++++++ tests/requirements-dev.txt | 5 + tests/requirements-test.txt | 5 + tests/sanity/test_multus_v3_8.py | 48 ++++ .../sanity/test_multus_v4_0_2.py | 35 ++- tests/templates/bootstrap-session.yaml | 7 + tests/tox.ini | 69 +++++ v3.8/tests/test_rock.py | 39 --- v3.8/tox.ini | 46 ---- v4.0.2/tox.ini | 46 ---- 23 files changed, 1536 insertions(+), 180 deletions(-) delete mode 100644 .github/workflows/tests.yaml create mode 100644 tests/integration/conftest.py create mode 100644 tests/integration/test_multus_v3_8.py create mode 100644 tests/integration/test_multus_v4_0_2.py create mode 100644 tests/integration/test_util/config.py create mode 100644 tests/integration/test_util/harness/__init__.py create mode 100644 tests/integration/test_util/harness/base.py create mode 100644 tests/integration/test_util/harness/juju.py create mode 100644 
tests/integration/test_util/harness/local.py create mode 100644 tests/integration/test_util/harness/lxd.py create mode 100644 tests/integration/test_util/harness/multipass.py create mode 100644 tests/integration/test_util/util.py create mode 100644 tests/lxd-profile.yaml create mode 100644 tests/requirements-dev.txt create mode 100644 tests/requirements-test.txt create mode 100644 tests/sanity/test_multus_v3_8.py rename v4.0.2/tests/test_rock.py => tests/sanity/test_multus_v4_0_2.py (53%) create mode 100644 tests/templates/bootstrap-session.yaml create mode 100644 tests/tox.ini delete mode 100644 v3.8/tests/test_rock.py delete mode 100644 v3.8/tox.ini delete mode 100644 v4.0.2/tox.ini diff --git a/.github/workflows/pull_request.yaml b/.github/workflows/pull_request.yaml index 2f5e780..c068059 100644 --- a/.github/workflows/pull_request.yaml +++ b/.github/workflows/pull_request.yaml @@ -18,6 +18,12 @@ jobs: rockcraft-revisions: '{"amd64": "1783", "arm64": "1784"}' arch-skipping-maximize-build-space: '["arm64"]' platform-labels: '{"arm64": ["self-hosted", "Linux", "ARM64", "jammy", "large"]}' + run-tests: + uses: canonical/k8s-workflows/.github/workflows/run_tests.yaml@main + needs: [build-and-push-arch-specifics] + secrets: inherit + with: + rock-metas: ${{ needs.build-and-push-arch-specifics.outputs.rock-metas }} scan-images: uses: canonical/k8s-workflows/.github/workflows/scan_images.yaml@main needs: [build-and-push-arch-specifics] diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml deleted file mode 100644 index 1f5074f..0000000 --- a/.github/workflows/tests.yaml +++ /dev/null @@ -1,39 +0,0 @@ -name: Tests - -on: - pull_request: - -jobs: - tests: - runs-on: ubuntu-24.04 - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Update packages - run: sudo apt-get update - - - name: Install docker - run: sudo snap install docker - - - name: Initialize LXD - run: | - sudo lxd init --auto - - sudo iptables -P FORWARD ACCEPT - 
sudo sysctl net.ipv4.ip_forward=1 - - - name: Install rockcraft - run: sudo snap install rockcraft --classic - - - name: Install yq - run: sudo snap install yq - - - name: Install tox - run: sudo apt-get install -y tox - - - name: Run Tox (v3.8) - run: sudo tox -c v3.8/tox.ini - - - name: Run Tox (v4.0.2) - run: sudo tox -c v4.0.2/tox.ini diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 0000000..7b4ca75 --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,136 @@ +# +# Copyright 2024 Canonical, Ltd. +# +import logging +from pathlib import Path +from typing import Generator, List + +import pytest +from test_util import config, harness, util + +LOG = logging.getLogger(__name__) + + +def _harness_clean(h: harness.Harness): + "Clean up created instances within the test harness." + + if config.SKIP_CLEANUP: + LOG.warning( + "Skipping harness cleanup. " + "It is your job now to clean up cloud resources" + ) + else: + LOG.debug("Cleanup") + h.cleanup() + + +@pytest.fixture(scope="module") +def h() -> harness.Harness: + LOG.debug("Create harness for %s", config.SUBSTRATE) + if config.SUBSTRATE == "local": + h = harness.LocalHarness() + elif config.SUBSTRATE == "lxd": + h = harness.LXDHarness() + elif config.SUBSTRATE == "multipass": + h = harness.MultipassHarness() + elif config.SUBSTRATE == "juju": + h = harness.JujuHarness() + else: + raise harness.HarnessError( + "TEST_SUBSTRATE must be one of: local, lxd, multipass, juju" + ) + + yield h + + _harness_clean(h) + + +def pytest_configure(config): + config.addinivalue_line( + "markers", + "node_count: Mark a test to specify how many instance nodes need to be created\n" + "disable_k8s_bootstrapping: By default, the first k8s node is bootstrapped. 
This marker disables that.", + ) + + +@pytest.fixture(scope="function") +def node_count(request) -> int: + node_count_marker = request.node.get_closest_marker("node_count") + if not node_count_marker: + return 1 + node_count_arg, *_ = node_count_marker.args + return int(node_count_arg) + + +@pytest.fixture(scope="function") +def disable_k8s_bootstrapping(request) -> int: + return bool(request.node.get_closest_marker("disable_k8s_bootstrapping")) + + +@pytest.fixture(scope="function") +def instances( + h: harness.Harness, node_count: int, tmp_path: Path, disable_k8s_bootstrapping: bool +) -> Generator[List[harness.Instance], None, None]: + """Construct instances for a cluster. + + Bootstrap and setup networking on the first instance, if `disable_k8s_bootstrapping` marker is not set. + """ + if not config.SNAP_CHANNEL: + pytest.fail("Set TEST_SNAP_CHANNEL to the channel of the k8s snap to install.") + + if node_count <= 0: + pytest.xfail("Test requested 0 or fewer instances, skip this test.") + + LOG.info(f"Creating {node_count} instances") + instances: List[harness.Instance] = [] + + for _ in range(node_count): + # Create instances and setup the k8s snap in each. + instance = h.new_instance() + instances.append(instance) + util.setup_k8s_snap(instance) + + if not disable_k8s_bootstrapping: + first_node, *_ = instances + first_node.exec(["k8s", "bootstrap"]) + + yield instances + + if config.SKIP_CLEANUP: + LOG.warning("Skipping clean-up of instances, delete them on your own") + return + + # Cleanup after each test. + # We cannot execute _harness_clean() here as this would also + # remove the module_instance. + for instance in instances: + h.delete_instance(instance.id) + + +@pytest.fixture(scope="module") +def module_instance( + h: harness.Harness, tmp_path_factory: pytest.TempPathFactory, + request +) -> Generator[harness.Instance, None, None]: + """Constructs and bootstraps an instance that persists over a test session. 
+ + Bootstraps the instance with all k8sd features enabled to reduce testing time. + """ + LOG.info("Setup node and enable all features") + + instance = h.new_instance() + util.setup_k8s_snap(instance) + request.addfinalizer(lambda: util.purge_k8s_snap(instance)) + + bootstrap_config_path = "/home/ubuntu/bootstrap-session.yaml" + instance.send_file( + (config.MANIFESTS_DIR / "bootstrap-session.yaml").as_posix(), + bootstrap_config_path, + ) + + instance.exec(["k8s", "bootstrap", "--file", bootstrap_config_path]) + util.wait_until_k8s_ready(instance, [instance]) + util.wait_for_network(instance) + util.wait_for_dns(instance) + + yield instance diff --git a/tests/integration/test_multus_v3_8.py b/tests/integration/test_multus_v3_8.py new file mode 100644 index 0000000..b7bdb2f --- /dev/null +++ b/tests/integration/test_multus_v3_8.py @@ -0,0 +1,55 @@ +# +# Copyright 2024 Canonical, Ltd. +# +import os +import pathlib + +from test_util import harness, util + + +def test_multus_deployment( + tmp_path: pathlib.Path, + module_instance: harness.Instance): + clone_path = tmp_path / "multus" + clone_path.mkdir() + + clone_command = [ + "git", "clone", "https://github.com/k8snetworkplumbingwg/helm-charts", + "--depth", "1", + str(clone_path.absolute()) + ] + module_instance.exec(clone_command) + + chart_path = clone_path / 'multus' + + helm_command = [ + "sudo", "k8s", + "helm", "install", "multus-cni", + str(chart_path.absolute()), + "--namespace", "kube-system", + ] + + image_uri = os.getenv("ROCK_MULTUS_V3_8") + assert image_uri is not None, "ROCK_MULTUS_V3_8 is not set" + image_split = image_uri.split(":") + + helm_command += [ + "--set", + f"image.repository={image_split[0]}", + "--set", + f"image.tag={image_split[1]}", + "--set", + "securityContext.runAsUser=584792", + ] + + module_instance.exec(helm_command) + + util.stubbornly(retries=3, delay_s=1).on(module_instance).exec( + [ + "sudo", "k8s", + "kubectl", "rollout", "status", + "daemonset", "multus-cni-multus-ds", + 
"--namespace", "kube-system", + "--timeout", "60s", + ] + ) diff --git a/tests/integration/test_multus_v4_0_2.py b/tests/integration/test_multus_v4_0_2.py new file mode 100644 index 0000000..c8b9769 --- /dev/null +++ b/tests/integration/test_multus_v4_0_2.py @@ -0,0 +1,41 @@ +# +# Copyright 2024 Canonical, Ltd. +# +import os + +from test_util import harness, util + + +def test_multus_deployment(module_instance: harness.Instance): + helm_command = [ + "sudo", "k8s", + "helm", "install", "multus-cni", + "oci://registry-1.docker.io/bitnamicharts/multus-cni", + "--version", "2.1.7", + "--namespace", "kube-system", + ] + + image_uri = os.getenv("ROCK_MULTUS_V4_0_2") + assert image_uri is not None, "ROCK_MULTUS_V4_0_2 is not set" + image_split = image_uri.split(":") + + helm_command += [ + "--set", + f"image.repository={image_split[0]}", + "--set", + f"image.tag={image_split[1]}", + "--set", + "securityContext.runAsUser=584792", + ] + + module_instance.exec(helm_command) + + util.stubbornly(retries=3, delay_s=1).on(module_instance).exec( + [ + "sudo", "k8s", + "kubectl", "rollout", "status", + "daemonset", "multus-cni", + "--namespace", "kube-system", + "--timeout", "180s", + ] + ) diff --git a/tests/integration/test_util/config.py b/tests/integration/test_util/config.py new file mode 100644 index 0000000..2fcc7f6 --- /dev/null +++ b/tests/integration/test_util/config.py @@ -0,0 +1,63 @@ +# +# Copyright 2024 Canonical, Ltd. +# +import os +from pathlib import Path + +DIR = Path(__file__).absolute().parent + +MANIFESTS_DIR = DIR / ".." / ".." / "templates" + +# SNAP is the absolute path to the snap against which we run the integration tests. +SNAP_CHANNEL = os.getenv("TEST_SNAP_CHANNEL") + +# SUBSTRATE is the substrate to use for running the integration tests. +# One of 'local' (default), 'lxd', 'juju', or 'multipass'. 
+SUBSTRATE = os.getenv("TEST_SUBSTRATE") or "local" + +# SKIP_CLEANUP can be used to prevent machines to be automatically destroyed +# after the tests complete. +SKIP_CLEANUP = (os.getenv("TEST_SKIP_CLEANUP") or "") == "1" + +# LXD_PROFILE_NAME is the profile name to use for LXD containers. +LXD_PROFILE_NAME = os.getenv("TEST_LXD_PROFILE_NAME") or "k8s-integration" + +# LXD_PROFILE is the profile to use for LXD containers. +LXD_PROFILE = ( + os.getenv("TEST_LXD_PROFILE") + or (DIR / ".." / ".." / "lxd-profile.yaml").read_text() +) + +# LXD_IMAGE is the image to use for LXD containers. +LXD_IMAGE = os.getenv("TEST_LXD_IMAGE") or "ubuntu:22.04" + +# LXD_SIDELOAD_IMAGES_DIR is an optional directory with OCI images from the host +# that will be mounted at /var/snap/k8s/common/images on the LXD containers. +LXD_SIDELOAD_IMAGES_DIR = os.getenv("TEST_LXD_SIDELOAD_IMAGES_DIR") or "" + +# MULTIPASS_IMAGE is the image to use for Multipass VMs. +MULTIPASS_IMAGE = os.getenv("TEST_MULTIPASS_IMAGE") or "22.04" + +# MULTIPASS_CPUS is the number of cpus for Multipass VMs. +MULTIPASS_CPUS = os.getenv("TEST_MULTIPASS_CPUS") or "2" + +# MULTIPASS_MEMORY is the memory for Multipass VMs. +MULTIPASS_MEMORY = os.getenv("TEST_MULTIPASS_MEMORY") or "2G" + +# MULTIPASS_DISK is the disk size for Multipass VMs. +MULTIPASS_DISK = os.getenv("TEST_MULTIPASS_DISK") or "10G" + +# JUJU_MODEL is the Juju model to use. +JUJU_MODEL = os.getenv("TEST_JUJU_MODEL") + +# JUJU_CONTROLLER is the Juju controller to use. +JUJU_CONTROLLER = os.getenv("TEST_JUJU_CONTROLLER") + +# JUJU_CONSTRAINTS is the constraints to use when creating Juju machines. +JUJU_CONSTRAINTS = os.getenv("TEST_JUJU_CONSTRAINTS", "mem=4G cores=2 root-disk=20G") + +# JUJU_BASE is the base OS to use when creating Juju machines. +JUJU_BASE = os.getenv("TEST_JUJU_BASE") or "ubuntu@22.04" + +# JUJU_MACHINES is a list of existing Juju machines to use. 
+JUJU_MACHINES = os.getenv("TEST_JUJU_MACHINES") or "" diff --git a/tests/integration/test_util/harness/__init__.py b/tests/integration/test_util/harness/__init__.py new file mode 100644 index 0000000..1aa0c6f --- /dev/null +++ b/tests/integration/test_util/harness/__init__.py @@ -0,0 +1,18 @@ +# +# Copyright 2024 Canonical, Ltd. +# +from test_util.harness.base import Harness, HarnessError, Instance +from test_util.harness.juju import JujuHarness +from test_util.harness.local import LocalHarness +from test_util.harness.lxd import LXDHarness +from test_util.harness.multipass import MultipassHarness + +__all__ = [ + HarnessError, + Harness, + Instance, + JujuHarness, + LocalHarness, + LXDHarness, + MultipassHarness, +] diff --git a/tests/integration/test_util/harness/base.py b/tests/integration/test_util/harness/base.py new file mode 100644 index 0000000..81a969a --- /dev/null +++ b/tests/integration/test_util/harness/base.py @@ -0,0 +1,106 @@ +# +# Copyright 2024 Canonical, Ltd. +# +import subprocess +from functools import partial + + +class HarnessError(Exception): + """Base error for all our harness failures""" + + pass + + +class Instance: + """Reference to a harness and a given instance id. + + Provides convenience methods for an instance to call its harness' methods + """ + + def __init__(self, h: "Harness", id: str) -> None: + self._h = h + self._id = id + + self.send_file = partial(h.send_file, id) + self.pull_file = partial(h.pull_file, id) + self.exec = partial(h.exec, id) + self.delete_instance = partial(h.delete_instance, id) + + @property + def id(self) -> str: + return self._id + + def __str__(self) -> str: + return f"{self._h.name}:{self.id}" + + +class Harness: + """Abstract how integration tests can start and manage multiple machines. This allows + writing integration tests that can run on the local machine, LXD, or Multipass with minimum + effort. 
+ """ + + name: str + + def new_instance(self) -> Instance: + """Creates a new instance on the infrastructure and returns an object + which can be used to interact with it. + + If the operation fails, a HarnessError is raised. + """ + raise NotImplementedError + + def send_file(self, instance_id: str, source: str, destination: str): + """Send a local file to the instance. + + :param instance_id: The instance_id, as returned by new_instance() + :param source: Path to the file that will be copied to the instance + :param destination: Path in the instance where the file will be copied. + This must always be an absolute path. + + + If the operation fails, a HarnessError is raised. + """ + raise NotImplementedError + + def pull_file(self, instance_id: str, source: str, destination: str): + """Pull a file from the instance and save it on the local machine + + :param instance_id: The instance_id, as returned by new_instance() + :param source: Path to the file that will be copied from the instance. + This must always be an absolute path. + :param destination: Path on the local machine the file will be saved. + + If the operation fails, a HarnessError is raised. + """ + raise NotImplementedError + + def exec( + self, instance_id: str, command: list, **kwargs + ) -> subprocess.CompletedProcess: + """Run a command as root on the instance. + + :param instance_id: The instance_id, as returned by new_instance() + :param command: Command for subprocess.run() + :param kwargs: Keyword args compatible with subprocess.run() + + If the operation fails, a subprocesss.CalledProcessError is raised. + """ + raise NotImplementedError + + def delete_instance(self, instance_id: str): + """Delete a previously created instance. + + :param instance_id: The instance_id, as returned by new_instance() + + If the operation fails, a HarnessError is raised. + """ + raise NotImplementedError + + def cleanup(self): + """Delete any leftover resources after the tests are done, e.g. 
delete any + instances that might still be running. + + If the operation fails, a HarnessError is raised. + """ + raise NotImplementedError diff --git a/tests/integration/test_util/harness/juju.py b/tests/integration/test_util/harness/juju.py new file mode 100644 index 0000000..4d3a02b --- /dev/null +++ b/tests/integration/test_util/harness/juju.py @@ -0,0 +1,203 @@ +# +# Copyright 2024 Canonical, Ltd. +# +import json +import logging +import shlex +import subprocess +from pathlib import Path + +from test_util import config +from test_util.harness import Harness, HarnessError, Instance +from test_util.util import run + +LOG = logging.getLogger(__name__) + + +class JujuHarness(Harness): + """A Harness that creates an Juju machine for each instance.""" + + name = "juju" + + def __init__(self): + super(JujuHarness, self).__init__() + + self.model = config.JUJU_MODEL + if not self.model: + raise HarnessError("Set JUJU_MODEL to the Juju model to use") + + if config.JUJU_CONTROLLER: + self.model = f"{config.JUJU_CONTROLLER}:{self.model}" + + self.constraints = config.JUJU_CONSTRAINTS + self.base = config.JUJU_BASE + self.existing_machines = {} + self.instances = set() + + if config.JUJU_MACHINES: + self.existing_machines = { + instance_id.strip(): False + for instance_id in config.JUJU_MACHINES.split() + } + LOG.debug( + "Configured Juju substrate (model %s, machines %s)", + self.model, + config.JUJU_MACHINES, + ) + + else: + LOG.debug( + "Configured Juju substrate (model %s, base %s, constraints %s)", + self.model, + self.base, + self.constraints, + ) + + def new_instance(self) -> Instance: + for instance_id in self.existing_machines: + if not self.existing_machines[instance_id]: + LOG.debug("Reusing existing machine %s", instance_id) + self.existing_machines[instance_id] = True + self.instances.add(instance_id) + return Instance(self, instance_id) + + LOG.debug("Creating instance with constraints %s", self.constraints) + try: + p = run( + [ + "juju", + "add-machine", + 
"-m", + self.model, + "--constraints", + self.constraints, + "--base", + self.base, + ], + capture_output=True, + ) + + output = p.stderr.decode().strip() + if not output.startswith("created machine "): + raise HarnessError(f"failed to parse output from juju add-machine {p=}") + + instance_id = output.split(" ")[2] + except subprocess.CalledProcessError as e: + raise HarnessError("Failed to create Juju machine") from e + + self.instances.add(instance_id) + + self.exec(instance_id, ["snap", "wait", "system", "seed.loaded"]) + return Instance(self, instance_id) + + def send_file(self, instance_id: str, source: str, destination: str): + if instance_id not in self.instances: + raise HarnessError(f"unknown instance {instance_id}") + + if not Path(destination).is_absolute(): + raise HarnessError(f"path {destination} must be absolute") + + LOG.debug( + "Copying file %s to instance %s at %s", source, instance_id, destination + ) + try: + self.exec( + instance_id, + ["mkdir", "-m=0777", "-p", Path(destination).parent.as_posix()], + ) + run(["juju", "scp", source, f"{instance_id}:{destination}"]) + except subprocess.CalledProcessError as e: + raise HarnessError("juju scp command failed") from e + + def pull_file(self, instance_id: str, source: str, destination: str): + if instance_id not in self.instances: + raise HarnessError(f"unknown instance {instance_id}") + + if not Path(source).is_absolute(): + raise HarnessError(f"path {source} must be absolute") + + LOG.debug( + "Copying file %s from instance %s to %s", source, instance_id, destination + ) + try: + run(["juju", "scp", f"{instance_id}:{source}", destination]) + except subprocess.CalledProcessError as e: + raise HarnessError("juju scp command failed") from e + + def exec(self, instance_id: str, command: list, **kwargs): + if instance_id not in self.instances: + raise HarnessError(f"unknown instance {instance_id}") + + LOG.debug("Execute command %s in instance %s", command, instance_id) + capture_output = 
kwargs.pop("capture_output", False) + check = kwargs.pop("check", True) + stdout = kwargs.pop("stdout", None) + stderr = kwargs.pop("stderr", None) + input = f" < Instance: + if self.initialized: + raise HarnessError("local substrate only supports up to one instance") + + self.initialized = True + LOG.debug("Initializing instance") + try: + self.exec(self.hostname, ["snap", "wait", "system", "seed.loaded"]) + except subprocess.CalledProcessError as e: + raise HarnessError("failed to wait for snapd seed") from e + + return Instance(self, self.hostname) + + def send_file(self, _: str, source: str, destination: str): + if not self.initialized: + raise HarnessError("no instance initialized") + + if not Path(destination).is_absolute(): + raise HarnessError(f"path {destination} must be absolute") + + LOG.debug("Copying file %s to %s", source, destination) + try: + self.exec( + _, ["mkdir", "-m=0777", "-p", Path(destination).parent.as_posix()] + ) + shutil.copy(source, destination) + except subprocess.CalledProcessError as e: + raise HarnessError("failed to copy file") from e + except shutil.SameFileError: + pass + + def pull_file(self, _: str, source: str, destination: str): + return self.send_file(_, destination, source) + + def exec(self, _: str, command: list, **kwargs): + if not self.initialized: + raise HarnessError("no instance initialized") + + LOG.debug("Executing command %s on %s", command, self.hostname) + return run(["sudo", "-E", "bash", "-c", shlex.join(command)], **kwargs) + + def delete_instance(self, _: str): + LOG.debug("Stopping instance") + self.initialized = False + + def cleanup(self): + LOG.debug("Stopping instance") + self.initialized = False diff --git a/tests/integration/test_util/harness/lxd.py b/tests/integration/test_util/harness/lxd.py new file mode 100644 index 0000000..a5aaebd --- /dev/null +++ b/tests/integration/test_util/harness/lxd.py @@ -0,0 +1,179 @@ +# +# Copyright 2024 Canonical, Ltd. 
+# +import logging +import os +import shlex +import subprocess +from pathlib import Path + +from test_util import config +from test_util.harness import Harness, HarnessError, Instance +from test_util.util import run, stubbornly + +LOG = logging.getLogger(__name__) + + +class LXDHarness(Harness): + """A Harness that creates an LXD container for each instance.""" + + name = "lxd" + + def next_id(self) -> int: + self._next_id += 1 + return self._next_id + + def __init__(self): + super(LXDHarness, self).__init__() + + self._next_id = 0 + + self.profile = config.LXD_PROFILE_NAME + self.sideload_images_dir = config.LXD_SIDELOAD_IMAGES_DIR + self.image = config.LXD_IMAGE + self.instances = set() + + LOG.debug("Checking for LXD profile %s", self.profile) + try: + run(["lxc", "profile", "show", self.profile]) + except subprocess.CalledProcessError: + try: + LOG.debug("Creating LXD profile %s", self.profile) + run(["lxc", "profile", "create", self.profile]) + + except subprocess.CalledProcessError as e: + raise HarnessError( + f"Failed to create LXD profile {self.profile}" + ) from e + + try: + LOG.debug("Configuring LXD profile %s", self.profile) + run( + ["lxc", "profile", "edit", self.profile], + input=config.LXD_PROFILE.encode(), + ) + except subprocess.CalledProcessError as e: + raise HarnessError(f"Failed to configure LXD profile {self.profile}") from e + + LOG.debug( + "Configured LXD substrate (profile %s, image %s)", self.profile, self.image + ) + + def new_instance(self) -> Instance: + instance_id = f"k8s-integration-{os.urandom(3).hex()}-{self.next_id()}" + + LOG.debug("Creating instance %s with image %s", instance_id, self.image) + try: + stubbornly(retries=3, delay_s=1).exec( + [ + "lxc", + "launch", + self.image, + instance_id, + "-p", + "default", + "-p", + self.profile, + ] + ) + self.instances.add(instance_id) + + if self.sideload_images_dir: + stubbornly(retries=3, delay_s=1).exec( + [ + "lxc", + "config", + "device", + "add", + instance_id, + 
"k8s-e2e-images", + "disk", + f"source={self.sideload_images_dir}", + "path=/mnt/images", + "readonly=true", + ] + ) + + self.exec( + instance_id, + ["mkdir", "-p", "/var/snap/k8s/common"], + ) + self.exec( + instance_id, + ["cp", "-rv", "/mnt/images", "/var/snap/k8s/common/images"], + ) + except subprocess.CalledProcessError as e: + raise HarnessError(f"Failed to create LXD container {instance_id}") from e + + self.exec(instance_id, ["snap", "wait", "system", "seed.loaded"]) + return Instance(self, instance_id) + + def send_file(self, instance_id: str, source: str, destination: str): + if instance_id not in self.instances: + raise HarnessError(f"unknown instance {instance_id}") + + if not Path(destination).is_absolute(): + raise HarnessError(f"path {destination} must be absolute") + + LOG.debug( + "Copying file %s to instance %s at %s", source, instance_id, destination + ) + try: + self.exec( + instance_id, + ["mkdir", "-m=0777", "-p", Path(destination).parent.as_posix()], + capture_output=True, + ) + run( + ["lxc", "file", "push", source, f"{instance_id}{destination}"], + capture_output=True, + ) + except subprocess.CalledProcessError as e: + LOG.error("command {e.cmd} failed") + LOG.error(f" {e.returncode=}") + LOG.error(f" {e.stdout.decode()=}") + LOG.error(f" {e.stderr.decode()=}") + raise HarnessError("failed to push file") from e + + def pull_file(self, instance_id: str, source: str, destination: str): + if instance_id not in self.instances: + raise HarnessError(f"unknown instance {instance_id}") + + if not Path(source).is_absolute(): + raise HarnessError(f"path {source} must be absolute") + + LOG.debug( + "Copying file %s from instance %s to %s", source, instance_id, destination + ) + try: + run( + ["lxc", "file", "pull", f"{instance_id}{source}", destination], + stdout=subprocess.DEVNULL, + ) + except subprocess.CalledProcessError as e: + raise HarnessError("lxc file push command failed") from e + + def exec(self, instance_id: str, command: list, 
**kwargs): + if instance_id not in self.instances: + raise HarnessError(f"unknown instance {instance_id}") + + LOG.debug("Execute command %s in instance %s", command, instance_id) + return run( + ["lxc", "shell", instance_id, "--", "bash", "-c", shlex.join(command)], + **kwargs, + ) + + def delete_instance(self, instance_id: str): + if instance_id not in self.instances: + raise HarnessError(f"unknown instance {instance_id}") + + try: + run(["lxc", "rm", instance_id, "--force"]) + except subprocess.CalledProcessError as e: + raise HarnessError(f"failed to delete instance {instance_id}") from e + + self.instances.discard(instance_id) + + def cleanup(self): + for instance_id in self.instances.copy(): + self.delete_instance(instance_id) diff --git a/tests/integration/test_util/harness/multipass.py b/tests/integration/test_util/harness/multipass.py new file mode 100644 index 0000000..a98df7e --- /dev/null +++ b/tests/integration/test_util/harness/multipass.py @@ -0,0 +1,134 @@ +# +# Copyright 2024 Canonical, Ltd. 
+# +import logging +import os +import shlex +import subprocess +from pathlib import Path + +from test_util import config +from test_util.harness import Harness, HarnessError, Instance +from test_util.util import run + +LOG = logging.getLogger(__name__) + + +class MultipassHarness(Harness): + """A Harness that creates a Multipass VM for each instance.""" + + name = "multipass" + + def next_id(self) -> int: + self._next_id += 1 + return self._next_id + + def __init__(self): + super(MultipassHarness, self).__init__() + + self._next_id = 0 + + self.image = config.MULTIPASS_IMAGE + self.cpus = config.MULTIPASS_CPUS + self.memory = config.MULTIPASS_MEMORY + self.disk = config.MULTIPASS_DISK + self.instances = set() + + LOG.debug("Configured Multipass substrate (image %s)", self.image) + + def new_instance(self) -> Instance: + instance_id = f"k8s-integration-{os.urandom(3).hex()}-{self.next_id()}" + + LOG.debug("Creating instance %s with image %s", instance_id, self.image) + try: + run( + [ + "multipass", + "launch", + self.image, + "--name", + instance_id, + "--cpus", + self.cpus, + "--memory", + self.memory, + "--disk", + self.disk, + ] + ) + except subprocess.CalledProcessError as e: + raise HarnessError(f"Failed to create multipass VM {instance_id}") from e + + self.instances.add(instance_id) + + self.exec(instance_id, ["snap", "wait", "system", "seed.loaded"]) + return Instance(self, instance_id) + + def send_file(self, instance_id: str, source: str, destination: str): + if instance_id not in self.instances: + raise HarnessError(f"unknown instance {instance_id}") + + if not Path(destination).is_absolute(): + raise HarnessError(f"path {destination} must be absolute") + + LOG.debug( + "Copying file %s to instance %s at %s", source, instance_id, destination + ) + try: + self.exec( + instance_id, + ["mkdir", "-m=0777", "-p", Path(destination).parent.as_posix()], + ) + run(["multipass", "transfer", source, f"{instance_id}:{destination}"]) + except 
subprocess.CalledProcessError as e: + raise HarnessError("lxc file push command failed") from e + + def pull_file(self, instance_id: str, source: str, destination: str): + if instance_id not in self.instances: + raise HarnessError(f"unknown instance {instance_id}") + + if not Path(source).is_absolute(): + raise HarnessError(f"path {source} must be absolute") + + LOG.debug( + "Copying file %s from instance %s to %s", source, instance_id, destination + ) + try: + run(["multipass", "transfer", f"{instance_id}:{source}", destination]) + except subprocess.CalledProcessError as e: + raise HarnessError("lxc file push command failed") from e + + def exec(self, instance_id: str, command: list, **kwargs): + if instance_id not in self.instances: + raise HarnessError(f"unknown instance {instance_id}") + + LOG.debug("Execute command %s in instance %s", command, instance_id) + return run( + [ + "multipass", + "exec", + instance_id, + "--", + "sudo", + "bash", + "-c", + shlex.join(command), + ], + **kwargs, + ) + + def delete_instance(self, instance_id: str): + if instance_id not in self.instances: + raise HarnessError(f"unknown instance {instance_id}") + + try: + run(["multipass", "delete", instance_id]) + run(["multipass", "purge"]) + except subprocess.CalledProcessError as e: + raise HarnessError(f"failed to delete instance {instance_id}") from e + + self.instances.discard(instance_id) + + def cleanup(self): + for instance_id in self.instances.copy(): + self.delete_instance(instance_id) diff --git a/tests/integration/test_util/util.py b/tests/integration/test_util/util.py new file mode 100644 index 0000000..f7ad700 --- /dev/null +++ b/tests/integration/test_util/util.py @@ -0,0 +1,254 @@ +# +# Copyright 2024 Canonical, Ltd. 
+# +import json +import logging +import shlex +import subprocess +from functools import partial +from pathlib import Path +from typing import Any, Callable, List, Optional, Union + +from tenacity import ( + RetryCallState, + retry, + retry_if_exception_type, + stop_after_attempt, + stop_never, + wait_fixed, +) +from test_util import config, harness + +LOG = logging.getLogger(__name__) + + +def run(command: list, **kwargs) -> subprocess.CompletedProcess: + """Log and run command.""" + kwargs.setdefault("check", True) + + LOG.debug("Execute command %s (kwargs=%s)", shlex.join(command), kwargs) + return subprocess.run(command, **kwargs) + + +def stubbornly( + retries: Optional[int] = None, + delay_s: Optional[Union[float, int]] = None, + exceptions: Optional[tuple] = None, + **retry_kds, +): + """ + Retry a command for a while, using tenacity + + By default, retry immediately and forever until no exceptions occur. + + Some commands need to execute until they pass some condition + > stubbornly(*retry_args).until(*some_condition).exec(*some_command) + + Some commands need to execute until they complete + > stubbornly(*retry_args).exec(*some_command) + + : param retries int: convenience param to use stop=retry.stop_after_attempt() + : param delay_s float|int: convenience param to use wait=retry.wait_fixed(delay_s) + : param exceptions Tuple[Exception]: convenience param to use retry=retry.retry_if_exception_type(exceptions) + : param retry_kds Mapping: direct interface to all tenacity arguments for retrying + """ + + def _before_sleep(retry_state: RetryCallState): + attempt = retry_state.attempt_number + tries = f"/{retries}" if retries is not None else "" + LOG.info( + f"Attempt {attempt}{tries} failed. 
Error: {retry_state.outcome.exception()}" + ) + LOG.info(f"Retrying in {delay_s} seconds...") + + _waits = wait_fixed(delay_s) if delay_s is not None else wait_fixed(0) + _stops = stop_after_attempt(retries) if retries is not None else stop_never + _exceptions = exceptions or (Exception,) # default to retry on all exceptions + + _retry_args = dict( + wait=_waits, + stop=_stops, + retry=retry_if_exception_type(_exceptions), + before_sleep=_before_sleep, + ) + # Permit any tenacity retry overrides from these ^defaults + _retry_args.update(retry_kds) + + class Retriable: + def __init__(self) -> None: + self._condition = None + self._run = partial(run, capture_output=True) + + @retry(**_retry_args) + def exec( + self, + command_args: List[str], + **command_kwds, + ): + """ + Execute a command against a harness or locally with subprocess to be retried. + + :param List[str] command_args: The command to be executed, as a str or list of str + :param Mapping[str,str] command_kwds: Additional keyword arguments to be passed to exec + """ + + try: + resp = self._run(command_args, **command_kwds) + except subprocess.CalledProcessError as e: + LOG.warning(f" rc={e.returncode}") + LOG.warning(f" stdout={e.stdout.decode()}") + LOG.warning(f" stderr={e.stderr.decode()}") + raise + if self._condition: + assert self._condition(resp), "Failed to meet condition" + return resp + + def on(self, instance: harness.Instance) -> "Retriable": + """ + Target the command at some instance. + + :param instance Instance: Instance on a test harness. 
+ """ + self._run = partial(instance.exec, capture_output=True) + return self + + def until( + self, condition: Callable[[subprocess.CompletedProcess], bool] = None + ) -> "Retriable": + """ + Test the output of the executed command against an expected response + + :param Callable condition: a callable which returns a truth about the command output + """ + self._condition = condition + return self + + return Retriable() + + +# Installs and setups the k8s snap on the given instance and connects the interfaces. +def setup_k8s_snap(instance: harness.Instance): + LOG.info("Install k8s snap") + instance.exec( + ["snap", "install", "k8s", "--classic", "--channel", config.SNAP_CHANNEL] + ) + + +def purge_k8s_snap(instance: harness.Instance): + LOG.info("Purge k8s snap") + instance.exec( + ["sudo", "snap", "remove", "k8s", "--purge"] + ) + + +# Validates that the K8s node is in Ready state. +def wait_until_k8s_ready( + control_node: harness.Instance, instances: List[harness.Instance] +): + for instance in instances: + host = hostname(instance) + result = ( + stubbornly(retries=15, delay_s=5) + .on(control_node) + .until(lambda p: " Ready" in p.stdout.decode()) + .exec(["k8s", "kubectl", "get", "node", host, "--no-headers"]) + ) + LOG.info("Kubelet registered successfully!") + LOG.info("%s", result.stdout.decode()) + + +def wait_for_dns(instance: harness.Instance): + LOG.info("Waiting for DNS to be ready") + instance.exec(["k8s", "x-wait-for", "dns"]) + + +def wait_for_network(instance: harness.Instance): + LOG.info("Waiting for network to be ready") + instance.exec(["k8s", "x-wait-for", "network"]) + + +def hostname(instance: harness.Instance) -> str: + """Return the hostname for a given instance.""" + resp = instance.exec(["hostname"], capture_output=True) + return resp.stdout.decode().strip() + + +def get_local_node_status(instance: harness.Instance) -> str: + resp = instance.exec(["k8s", "local-node-status"], capture_output=True) + return resp.stdout.decode().strip() + 
+ +def get_nodes(control_node: harness.Instance) -> List[Any]: + """Get a list of existing nodes. + + Args: + control_node: instance on which to execute check + + Returns: + list of nodes + """ + result = control_node.exec( + ["k8s", "kubectl", "get", "nodes", "-o", "json"], capture_output=True + ) + assert result.returncode == 0, "Failed to get nodes with kubectl" + node_list = json.loads(result.stdout.decode()) + assert node_list["kind"] == "List", "Should have found a list of nodes" + return [node for node in node_list["items"]] + + +def ready_nodes(control_node: harness.Instance) -> List[Any]: + """Get a list of the ready nodes. + + Args: + control_node: instance on which to execute check + + Returns: + list of nodes + """ + return [ + node + for node in get_nodes(control_node) + if all( + condition["status"] == "False" + for condition in node["status"]["conditions"] + if condition["type"] != "Ready" + ) + ] + + +# Create a token to join a node to an existing cluster +def get_join_token( + initial_node: harness.Instance, joining_cplane_node: harness.Instance, *args: str +) -> str: + out = initial_node.exec( + ["k8s", "get-join-token", joining_cplane_node.id, *args], + capture_output=True, + ) + return out.stdout.decode().strip() + + +# Join an existing cluster. +def join_cluster(instance: harness.Instance, join_token: str): + instance.exec(["k8s", "join-cluster", join_token]) + + +def get_default_cidr(instance: harness.Instance, instance_default_ip: str): + # ---- + # 1: lo inet 127.0.0.1/8 scope host lo ..... + # 28: eth0 inet 10.42.254.197/24 metric 100 brd 10.42.254.255 scope global dynamic eth0 .... 
+ # ---- + # Fetching the cidr for the default interface by matching with instance ip from the output + p = instance.exec(["ip", "-o", "-f", "inet", "addr", "show"], capture_output=True) + out = p.stdout.decode().split(" ") + return [i for i in out if instance_default_ip in i][0] + + +def get_default_ip(instance: harness.Instance): + # --- + # default via 10.42.254.1 dev eth0 proto dhcp src 10.42.254.197 metric 100 + # --- + # Fetching the default IP address from the output, e.g. 10.42.254.197 + p = instance.exec( + ["ip", "-o", "-4", "route", "show", "to", "default"], capture_output=True + ) + return p.stdout.decode().split(" ")[8] diff --git a/tests/lxd-profile.yaml b/tests/lxd-profile.yaml new file mode 100644 index 0000000..c6a05f3 --- /dev/null +++ b/tests/lxd-profile.yaml @@ -0,0 +1,105 @@ +description: "LXD profile for Canonical Kubernetes" +config: + linux.kernel_modules: ip_vs,ip_vs_rr,ip_vs_wrr,ip_vs_sh,ip_tables,ip6_tables,iptable_raw,netlink_diag,nf_nat,overlay,br_netfilter,xt_socket + raw.lxc: | + lxc.apparmor.profile=unconfined + lxc.mount.auto=proc:rw sys:rw cgroup:rw + lxc.cgroup.devices.allow=a + lxc.cap.drop= + security.nesting: "true" + security.privileged: "true" +devices: + aadisable2: + path: /dev/kmsg + source: /dev/kmsg + type: unix-char + dev-loop-control: + major: "10" + minor: "237" + path: /dev/loop-control + type: unix-char + dev-loop0: + major: "7" + minor: "0" + path: /dev/loop0 + type: unix-block + dev-loop1: + major: "7" + minor: "1" + path: /dev/loop1 + type: unix-block + dev-loop2: + major: "7" + minor: "2" + path: /dev/loop2 + type: unix-block + dev-loop3: + major: "7" + minor: "3" + path: /dev/loop3 + type: unix-block + dev-loop4: + major: "7" + minor: "4" + path: /dev/loop4 + type: unix-block + dev-loop5: + major: "7" + minor: "5" + path: /dev/loop5 + type: unix-block + dev-loop6: + major: "7" + minor: "6" + path: /dev/loop6 + type: unix-block + dev-loop7: + major: "7" + minor: "7" + path: /dev/loop7 + type: unix-block + 
dev-loop8: + major: "7" + minor: "8" + path: /dev/loop8 + type: unix-block + dev-loop9: + major: "7" + minor: "9" + path: /dev/loop9 + type: unix-block + dev-loop10: + major: "7" + minor: "10" + path: /dev/loop10 + type: unix-block + dev-loop11: + major: "7" + minor: "11" + path: /dev/loop11 + type: unix-block + dev-loop12: + major: "7" + minor: "12" + path: /dev/loop12 + type: unix-block + dev-loop13: + major: "7" + minor: "13" + path: /dev/loop13 + type: unix-block + dev-loop14: + major: "7" + minor: "14" + path: /dev/loop14 + type: unix-block + dev-loop15: + major: "7" + minor: "15" + path: /dev/loop15 + type: unix-block + dev-loop16: + major: "7" + minor: "16" + path: /dev/loop16 + type: unix-block diff --git a/tests/requirements-dev.txt b/tests/requirements-dev.txt new file mode 100644 index 0000000..a66721a --- /dev/null +++ b/tests/requirements-dev.txt @@ -0,0 +1,5 @@ +black==24.3.0 +codespell==2.2.4 +flake8==6.0.0 +isort==5.12.0 +licenseheaders==0.8.8 diff --git a/tests/requirements-test.txt b/tests/requirements-test.txt new file mode 100644 index 0000000..d7b20cf --- /dev/null +++ b/tests/requirements-test.txt @@ -0,0 +1,5 @@ +coverage[toml]==7.2.5 +pytest==7.3.1 +PyYAML==6.0.1 +tenacity==8.2.3 +charmed-kubeflow-chisme>=0.4 diff --git a/tests/sanity/test_multus_v3_8.py b/tests/sanity/test_multus_v3_8.py new file mode 100644 index 0000000..6a41512 --- /dev/null +++ b/tests/sanity/test_multus_v3_8.py @@ -0,0 +1,48 @@ +import subprocess +import os + +from charmed_kubeflow_chisme.rock import CheckRock + + +def ensure_image_contains_paths(image, paths): + for path in paths: + subprocess.run( + [ + "docker", "run", "--rm", image, + "ls", "-l", path + ], + check=True, + ) + + +def test_entrypoint_helpstring(): + image = os.getenv("ROCK_MULTUS_V3_8") + assert image is not None, "ROCK_MULTUS_V3_8 is not set" + docker_run = subprocess.run( + ["docker", "run", "--rm", "--entrypoint", + "/entrypoint.sh", image, "--help"], + capture_output=True, + check=True, + 
text=True, + ) + assert ( + "This is an entrypoint script for Multus CNI" in docker_run.stdout + ) + + +def test_image_files(): + """Test rock.""" + check_rock = CheckRock( + os.path.dirname(__file__) + "/../../v3.8/rockcraft.yaml") + rock_image = check_rock.get_name() + rock_version = check_rock.get_version() + LOCAL_ROCK_IMAGE = f"{rock_image}:{rock_version}" + + # check rock filesystem + ensure_image_contains_paths( + LOCAL_ROCK_IMAGE, + [ + "/entrypoint.sh", + "/usr/src/multus-cni/bin/multus", + "/usr/src/multus-cni/LICENSE", + ]) diff --git a/v4.0.2/tests/test_rock.py b/tests/sanity/test_multus_v4_0_2.py similarity index 53% rename from v4.0.2/tests/test_rock.py rename to tests/sanity/test_multus_v4_0_2.py index f07f8b4..722231a 100644 --- a/v4.0.2/tests/test_rock.py +++ b/tests/sanity/test_multus_v4_0_2.py @@ -1,36 +1,50 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. -# -# -import random -import pytest -import string import subprocess +import os from charmed_kubeflow_chisme.rock import CheckRock -def check_image_paths(image, paths): +def ensure_image_contains_paths(image, paths): for path in paths: subprocess.run( [ - "docker", "run", image, + "docker", "run", "--rm", image, "ls", "-l", path ], check=True, ) -def test_rock(): +def test_entrypoint_helpstring(): + image = os.getenv("ROCK_MULTUS_V4_0_2") + assert image is not None, "ROCK_MULTUS_V4_0_2 is not set" + # "/thin_entrypoint --help" shows the help string but has a + # non-zero exit code (1). 
+ docker_run = subprocess.run( + ["docker", "run", "--rm", "--entrypoint", + "/thin_entrypoint", image, "--help"], + capture_output=True, + check=False, + text=True, + ) + assert ( + "--multus-conf-file string" in docker_run.stderr + ) + + +def test_image_files(): """Test rock.""" - check_rock = CheckRock("rockcraft.yaml") + check_rock = CheckRock( + os.path.dirname(__file__) + "/../../v4.0.2/rockcraft.yaml") rock_image = check_rock.get_name() rock_version = check_rock.get_version() LOCAL_ROCK_IMAGE = f"{rock_image}:{rock_version}" # check rock filesystem - check_image_paths( + ensure_image_contains_paths( LOCAL_ROCK_IMAGE, [ "/install_multus", @@ -42,3 +56,4 @@ def test_rock(): "/usr/src/multus-cni/bin/multus-daemon", "/usr/src/multus-cni/bin/multus-shim", ]) + diff --git a/tests/templates/bootstrap-session.yaml b/tests/templates/bootstrap-session.yaml new file mode 100644 index 0000000..6066e63 --- /dev/null +++ b/tests/templates/bootstrap-session.yaml @@ -0,0 +1,7 @@ +# Contains the bootstrap configuration for the session instance of the integration tests. +# The session instance persists over test runs and is used to speed-up the integration tests. 
+cluster-config: + network: + enabled: true + dns: + enabled: true diff --git a/tests/tox.ini b/tests/tox.ini new file mode 100644 index 0000000..9b03827 --- /dev/null +++ b/tests/tox.ini @@ -0,0 +1,69 @@ +[tox] +no_package = True +skip_missing_interpreters = True +env_list = format, lint, integration +min_version = 4.0.0 + +[testenv] +set_env = + PYTHONBREAKPOINT=pdb.set_trace + PY_COLORS=1 +pass_env = + PYTHONPATH + +[testenv:format] +description = Apply coding style standards to code +deps = -r {tox_root}/requirements-dev.txt +commands = + licenseheaders -t {tox_root}/.copyright.tmpl -cy -o 'Canonical, Ltd' -d {tox_root}/tests + isort {tox_root}/tests --profile=black + black {tox_root}/tests + +[testenv:lint] +description = Check code against coding style standards +deps = -r {tox_root}/requirements-dev.txt +commands = + codespell {tox_root}/tests + flake8 {tox_root}/tests + licenseheaders -t {tox_root}/.copyright.tmpl -cy -o 'Canonical, Ltd' -d {tox_root}/tests --dry + isort {tox_root}/tests --profile=black --check + black {tox_root}/tests --check --diff + +[testenv:sanity] +description = Run sanity tests +deps = + -r {tox_root}/requirements-test.txt +commands = + pytest -v \ + --maxfail 1 \ + --tb native \ + --log-cli-level DEBUG \ + --disable-warnings \ + {posargs} \ + {tox_root}/sanity +pass_env = + TEST_* + ROCK_* + +[testenv:integration] +description = Run integration tests +deps = + -r {tox_root}/requirements-test.txt +commands = + pytest -v \ + --maxfail 1 \ + --tb native \ + --log-cli-level DEBUG \ + --disable-warnings \ + {posargs} \ + {tox_root}/integration +pass_env = + TEST_* + ROCK_* + +[flake8] +max-line-length = 120 +select = E,W,F,C,N +ignore = W503 +exclude = venv,.git,.tox,.tox_env,.venv,build,dist,*.egg_info +show-source = true diff --git a/v3.8/tests/test_rock.py b/v3.8/tests/test_rock.py deleted file mode 100644 index ebf976a..0000000 --- a/v3.8/tests/test_rock.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2024 Canonical Ltd. 
-# See LICENSE file for licensing details. -# -# - -import random -import pytest -import string -import subprocess - -from charmed_kubeflow_chisme.rock import CheckRock - - -def check_image_paths(image, paths): - for path in paths: - subprocess.run( - [ - "docker", "run", image, - "ls", "-l", path - ], - check=True, - ) - - -def test_rock(): - """Test rock.""" - check_rock = CheckRock("rockcraft.yaml") - rock_image = check_rock.get_name() - rock_version = check_rock.get_version() - LOCAL_ROCK_IMAGE = f"{rock_image}:{rock_version}" - - # check rock filesystem - check_image_paths( - LOCAL_ROCK_IMAGE, - [ - "/entrypoint.sh", - "/usr/src/multus-cni/bin/multus", - "/usr/src/multus-cni/LICENSE", - ]) diff --git a/v3.8/tox.ini b/v3.8/tox.ini deleted file mode 100644 index 22e7a79..0000000 --- a/v3.8/tox.ini +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. -[tox] -skipsdist = True -skip_missing_interpreters = True -envlist = pack, export-to-docker, sanity - -[testenv] -setenv = - PYTHONPATH={toxinidir} - PYTHONBREAKPOINT=ipdb.set_trace - -[testenv:pack] -passenv = * -allowlist_externals = - rockcraft -commands = - rockcraft pack -v - -[testenv:export-to-docker] -passenv = * -allowlist_externals = - bash - skopeo - yq -commands = - # export already packed rock to docker - bash -c 'NAME="$(yq -r .name rockcraft.yaml)" && \ - VERSION="$(yq -r .version rockcraft.yaml)" && \ - ARCH="$(yq -r ".platforms | keys | .[0]" rockcraft.yaml)" && \ - ROCK="$\{NAME\}_$\{VERSION\}_$\{ARCH\}.rock" && \ - DOCKER_IMAGE=$NAME:$VERSION && \\ - echo "Exporting $ROCK to docker as $DOCKER_IMAGE" && \ - rockcraft.skopeo --insecure-policy copy \ - oci-archive:$ROCK docker-daemon:$DOCKER_IMAGE' - -[testenv:sanity] -passenv = * -deps = - pytest - charmed-kubeflow-chisme -allowlist_externals = - echo -commands = - # run rock tests - pytest -v --tb native --show-capture=all --log-cli-level=INFO {posargs} {toxinidir}/tests diff --git 
a/v4.0.2/tox.ini b/v4.0.2/tox.ini deleted file mode 100644 index 22e7a79..0000000 --- a/v4.0.2/tox.ini +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. -[tox] -skipsdist = True -skip_missing_interpreters = True -envlist = pack, export-to-docker, sanity - -[testenv] -setenv = - PYTHONPATH={toxinidir} - PYTHONBREAKPOINT=ipdb.set_trace - -[testenv:pack] -passenv = * -allowlist_externals = - rockcraft -commands = - rockcraft pack -v - -[testenv:export-to-docker] -passenv = * -allowlist_externals = - bash - skopeo - yq -commands = - # export already packed rock to docker - bash -c 'NAME="$(yq -r .name rockcraft.yaml)" && \ - VERSION="$(yq -r .version rockcraft.yaml)" && \ - ARCH="$(yq -r ".platforms | keys | .[0]" rockcraft.yaml)" && \ - ROCK="$\{NAME\}_$\{VERSION\}_$\{ARCH\}.rock" && \ - DOCKER_IMAGE=$NAME:$VERSION && \\ - echo "Exporting $ROCK to docker as $DOCKER_IMAGE" && \ - rockcraft.skopeo --insecure-policy copy \ - oci-archive:$ROCK docker-daemon:$DOCKER_IMAGE' - -[testenv:sanity] -passenv = * -deps = - pytest - charmed-kubeflow-chisme -allowlist_externals = - echo -commands = - # run rock tests - pytest -v --tb native --show-capture=all --log-cli-level=INFO {posargs} {toxinidir}/tests