diff --git a/.github/workflows/update-libs.yaml b/.github/workflows/update-libs.yaml
new file mode 100644
index 00000000..8828a8b1
--- /dev/null
+++ b/.github/workflows/update-libs.yaml
@@ -0,0 +1,14 @@
+name: Auto-update Charm Libraries
+on:
+  # Manual trigger
+  workflow_dispatch:
+  # Check the upstream regularly, every four hours
+  schedule:
+    - cron: "0 0,4,8,12,16,20 * * *"
+
+jobs:
+  update-lib:
+    name: Check libraries
+    uses: canonical/observability/.github/workflows/update-libs.yaml@main
+    secrets: inherit
+
diff --git a/lib/charms/grafana_k8s/v0/grafana_dashboard.py b/lib/charms/grafana_k8s/v0/grafana_dashboard.py
index c5d590a4..bf3a977e 100644
--- a/lib/charms/grafana_k8s/v0/grafana_dashboard.py
+++ b/lib/charms/grafana_k8s/v0/grafana_dashboard.py
@@ -218,7 +218,7 @@ def __init__(self, *args):
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 19
+LIBPATCH = 21
 
 logger = logging.getLogger(__name__)
 
@@ -608,70 +608,32 @@ def _replace_template_fields(  # noqa: C901
 
     If existing datasource variables are present, try to substitute them.
     """
     replacements = {"loki": "${lokids}", "prometheus": "${prometheusds}"}
-    used_replacements = []
+    used_replacements = []  # type: List[str]
 
     # If any existing datasources match types we know, or we didn't find
     # any templating variables at all, template them.
     if datasources or not existing_templates:
-        panels = dict_content["panels"]
+        panels = dict_content.get("panels", {})
+        if panels:
+            dict_content["panels"] = _template_panels(
+                panels, replacements, used_replacements, existing_templates, datasources
+            )
 
-        # Go through all the panels. If they have a datasource set, AND it's one
-        # that we can convert to ${lokids} or ${prometheusds}, by stripping off the
-        # ${} templating and comparing the name to the list we built, replace it,
-        # otherwise, leave it alone.
-        #
-        # COS only knows about Prometheus and Loki.
-        for panel in panels:
-            if "datasource" not in panel or not panel.get("datasource"):
-                continue
-            if not existing_templates:
-                datasource = panel.get("datasource")
-                if type(datasource) == str:
-                    if "loki" in datasource:
-                        panel["datasource"] = "${lokids}"
-                    else:
-                        panel["datasource"] = "${prometheusds}"
-                elif type(datasource) == dict:
-                    # In dashboards exported by Grafana 9, datasource type is dict
-                    dstype = datasource.get("type", "")
-                    if dstype == "loki":
-                        panel["datasource"]["uid"] = "${lokids}"
-                    elif dstype == "prometheus":
-                        panel["datasource"]["uid"] = "${prometheusds}"
-                    else:
-                        logger.debug("Unrecognized datasource type '%s'; skipping", dstype)
-                        continue
-                else:
-                    logger.error("Unknown datasource format: skipping")
-                    continue
-            else:
-                if type(panel["datasource"]) == str:
-                    if panel["datasource"].lower() in replacements.values():
-                        # Already a known template variable
-                        continue
-                    # Strip out variable characters and maybe braces
-                    ds = re.sub(r"(\$|\{|\})", "", panel["datasource"])
-                    replacement = replacements.get(datasources[ds], "")
-                    if replacement:
-                        used_replacements.append(ds)
-                    panel["datasource"] = replacement or panel["datasource"]
-                elif type(panel["datasource"]) == dict:
-                    dstype = panel["datasource"].get("type", "")
-                    if panel["datasource"].get("uid", "").lower() in replacements.values():
-                        # Already a known template variable
-                        continue
-                    # Strip out variable characters and maybe braces
-                    ds = re.sub(r"(\$|\{|\})", "", panel["datasource"].get("uid", ""))
-                    replacement = replacements.get(datasources[ds], "")
-                    if replacement:
-                        used_replacements.append(ds)
-                    panel["datasource"]["uid"] = replacement
-                else:
-                    logger.error("Unknown datasource format: skipping")
-                    continue
+        # Find panels nested under rows
+        rows = dict_content.get("rows", {})
+        if rows:
+
+            for row_idx, row in enumerate(rows):
+                if "panels" in row.keys():
+                    rows[row_idx]["panels"] = _template_panels(
+                        row["panels"],
+                        replacements,
+                        used_replacements,
+                        existing_templates,
+                        datasources,
+                    )
 
-        # Put our substitutions back
-        dict_content["panels"] = panels
+            dict_content["rows"] = rows
 
     # Finally, go back and pop off the templates we stubbed out
     deletions = []
@@ -685,6 +647,82 @@ def _replace_template_fields(  # noqa: C901
     return dict_content
 
 
+def _template_panels(
+    panels: dict,
+    replacements: dict,
+    used_replacements: list,
+    existing_templates: bool,
+    datasources: dict,
+) -> dict:
+    """Iterate through a `panels` object and template it appropriately."""
+    # Go through all the panels. If they have a datasource set, AND it's one
+    # that we can convert to ${lokids} or ${prometheusds}, by stripping off the
+    # ${} templating and comparing the name to the list we built, replace it,
+    # otherwise, leave it alone.
+    #
+    for panel in panels:
+        if "datasource" not in panel or not panel.get("datasource"):
+            continue
+        if not existing_templates:
+            datasource = panel.get("datasource")
+            if type(datasource) == str:
+                if "loki" in datasource:
+                    panel["datasource"] = "${lokids}"
+                elif "grafana" in datasource:
+                    continue
+                else:
+                    panel["datasource"] = "${prometheusds}"
+            elif type(datasource) == dict:
+                # In dashboards exported by Grafana 9, datasource type is dict
+                dstype = datasource.get("type", "")
+                if dstype == "loki":
+                    panel["datasource"]["uid"] = "${lokids}"
+                elif dstype == "prometheus":
+                    panel["datasource"]["uid"] = "${prometheusds}"
+                else:
+                    logger.debug("Unrecognized datasource type '%s'; skipping", dstype)
+                    continue
+            else:
+                logger.error("Unknown datasource format: skipping")
+                continue
+        else:
+            if type(panel["datasource"]) == str:
+                if panel["datasource"].lower() in replacements.values():
+                    # Already a known template variable
+                    continue
+                # Strip out variable characters and maybe braces
+                ds = re.sub(r"(\$|\{|\})", "", panel["datasource"])
+
+                if ds not in datasources.keys():
+                    # Unknown, non-templated datasource, potentially a Grafana builtin
+                    continue
+
+                replacement = replacements.get(datasources[ds], "")
+                if replacement:
+                    used_replacements.append(ds)
+                panel["datasource"] = replacement or panel["datasource"]
+            elif type(panel["datasource"]) == dict:
+                dstype = panel["datasource"].get("type", "")
+                if panel["datasource"].get("uid", "").lower() in replacements.values():
+                    # Already a known template variable
+                    continue
+                # Strip out variable characters and maybe braces
+                ds = re.sub(r"(\$|\{|\})", "", panel["datasource"].get("uid", ""))
+
+                if ds not in datasources.keys():
+                    # Unknown, non-templated datasource, potentially a Grafana builtin
+                    continue
+
+                replacement = replacements.get(datasources[ds], "")
+                if replacement:
+                    used_replacements.append(ds)
+                panel["datasource"]["uid"] = replacement
+            else:
+                logger.error("Unknown datasource format: skipping")
+                continue
+    return panels
+
+
 def _inject_labels(content: str, topology: dict, transformer: "CosTool") -> str:
     """Inject Juju topology into panel expressions via CosTool.
 
diff --git a/lib/charms/observability_libs/v0/kubernetes_service_patch.py b/lib/charms/observability_libs/v1/kubernetes_service_patch.py
similarity index 65%
rename from lib/charms/observability_libs/v0/kubernetes_service_patch.py
rename to lib/charms/observability_libs/v1/kubernetes_service_patch.py
index a3fb9109..b4587956 100644
--- a/lib/charms/observability_libs/v0/kubernetes_service_patch.py
+++ b/lib/charms/observability_libs/v1/kubernetes_service_patch.py
@@ -9,21 +9,20 @@
 default contains a "placeholder" port, which is 65536/TCP.
 
 When modifying the default set of resources managed by Juju, one must consider the lifecycle of the
-charm. In this case, any modifications to the default service (created during deployment), will
-be overwritten during a charm upgrade.
+charm. In this case, any modifications to the default service (created during deployment), will be
+overwritten during a charm upgrade.
 
 When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm`
 events which applies the patch to the cluster. This should ensure that the service ports are
 correct throughout the charm's life.
 
-The constructor simply takes a reference to the parent charm, and a list of tuples that each define
-a port for the service, where each tuple contains:
+The constructor simply takes a reference to the parent charm, and a list of
+[`lightkube`](https://github.com/gtsystem/lightkube) ServicePorts that each define a port for the
+service. For information regarding the `lightkube` `ServicePort` model, please visit the
+`lightkube` [docs](https://gtsystem.github.io/lightkube-models/1.23/models/core_v1/#serviceport).
 
-- a name for the port
-- port for the service to listen on
-- optionally: a targetPort for the service (the port in the container!)
-- optionally: a nodePort for the service (for NodePort or LoadBalancer services only!)
-- optionally: a name of the service (in case service name needs to be patched as well)
+Optionally, a name for the service (in case the service name needs to be patched as well), labels,
+selectors, and annotations can be provided as keyword arguments.
 
 ## Getting Started
 
@@ -32,8 +31,8 @@
 
 ```shell
 cd some-charm
-charmcraft fetch-lib charms.observability_libs.v0.kubernetes_service_patch
-echo <<-EOF >> requirements.txt
+charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch
+cat << EOF >> requirements.txt
 lightkube
 lightkube-models
 EOF
@@ -41,28 +40,71 @@
 
 Then, to initialise the library:
 
-For ClusterIP services:
+For `ClusterIP` services:
+
 ```python
 # ...
-from charms.observability_libs.v0.kubernetes_service_patch import KubernetesServicePatch
+from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
+from lightkube.models.core_v1 import ServicePort
 
 class SomeCharm(CharmBase):
   def __init__(self, *args):
     # ...
-    self.service_patcher = KubernetesServicePatch(self, [(f"{self.app.name}", 8080)])
+    port = ServicePort(443, name=f"{self.app.name}")
+    self.service_patcher = KubernetesServicePatch(self, [port])
     # ...
 ```
 
-For LoadBalancer/NodePort services:
+For `LoadBalancer`/`NodePort` services:
+
 ```python
 # ...
-from charms.observability_libs.v0.kubernetes_service_patch import KubernetesServicePatch
+from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
+from lightkube.models.core_v1 import ServicePort
 
 class SomeCharm(CharmBase):
   def __init__(self, *args):
     # ...
+    port = ServicePort(443, name=f"{self.app.name}", targetPort=443, nodePort=30666)
     self.service_patcher = KubernetesServicePatch(
-        self, [(f"{self.app.name}", 443, 443, 30666)], "LoadBalancer"
+        self, [port], "LoadBalancer"
     )
     # ...
 ```
+
+Port protocols can also be specified. Valid protocols are `"TCP"`, `"UDP"`, and `"SCTP"`.
+
+```python
+# ...
+from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
+from lightkube.models.core_v1 import ServicePort
+
+class SomeCharm(CharmBase):
+  def __init__(self, *args):
+    # ...
+    tcp = ServicePort(443, name=f"{self.app.name}-tcp", protocol="TCP")
+    udp = ServicePort(443, name=f"{self.app.name}-udp", protocol="UDP")
+    sctp = ServicePort(443, name=f"{self.app.name}-sctp", protocol="SCTP")
+    self.service_patcher = KubernetesServicePatch(self, [tcp, udp, sctp])
+    # ...
+```
+
+Bind to custom events by providing a `refresh_event` argument: for example, if you have a
+configurable port in your charm and want to re-apply the service patch every time the charm
+config changes.
+
+```python
+from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
+from lightkube.models.core_v1 import ServicePort
+
+class SomeCharm(CharmBase):
+  def __init__(self, *args):
+    # ...
+    port = ServicePort(int(self.config["charm-config-port"]), name=f"{self.app.name}")
+    self.service_patcher = KubernetesServicePatch(
+        self,
+        [port],
+        refresh_event=self.on.config_changed
+    )
+    # ...
+```
 
@@ -83,15 +125,16 @@ def setUp(self, *unused):
 
 import logging
 from types import MethodType
-from typing import Literal, Sequence, Tuple, Union
+from typing import List, Literal, Optional, Union
 
 from lightkube import ApiError, Client
+from lightkube.core import exceptions
 from lightkube.models.core_v1 import ServicePort, ServiceSpec
 from lightkube.models.meta_v1 import ObjectMeta
 from lightkube.resources.core_v1 import Service
 from lightkube.types import PatchType
 from ops.charm import CharmBase
-from ops.framework import Object
+from ops.framework import BoundEvent, Object
 
 logger = logging.getLogger(__name__)
 
@@ -99,13 +142,12 @@ def setUp(self, *unused):
 LIBID = "0042f86d0a874435adef581806cddbbb"
 
 # Increment this major API version when introducing breaking changes
-LIBAPI = 0
+LIBAPI = 1
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 6
+LIBPATCH = 5
 
-PortDefinition = Union[Tuple[str, int], Tuple[str, int, int], Tuple[str, int, int, int]]
 ServiceType = Literal["ClusterIP", "LoadBalancer"]
 
@@ -115,18 +157,20 @@ class KubernetesServicePatch(Object):
     def __init__(
         self,
         charm: CharmBase,
-        ports: Sequence[PortDefinition],
-        service_name: str = None,
+        ports: List[ServicePort],
+        service_name: Optional[str] = None,
         service_type: ServiceType = "ClusterIP",
-        additional_labels: dict = None,
-        additional_selectors: dict = None,
-        additional_annotations: dict = None,
+        additional_labels: Optional[dict] = None,
+        additional_selectors: Optional[dict] = None,
+        additional_annotations: Optional[dict] = None,
+        *,
+        refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None,
     ):
         """Constructor for KubernetesServicePatch.
 
         Args:
             charm: the charm that is instantiating the library.
-            ports: a list of tuples (name, port, targetPort, nodePort) for every service port.
+            ports: a list of ServicePorts
             service_name: allows setting custom name to the patched service. If none given,
                 application name will be used.
             service_type: desired type of K8s service. Default value is in line with ServiceSpec's
@@ -136,6 +180,9 @@ def __init__(
             additional_selectors: Selectors to be added to the kubernetes service (by default
                 only "app.kubernetes.io/name" is set to the service name)
             additional_annotations: Annotations to be added to the kubernetes service.
+            refresh_event: an optional bound event or list of bound events which
+                will be observed to re-apply the patch (e.g. on port change).
+                The `install` and `upgrade-charm` events are observed regardless.
""" super().__init__(charm, "kubernetes-service-patch") self.charm = charm @@ -155,22 +202,27 @@ def __init__( self.framework.observe(charm.on.install, self._patch) self.framework.observe(charm.on.upgrade_charm, self._patch) + # apply user defined events + if refresh_event: + if not isinstance(refresh_event, list): + refresh_event = [refresh_event] + + for evt in refresh_event: + self.framework.observe(evt, self._patch) + def _service_object( self, - ports: Sequence[PortDefinition], - service_name: str = None, + ports: List[ServicePort], + service_name: Optional[str] = None, service_type: ServiceType = "ClusterIP", - additional_labels: dict = None, - additional_selectors: dict = None, - additional_annotations: dict = None, + additional_labels: Optional[dict] = None, + additional_selectors: Optional[dict] = None, + additional_annotations: Optional[dict] = None, ) -> Service: """Creates a valid Service representation. Args: - ports: a list of tuples of the form (name, port) or (name, port, targetPort) - or (name, port, targetPort, nodePort) for every service port. If the 'targetPort' - is omitted, it is assumed to be equal to 'port', with the exception of NodePort - and LoadBalancer services, where all port numbers have to be specified. + ports: a list of ServicePorts service_name: allows setting custom name to the patched service. If none given, application name will be used. service_type: desired type of K8s service. Default value is in line with ServiceSpec's @@ -203,15 +255,7 @@ def _service_object( ), spec=ServiceSpec( selector=selector, - ports=[ - ServicePort( - name=p[0], - port=p[1], - targetPort=p[2] if len(p) > 2 else p[1], # type: ignore[misc] - nodePort=p[3] if len(p) > 3 else None, # type: ignore[arg-type, misc] - ) - for p in ports - ], + ports=ports, type=service_type, ), ) @@ -222,11 +266,15 @@ def _patch(self, _) -> None: Raises: PatchFailed: if patching fails due to lack of permissions, or otherwise. """ - if not self.charm.unit.is_leader(): + try: + client = Client() + except exceptions.ConfigError as e: + logger.warning("Error creating k8s client: %s", e) return - client = Client() try: + if self._is_patched(client): + return if self.service_name != self._app: self._delete_and_create_service(client) client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE) @@ -252,12 +300,25 @@ def is_patched(self) -> bool: bool: A boolean indicating if the service patch has been applied. 
""" client = Client() + return self._is_patched(client) + + def _is_patched(self, client: Client) -> bool: # Get the relevant service from the cluster - service = client.get(Service, name=self.service_name, namespace=self._namespace) + try: + service = client.get(Service, name=self.service_name, namespace=self._namespace) + except ApiError as e: + if e.status.code == 404 and self.service_name != self._app: + return False + else: + logger.error("Kubernetes service get failed: %s", str(e)) + raise + # Construct a list of expected ports, should the patch be applied expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports] # Construct a list in the same manner, using the fetched service - fetched_ports = [(p.port, p.targetPort) for p in service.spec.ports] # type: ignore[attr-defined] # noqa: E501 + fetched_ports = [ + (p.port, p.targetPort) for p in service.spec.ports # type: ignore[attr-defined] + ] # noqa: E501 return expected_ports == fetched_ports @property diff --git a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py index 2aa5779d..f080fb84 100644 --- a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py +++ b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py @@ -1,6 +1,6 @@ # Copyright 2021 Canonical Ltd. # See LICENSE file for licensing details. -"""Source code can be found on GitHub at canonical/observability-libs/lib/charms/observability_libs. +"""Prometheus Scrape Library. ## Overview @@ -13,6 +13,11 @@ shared between Prometheus charms and any other charm that intends to provide a scrape target for Prometheus. +## Source code + +Source code can be found on GitHub at: + https://github.com/canonical/prometheus-k8s-operator/tree/main/lib/charms/prometheus_k8s + ## Dependencies Using this library requires you to fetch the juju_topology library from @@ -354,7 +359,7 @@ def _on_scrape_targets_changed(self, event): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 26 +LIBPATCH = 27 logger = logging.getLogger(__name__) diff --git a/lib/charms/traefik_k8s/v1/ingress.py b/lib/charms/traefik_k8s/v1/ingress.py index 69008a73..898b609d 100644 --- a/lib/charms/traefik_k8s/v1/ingress.py +++ b/lib/charms/traefik_k8s/v1/ingress.py @@ -69,7 +69,7 @@ def _on_ingress_revoked(self, event: IngressPerAppRevokedEvent): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 7 +LIBPATCH = 8 DEFAULT_RELATION_NAME = "ingress" RELATION_INTERFACE = "ingress" @@ -559,6 +559,6 @@ def url(self) -> Optional[str]: Returns None if the URL isn't available yet. 
""" - data = self._stored.current_url or None # type: ignore + data = self._stored.current_url or self._get_url_from_relation_data() # type: ignore assert isinstance(data, (str, type(None))) # for static checker return data diff --git a/src/charm.py b/src/charm.py index bc405fcd..471e44c7 100755 --- a/src/charm.py +++ b/src/charm.py @@ -27,7 +27,10 @@ ResourceRequirements, adjust_resource_requirements, ) -from charms.observability_libs.v0.kubernetes_service_patch import KubernetesServicePatch +from charms.observability_libs.v1.kubernetes_service_patch import ( + KubernetesServicePatch, + ServicePort, +) from charms.prometheus_k8s.v0.prometheus_scrape import MetricsEndpointProvider from charms.traefik_k8s.v1.ingress import ( IngressPerAppReadyEvent, @@ -124,8 +127,8 @@ def __init__(self, *args): self.service_patcher = KubernetesServicePatch( self, [ - (f"{self.app.name}", self._ports.api, self._ports.api), - (f"{self.app.name}-ha", self._ports.ha, self._ports.ha), + ServicePort(self._ports.api, f"{self.app.name}"), + ServicePort(self._ports.ha, f"{self.app.name}-ha"), ], ) self.resources_patch = KubernetesComputeResourcesPatch( diff --git a/tests/unit/test_external_url.py b/tests/unit/test_external_url.py index cdbdb428..c25c56db 100644 --- a/tests/unit/test_external_url.py +++ b/tests/unit/test_external_url.py @@ -128,14 +128,17 @@ def test_config_option_overrides_traefik(self): self.assertEqual(self.get_url_cli_arg(), external_url_ingress) self.assertTrue(self.is_service_running()) + # NOTE intentionally not emptying out relation data manually + # FIXME: figure out if we do or do not need to manually empty out relation-data + # before relation-broken is emitted. + # https://github.com/canonical/operator/issues/888 + app_data = {"ingress": ""} + self.harness.update_relation_data(rel_id, "traefik-app", app_data) + # AND WHEN the traefik relation is removed self.harness.remove_relation_unit(rel_id, "traefik-app/0") self.harness.remove_relation(rel_id) - # NOTE intentionally not emptying out relation data manually - # app_data = {"ingress": ""} - # self.harness.update_relation_data(rel_id, "traefik-app", app_data) - # THEN the fqdn is used as external url self.assertEqual(self.get_url_cli_arg(), self.fqdn_url) diff --git a/tox.ini b/tox.ini index b73e4a0b..88741ee7 100644 --- a/tox.ini +++ b/tox.ini @@ -69,7 +69,7 @@ deps = types-setuptools types-toml charm: -r{toxinidir}/requirements.txt - lib: git+https://github.com/canonical/operator#egg=ops + lib: ops unit: {[testenv:unit]deps} integration: {[testenv:integration]deps} commands =