diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index dda2f12..287a3f2 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -1,6 +1,6 @@
 # This is a dockerfile specifically for running as a devcontainer
 FROM mcr.microsoft.com/oss/go/microsoft/golang:1.22-fips-cbl-mariner2.0
-RUN tdnf update -y && tdnf install make -y && tdnf install git -y && tdnf install gawk -y
+RUN tdnf update -y && tdnf install make -y && tdnf install git -y && tdnf install gawk -y && tdnf install jq -y
 
 RUN go install github.com/cweill/gotests/gotests@latest && \
     go install github.com/fatih/gomodifytags@latest && \
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 364aa05..bd3777e 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -1,4 +1,4 @@
-name: Release Docker Image
+name: Release
 
 on:
   push:
@@ -49,3 +49,24 @@ jobs:
         if: ${{ success() }}
         run: |
           docker push ${{ env.REGISTRY }}/${{ env.REPO_PREFIX }}peerd:${{ steps.get_image_tag.outputs.docker_tag }}
+
+  ci:
+    name: Run AKS CI
+    runs-on: ubuntu-latest
+    needs: publish # Assumes the image job above is named 'publish' and exposes 'docker_tag' as a job output; step outputs of another job are not reachable via the steps context.
+    permissions:
+      contents: read
+      id-token: write # This is required for requesting the JWT from AAD.
+    env:
+      TAG: ${{ needs.publish.outputs.docker_tag }}
+    steps:
+      - name: 'Az CLI login'
+        uses: azure/login@v1
+        with:
+          client-id: ${{ secrets.AZURE_CLIENT_ID }}
+          tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+          subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+
+      - name: 'Make'
+        run: |
+          PEERD_IMAGE_TAG=${{ env.TAG }} make tests-ci-aks
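
The new `ci` job hands the published image tag to the AKS test target. To reproduce that step locally, a rough equivalent is the following sketch; the tag value is a placeholder, and `make tests-ci-aks` is the target added in `tests/Makefile` at the end of this patch:

```bash
# Log in with the Azure CLI (the workflow uses azure/login with OIDC instead),
# then invoke the same make target the workflow's 'Make' step runs.
az login
PEERD_IMAGE_TAG=v0.1.0 make tests-ci-aks
```
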
diff --git a/README.md b/README.md
index 132fb5c..e0fd517 100644
--- a/README.md
+++ b/README.md
@@ -2,9 +2,10 @@
 
 [![Build Status]][build-status]
 [![Kind CI Status]][kind-ci-status]
-[![Docker Release CI]][release-ci]
+[![Release CI]][release-ci]
 [![CodeQL]][code-ql]
 [![Go Report Card]][go-report-card]
+[![Scorecard supply-chain security]][scorecard-supply-chain-security]
 
 ![cluster-ops]
 
@@ -264,7 +265,7 @@ integration with [Overlaybd].
 [build-status]: https://github.com/azure/peerd/actions/workflows/build.yml
 [Kind CI Status]: https://github.com/azure/peerd/actions/workflows/kind.yml/badge.svg
 [kind-ci-status]: https://github.com/azure/peerd/actions/workflows/kind.yml
-[Docker Release CI]: https://github.com/azure/peerd/actions/workflows/release.yml/badge.svg
+[Release CI]: https://github.com/azure/peerd/actions/workflows/release.yml/badge.svg
 [release-ci]: https://github.com/azure/peerd/actions/workflows/release.yml
 [Code Coverage]: https://img.shields.io/badge/coverage-54.9%25-orange
 [node-arch]: ./assets/images/http-flow.png
@@ -288,3 +289,5 @@
 [white paper]: https://pdos.csail.mit.edu/~petar/papers/maymounkov-kademlia-lncs.pdf
 [design-doc]: ./docs/design.md
 [cluster-ops]: ./assets/images//cluster-ops.gif
+[Scorecard supply-chain security]: https://github.com/Azure/peerd/actions/workflows/scorecard.yml/badge.svg
+[scorecard-supply-chain-security]: https://github.com/Azure/peerd/actions/workflows/scorecard.yml
diff --git a/build/ci/k8s/azure-cli.yml b/build/ci/k8s/azure-cli.yml
new file mode 100644
index 0000000..ac061f7
--- /dev/null
+++ b/build/ci/k8s/azure-cli.yml
@@ -0,0 +1,22 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: azure-cli-daemonset
+  namespace: peerd-ns
+spec:
+  selector:
+    matchLabels:
+      app: peerd-test
+  template:
+    metadata:
+      labels:
+        app: peerd-test
+    spec:
+      initContainers:
+        - name: sleep
+          image: busybox
+          command: ["sh", "-c", "sleep $(shuf -i 1-10 -n 1)"]
+      containers:
+        - name: azure-cli
+          image: mcr.microsoft.com/azure-cli:latest
+          imagePullPolicy: Always
diff --git a/build/ci/scripts/kind.sh b/build/ci/scripts/kind.sh
index e261da9..c845feb 100755
--- a/build/ci/scripts/kind.sh
+++ b/build/ci/scripts/kind.sh
@@ -180,10 +180,10 @@ wait_for_events() {
     for pod in $( echo "$pods" | tr -s " " "\012" ); do
       echo "checking pod '$pod' for event '$event'"
 
-      foundEvent=$(kubectl --context=$context get events --field-selector involvedObject.kind=Pod,involvedObject.name=$pod -o json | jq -r ".items[] | select(.reason == \"$event\")")
+      foundEvent=$(kubectl --context=$context -n $ns get events --field-selector involvedObject.kind=Pod,involvedObject.name=$pod -o json | jq -r ".items[] | select(.reason == \"$event\")")
       [[ "$foundEvent" == "" ]] && echo "Event '$event' not found for pod '$pod'" || found=$((found+1))
 
-      errorEvent=$(kubectl --context=$context get events --field-selector involvedObject.kind=Pod,involvedObject.name=$pod -o json | jq -r '.items[] | select(.reason == "P2PDisconnected" or .resosn == "P2PFailed")')
+      errorEvent=$(kubectl --context=$context -n $ns get events --field-selector involvedObject.kind=Pod,involvedObject.name=$pod -o json | jq -r '.items[] | select(.reason == "P2PDisconnected" or .reason == "P2PFailed")')
       [[ "$errorEvent" == "" ]] || (echo "Error event found for pod '$pod': $errorEvent" && exit 1)
     done
 
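
The `-n $ns` fix above matters because peerd records its events in the pod's namespace; without it, kubectl queries the kubeconfig's current namespace and silently finds nothing. A standalone sketch of the namespaced lookup (the context name and pod name are illustrative):

```bash
# Query lifecycle events for one pod in its own namespace; an empty result
# means the event has not been recorded yet.
kubectl --context=kind-peerd -n peerd-ns get events \
    --field-selector involvedObject.kind=Pod,involvedObject.name=peerd-0 -o json \
    | jq -r '.items[] | select(.reason == "P2PConnected")'
```
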
diff --git a/internal/context/context.go b/internal/context/context.go
index 7832d9a..e1ffdec 100644
--- a/internal/context/context.go
+++ b/internal/context/context.go
@@ -61,7 +61,7 @@ var (
 	NodeName, _ = os.Hostname()
 	Namespace   = "peerd-ns"
 
-	// KubeConfigPath is the path of the kubeconfig file, which is used if run in an environment outside a pod.
+	// KubeConfigPath is the path of the kubeconfig file.
 	KubeConfigPath = "/opt/peerd/kubeconfig"
 )
 
diff --git a/internal/k8s/events/events.go b/internal/k8s/events/events.go
index cef8cf7..2f79946 100644
--- a/internal/k8s/events/events.go
+++ b/internal/k8s/events/events.go
@@ -54,6 +54,7 @@ func NewRecorder(ctx context.Context) (EventRecorder, error) {
 	objRef = &v1.ObjectReference{
 		Kind:       "Pod",
 		Name:       pod.Name,
+		Namespace:  pod.Namespace,
 		UID:        pod.UID,
 		APIVersion: pod.APIVersion,
 	}
@@ -61,14 +62,14 @@
 
 	broadcaster := record.NewBroadcaster()
 	broadcaster.StartStructuredLogging(4)
-	broadcaster.StartRecordingToSink(&typedv1core.EventSinkImpl{Interface: clientset.CoreV1().Events("")})
+	broadcaster.StartRecordingToSink(&typedv1core.EventSinkImpl{Interface: clientset.CoreV1().Events(p2pcontext.Namespace)})
 
 	return &eventRecorder{
 		recorder: broadcaster.NewRecorder(
 			scheme.Scheme,
 			v1.EventSource{},
 		),
-		nodeRef: objRef,
+		objRef: objRef,
 	}, nil
 }
 
@@ -89,32 +90,32 @@ func FromContext(ctx context.Context) EventRecorder {
 
 type eventRecorder struct {
 	recorder record.EventRecorder
-	nodeRef  *v1.ObjectReference
+	objRef   *v1.ObjectReference
 }
 
-// Active should be called to indicate that the node is active in the cluster.
+// Active should be called to indicate that the instance is active in the cluster.
 func (er *eventRecorder) Active() {
-	er.recorder.Eventf(er.nodeRef, v1.EventTypeNormal, "P2PActive", "P2P proxy is active on node %s", er.nodeRef.Name)
+	er.recorder.Eventf(er.objRef, v1.EventTypeNormal, "P2PActive", "P2P proxy is active on instance %s", er.objRef.Name)
 }
 
-// Connected should be called to indicate that the node is connected to the cluster.
+// Connected should be called to indicate that the instance is connected to the cluster.
 func (er *eventRecorder) Connected() {
-	er.recorder.Eventf(er.nodeRef, v1.EventTypeNormal, "P2PConnected", "P2P proxy is connected to cluster on node %s", er.nodeRef.Name)
+	er.recorder.Eventf(er.objRef, v1.EventTypeNormal, "P2PConnected", "P2P proxy is connected to cluster on instance %s", er.objRef.Name)
 }
 
-// Disconnected should be called to indicate that the node is disconnected from the cluster.
+// Disconnected should be called to indicate that the instance is disconnected from the cluster.
 func (er *eventRecorder) Disconnected() {
-	er.recorder.Eventf(er.nodeRef, v1.EventTypeWarning, "P2PDisconnected", "P2P proxy is disconnected from cluster on node %s", er.nodeRef.Name)
+	er.recorder.Eventf(er.objRef, v1.EventTypeWarning, "P2PDisconnected", "P2P proxy is disconnected from cluster on instance %s", er.objRef.Name)
 }
 
-// Failed should be called to indicate that the node has failed.
+// Failed should be called to indicate that the instance has failed.
 func (er *eventRecorder) Failed() {
-	er.recorder.Eventf(er.nodeRef, v1.EventTypeWarning, "P2PFailed", "P2P proxy failed on node %s", er.nodeRef.Name)
+	er.recorder.Eventf(er.objRef, v1.EventTypeWarning, "P2PFailed", "P2P proxy failed on instance %s", er.objRef.Name)
 }
 
-// Initializing should be called to indicate that the node is initializing.
+// Initializing should be called to indicate that the instance is initializing.
 func (er *eventRecorder) Initializing() {
-	er.recorder.Eventf(er.nodeRef, v1.EventTypeNormal, "P2PInitializing", "P2P proxy is initializing on node %s", er.nodeRef.Name)
+	er.recorder.Eventf(er.objRef, v1.EventTypeNormal, "P2PInitializing", "P2P proxy is initializing on instance %s", er.objRef.Name)
 }
 
 var _ EventRecorder = &eventRecorder{}
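
With the recorder now sinking to the `peerd-ns` namespace and the object reference carrying `pod.Namespace`, the proxy's lifecycle events can be listed with a plain namespaced query, for example:

```bash
# List P2P lifecycle events emitted by the proxies; 'reason' is a supported
# field selector for core/v1 Events.
kubectl -n peerd-ns get events --field-selector reason=P2PActive
kubectl -n peerd-ns get events --field-selector reason=P2PConnected
```
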
diff --git a/internal/k8s/events/events_test.go b/internal/k8s/events/events_test.go
index 584a8e1..5dbe982 100644
--- a/internal/k8s/events/events_test.go
+++ b/internal/k8s/events/events_test.go
@@ -14,7 +14,7 @@ import (
 func TestExpectedEvents(t *testing.T) {
 	er := &eventRecorder{
 		recorder: &testRecorder{t},
-		nodeRef: &v1.ObjectReference{
+		objRef: &v1.ObjectReference{
 			Kind: "Node",
 			Name: "node-name",
 			UID:  "node.UID",
@@ -32,7 +32,7 @@ func TestExpectedEvents(t *testing.T) {
 func TestFromContext(t *testing.T) {
 	er := &eventRecorder{
 		recorder: &testRecorder{t},
-		nodeRef: &v1.ObjectReference{
+		objRef: &v1.ObjectReference{
 			Kind: "Node",
 			Name: "node-name",
 			UID:  "node.UID",
diff --git a/internal/metrics/memory.go b/internal/metrics/memory.go
index de5b708..f381ed0 100644
--- a/internal/metrics/memory.go
+++ b/internal/metrics/memory.go
@@ -12,7 +12,7 @@ import (
 
 var (
 	// Path is the default path to write metrics.
-	Path = "/var/log/p2pmetrics"
+	Path = "/var/log/peerdmetrics"
 
 	// ReportInterval is the interval to report metrics.
 	ReportInterval = 3 * time.Minute
diff --git a/internal/routing/router.go b/internal/routing/router.go
index 3a4f0f4..c769fd2 100644
--- a/internal/routing/router.go
+++ b/internal/routing/router.go
@@ -210,7 +210,7 @@ func (r *router) Resolve(ctx context.Context, key string, allowSelf bool, count
 
 // Advertise advertises the given keys to the network.
 func (r *router) Advertise(ctx context.Context, keys []string) error {
-	zerolog.Ctx(ctx).Debug().Str("host", r.host.ID().String()).Strs("keys", keys).Msg("advertising keys")
+	zerolog.Ctx(ctx).Trace().Str("host", r.host.ID().String()).Strs("keys", keys).Msg("advertising keys")
 	for _, key := range keys {
 		c, err := createCid(key)
 		if err != nil {
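
Since the metrics file moves to `/var/log/peerdmetrics`, anything reading it must follow suit; the `print_peerd_metrics` helper in `scripts/azure.sh` below does exactly that. A minimal sketch of the same check:

```bash
# Dump the metrics report from every peerd pod.
for pod in $(kubectl -n peerd-ns get pods -l app=peerd -o jsonpath='{.items[*].metadata.name}'); do
    echo "metrics from '$pod':"
    kubectl -n peerd-ns exec -i "$pod" -- cat /var/log/peerdmetrics
done
```
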
diff --git a/scripts/azure.sh b/scripts/azure.sh
new file mode 100755
index 0000000..5c32991
--- /dev/null
+++ b/scripts/azure.sh
@@ -0,0 +1,250 @@
+#!/bin/bash
+set -e
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+source $SCRIPT_DIR/env.sh
+
+PEERD_HELM_CHART="$SCRIPT_DIR/../build/ci/k8s/peerd-helm"
+TESTS_AZURE_CLI_DEPLOY_TEMPLATE=$SCRIPT_DIR/../build/ci/k8s/azure-cli.yml
+
+show_help() {
+    usageStr="
+Usage: $(basename $0) [OPTIONS]
+
+This script is used for deploying apps to an AKS cluster for testing purposes.
+
+Options:
+    -h  Show help
+    -y  Confirm execution; otherwise, it's a dry run
+
+Sub commands:
+    nodepool
+        up
+        delete
+
+    test
+        ctr
+
+* dry run: create nodepool called 'nodepool1' and install the peerd proxy
+    $(basename $0) nodepool up nodepool1
+
+* confirm: create nodepool called 'nodepool1' and install the peerd proxy
+    $(basename $0) nodepool up -y nodepool1
+
+* dry run: delete nodepool 'nodepool1'
+    $(basename $0) nodepool delete 'nodepool1'
+
+* confirm: delete nodepool 'nodepool1'
+    $(basename $0) nodepool delete -y 'nodepool1'
+
+* dry run: run the ctr test on 'nodepool1'
+    $(basename $0) test ctr 'nodepool1'
+
+* confirm: run the ctr test on 'nodepool1'
+    $(basename $0) test ctr -y 'nodepool1'
+"
+    echo "$usageStr"
+}
+
+get_aks_credentials() {
+    local cluster=$1
+    local rg=$2
+
+    az aks get-credentials --resource-group $rg --name $cluster --overwrite-existing && \
+        kubelogin convert-kubeconfig -l azurecli && \
+        kubectl cluster-info
+}
+
+nodepool_deploy() {
+    local aksName=$1
+    local rg=$2
+    local nodepool=$3
+
+    if [ "$DRY_RUN" == "false" ]; then
+        echo "creating nodepool '$nodepool' in aks cluster '$aksName' in resource group '$rg'" && \
+            az aks nodepool add --cluster-name $aksName --name $nodepool --resource-group $rg \
+                --mode User --labels "p2p-nodepool=$nodepool" --node-count 3 --node-vm-size Standard_D2s_v3
+    else
+        echo "[dry run] would have deployed nodepool '$nodepool' to aks cluster '$aksName' in resource group '$rg'"
+    fi
+
+}
+
+peerd_helm_deploy() {
+    local nodepool=$1
+    local peerd_image_tag=$2
+
+    ensure_azure_token
+
+    echo "deploying peerd to k8s cluster, chart: '$PEERD_HELM_CHART', tag: '$peerd_image_tag'" && \
+        kubectl cluster-info
+
+    if [ "$DRY_RUN" == "false" ]; then
+        HELM_RELEASE_NAME=peerd && \
+            helm install --wait $HELM_RELEASE_NAME $PEERD_HELM_CHART \
+                --set "peerd.image.ref=ghcr.io/azure/acr/dev/peerd:$peerd_image_tag"
+    else
+        echo "[dry run] would have deployed app to k8s cluster"
+    fi
+
+    print_and_exit_if_dry_run
+}
+
+wait_for_peerd_pods() {
+    local cluster=$1
+    local rg=$2
+    local nodepool=$3
+    local event=$4
+    local minimumRequired=$5
+
+    local found=0
+
+    # Get the list of pods.
+    pods=$(kubectl -n peerd-ns get pods -l app=peerd -o jsonpath='{.items[*].metadata.name}')
+    echo "pods: $pods"
+    total=`echo "$pods" | tr -s " " "\012" | wc -l`
+
+    if [ -z "$minimumRequired" ]; then
+        minimumRequired=$total
+    fi
+
+    # Loop until all pods are connected or an error occurs.
+    for ((i=1; i<=100; i++)); do
+        # Initialize a counter for connected pods.
+        found=0
+
+        # Loop through each pod.
+        for pod in $( echo "$pods" | tr -s " " "\012" ); do
+            echo "checking pod '$pod' for event '$event'"
+
+            foundEvent=$(kubectl -n peerd-ns get events --field-selector involvedObject.kind=Pod,involvedObject.name=$pod -o json | jq -r ".items[] | select(.reason == \"$event\")")
+            [[ "$foundEvent" == "" ]] && echo "Event '$event' not found for pod '$pod'" || found=$((found+1))
+
+            errorEvent=$(kubectl -n peerd-ns get events --field-selector involvedObject.kind=Pod,involvedObject.name=$pod -o json | jq -r '.items[] | select(.reason == "P2PDisconnected" or .reason == "P2PFailed")')
+            [[ "$errorEvent" == "" ]] || (echo "Error event found for pod '$pod': $errorEvent" && exit 1)
+        done
+
+        if [ $found -eq $total ]; then
+            echo "Success: All pods have event '$event'."
+            break
+        else
+            echo "Waiting: $found out of $total pods have event '$event'. Attempt $i of 100."
+            sleep 15
+        fi
+    done
+
+    if [ $found -eq $total ]; then
+        return
+    elif [ $found -ge $minimumRequired ]; then
+        echo "Warning: only $found out of $total pods have event '$event', but it meets the minimum criteria of $minimumRequired."
+        return
+    else
+        echo "Validation failed"
+        exit 1
+    fi
+}
+
+print_peerd_metrics() {
+    p=$(kubectl -n peerd-ns get pods -l app=peerd -o jsonpath='{.items[*].metadata.name}')
+    echo "pods: $p"
+
+    for pod in $( echo "$p" | tr -s " " "\012" ); do
+        echo "checking pod '$pod' for metrics"
+        kubectl -n peerd-ns exec -i $pod -- bash -c "cat /var/log/peerdmetrics"
+    done
+}
+
+cmd__nodepool__delete() {
+    local aksName=$AKS_NAME
+    local rg=$RESOURCE_GROUP
+    local nodepool=$1
+
+    if [ "$DRY_RUN" == "false" ]; then
+        echo "deleting nodepool '$nodepool' in aks cluster '$aksName' in resource group '$rg'" && \
+            az aks nodepool delete --cluster-name $aksName --name $nodepool --resource-group $rg
+    else
+        echo "[dry run] would have deleted nodepool '$nodepool' in aks cluster '$aksName' in resource group '$rg'"
+    fi
+}
+
+cmd__nodepool__up () {
+    local nodepool=$1
+    local peerd_image_tag=$PEERD_IMAGE_TAG
+
+    echo "get AKS credentials"
+    get_aks_credentials $AKS_NAME $RESOURCE_GROUP
+
+    echo "sanitizing"
+    helm uninstall peerd --ignore-not-found=true
+
+    echo "creating new nodepool '$nodepool'"
+    nodepool_deploy $AKS_NAME $RESOURCE_GROUP $nodepool
+
+    echo "deploying peerd helm chart using tag '$peerd_image_tag'"
+    peerd_helm_deploy $nodepool $peerd_image_tag
+
+    echo "waiting for pods to connect"
+    wait_for_peerd_pods $AKS_NAME $RESOURCE_GROUP $nodepool "P2PConnected"
+}
+
+cmd__test__ctr() {
+    local aksName=$AKS_NAME
+    local rg=$RESOURCE_GROUP
+    local nodepool=$1
+
+    echo "running test 'ctr'"
+
+    if [ "$DRY_RUN" == "true" ]; then
+        echo "[dry run] would have run test 'ctr'"
+    else
+        # Get nodes
+        nodes=$(kubectl get nodes -o jsonpath='{.items[*].metadata.name}')
+        echo "nodes: $nodes"
+        total=`echo "$nodes" | tr -s " " "\012" | wc -l`
+
+        # Pull the image on all nodes and verify that at least one P2PActive event is generated.
+        kubectl apply -f $TESTS_AZURE_CLI_DEPLOY_TEMPLATE
+
+        wait_for_peerd_pods $AKS_NAME $RESOURCE_GROUP $nodepool "P2PActive" 1
+
+        echo "fetching metrics from pods"
+        print_peerd_metrics
+    fi
+
+    print_and_exit_if_dry_run
+}
+
+
+# Initialize script.
+if [[ -z "$DRY_RUN" ]]; then
+    DRY_RUN="true"
+fi
+
+validate_params
+validate_prerequisites
+
+echo "$@"
+
+# Check sub command then check fall through to
+# main command if sub command doesn't exist
+# functions that are entry points should be of the form
+# cmd__{command}__{subcommand} or cmd__{command}
+if declare -f "cmd__${1}__${2}" >/dev/null; then
+    func="cmd__${1}__${2}"
+
+    # pop $1 $2 off the argument list
+    shift; shift;
+
+    get_opts "$@"; shift $((OPTIND-1)) # consume option flags such as -y
+
+    "$func" "$@" # invoke our named function w/ all remaining arguments
+elif declare -f "cmd__$1" >/dev/null; then
+    func="cmd__$1"
+    shift; # pop $1 off the argument list
+    get_opts "$@"; shift $((OPTIND-1)) # consume option flags such as -y
+    "$func" "$@" # invoke our named function w/ all remaining arguments
+else
+    echo "Neither command $1 nor subcommand ${1} ${2} recognized" >&2
+    show_help
+    exit 1
+fi
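
Taken together, the script follows the dry-run-by-default flow described in its help text; a typical session against a scratch nodepool (the name is illustrative) looks like:

```bash
# A dry run prints the plan and exits; -y confirms execution.
./scripts/azure.sh nodepool up p2p240401        # dry run
./scripts/azure.sh nodepool up -y p2p240401     # create nodepool and install peerd
./scripts/azure.sh test ctr -y p2p240401        # run the ctr test
./scripts/azure.sh nodepool delete -y p2p240401 # clean up
```
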
diff --git a/scripts/env.az.sh b/scripts/env.az.sh
new file mode 100755
index 0000000..f814e88
--- /dev/null
+++ b/scripts/env.az.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+set -e
+
+SUBSCRIPTION="dfb63c8c-7c89-4ef8-af13-75c1d873c895"
+
+ensure_azure_token() {
+    az account set --subscription $SUBSCRIPTION
+}
+
+get_az_user() {
+    ensure_azure_token
+    azUser=$(az account show --query user.name -o tsv | awk -F '@' '{ print $1 }')
+    echo -n $azUser
+}
+
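
The dispatcher at the bottom of `scripts/azure.sh` resolves entry points by naming convention (`cmd__{command}__{subcommand}`, falling back to `cmd__{command}`). A stripped-down sketch of the same pattern:

```bash
#!/bin/bash
# Resolve a handler function from the first two positional arguments.
cmd__nodepool__up() { echo "nodepool up: $*"; }

if declare -f "cmd__${1}__${2}" >/dev/null; then
    func="cmd__${1}__${2}"
    shift 2       # pop command and subcommand
    "$func" "$@"  # pass the remaining arguments through
else
    echo "unrecognized command" >&2
    exit 1
fi
```
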
diff --git a/scripts/env.sh b/scripts/env.sh
new file mode 100755
index 0000000..741713f
--- /dev/null
+++ b/scripts/env.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+set -e
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+source $SCRIPT_DIR/env.az.sh
+
+# Azure resources
+RESOURCE_GROUP="p2p-ci-rg"
+LOCATION="westus2"
+AKS_NAME="acrp2pciaks"
+ACR_NAME="acrp2pci"
+
+indent() {
+    sed 's/^/    /'
+}
+
+ensure_context_dir() {
+    if [ "$DRY_RUN" == "false" ]; then
+        ls "$CONTEXT_DIR" >/dev/null 2>&1 || mkdir $CONTEXT_DIR
+    fi
+}
+
+print_and_exit_if_dry_run() {
+    if [ "$DRY_RUN" == "true" ]; then
+        echo
+        echo
+        echo "DRY RUN SUCCESSFUL: to confirm execution, re-run script with '-y'"
+        exit 0
+    fi
+}
+
+validate_prerequisites() {
+    if ! get_prerequisites_versions; then
+        exit 1
+    fi
+}
+
+get_opts() {
+    while getopts 'yh' OPTION; do
+        case "$OPTION" in
+            y)
+                DRY_RUN="false"
+                ;;
+            h)
+                show_help
+                exit 1 # exit non-zero to break invocation of command
+                ;;
+        esac
+    done
+    shift $((OPTIND-1))
+}
+
+validate_params() {
+    local ec=2
+
+    if [[ "$DRY_RUN" != "true" ]] && [[ "$DRY_RUN" != "false" ]]; then
+        show_help
+        echo "ERROR: dry run parameter invalid, expect true or false"
+        exit $ec
+    fi
+
+    if [[ -z "$SUBSCRIPTION" ]]; then
+        show_help
+        echo "ERROR: subscription parameter is required"
+        exit $ec
+    fi
+
+    if [[ -z "$RESOURCE_GROUP" ]]; then
+        show_help
+        echo "ERROR: resource group parameter is required"
+        exit $ec
+    fi
+
+    if [[ -z "$LOCATION" ]]; then
+        show_help
+        echo "ERROR: location parameter is required"
+        exit $ec
+    fi
+}
+
+# Prepare local environment: try to install tools
+get_prerequisites_versions() {
+    local ec=1
+    az --version >/dev/null 2>&1 || {
+        echo "az cli not found: see https://learn.microsoft.com/en-us/cli/azure/install-azure-cli"
+        return $ec
+    }
+    jq --version >/dev/null 2>&1 || {
+        echo "jq not found: to install, try 'apt install jq'"
+        return $ec
+    }
+    kubectl version --client=true >/dev/null 2>&1 || {
+        echo "kubectl not found: see https://kubernetes.io/docs/tasks/tools/"
+        return $ec
+    }
+    envsubst --version >/dev/null 2>&1 || {
+        echo "envsubst not found: to install, try 'apt-get install gettext-base'"
+        return $ec
+    }
+    which uuid >/dev/null 2>&1 || {
+        echo "uuid not found: to install, try 'apt-get install uuid'"
+        return $ec
+    }
+}
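
`get_prerequisites_versions` bails out at the first missing tool; a compact equivalent that reports every missing prerequisite at once (same tool list) could look like:

```bash
# Check each required CLI and list all the missing ones.
missing=0
for tool in az jq kubectl envsubst uuid; do
    command -v "$tool" >/dev/null 2>&1 || { echo "missing: $tool"; missing=1; }
done
exit $missing
```
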
diff --git a/tests/Makefile b/tests/Makefile
index 0a63dfe..5cadbfb 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -17,4 +17,18 @@ tests-random-image: ## Builds the 'random' tests image
 ifndef CONTAINER_REGISTRY
 	$(eval CONTAINER_REGISTRY := localhost)
 endif
-	$(call build-image-internal,$(ROOT_DIR)/tests/dockerfiles/random.Dockerfile,random,$(ROOT_DIR))
\ No newline at end of file
+	$(call build-image-internal,$(ROOT_DIR)/tests/dockerfiles/random.Dockerfile,random,$(ROOT_DIR))
+
+.PHONY: tests-ci-aks
+tests-ci-aks: ## Run CI in AKS
+	@echo "+ $@"
+ifndef NODEPOOL
+	$(eval NODEPOOL := $(shell date +"p2p%y%m%d"))
+endif
+ifndef PEERD_IMAGE_TAG
+	$(eval PEERD_IMAGE_TAG := "dev")
+endif
+	@echo "\033[92mRunning CI NODEPOOL: $(NODEPOOL)\033[0m"
+	@( PEERD_IMAGE_TAG=$(PEERD_IMAGE_TAG) $(ROOT_DIR)/scripts/azure.sh nodepool up -y $(NODEPOOL) )
+	@( $(ROOT_DIR)/scripts/azure.sh test ctr -y $(NODEPOOL) )
+	@( $(ROOT_DIR)/scripts/azure.sh nodepool delete -y $(NODEPOOL) )
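
The target defaults `NODEPOOL` to a date-stamped name (`p2pYYMMDD`) and `PEERD_IMAGE_TAG` to `dev`; both can be overridden on the command line (values illustrative):

```bash
# Run the AKS CI end to end with an explicit nodepool name and image tag.
NODEPOOL=p2pscratch PEERD_IMAGE_TAG=v0.1.0 make tests-ci-aks
```
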