#!/usr/bin/env bash
set -eo pipefail
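# Defaults below can be overridden via the k8sVersion, KIND_CLUSTER_NAME and KIND_VERSION environment variables.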
kindVersion=$(kind version)
K8S_VERSION=${k8sVersion:-v1.21.1@sha256:fae9a58f17f18f06aeac9772ca8b5ac680ebbed985e266f711d936e91d113bad}
CLUSTER_NAME=${KIND_CLUSTER_NAME:-knative}
KIND_VERSION=${KIND_VERSION:-v0.11}
echo "KinD version is ${kindVersion}"
if [[ ! $kindVersion =~ "${KIND_VERSION}." ]]; then
echo "WARNING: Please make sure you are using KinD version ${KIND_VERSION}.x, download from https://github.com/kubernetes-sigs/kind/releases"
echo "For example if using brew, run: brew upgrade kind"
read -p "Do you want to continue on your own risk? Y/n: " REPLYKIND </dev/tty
if [ "$REPLYKIND" == "Y" ] || [ "$REPLYKIND" == "y" ] || [ -z "$REPLYKIND" ]; then
echo "You are very brave..."
sleep 2
elif [ "$REPLYKIND" == "N" ] || [ "$REPLYKIND" == "n" ]; then
echo "Installation stopped, please upgrade kind and run again"
exit 0
fi
fi
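# If a cluster with this name already exists, ask whether to delete and re-create it.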
REPLY=continue
KIND_EXIST="$(kind get clusters -q | grep ${CLUSTER_NAME} || true)"
if [[ ${KIND_EXIST} ]]; then
  read -p "Knative cluster kind-${CLUSTER_NAME} already exists, delete and re-create? N/y: " REPLY </dev/tty
fi
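# Delete the existing cluster only on an explicit "y"; an empty or "n" reply keeps it and skips the installation.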
if [ "$REPLY" == "Y" ] || [ "$REPLY" == "y" ]; then
kind delete cluster --name ${CLUSTER_NAME}
elif [ "$REPLY" == "N" ] || [ "$REPLY" == "n" ] || [ -z "$REPLY" ]; then
echo "Installation skipped"
exit 0
fi
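# Create a single control-plane node cluster, mapping host port 80 to node port 31080 for the ingress layer (kourier or contour).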
cat <<EOF | kind create cluster --name ${CLUSTER_NAME} --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  image: kindest/node:${K8S_VERSION}
  extraPortMappings:
  - containerPort: 31080 # expose node port 31080 to port 80 on the host, later to be used by the kourier or contour ingress
    listenAddress: 127.0.0.1
    hostPort: 80
EOF
echo "Waiting on cluster to be ready..."
sleep 10
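# Wait until every kube-system pod not created by a Job is Ready; a negative --timeout waits indefinitely instead of the default 30s.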
kubectl wait pod --timeout=-1s --for=condition=Ready -l '!job-name' -n kube-system > /dev/null