diff --git a/kubernetes/Bug_Control.md b/kubernetes/Bug_Control.md
new file mode 100644
index 0000000..5a1cb75
--- /dev/null
+++ b/kubernetes/Bug_Control.md
@@ -0,0 +1,50 @@
+# Error Control: Vagrant Kubernetes
+While using `vagrant` with a `Vagrantfile` you can run into a few errors. This page collects errors faced during development of the `kubernetes vagrant` setup, along with the solutions that worked whenever the issue came up.
+
+### SSH connection times out, or gets reset with an error like the one below
+
+```bash
+SSH connection was reset! This usually happens when the machine is
+taking too long to reboot. First, try reloading your machine with
+`vagrant reload`, since a simple restart sometimes fixes things.
+If that doesn't work, destroy your machine and recreate it with
+a `vagrant destroy` followed by a `vagrant up`. If that doesn't work,
+contact support.
+```
+
+> Try adding the config below to your Vagrantfile
+```bash
+ config.vm.boot_timeout = 600
+```
+
+> If that still does not solve it, remove the local Vagrant state and bring the machine up again
+```bash
+$ rm -rf .vagrant
+```
+
+### While creating a VM, Vagrant fails to rename it because of leftover VMs that were not cleaned up
+```bash
+The name of your virtual machine couldn't be set because VirtualBox
+is reporting another VM with that name already exists. Most of the
+time, this is because of an error with VirtualBox not cleaning up
+properly. To fix this, verify that no VMs with that name do exist
+(by opening the VirtualBox GUI). If they don't, then look at the
+folder in the error message from VirtualBox below and remove it
+if there isn't any information you need in there.
+
+VirtualBox error:
+
+VBoxManage: error: Could not rename the directory '/Users/XXXXXXX/VirtualBox VMs/ubuntu-18.04-amd64_1619926105557_22107' to '/Users/XXXXXXX/VirtualBox VMs/load-balancer' to save the settings file (VERR_ALREADY_EXISTS)
+VBoxManage: error: Details: code NS_ERROR_FAILURE (0x80004005), component SessionMachine, interface IMachine, callee nsISupports
+VBoxManage: error: Context: "SaveSettings()" at line 3249 of file VBoxManageModifyVM.cpp
+```
+
+> Simply run the command below with the directory path from the error message (quote it, since the path contains a space)
+```bash
+$ rm -rf "/Users/XXXXXXX/VirtualBox VMs/load-balancer"
+```
+
+> It is also better to comment out the line below in your Vagrantfile
+```bash
+ config.ssh.keep_alive = true
+```
\ No newline at end of file
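For the "another VM with that name already exists" case above, the stale machine can also be cleaned up from the command line instead of the VirtualBox GUI. A minimal sketch, assuming `VBoxManage` is on the PATH and using the `load-balancer` name from the error output as a placeholder:

```bash
# List every VM VirtualBox still knows about and find the stale entry.
VBoxManage list vms

# Unregister the stale VM and delete its files (same effect as the manual rm -rf).
VBoxManage unregistervm "load-balancer" --delete

# Drop Vagrant's cached entries for machines that no longer exist.
vagrant global-status --prune
```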
diff --git a/kubernetes/ha/Vagrantfile b/kubernetes/ha/Vagrantfile
new file mode 100644
index 0000000..053a640
--- /dev/null
+++ b/kubernetes/ha/Vagrantfile
@@ -0,0 +1,48 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+require 'yaml'
+require 'open3'
+
+k8s = YAML.load_file(File.join(File.dirname(__FILE__), 'config.yaml'))
+ENV["LC_ALL"] = "en_US.UTF-8"
+
+msg = <> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['master'] + "-#{m}")
+  system("vagrant ssh --no-tty -c 'echo \"#{mpub}\" >> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['ha'])
+
+  1.step(k8s['resources']['master']['count']) do |n|
+    next if m == n
+    system("vagrant ssh --no-tty -c 'echo \"#{mpub}\" >> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['master'] + "-#{n}")
+  end
+
+  1.step(k8s['resources']['node']['count']) do |e|
+    system("vagrant ssh --no-tty -c 'echo \"#{mpub}\" >> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['node'] + "-#{e}")
+  end
+
+  # Push all required configs/certificates to the master node
+  system("vagrant ssh --no-tty -c 'scp -o StrictHostKeyChecking=no /opt/certificates/{encryption-config.yaml,kube-controller-manager.kubeconfig,kube-scheduler.kubeconfig,admin.kubeconfig,ca.pem,ca-key.pem,kubernetes-key.pem,kubernetes.pem,service-account-key.pem,service-account.pem} " + k8s['cluster']['master'] + "-#{m}" + ":~/certificates/' " + k8s['cluster']['ha'])
+  # Start etcd and the control plane services on the controller
+  system("vagrant ssh --no-tty -c 'sudo cp /home/vagrant/certificates/{ca.pem,kubernetes-key.pem,kubernetes.pem} /etc/etcd/; sudo cp /home/vagrant/certificates/{ca.pem,ca-key.pem,kubernetes-key.pem,kubernetes.pem,service-account-key.pem,service-account.pem,encryption-config.yaml} /var/lib/kubernetes/; sudo cp /home/vagrant/certificates/{kube-controller-manager.kubeconfig,kube-scheduler.kubeconfig} /var/lib/kubernetes/; sudo systemctl enable --now etcd; sudo systemctl enable --now kube-apiserver; sudo systemctl enable --now kube-controller-manager; sudo systemctl enable --now kube-scheduler; sudo systemctl enable --now nginx; mkdir -p /home/" + k8s['user'] + "/.kube; cp -i /home/" + k8s['user'] + "/certificates/admin.kubeconfig /home/" + k8s['user'] + "/.kube/config' " + k8s['cluster']['master'] + "-#{m}")
+end
+
+1.step(k8s['resources']['node']['count']) do |m|
+  wpub, stderr, status = Open3.capture3("vagrant ssh --no-tty -c 'cat /home/" + k8s['user'] + "/.ssh/id_rsa.pub' " + k8s['cluster']['node'] + "-#{m}")
+  system("vagrant ssh --no-tty -c 'echo \"#{lbpub}\" >> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['node'] + "-#{m}")
+  system("vagrant ssh --no-tty -c 'echo \"#{wpub}\" >> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['ha'])
+
+  1.step(k8s['resources']['node']['count']) do |n|
+    next if m == n
+    system("vagrant ssh --no-tty -c 'echo \"#{wpub}\" >> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['node'] + "-#{n}")
+  end
+
+  1.step(k8s['resources']['master']['count']) do |e|
+    system("vagrant ssh --no-tty -c 'echo \"#{wpub}\" >> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['master'] + "-#{e}")
+  end
+
+  # Push all required configs/certificates to the worker node
+  system("vagrant ssh --no-tty -c 'scp -o StrictHostKeyChecking=no /opt/certificates/{" + k8s['cluster']['node'] + "-#{m}.kubeconfig" + ",kube-proxy.kubeconfig,ca.pem,admin.kubeconfig," + k8s['cluster']['node'] + "-#{m}.pem," + k8s['cluster']['node'] + "-#{m}-key.pem} " + k8s['cluster']['node'] + "-#{m}" + ":~/certificates/' " + k8s['cluster']['ha'])
+  # Bootstrapping the Kubernetes worker nodes
+  system("vagrant ssh --no-tty -c 'sudo cp /home/vagrant/certificates/{" + k8s['cluster']['node'] + "-#{m}-key.pem," + k8s['cluster']['node'] + "-#{m}.pem} /var/lib/kubelet/; sudo cp /home/vagrant/certificates/" + k8s['cluster']['node'] + "-#{m}.kubeconfig /var/lib/kubelet/kubeconfig; sudo cp /home/vagrant/certificates/ca.pem /var/lib/kubernetes/; sudo cp /home/vagrant/certificates/kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig; sudo systemctl enable --now kubelet; sudo systemctl enable --now kube-proxy; sudo systemctl enable --now containerd; mkdir -p /home/" + k8s['user'] + "/.kube; cp -i /home/" + k8s['user'] + "/certificates/admin.kubeconfig /home/" + k8s['user'] + "/.kube/config' " + k8s['cluster']['node'] + "-#{m}")
+end
+
+system("vagrant ssh --no-tty -c 'kubectl apply --kubeconfig /home/vagrant/certificates/admin.kubeconfig -f /home/vagrant/certificates/cluster_role.yaml; kubectl apply --kubeconfig /home/vagrant/certificates/admin.kubeconfig -f /home/vagrant/certificates/cluster_role_binding.yaml' " + k8s['cluster']['master'] + "-1")
+
+# Configuring kubectl for remote access
+system("mkdir -p ${HOME}/.kube")
+system("vagrant ssh --no-tty -c 'cat /opt/certificates/ca.pem' " + k8s['cluster']['ha'] + " > ${HOME}/.kube/ca.pem")
+system("vagrant ssh --no-tty -c 'cat /opt/certificates/admin.pem' " + k8s['cluster']['ha'] + " > ${HOME}/.kube/admin.pem")
+system("vagrant ssh --no-tty -c 'cat /opt/certificates/admin-key.pem' " + k8s['cluster']['ha'] + " > ${HOME}/.kube/admin-key.pem")
+system("kubectl config set-cluster kubernetes-the-hard-way --certificate-authority=${HOME}/.kube/ca.pem --embed-certs=true --server=https://#{k8s['ip_part']}.#{k8s['resources']['ha']['ip_prefix']}:6443 && kubectl config set-credentials admin --client-certificate=${HOME}/.kube/admin.pem --client-key=${HOME}/.kube/admin-key.pem && kubectl config set-context kubernetes-the-hard-way --cluster=kubernetes-the-hard-way --user=admin && kubectl config use-context kubernetes-the-hard-way")
+
+# Deploying the DNS cluster add-on
+system("kubectl apply -f https://storage.googleapis.com/kubernetes-the-hard-way/coredns-1.8.yaml")
+  end
+end
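The Vagrantfile above reads all of its settings from a `config.yaml` sitting next to it, but that file is not part of this diff. Below is a minimal sketch of the keys the Ruby code actually dereferences (`k8s['user']`, `k8s['cluster'][...]`, `k8s['resources'][...]['count']`, `k8s['ip_part']`, `k8s['resources']['ha']['ip_prefix']`); every value is an assumed placeholder, not something taken from the repository:

```bash
# Hypothetical config.yaml matching the keys read by the Vagrantfile above;
# the names and numbers are illustrative placeholders only.
cat > config.yaml <<'EOF'
user: vagrant            # k8s['user'] - account whose SSH keys get exchanged
ip_part: 192.168.56      # k8s['ip_part'] - network prefix used in the API server URL
cluster:
  master: master         # VM name prefix: master-1, master-2, ...
  node: worker           # VM name prefix: worker-1, worker-2, ...
  ha: load-balancer      # single HA / load-balancer VM name
resources:
  master:
    count: 2             # k8s['resources']['master']['count']
  node:
    count: 2             # k8s['resources']['node']['count']
  ha:
    ip_prefix: 40        # last octet: API server reachable at 192.168.56.40:6443
EOF
```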
diff --git a/kubernetes/ha/script/bootstrap.sh b/kubernetes/ha/script/bootstrap.sh
new file mode 100644
index 0000000..605cb70
--- /dev/null
+++ b/kubernetes/ha/script/bootstrap.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+
+cat <<EOF > /etc/sysctl.d/k8s.conf
+net.bridge.bridge-nf-call-ip6tables = 1
+net.bridge.bridge-nf-call-iptables = 1
+net.ipv4.ip_forward = 1
+EOF
+sysctl --system
+
+# Disable all memory swaps to increase performance.
+sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
+swapoff -a
+
+# disable man-db installation
+{
+apt-get remove man-db --purge -y
+sudo rm -rf /usr/share/locale/
+sudo rm -rf /usr/share/man/
+sudo rm -rf /usr/share/doc/
+
+cat > /etc/dpkg/dpkg.cfg.d/01_nodoc <> /etc/ssh/sshd_config
diff --git a/kubernetes/ha/script/bootstrap_ha.sh b/kubernetes/ha/script/bootstrap_ha.sh
new file mode 100644
index 0000000..55ce2f6
--- /dev/null
+++ b/kubernetes/ha/script/bootstrap_ha.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+
+# disable man-db installation
+{
+apt-get remove man-db --purge -y
+sudo rm -rf /usr/share/locale/
+sudo rm -rf /usr/share/man/
+sudo rm -rf /usr/share/doc/
+
+cat > /etc/dpkg/dpkg.cfg.d/01_nodoc <> /etc/ssh/sshd_config
diff --git a/kubernetes/ha/script/bootstrap_master.sh b/kubernetes/ha/script/bootstrap_master.sh
new file mode 100644
index 0000000..079cf42
--- /dev/null
+++ b/kubernetes/ha/script/bootstrap_master.sh
@@ -0,0 +1,249 @@
+#!/usr/bin/env bash
+
+apt-get update
+apt-get install -y nginx
+
+cat > kubernetes.default.svc.cluster.local <
+cat > encryption-config.yaml <
+cat > ca-config.json <
+cat > ca-csr.json <
+cat > admin-csr.json <
+cat > ${instance}-csr.json <
+cat > kube-controller-manager-csr.json <
+cat > kube-proxy-csr.json <
+cat > kube-scheduler-csr.json <
+cat > kubernetes-csr.json <
+cat > service-account-csr.json <
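Once `vagrant up` has finished and the Vagrantfile has configured kubectl for remote access, a quick smoke test of the provisioned cluster might look like the sketch below. It assumes the `kubernetes-the-hard-way` context created above and that the CoreDNS add-on step has already been applied:

```bash
# Switch to the context the Vagrantfile configured for remote access.
kubectl config use-context kubernetes-the-hard-way

# Every master and worker VM should register and eventually report Ready.
kubectl get nodes -o wide

# The CoreDNS pods from the add-on step should come up in kube-system.
kubectl get pods -n kube-system
```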