Merge pull request #2 from marthanda93/ha-cluster
Ha cluster
marthanda93 authored May 2, 2021
2 parents 25f8f1e + ce0a0b2 commit 2573f6f
Showing 13 changed files with 1,228 additions and 0 deletions.
50 changes: 50 additions & 0 deletions kubernetes/Bug_Control.md
@@ -0,0 +1,50 @@
# Error Control Vagrant Kubernetes
While using `vagrant` with a `Vagrantfile` you can run into some errors. This page collects a few errors faced during development of the `kubernetes vagrant` setup, together with the solutions that worked whenever they came up.

### SSH connection times out, or the SSH connection gets reset with an error like the one below

```bash
SSH connection was reset! This usually happens when the machine is
taking too long to reboot. First, try reloading your machine with
`vagrant reload`, since a simple restart sometimes fixes things.
If that doesn't work, destroy your machine and recreate it with
a `vagrant destroy` followed by a `vagrant up`. If that doesn't work,
contact support.
```

> Then try adding the config below to your Vagrantfile:
```bash
config.vm.boot_timeout = 600
```

> If that still doesn't solve it, remove the local Vagrant state:
```bash
$ rm -rf .vagrant
```
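
> If removing `.vagrant` alone doesn't help, a full reset usually does. Note that `vagrant destroy -f` deletes the VMs, so only run this sketch if nothing on them needs saving:
```bash
$ vagrant destroy -f   # tear down all VMs defined by this Vagrantfile
$ rm -rf .vagrant      # drop the stale local state
$ vagrant up           # recreate everything from scratch
```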

### While creating a VM, Vagrant fails to rename it because of stale VMs left behind
```bash
The name of your virtual machine couldn't be set because VirtualBox
is reporting another VM with that name already exists. Most of the
time, this is because of an error with VirtualBox not cleaning up
properly. To fix this, verify that no VMs with that name do exist
(by opening the VirtualBox GUI). If they don't, then look at the
folder in the error message from VirtualBox below and remove it
if there isn't any information you need in there.
VirtualBox error:
VBoxManage: error: Could not rename the directory '/Users/XXXXXXX/VirtualBox VMs/ubuntu-18.04-amd64_1619926105557_22107' to '/Users/XXXXXXX/VirtualBox VMs/load-balancer' to save the settings file (VERR_ALREADY_EXISTS)
VBoxManage: error: Details: code NS_ERROR_FAILURE (0x80004005), component SessionMachine, interface IMachine, callee nsISupports
VBoxManage: error: Context: "SaveSettings()" at line 3249 of file VBoxManageModifyVM.cpp
```
> Simply run the command below with the path from the error, quoting it since it contains a space:
```bash
$ rm -rf "/Users/XXXXXXX/VirtualBox VMs/load-balancer"
```
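
> If you prefer the CLI to the VirtualBox GUI, you can also list the registered VMs and unregister the stale one; the `load-balancer` name below is just the example from the error above:
```bash
$ VBoxManage list vms                                # find the stale entry
$ VBoxManage unregistervm "load-balancer" --delete   # remove it and its files
```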
> It's also better to comment out the line below in your Vagrantfile:
```bash
config.ssh.keep_alive = true
```
48 changes: 48 additions & 0 deletions kubernetes/ha/Vagrantfile
@@ -0,0 +1,48 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

require 'yaml'
require 'open3'

k8s = YAML.load_file(File.join(File.dirname(__FILE__), 'config.yaml'))
ENV["LC_ALL"] = "en_US.UTF-8"

msg = <<MSG
------------------------------------------------------
Kubernetes up and running ✌ ☺ ✌
URLS:
  - Kubernetes control plane is running at https://#{k8s['ip_part']}.#{k8s['resources']['ha']['ip_prefix']}:6443
  - CoreDNS is running at https://#{k8s['ip_part']}.#{k8s['resources']['ha']['ip_prefix']}:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
------------------------------------------------------
MSG

Vagrant.configure(k8s['api_version']) do |config|
config.vm.boot_timeout = 600
# config.ssh.keep_alive = true

# Load Balancer vm
if File.exist?('lib/ha.rb')
eval(IO.read('lib/ha.rb'), binding)
end

# Kubernetes Controller cluster
(1..k8s['resources']['master']['count']).each do |i|
if File.exist?('lib/master.rb')
eval(IO.read('lib/master.rb'), binding)
end
end

# Kubernetes Worker cluster
(1..k8s['resources']['node']['count']).each do |i|
if File.exist?('lib/node.rb')
eval(IO.read('lib/node.rb'), binding)
end
end

  # Exchange ssh keys so the VMs can reach each other; the HA node is expected to reach every VM, while the other VMs cannot reach the HA node directly.
if File.exist?('lib/trigger.rb')
eval(IO.read('lib/trigger.rb'), binding)
end
end
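
A minimal sketch of bringing the cluster up with this Vagrantfile, assuming it sits next to `config.yaml` and the `lib/` and `script/` directories shown in this commit:
```bash
$ cd kubernetes/ha
$ vagrant up       # provisions load-balancer, then controller-1..3, then worker-1..2
$ vagrant status   # all six machines should report "running"
```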
29 changes: 29 additions & 0 deletions kubernetes/ha/config.yaml
@@ -0,0 +1,29 @@
---
api_version: "2"
image: "bento/ubuntu-18.04"
ip_part: "10.240.0"
user: "vagrant"

cluster:
master: "controller"
node: "worker"
ha: "load-balancer"

resources:
master:
cpus: 1
memory: 1024
count: 3
ip_prefix: 10
node:
cpus: 2
memory: 2048
count: 2
ip_prefix: 20
ha:
cpus: 1
memory: 1024
ip_prefix: 10

net:
network_type: private_network
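
With the defaults above, each VM's address works out to `ip_part`.(`ip_prefix` + index), with the load balancer fixed at `.10`. A quick sketch of the resulting layout, assuming the values are left unchanged:
```bash
echo "load-balancer -> 10.240.0.10"
for i in 1 2 3; do echo "controller-$i -> 10.240.0.$((10 + i))"; done
for i in 1 2;   do echo "worker-$i     -> 10.240.0.$((20 + i))"; done
```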
53 changes: 53 additions & 0 deletions kubernetes/ha/lib/ha.rb
@@ -0,0 +1,53 @@
config.vm.define "#{k8s['cluster']['ha']}" do |subconfig|
subconfig.vm.post_up_message = msg
subconfig.vm.box = k8s['image']
subconfig.vm.box_check_update = false

subconfig.vm.hostname = "#{k8s['cluster']['ha']}"
subconfig.vm.network :private_network, ip: "#{k8s['ip_part']}.10"

# Hostfile :: Load balancer
subconfig.vm.provision "Load Balancer hostfile update", type: "shell" do |lb|
lb.inline = <<-SHELL
echo -e "127.0.0.1\t$2" | tee -a /etc/hosts; echo -e "$1\t$2" | tee -a /etc/hosts
SHELL
lb.args = ["#{k8s['ip_part']}.10", "#{k8s['cluster']['ha']}"]
end
subconfig.vm.provision "Master and Worker node hostfile update", type: "shell" do |cluster|
cluster.inline = <<-SHELL
# master
for i in $(eval echo {1..#{k8s['resources']['master']['count']}}); do
echo -e "${1}.$((10 + $i))\t#{k8s['cluster']['master']}-${i}" | tee -a /etc/hosts
done
# worker
for i in $(eval echo {1..#{k8s['resources']['node']['count']}}); do
echo -e "${1}.$((20 + $i))\t#{k8s['cluster']['node']}-${i}" | tee -a /etc/hosts
done
SHELL
cluster.args = ["#{k8s['ip_part']}"]
end

subconfig.vm.provider "virtualbox" do |vb|
vb.memory = k8s['resources']['ha']['memory']
vb.cpus = k8s['resources']['ha']['cpus']
vb.name = "#{k8s['cluster']['ha']}"
vb.gui = false
end

subconfig.vm.provision "#{k8s['cluster']['ha']}-setup", type: "shell" do |lb|
lb.path = "script/bootstrap_ha.sh"
lb.args = ["#{k8s['user']}", "#{k8s['ip_part']}", "#{k8s['cluster']['master']}", "#{k8s['resources']['master']['count']}"]
end

subconfig.vm.provision "certificates provisioning", type: "shell" do |lb_cert|
lb_cert.path = "script/provisioning.sh"
lb_cert.args = ["#{k8s['ip_part']}", "#{k8s['resources']['master']['ip_prefix']}", "#{k8s['resources']['node']['ip_prefix']}", "#{k8s['resources']['ha']['ip_prefix']}", "#{k8s['cluster']['master']}", "#{k8s['cluster']['node']}", "#{k8s['resources']['master']['count']}", "#{k8s['resources']['node']['count']}"]
end

subconfig.vm.provision "Generating Kubernetes Configuration", type: "shell" do |lb_config|
lb_config.path = "script/kube_config.sh"
lb_config.args = ["#{k8s['ip_part']}", "#{k8s['resources']['master']['ip_prefix']}", "#{k8s['resources']['node']['ip_prefix']}", "#{k8s['resources']['ha']['ip_prefix']}", "#{k8s['cluster']['master']}", "#{k8s['cluster']['node']}", "#{k8s['resources']['master']['count']}", "#{k8s['resources']['node']['count']}"]
end

subconfig.vm.provision "Reboot to load all config", type:"shell", inline: "shutdown -r now"
end
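
Once the load balancer is up, a quick way to check that the hostfile provisioners above ran as expected (assuming the default cluster names from `config.yaml`):
```bash
$ vagrant ssh load-balancer -c 'cat /etc/hosts'
# expect entries for load-balancer, controller-1..3 and worker-1..2
```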
49 changes: 49 additions & 0 deletions kubernetes/ha/lib/master.rb
@@ -0,0 +1,49 @@
config.vm.define "#{k8s['cluster']['master']}-#{i}" do |subconfig|
# subconfig.vm.post_up_message = msg
subconfig.vm.box = k8s['image']
subconfig.vm.box_check_update = false

subconfig.vm.hostname = "#{k8s['cluster']['master']}-#{i}"
subconfig.vm.network :private_network, ip: "#{k8s['ip_part']}.#{i + k8s['resources']['master']['ip_prefix']}"

# Hostfile :: Master node
subconfig.vm.provision "Load Balancer hostfile update", type: "shell" do |lb|
lb.inline = <<-SHELL
echo -e "127.0.0.1\t$1" | tee -a /etc/hosts; echo -e "$2\t$3" | tee -a /etc/hosts
SHELL
lb.args = ["#{k8s['cluster']['master']}-#{i}", "#{k8s['ip_part']}.#{k8s['resources']['ha']['ip_prefix']}", "#{k8s['cluster']['ha']}"]
end
subconfig.vm.provision "Master and Worker node hostfile update", type: "shell" do |cluster|
cluster.inline = <<-SHELL
# master
for i in $(eval echo {1..#{k8s['resources']['master']['count']}}); do
echo -e "${1}.$((#{k8s['resources']['master']['ip_prefix']} + $i))\t#{k8s['cluster']['master']}-${i}" | tee -a /etc/hosts
done
# worker
for i in $(eval echo {1..#{k8s['resources']['node']['count']}}); do
echo -e "${1}.$((#{k8s['resources']['node']['ip_prefix']} + $i))\t#{k8s['cluster']['node']}-${i}" | tee -a /etc/hosts
done
SHELL
cluster.args = ["#{k8s['ip_part']}"]
end

subconfig.vm.provider "virtualbox" do |vb|
vb.name = "#{k8s['cluster']['master']}-#{i}"
vb.memory = k8s['resources']['master']['memory']
vb.cpus = k8s['resources']['master']['cpus']
vb.gui = false
end

subconfig.vm.provision "vm-setup", type: "shell" do |vms|
vms.path = "script/bootstrap.sh"
vms.args = ["#{k8s['user']}"]
end

subconfig.vm.provision "#{k8s['cluster']['master']}-#{i}-setup", type: "shell" do |mns|
mns.path = "script/bootstrap_master.sh"
mns.args = ["#{k8s['ip_part']}", "#{k8s['resources']['master']['ip_prefix']}", "#{i}", "#{k8s['cluster']['master']}", "#{k8s['resources']['master']['count']}"]
end

subconfig.vm.provision "Reboot to load all config", type:"shell", inline: "shutdown -r now"
end
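
The control-plane services themselves are only enabled later by `lib/trigger.rb`; once that has run, a sketch of spot-checking one controller, assuming the bootstrap scripts installed these systemd units:
```bash
$ vagrant ssh controller-1 -c 'sudo systemctl is-active etcd kube-apiserver kube-controller-manager kube-scheduler'
# each unit should print "active"
```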
46 changes: 46 additions & 0 deletions kubernetes/ha/lib/node.rb
@@ -0,0 +1,46 @@
config.vm.define "#{k8s['cluster']['node']}-#{i}" do |subconfig|
subconfig.vm.box = k8s['image']

subconfig.vm.hostname = "#{k8s['cluster']['node']}-#{i}"
subconfig.vm.network :private_network, ip: "#{k8s['ip_part']}.#{i + k8s['resources']['node']['ip_prefix']}"

# Hostfile :: Worker node
subconfig.vm.provision "Load Balancer hostfile update", type: "shell" do |lb|
lb.inline = <<-SHELL
echo -e "127.0.0.1\t$1" | tee -a /etc/hosts; echo -e "$2\t$3" | tee -a /etc/hosts
SHELL
lb.args = ["#{k8s['cluster']['node']}-#{i}", "#{k8s['ip_part']}.#{k8s['resources']['ha']['ip_prefix']}", "#{k8s['cluster']['ha']}"]
end
subconfig.vm.provision "Master and Worker node hostfile update", type: "shell" do |cluster|
cluster.inline = <<-SHELL
# master
for i in $(eval echo {1..#{k8s['resources']['master']['count']}}); do
echo -e "${1}.$((#{k8s['resources']['master']['ip_prefix']} + $i))\t#{k8s['cluster']['master']}-${i}" | tee -a /etc/hosts
done
# worker
for i in $(eval echo {1..#{k8s['resources']['node']['count']}}); do
echo -e "${1}.$((#{k8s['resources']['node']['ip_prefix']} + $i))\t#{k8s['cluster']['node']}-${i}" | tee -a /etc/hosts
done
SHELL
cluster.args = ["#{k8s['ip_part']}"]
end

subconfig.vm.provider "virtualbox" do |vb|
vb.memory = k8s['resources']['node']['memory']
vb.cpus = k8s['resources']['node']['cpus']
vb.name = "#{k8s['cluster']['node']}-#{i}"
vb.gui = false
end

subconfig.vm.provision "vm-setup", type: "shell" do |vms|
vms.path = "script/bootstrap.sh"
vms.args = ["#{k8s['user']}"]
end

subconfig.vm.provision "kube-setup", type: "shell" do |ks|
ks.path = "script/bootstrap_node.sh"
end

subconfig.vm.provision "Reboot to load all config", type:"shell", inline: "shutdown -r now"
end
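
As with the controllers, the worker services are started by the trigger; afterwards a similar spot-check on a worker, again assuming the units set up by the bootstrap scripts:
```bash
$ vagrant ssh worker-1 -c 'sudo systemctl is-active kubelet kube-proxy containerd'
# each unit should print "active"
```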
60 changes: 60 additions & 0 deletions kubernetes/ha/lib/trigger.rb
@@ -0,0 +1,60 @@
config.trigger.after :up do |trigger|
trigger.only_on = "#{k8s['cluster']['node']}-#{k8s['resources']['node']['count']}"
trigger.info = msg

trigger.ruby do |env,machine|
lbpub, stderr, status = Open3.capture3("vagrant ssh --no-tty -c 'cat /home/" + k8s['user'] + "/.ssh/id_rsa.pub' " + k8s['cluster']['ha'])

1.step(k8s['resources']['master']['count']) do |m|
mpub, stderr, status = Open3.capture3("vagrant ssh --no-tty -c 'cat /home/" + k8s['user'] + "/.ssh/id_rsa.pub' " + k8s['cluster']['master'] + "-#{m}")
system("vagrant ssh --no-tty -c 'echo \"#{lbpub}\" >> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['master'] + "-#{m}")
system("vagrant ssh --no-tty -c 'echo \"#{mpub}\" >> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['ha'])

1.step(k8s['resources']['master']['count']) do |n|
next if m == n
system("vagrant ssh --no-tty -c 'echo \"#{mpub}\" >> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['master'] + "-#{n}")
end

1.step(k8s['resources']['node']['count']) do |e|
system("vagrant ssh --no-tty -c 'echo \"#{mpub}\" >> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['node'] + "-#{e}")
end

# Push all required configs/certificates to master node
system("vagrant ssh --no-tty -c 'scp -o StrictHostKeyChecking=no /opt/certificates/{encryption-config.yaml,kube-controller-manager.kubeconfig,kube-scheduler.kubeconfig,admin.kubeconfig,ca.pem,ca-key.pem,kubernetes-key.pem,kubernetes.pem,service-account-key.pem,service-account.pem} " + k8s['cluster']['master'] + "-#{m}" + ":~/certificates/' " + k8s['cluster']['ha'])
# Start etcd on all controller
system("vagrant ssh --no-tty -c 'sudo cp /home/vagrant/certificates/{ca.pem,kubernetes-key.pem,kubernetes.pem} /etc/etcd/; sudo cp /home/vagrant/certificates/{ca.pem,ca-key.pem,kubernetes-key.pem,kubernetes.pem,service-account-key.pem,service-account.pem,encryption-config.yaml} /var/lib/kubernetes/; sudo cp /home/vagrant/certificates/{kube-controller-manager.kubeconfig,kube-scheduler.kubeconfig} /var/lib/kubernetes/; sudo systemctl enable --now etcd; sudo systemctl enable --now kube-apiserver; sudo systemctl enable --now kube-controller-manager; sudo systemctl enable --now kube-scheduler; sudo systemctl enable --now nginx; mkdir -p /home/" + k8s['user'] + "/.kube; cp -i /home/" + k8s['user'] + "/certificates/admin.kubeconfig /home/" + k8s['user'] + "/.kube/config' " + k8s['cluster']['master'] + "-#{m}")
end

1.step(k8s['resources']['node']['count']) do |m|
wpub, stderr, status = Open3.capture3("vagrant ssh --no-tty -c 'cat /home/" + k8s['user'] + "/.ssh/id_rsa.pub' " + k8s['cluster']['node'] + "-#{m}")
system("vagrant ssh --no-tty -c 'echo \"#{lbpub}\" >> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['node'] + "-#{m}")
system("vagrant ssh --no-tty -c 'echo \"#{wpub}\" >> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['ha'])

1.step(k8s['resources']['node']['count']) do |n|
next if m == n
system("vagrant ssh --no-tty -c 'echo \"#{wpub}\" >> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['node'] + "-#{n}")
end

1.step(k8s['resources']['master']['count']) do |e|
system("vagrant ssh --no-tty -c 'echo \"#{wpub}\" >> /home/" + k8s['user'] + "/.ssh/authorized_keys' " + k8s['cluster']['master'] + "-#{e}")
end

# Push all required configs/certificates to worker node
system("vagrant ssh --no-tty -c 'scp -o StrictHostKeyChecking=no /opt/certificates/{" + k8s['cluster']['node'] + "-#{m}.kubeconfig" + ",kube-proxy.kubeconfig,ca.pem,admin.kubeconfig," + k8s['cluster']['node'] + "-#{m}.pem," + k8s['cluster']['node'] + "-#{m}-key.pem} " + k8s['cluster']['node'] + "-#{m}" + ":~/certificates/' " + k8s['cluster']['ha'])
# Bootstrapping the Kubernetes Worker Nodes
system("vagrant ssh --no-tty -c 'sudo cp /home/vagrant/certificates/{" + k8s['cluster']['node'] + "-#{m}-key.pem," + k8s['cluster']['node'] + "-#{m}.pem} /var/lib/kubelet/; sudo cp /home/vagrant/certificates/" + k8s['cluster']['node'] + "-#{m}.kubeconfig /var/lib/kubelet/kubeconfig; sudo cp /home/vagrant/certificates/ca.pem /var/lib/kubernetes/; sudo cp /home/vagrant/certificates/kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig; sudo systemctl enable --now kubelet; sudo systemctl enable --now kube-proxy; sudo systemctl enable --now containerd; mkdir -p /home/" + k8s['user'] + "/.kube; cp -i /home/" + k8s['user'] + "/certificates/admin.kubeconfig /home/" + k8s['user'] + "/.kube/config' " + k8s['cluster']['node'] + "-#{m}")
end

system("vagrant ssh --no-tty -c 'kubectl apply --kubeconfig /home/vagrant/certificates/admin.kubeconfig -f /home/vagrant/certificates/cluster_role.yaml; kubectl apply --kubeconfig /home/vagrant/certificates/admin.kubeconfig -f /home/vagrant/certificates/cluster_role_binding.yaml' " + k8s['cluster']['master'] + "-1")

# Configuring kubectl for Remote Access
system("mkdir -p ${HOME}/.kube")
system("vagrant ssh --no-tty -c 'cat /opt/certificates/ca.pem' " + k8s['cluster']['ha'] + " > ${HOME}/.kube/ca.pem")
system("vagrant ssh --no-tty -c 'cat /opt/certificates/admin.pem' " + k8s['cluster']['ha'] + " > ${HOME}/.kube/admin.pem")
system("vagrant ssh --no-tty -c 'cat /opt/certificates/admin-key.pem' " + k8s['cluster']['ha'] + " > ${HOME}/.kube/admin-key.pem")
system("kubectl config set-cluster kubernetes-the-hard-way --certificate-authority=${HOME}/.kube/ca.pem --embed-certs=true --server=https://#{k8s['ip_part']}.#{k8s['resources']['ha']['ip_prefix']}:6443 && kubectl config set-credentials admin --client-certificate=${HOME}/.kube/admin.pem --client-key=${HOME}/.kube/admin-key.pem && kubectl config set-context kubernetes-the-hard-way --cluster=kubernetes-the-hard-way --user=admin && kubectl config use-context kubernetes-the-hard-way")

# Deploying the DNS Cluster Add-on
system("kubectl apply -f https://storage.googleapis.com/kubernetes-the-hard-way/coredns-1.8.yaml")
end
end
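
After the trigger finishes, the host's kubectl context points at the load balancer, so a quick sanity check from the host looks like this (kubectl must already be installed locally):
```bash
$ kubectl get nodes -o wide        # worker-1 and worker-2 should eventually report Ready
$ kubectl get pods -n kube-system  # CoreDNS pods should appear after the add-on apply
```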
