- https://hackernoon.com/how-to-use-prometheus-adapter-to-autoscale-custom-metrics-deployments-p1p3tl0
- https://www.tecmint.com/install-kubernetes-cluster-on-centos-7/
- https://www.tecmint.com/deploy-nginx-on-a-kubernetes-cluster/
- https://www.tecmint.com/install-a-kubernetes-cluster-on-centos-8/
- https://phoenixnap.com/kb/how-to-install-docker-centos-7
- https://www.padok.fr/en/blog/add-taint-nodes-tolerations
- [root@kubemaster ~]# cat /etc/hosts
- 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
- ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
- 10.0.100.30 kubemaster
- 10.0.100.31 kubenode1
- 10.0.100.32 kubenode2
- [root@kubemaster ~]# ping kubenode1
- PING kubenode1 (10.0.100.31) 56(84) bytes of data.
- 64 bytes from kubenode1 (10.0.100.31): icmp_seq=1 ttl=64 time=1.01 ms
- ^C
- --- kubenode1 ping statistics ---
- 1 packets transmitted, 1 received, 0% packet loss, time 0ms
- rtt min/avg/max/mdev = 1.015/1.015/1.015/0.000 ms
- [root@kubemaster ~]# ping kubenode2
- PING kubenode2 (10.0.100.32) 56(84) bytes of data.
- 64 bytes from kubenode2 (10.0.100.32): icmp_seq=1 ttl=64 time=0.897 ms
- ^C
- --- kubenode2 ping statistics ---
- 1 packets transmitted, 1 received, 0% packet loss, time 0ms
- rtt min/avg/max/mdev = 0.897/0.897/0.897/0.000 ms
- [root@kubemaster ~]#
- [root@kubemaster ~]# setenforce 0
- [root@kubemaster ~]#
- [root@kubemaster ~]# sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
- [root@kubemaster ~]#
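- A quick check before the reboot can confirm the SELinux change took effect; this is only a sketch using standard tools, not part of the recorded session:
- getenforce                               # should report Permissive after setenforce 0
- grep '^SELINUX=' /etc/selinux/config     # /etc/sysconfig/selinux is a symlink to this file; it should now read SELINUX=disabled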
- [root@kubemaster ~]# reboot
- login as: centos
- [root@kubemaster ~]# id -a centos
- uid=1000(centos) gid=1000(centos) groups=1000(centos),10(wheel)
- [root@kubemaster ~]#
- [root@kubemaster ~]# firewall-cmd --permanent --add-port=6443/tcp
- success
- [root@kubemaster ~]# firewall-cmd --permanent --add-port=2379-2380/tcp
- success
- [root@kubemaster ~]# firewall-cmd --permanent --add-port=10250/tcp
- success
- [root@kubemaster ~]# firewall-cmd --permanent --add-port=10251/tcp
- success
- [root@kubemaster ~]# firewall-cmd --permanent --add-port=10252/tcp
- success
- [root@kubemaster ~]# firewall-cmd --permanent --add-port=10255/tcp
- success
- [root@kubemaster ~]# firewall-cmd --reload
- success
- [root@kubemaster ~]#
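- The same openings can be expressed as one loop over the identical port list; this is only a sketch equivalent to the commands above, shown for brevity:
- for port in 6443/tcp 2379-2380/tcp 10250/tcp 10251/tcp 10252/tcp 10255/tcp; do firewall-cmd --permanent --add-port=${port}; done && firewall-cmd --reload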
- [root@kubemaster ~]# modprobe br_netfilter
- [root@kubemaster ~]#
- [root@kubemaster ~]# echo '1' > /proc/sys/net/bridge/bridge-nf-call-iptables
- [root@kubemaster ~]#
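- Both the module load and the sysctl above are lost on reboot; a minimal sketch to persist them (the k8s.conf file names under /etc/modules-load.d and /etc/sysctl.d are illustrative):
- printf 'br_netfilter\n' > /etc/modules-load.d/k8s.conf
- printf 'net.bridge.bridge-nf-call-iptables = 1\n' > /etc/sysctl.d/k8s.conf
- sysctl --system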
- [root@kubemaster ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
- > [kubernetes]
- > name=Kubernetes
- > baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
- > enabled=1
- > gpgcheck=1
- > repo_gpgcheck=1
- > gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
- > EOF
- [root@kubemaster ~]#
- [root@kubemaster ~]# yum install kubeadm docker -y ----------- output trimmed; a successful installation ends with:
- Complete!
- [root@kubemaster ~]#
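- A quick way to confirm what was just installed (standard version flags; not part of the recorded session):
- kubeadm version -o short
- kubelet --version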
- [root@kubemaster ~]# systemctl enable kubelet
- Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
- [root@kubemaster ~]#
- [root@kubemaster ~]# systemctl start kubelet
- [root@kubemaster ~]#
- [root@kubemaster ~]# systemctl enable docker
- Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
- [root@kubemaster ~]#
- [root@kubemaster ~]# systemctl start docker
- [root@kubemaster ~]#
- [root@kubemaster ~]# swapoff -a
- [root@kubemaster ~]# vi /etc/fstab ----------------- comment out the swap entry so swap stays off after reboot
- [root@kubemaster ~]#
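- Commenting the swap line can also be done non-interactively; a sketch with GNU sed that comments any fstab line containing the word swap and keeps a .bak backup:
- sed -i.bak '/\sswap\s/s/^/#/' /etc/fstab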
- [root@kubemaster ~]# kubeadm init
- [init] Using Kubernetes version: v1.22.4
- [preflight] Running pre-flight checks
- [WARNING Firewalld]: firewalld is active, please ensure ports [6443 10250] are open or your cluster may not function correctly
- [preflight] Pulling images required for setting up a Kubernetes cluster
- [preflight] This might take a minute or two, depending on the speed of your internet connection
- [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
- [certs] Using certificateDir folder "/etc/kubernetes/pki"
- [certs] Generating "ca" certificate and key
- [certs] Generating "apiserver" certificate and key
- [certs] apiserver serving cert is signed for DNS names [kubemaster kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.100.30]
- [certs] Generating "apiserver-kubelet-client" certificate and key
- [certs] Generating "front-proxy-ca" certificate and key
- [certs] Generating "front-proxy-client" certificate and key
- [certs] Generating "etcd/ca" certificate and key
- [certs] Generating "etcd/server" certificate and key
- [certs] etcd/server serving cert is signed for DNS names [kubemaster localhost] and IPs [10.0.100.30 127.0.0.1 ::1]
- [certs] Generating "etcd/peer" certificate and key
- [certs] etcd/peer serving cert is signed for DNS names [kubemaster localhost] and IPs [10.0.100.30 127.0.0.1 ::1]
- [certs] Generating "etcd/healthcheck-client" certificate and key
- [certs] Generating "apiserver-etcd-client" certificate and key
- [certs] Generating "sa" key and public key
- [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
- [kubeconfig] Writing "admin.conf" kubeconfig file
- [kubeconfig] Writing "kubelet.conf" kubeconfig file
- [kubeconfig] Writing "controller-manager.conf" kubeconfig file
- [kubeconfig] Writing "scheduler.conf" kubeconfig file
- [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
- [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
- [kubelet-start] Starting the kubelet
- [control-plane] Using manifest folder "/etc/kubernetes/manifests"
- [control-plane] Creating static Pod manifest for "kube-apiserver"
- [control-plane] Creating static Pod manifest for "kube-controller-manager"
- [control-plane] Creating static Pod manifest for "kube-scheduler"
- [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
- [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
- [apiclient] All control plane components are healthy after 12.065357 seconds
- [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
- [kubelet] Creating a ConfigMap "kubelet-config-1.22" in namespace kube-system with the configuration for the kubelets in the cluster
- [upload-certs] Skipping phase. Please see --upload-certs
- [mark-control-plane] Marking the node kubemaster as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
- [mark-control-plane] Marking the node kubemaster as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
- [bootstrap-token] Using token: hhgqlw.talhbe9am34rl4sm
- [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
- [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
- [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
- [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
- [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
- [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
- [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
- [addons] Applied essential addon: CoreDNS
- [addons] Applied essential addon: kube-proxy
- Your Kubernetes control-plane has initialized successfully!
- To start using your cluster, you need to run the following as a regular user:
- mkdir -p $HOME/.kube
- sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
- sudo chown $(id -u):$(id -g) $HOME/.kube/config
- Alternatively, if you are the root user, you can run:
- export KUBECONFIG=/etc/kubernetes/admin.conf
- You should now deploy a pod network to the cluster.
- Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
- https://kubernetes.io/docs/concepts/cluster-administration/addons/
- Then you can join any number of worker nodes by running the following on each as root:
- kubeadm join 10.0.100.30:6443 --token hhgqlw.talhbe9am34rl4sm \
- --discovery-token-ca-cert-hash sha256:5de3908e2cd0b4241870b8176c42f66b45569c43f89514b2033815dcaaf0b2a2
- [root@kubemaster ~]#
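- The bootstrap token printed above expires after 24 hours by default; if a node needs to be added later, a fresh join command can be generated on the master with standard kubeadm usage:
- kubeadm token create --print-join-command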
- [root@kubemaster ~]# kubectl version
- Client Version: version.Info{Major:"1", Minor:"22", GitVersion:"v1.22.4", GitCommit:"b695d79d4f967c403a96986f1750a35eb75e75f1", GitTreeState:"clean", BuildDate:"2021-11-17T15:48:33Z", GoVersion:"go1.16.10", Compiler:"gc", Platform:"linux/amd64"}
- The connection to the server localhost:8080 was refused - did you specify the right host or port?
- [root@kubemaster ~]#
- [root@kubemaster ~]# mkdir -p $HOME/.kube
- [root@kubemaster ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
- [root@kubemaster ~]# chown $(id -u):$(id -g) $HOME/.kube/config
- [root@kubemaster ~]#
- [root@kubemaster ~]# exit
- logout
- [centos@kubemaster ~]$ mkdir -p $HOME/.kube
- [centos@kubemaster ~]$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
- [centos@kubemaster ~]$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
- [centos@kubemaster ~]$
- [centos@kubemaster ~]$ sudo su -
- Last login: Sat Nov 20 03:04:15 EST 2021 on pts/0
- [root@kubemaster ~]#
- [root@kubemaster ~]# kubectl get nodes ----------- NotReady here because the pod network has not been deployed yet
- NAME STATUS ROLES AGE VERSION
- kubemaster NotReady control-plane,master 2m33s v1.22.4
- [root@kubemaster ~]#
- [root@kubemaster ~]# export kubever=$(kubectl version | base64 | tr -d '\n') --------- captures the cluster version string used in the Weave Net URL; the pod network itself is deployed by the next command
- [root@kubemaster ~]#
- [root@kubemaster ~]# kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$kubever"
- serviceaccount/weave-net created
- clusterrole.rbac.authorization.k8s.io/weave-net created
- clusterrolebinding.rbac.authorization.k8s.io/weave-net created
- role.rbac.authorization.k8s.io/weave-net created
- rolebinding.rbac.authorization.k8s.io/weave-net created
- daemonset.apps/weave-net created
- [root@kubemaster ~]#
- [root@kubemaster ~]# kubectl get nodes
- NAME STATUS ROLES AGE VERSION
- kubemaster Ready control-plane,master 3m56s v1.22.4
- [root@kubemaster ~]#
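- An optional check that the Weave Net and control-plane pods are all Running (standard kubectl usage; not part of the recorded session):
- kubectl get pods -n kube-system -o wide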
- Once the worker nodes have joined the cluster and are Ready, kubectl get nodes shows them as below.
- [root@kubemaster ~]# kubectl get nodes
- NAME STATUS ROLES AGE VERSION
- kubemaster Ready control-plane,master 14m v1.22.4
- kubenode1 Ready <none> 44s v1.22.4
- [root@kubemaster ~]#
- [root@kubemaster ~]# kubectl get nodes
- NAME STATUS ROLES AGE VERSION
- kubemaster Ready control-plane,master 40m v1.22.4
- kubenode1 Ready <none> 26m v1.22.4
- kubenode2 Ready <none> 20m v1.22.4
- [root@kubemaster ~]#
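- The <none> under ROLES for the workers is only cosmetic; a role label can be added if preferred (sketch, standard kubectl label usage):
- kubectl label node kubenode1 node-role.kubernetes.io/worker=
- kubectl label node kubenode2 node-role.kubernetes.io/worker=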
- =======================Below Steps on Node1============================
- [root@kubenode1 ~]# cat /etc/hosts
- 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
- ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
- 10.0.100.30 kubemaster
- 10.0.100.31 kubenode1
- 10.0.100.32 kubenode2
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# setenforce 0
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# firewall-cmd --permanent --add-port=6783/tcp
- success
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# firewall-cmd --permanent --add-port=10250/tcp
- success
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# firewall-cmd --permanent --add-port=10255/tcp
- success
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# firewall-cmd --permanent --add-port=30000-32767/tcp
- success
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# firewall-cmd --reload
- success
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# modprobe br_netfilter
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# echo '1' > /proc/sys/net/bridge/bridge-nf-call-iptables
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
- > [kubernetes]
- > name=Kubernetes
- > baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
- > enabled=1
- > gpgcheck=1
- > repo_gpgcheck=1
- > gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
- > EOF
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# cat /etc/yum.repos.d/kubernetes.repo
- [kubernetes]
- name=Kubernetes
- baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
- enabled=1
- gpgcheck=1
- repo_gpgcheck=1
- gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# yum install kubeadm docker -y
- Complete!
- [root@kubenode1 ~]#
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# systemctl enable docker
- Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# systemctl start docker
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# systemctl enable kubelet
- Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# systemctl start kubelet
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# swapoff -a
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# vi /etc/fstab
- [root@kubenode1 ~]#
- [root@kubenode1 ~]# kubeadm join 10.0.100.30:6443 --token hhgqlw.talhbe9am34rl4sm --discovery-token-ca-cert-hash sha256:5de3908e2cd0b4241870b8176c42f66b45569c43f89514b2033815dcaaf0b2a2
- [preflight] Running pre-flight checks
- [preflight] Reading configuration from the cluster...
- [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
- [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
- [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
- [kubelet-start] Starting the kubelet
- [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
- This node has joined the cluster:
- * Certificate signing request was sent to apiserver and a response was received.
- * The Kubelet was informed of the new secure connection details.
- Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
- [root@kubenode1 ~]#
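- On the worker itself the kubelet should now be active and enabled; a quick check with standard systemctl usage (not part of the recorded session):
- systemctl is-active kubelet && systemctl is-enabled kubelet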
- =========================Below Steps on Node2========================
- [root@kubenode2 ~]# cat /etc/hosts
- 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
- ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
- 10.0.100.30 kubemaster
- 10.0.100.31 kubenode1
- 10.0.100.32 kubenode2
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# setenforce 0
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# firewall-cmd --permanent --add-port=6783/tcp
- success
- [root@kubenode2 ~]# firewall-cmd --permanent --add-port=10250/tcp
- success
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# firewall-cmd --permanent --add-port=10255/tcp
- success
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# firewall-cmd --permanent --add-port=30000-32767/tcp
- success
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# firewall-cmd --reload
- success
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# modprobe br_netfilter
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# echo '1' > /proc/sys/net/bridge/bridge-nf-call-iptables
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
- > [kubernetes]
- > name=Kubernetes
- > baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
- > enabled=1
- > gpgcheck=1
- > repo_gpgcheck=1
- > gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
- > EOF
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# yum install kubeadm docker -y
- Loaded plugins: fastestmirror, langpacks
- Complete!
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# systemctl enable docker
- Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# systemctl start docker
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# systemctl enable kubelet
- Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# systemctl start kubelet
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# swapoff -a
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# vi /etc/fstab
- [root@kubenode2 ~]#
- [root@kubenode2 ~]# kubeadm join 10.0.100.30:6443 --token hhgqlw.talhbe9am34rl4sm --discovery-token-ca-cert-hash sha256:5de3908e2cd0b4241870b8176c42f66b45569c43f89514b2033815dcaaf0b2a2
- [preflight] Running pre-flight checks
- [preflight] Reading configuration from the cluster...
- [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
- [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
- [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
- [kubelet-start] Starting the kubelet
- [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
- This node has joined the cluster:
- * Certificate signing request was sent to apiserver and a response was received.
- * The Kubelet was informed of the new secure connection details.
- Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
- [root@kubenode2 ~]#