CentOS Stream 10

Kubernetes : Remove Nodes (2025/01/24)

 

Remove Nodes from an existing Kubernetes Cluster.

[1] Only when removing a Control Plane node, it is necessary to delete its Etcd member and the load balancing configuration beforehand, as follows.
[root@ctrl ~]#
kubectl get nodes

NAME               STATUS   ROLES           AGE     VERSION
dlp-1.srv.world    Ready    control-plane   17m     v1.31.5
dlp.srv.world      Ready    control-plane   2d2h    v1.31.5
node01.srv.world   Ready    <none>          2d2h    v1.31.5
node02.srv.world   Ready    <none>          2d2h    v1.31.5
node03.srv.world   Ready    <none>          4m56s   v1.31.5

[root@ctrl ~]#
kubectl get pods -n kube-system | grep etcd

kube-system   etcd-dlp-1.srv.world                       1/1     Running   0          19m
kube-system   etcd-dlp.srv.world                         1/1     Running   4          2d2h

# as an example, delete the [dlp-1.srv.world] node
# access Etcd on a Control Plane node that is not the removal target and
# remove the member entry for the target node

[root@ctrl ~]#
kubectl -n kube-system exec -it etcd-dlp.srv.world -- sh

sh-5.2#
sh-5.2# etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key member list 
b328c32cc7e9915d, started, dlp-1.srv.world, https://10.0.0.31:2380, https://10.0.0.31:2379, false
dd4b95995dc266b1, started, dlp.srv.world, https://10.0.0.30:2380, https://10.0.0.30:2379, false

# remove the member you want to delete
sh-5.2# etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key member remove b328c32cc7e9915d 
Member b328c32cc7e9915d removed from cluster 63678238411c70a3

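# (optional) list the members again to confirm the removed node no longer appears
sh-5.2# etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key member list 
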
sh-5.2# exit 
exit

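# it is also possible to run the same etcdctl commands non-interactively with [kubectl exec], for example:

[root@ctrl ~]#
kubectl -n kube-system exec etcd-dlp.srv.world -- etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key member list
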
[root@ctrl ~]#
vi /etc/nginx/nginx.conf
# comment out or remove the entry for the target Control Plane
stream {
    upstream k8s-api {
        server 10.0.0.30:6443;
        ###server 10.0.0.31:6443;
    }
    server {
        listen 6443;
        proxy_pass k8s-api;
    }
}

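# (optional) before reloading, you can verify the syntax of the updated configuration

[root@ctrl ~]#
nginx -t
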
[root@ctrl ~]#
systemctl reload nginx
[2] Remove a node from the cluster.
From this point, the procedure is the same for both Control Plane and Worker Nodes.
[root@ctrl ~]#
kubectl get nodes

NAME               STATUS   ROLES           AGE     VERSION
dlp-1.srv.world    Ready    control-plane   17m     v1.31.5
dlp.srv.world      Ready    control-plane   2d2h    v1.31.5
node01.srv.world   Ready    <none>          2d2h    v1.31.5
node02.srv.world   Ready    <none>          2d2h    v1.31.5
node03.srv.world   Ready    <none>          4m56s   v1.31.5

# drain the target node to prepare it for removal
# --ignore-daemonsets ⇒ ignore DaemonSet-managed pods
# --delete-emptydir-data ⇒ continue even if pods use emptyDir volumes (their local data is deleted)
# --force ⇒ also evict pods that are not managed by a controller (Deployment, ReplicaSet, and so on)

[root@ctrl ~]#
kubectl drain dlp-1.srv.world --ignore-daemonsets --delete-emptydir-data --force

node/dlp-1.srv.world cordoned
Warning: ignoring DaemonSet-managed Pods: kube-system/calico-node-8ckkz, kube-system/kube-proxy-thzrl
node/dlp-1.srv.world drained

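# (optional) to see which pods are still running on the drained node
# (only DaemonSet-managed pods should remain), filter the pod list by node name

[root@ctrl ~]#
kubectl get pods -A -o wide --field-selector spec.nodeName=dlp-1.srv.world
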
# verify the node status a few minutes later

[root@ctrl ~]#
kubectl get nodes dlp-1.srv.world

NAME              STATUS                     ROLES           AGE   VERSION
dlp-1.srv.world   Ready,SchedulingDisabled   control-plane   28m   v1.31.5

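# for reference, if you decide to keep the node instead of removing it,
# [kubectl uncordon] makes it schedulable again

[root@ctrl ~]#
kubectl uncordon dlp-1.srv.world
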
# delete the node from the cluster

[root@ctrl ~]#
kubectl delete node dlp-1.srv.world

node "dlp-1.srv.world" deleted

[root@ctrl ~]#
kubectl get nodes

NAME               STATUS   ROLES           AGE    VERSION
dlp.srv.world      Ready    control-plane   2d2h   v1.31.5
node01.srv.world   Ready    <none>          2d2h   v1.31.5
node02.srv.world   Ready    <none>          2d2h   v1.31.5
node03.srv.world   Ready    <none>          17m    v1.31.5
[3] On the removed Node, reset the kubeadm settings.
[root@dlp-1 ~]#
kubeadm reset

[reset] Reading configuration from the cluster...
[reset] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
W0124 13:10:24.067304   13255 reset.go:123] [reset] Unable to fetch the kubeadm-config ConfigMap from cluster: failed to get config map: Get "https://10.0.0.25:6443/api/v1/namespaces/kube-system/configmaps/kubeadm-config?timeout=10s": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
W0124 13:10:24.067662   13255 preflight.go:56] [reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
[reset] Are you sure you want to proceed? [y/N]: y
[preflight] Running pre-flight checks
W0124 13:10:25.789781   13255 removeetcdmember.go:106] [reset] No kubeadm config, using etcd pod spec to get data directory
[reset] Deleted contents of the etcd data directory: /var/lib/etcd
[reset] Stopping the kubelet service
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Deleting contents of directories: [/etc/kubernetes/manifests /var/lib/kubelet /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]

The reset process does not clean CNI configuration. To do so, you must remove /etc/cni/net.d

The reset process does not reset or clean up iptables rules or IPVS tables.
If you wish to reset iptables, you must do so manually by using the "iptables" command.

If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar)
to reset your system's IPVS tables.

The reset process does not clean your kubeconfig files and you must remove them manually.
Please, check the contents of the $HOME/.kube/config file.
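As the output above notes, CNI configuration, iptables rules, and kubeconfig files are left in place. If you also want to remove them, the following is a minimal example; adjust it for your environment, and run [ipvsadm --clear] only if IPVS is in use.

# remove CNI configuration
[root@dlp-1 ~]#
rm -rf /etc/cni/net.d
# flush iptables rules
[root@dlp-1 ~]#
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
# remove the local kubeconfig if it exists
[root@dlp-1 ~]#
rm -f $HOME/.kube/config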