Last active: April 10, 2023 06:25
Save jdluther2020/4033f3ae9ba913440c4e24a979a6ed37 to your computer and use it in GitHub Desktop.
Mastering Kubernetes One Task at a Time - Know Thy Nodes!—Objective 6 - Schedule Pods on all Nodes (Worker + Control Plane)
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env bash
#
# Purpose: Mastering Kubernetes One Task at a Time - Know Thy Nodes!—Objective 6 - Schedule Pods on all Nodes (Worker + Control Plane)
# Blog Ref: https://medium.com/the-aws-way/the-aws-way-mastering-kubernetes-one-task-at-a-time-know-thy-nodes-e3ad157ac2db
# GitHub Ref: https://github.com/jdluther2020/jdluther-kubernetes-io-tasks/
#
# Author's NOTE
# 1. # are comment lines
# 2. Command output wherever helpful is shown inside {} — the output lines
#    are themselves commented out so this file remains valid, runnable bash
# 3. Everything is executed on a local dev environment (MacOS)
#
# OBJECTIVE 6 - SCHEDULE PODS ON ALL NODES (WORKER + CONTROL PLANE)
# - Create a deployment whose pods run on all nodes of the cluster
#
# First, check the taints to see if any node (including the control plane)
# normally prevents pod scheduling.
# We'll use the taint key to set the toleration next.
kubectl describe nodes | grep -i -A 5 taints
# {
#   Taints:             node-role.kubernetes.io/control-plane:NoSchedule
#   Unschedulable:      false
#   Lease:
#     HolderIdentity:  basic-multi-node-cluster-control-plane
#     AcquireTime:     <unset>
#     RenewTime:       Thu, 06 Apr 2023 02:09:15 +0000
#   --
#   Taints:             <none>
#   Unschedulable:      false
#   Lease:
#     HolderIdentity:  basic-multi-node-cluster-worker
#     AcquireTime:     <unset>
#     RenewTime:       Thu, 06 Apr 2023 02:09:14 +0000
#   --
#   Taints:             <none>
#   Unschedulable:      false
#   Lease:
#     HolderIdentity:  basic-multi-node-cluster-worker2
#     AcquireTime:     <unset>
#     RenewTime:       Thu, 06 Apr 2023 02:09:22 +0000
# }

# Create a deployment manifest with 3 replica pods to schedule one on each
# node (2 workers and 1 control plane).
# The heredoc delimiter is quoted ('EOF') so the YAML is written literally,
# with no shell parameter expansion.
tee deploy.yaml <<'EOF'
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: nginx-deployment
  name: nginx-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx-deployment
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx-deployment
    spec:
      # Tolerate the control-plane taint found above so the scheduler may
      # also place pods on the control-plane node. 'operator: Exists'
      # matches the taint key regardless of its (empty) value.
      tolerations:
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
      containers:
      - image: nginx:latest
        name: nginx
        resources: {}
status: {}
EOF

# Create deployment
kubectl apply -f deploy.yaml
# {
#   deployment.apps/nginx-deployment created
# }

# Confirm each cluster node has one of the deployment pods
kubectl get pod -o wide
# {
#   NAME                               READY   STATUS    RESTARTS   AGE   IP           NODE                                     NOMINATED NODE   READINESS GATES
#   nginx-deployment-98976f567-bkc66   1/1     Running   0          27s   10.244.2.3   basic-multi-node-cluster-worker2         <none>           <none>
#   nginx-deployment-98976f567-cd6m9   1/1     Running   0          27s   10.244.1.3   basic-multi-node-cluster-worker          <none>           <none>
#   nginx-deployment-98976f567-j7brf   1/1     Running   0          27s   10.244.0.5   basic-multi-node-cluster-control-plane   <none>           <none>
# }

# Scale up the number of pods of the deployment from existing 3 to 9, and see
# how the scheduler distributes them among the 3 available nodes.
# '&&' ensures we only wait for the rollout if the scale command succeeded.
kubectl scale deployment/nginx-deployment --replicas=9 && kubectl rollout status deploy nginx-deployment

# List the pods again to see the distribution across all three nodes
kubectl get pod -o wide
# {
#   NAME                               READY   STATUS    RESTARTS   AGE    IP           NODE                                     NOMINATED NODE   READINESS GATES
#   nginx-deployment-98976f567-28dgg   1/1     Running   0          9s     10.244.1.5   basic-multi-node-cluster-worker          <none>           <none>
#   nginx-deployment-98976f567-7mkvc   1/1     Running   0          9s     10.244.2.6   basic-multi-node-cluster-worker2         <none>           <none>
#   nginx-deployment-98976f567-cd6m9   1/1     Running   0          2m1s   10.244.1.3   basic-multi-node-cluster-worker          <none>           <none>
#   nginx-deployment-98976f567-dhcpp   1/1     Running   0          9s     10.244.2.7   basic-multi-node-cluster-worker2         <none>           <none>
#   nginx-deployment-98976f567-j7brf   1/1     Running   0          2m1s   10.244.0.5   basic-multi-node-cluster-control-plane   <none>           <none>
#   nginx-deployment-98976f567-mw8jh   1/1     Running   0          9s     10.244.2.8   basic-multi-node-cluster-worker2         <none>           <none>
#   nginx-deployment-98976f567-qp8fn   1/1     Running   0          9s     10.244.0.6   basic-multi-node-cluster-control-plane   <none>           <none>
#   nginx-deployment-98976f567-qxm7g   1/1     Running   0          9s     10.244.0.7   basic-multi-node-cluster-control-plane   <none>           <none>
#   nginx-deployment-98976f567-tndks   1/1     Running   0          74s    10.244.1.4   basic-multi-node-cluster-worker          <none>           <none>
# }

# END OF DEMONSTRATION
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.