Simple script to manage kubectl when working with many IBM Cloud Private (ICP) clusters
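Typical use, as a sketch (assuming the script is saved somewhere on your PATH, for example as icp-ctx; the filename is arbitrary since the usage text uses ${0##*/}):

    icp-ctx add csmo1 oidc admin@172.16.40.150 MySecretPassword
    icp-ctx list
    icp-ctx use csmo1
    icp-ctx console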
#!/usr/local/bin/bash
#########################################################################################
##
## Simple script to simplify my workflow when managing kubernetes clusters
## Written and performed by Hans Kristian Moen
##
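## Requires: bash 4+ (for mapfile), kubectl, ssh/scp, curl, openssl,
## python (for JSON parsing) and jq (used by the "console" command).
##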
################ My defaults
# Where to stick certs and keys
kubecerts=~/.kube/certs
# Default namespace
defns=kube-system
# Default user for ssh to host
defsshuser=root
# Default string to append to access console
defaultconsole=":8443/console"
################# Some functions
delete_cluster() {
  kubectl config delete-context ${kname}
  kubectl config unset users.${kname}
  kubectl config delete-cluster ${kname}
  if [[ -f ${kubecerts}/${kname}.pem ]]; then
    rm ${kubecerts}/${kname}.pem
  fi
  if [[ -f ${kubecerts}/${kname}.crt ]]; then
    rm ${kubecerts}/${kname}.crt
  fi
  if [[ -f ${kubecerts}/${kname}.key ]]; then
    rm ${kubecerts}/${kname}.key
  fi
}
usage() {
  echo
  echo "Usage: ${0##*/} {add|refresh|delete|list|use|ssh|super|console} <arbitrary_cluster_name> [oidc] [[user@]host] [password] [namespace]"
  echo "Example: ${0##*/} add csmo1 [email protected]"
  echo "Example: ${0##*/} add csmo1 [email protected] default"
  echo "Example: ${0##*/} add csmo1 oidc 172.16.40.150 MySecretPassword"
  echo "Example: ${0##*/} add csmo1 oidc [email protected] MySecretPassword"
  echo "Example: ${0##*/} add csmo1 oidc [email protected] MySecretPassword mynamespace"
  echo "Example: ${0##*/} delete csmo1"
  echo "Example: ${0##*/} ssh"
  echo "Example: ${0##*/} ssh ubuntu"
  echo
}
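# Register the cluster endpoint with kubectl. ICP serves the Kubernetes API
# on port 8001 of the master; TLS verification is skipped rather than
# importing the cluster CA.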
add_cluster() {
  # Setup kubectl
  kubectl config set-cluster ${kname} --server=https://$(echo ${host} | cut -f2 -d@ ):8001 --insecure-skip-tls-verify ## Maybe I'll add the proper cert here some day
}
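# Certificate-based login: copy the admin client certificate and key that ICP
# generates on the master (/etc/cfc/conf) and register them as kubectl
# credentials for this cluster.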
add_user_cert() {
  mkdir -p ${kubecerts}
  # Grab the certificates if we don't already have them
  if [[ ! -f ${kubecerts}/${kname}.crt ]]; then
    # This was the olden days. scp -o StrictHostKeyChecking=no ${host}:/opt/ibm/cluster/cfc-certs/kubecfg.crt ${kubecerts}/${kname}.crt
    scp -o StrictHostKeyChecking=no ${host}:/etc/cfc/conf/kubecfg.crt ${kubecerts}/${kname}.crt
  fi
  if [[ ! -f ${kubecerts}/${kname}.key ]]; then
    # This was the olden days. scp -o StrictHostKeyChecking=no ${host}:/opt/ibm/cluster/cfc-certs/kubecfg.key ${kubecerts}/${kname}.key
    scp -o StrictHostKeyChecking=no ${host}:/etc/cfc/conf/kubecfg.key ${kubecerts}/${kname}.key
  fi
  # Set the credentials
  kubectl config set-credentials ${kname} --username=admin --client-certificate=${kubecerts}/${kname}.crt --client-key=${kubecerts}/${kname}.key
}
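# OIDC login: exchange username/password for tokens at the ICP identity
# provider (port 8443), fetch the OIDC issuer's certificate from port 9443,
# and configure kubectl's oidc auth-provider with the resulting id/refresh tokens.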
add_user_oidc() {
  token=$(curl -H "Content-Type: application/x-www-form-urlencoded;charset=UTF-8" -d "grant_type=password&username=${user}&password=${pass}&scope=openid" https://${host}:8443/idprovider/v1/auth/identitytoken --insecure)
  idToken=$(echo $token | python -c "import sys, json; print(json.load(sys.stdin)['id_token'])")
  accessToken=$(echo $token | python -c "import sys, json; print(json.load(sys.stdin)['access_token'])")
  refreshToken=$(echo $token | python -c "import sys, json; print(json.load(sys.stdin)['refresh_token'])")
  # Get the server certificate
  mkdir -p ${kubecerts}
  openssl s_client -connect ${host}:9443 -showcerts </dev/null 2>/dev/null | openssl x509 -outform PEM > ${kubecerts}/${kname}.pem
  # Set the credentials
  kubectl config set-credentials ${kname} \
    --auth-provider=oidc \
    --auth-provider-arg=idp-issuer-url=https://${host}:9443/oidc/endpoint/OP \
    --auth-provider-arg=client-id=${user} \
    --auth-provider-arg=client-secret=${pass} \
    --auth-provider-arg=idp-certificate-authority=${kubecerts}/${kname}.pem \
    --auth-provider-arg=refresh-token=${refreshToken} \
    --auth-provider-arg=id-token=${idToken} \
    --insecure-skip-tls-verify=true
}
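# Open a root shell on a cluster node: list the nodes, let the user pick one,
# then schedule a privileged busybox pod on that node with the host's root
# filesystem mounted at /hostroot so the caller can chroot into it.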
super_privileged() {
  # TODO: Do a kubectl get nodes -l role=master or get nodes --show-labels to figure out which masters are available and offer option to connect to it
  # TODO: Also should be able to select any of the available nodes
  # TODO: Could use zenity for GUI dialog in Ubuntu, or "dialog" for text based dialog
  echo "Getting nodes..."
  #declare -a nodes
  #nodes=$(kubectl get nodes --show-labels)
  mapfile -t nodes < <(kubectl get nodes --show-labels)
  for (( i=1; i<${#nodes[@]}; i++ ))
  do
    if [[ ${#nodes[$i]} == 0 ]] ; then
      # Stop at the first empty line, just in case
      break
    fi
    echo "${i}: ${nodes[$i]}"
  done
  echo
  echo -n "Select the node to provision to: "
  read sel
  echo "Attempting to enter: ${nodes[$sel]}"
  hostname=$(echo ${nodes[$sel]} | sed -e "s/.*hostname=//" -e "s/,.*//")
  echo "Spinning up super privileged container"
  cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: hk-super
  namespace: kube-system
spec:
  hostNetwork: true
  nodeSelector:
    beta.kubernetes.io/arch: amd64
    kubernetes.io/hostname: $hostname
  tolerations:
  - key: dedicated
    operator: Exists
    effect: NoSchedule
  - key: CriticalAddonsOnly
    operator: Exists
  - key: node-role.kubernetes.io/master
    operator: Exists
    effect: NoSchedule
  containers:
  - name: hk-super
    image: busybox:latest
    command:
    - /bin/sleep
    - "300s"
    securityContext:
      capabilities:
        add:
        - NET_ADMIN
      privileged: true
      seLinuxOptions:
        level: "s0:c123,c456"
    volumeMounts:
    - name: root
      mountPath: /hostroot
    - name: net
      mountPath: /dev/net
  volumes:
  - hostPath:
      path: /
    name: root
  - hostPath:
      path: /dev/net
    name: net
EOF
echo "Wating for container to get ready" | |
sleep 1s | |
phase=$(kubectl -n kube-system get pod hk-super -o jsonpath='{ .status.phase }') | |
while [[ "${phase}" != "Running" ]] | |
do | |
echo "Current status is: ${phase}" | |
echo "Still waiting..." | |
sleep 2s | |
phase=$(kubectl -n kube-system get pod hk-super -o jsonpath='{ .status.phase }') | |
done | |
echo "Ready to rock..." | |
} | |
################# Now do the hard work
case $1 in
  add)
    # Cluster name
    kname=$2
    ## Add user credentials
    if [[ "$3" == "oidc" ]]; then
      # Figure out username, default to admin
      if [[ $4 =~ .*@.* ]]; then
        user=$(echo ${4} | cut -f1 -d@ )
      else
        user=admin
      fi
      # Grab host and password
      host=$(echo ${4} | cut -f2 -d@ )
      pass=$5
      # Do the magic
      add_user_oidc
      namespace=${6:-$defns}
    else
      # Grab certs and add user
      host=$3
      add_user_cert
      namespace=${4:-$defns}
    fi
    ## Add cluster info
    add_cluster
    ## Add context and use
    kubectl config set-context ${kname} --cluster=${kname} --user=${kname} --namespace=${namespace}
    kubectl config use-context ${kname}
    ;;
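  # refresh: re-run the OIDC login for the current context, reusing the
  # username/password stored in the kubeconfig as client-id/client-secret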
  refresh)
    kname=$(kubectl config current-context)
    user=$(kubectl config view -o jsonpath='{.users[?(@.name=="'${kname}'")].user.auth-provider.config.client-id}')
    host=$(kubectl config view -o jsonpath='{.clusters[?(@.name=="'${kname}'")].cluster.server}' | sed -e 's|https://\(.*\)\:.*|\1|')
    pass=$(kubectl config view -o jsonpath='{.users[?(@.name=="'${kname}'")].user.auth-provider.config.client-secret}')
    add_user_oidc
    ;;
  delete)
    kname=$2
    delete_cluster
    ;;
  list)
    kubectl config get-contexts -o name
    echo "Current context: $(kubectl config current-context)"
    ;;
  use)
    kname=$2
    kubectl config use-context ${kname}
    ;;
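  # ssh: derive the master's address from the current context's API server URL
  # and ssh to it as the given user (defaults to $defsshuser)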
  ssh)
    kname=$(kubectl config current-context)
    user=${2:-$defsshuser}
    echo "Connecting to active master of ${kname} as ${user}"
    host=$(kubectl config view -o jsonpath='{.clusters[?(@.name=="'${kname}'")].cluster.server}' | sed -e 's|https://\(.*\)\:.*|\1|')
    # host=$(kubectl config view -o json | jq -r '.clusters[] | select(.name=="'${currentcontext}'") | .cluster.server' | sed -e 's|https://\(.*\)\:.*|\1|')
    ssh -o StrictHostKeyChecking=no ${user}@${host}
    ;;
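  # console: look up the current cluster's base URL and any stored OIDC
  # credentials, print them, and open the ICP console in a browser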
  console)
    currentcontext=$(kubectl config current-context)
    echo "Connecting to the console of ${currentcontext}"
    baseurl=$(kubectl config view -o json | jq -r '.clusters[] | select(.name=="'${currentcontext}'") | .cluster.server' | sed -e 's|\(.*\)\:.*|\1|')
    user=$(kubectl config view -o json | jq -r '.users[] | select(.name=="'${currentcontext}'") | .user."auth-provider".config."client-id" ')
    pass=$(kubectl config view -o json | jq -r '.users[] | select(.name=="'${currentcontext}'") | .user."auth-provider".config."client-secret" ')
    echo "If known, the username / password is ${user} / ${pass}"
    if which xdg-open &>/dev/null ; then
      xdg-open ${baseurl}${defaultconsole}
    elif [[ -x /Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome ]] ; then
      /Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome ${baseurl}${defaultconsole}
    elif [[ -x /usr/bin/open ]] ; then
      open ${baseurl}${defaultconsole}
    else
      echo "Open ${baseurl}${defaultconsole} with your browser"
    fi
    ;;
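  # super: schedule the privileged pod, chroot into the node's root
  # filesystem for an interactive shell, then remove the pod on exit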
  super)
    super_privileged
    # When ready
    kubectl -n kube-system exec -it hk-super -- chroot /hostroot /bin/bash
    echo "Destroying pod again"
    kubectl -n kube-system delete pod hk-super
    ;;
  *)
    usage
    exit 1
esac