Last active
July 26, 2019 14:15
-
-
Save felixkrohn/fcf91f1c72700c45c99fdb2dcd5abecc to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# After an accidental "oc delete pv --all", all PVs on the cluster are marked as "Terminating".
# As a result, all PVs have to be re-created, which is what this script does.
# Before force-deleting each PV, all deployments accessing it via a PVC are scaled down, and scaled up again afterwards.
#
# (By the way, the "Terminating" status is not a real status, but is what oc/kubectl print out
# when an object has a DeletionTimestamp, which is immutable and cannot be deleted anymore. See
# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/describe/versioned/describe.go#L1374-L1378
#     if pv.ObjectMeta.DeletionTimestamp != nil {
#         w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampSince(*pv.ObjectMeta.DeletionTimestamp))
#     } else {
#         w.Write(LEVEL_0, "Status:\t%v\n", pv.Status.Phase)
#     } )
#!/bin/sh
# Recreate PVs stuck in "Terminating" after an accidental "oc delete pv --all";
# see the header comment block for the full background.

# Directory where a backup of every object we touch is written.
WD=./pv-restore-backup
mkdir -p "$WD" || { echo "cannot create backup directory $WD" >&2 ; exit 1 ; }

# Set to "echo" to turn every write operation into a no-op while developing/debugging.
DRYRUN=""
# Annotation used to remember the pre-scaledown replica count of a workload.
REPLICAANNOTATION="satt-replicacount"
# Annotation used to remember the original nodeSelector of a daemonset.
NODESELECTORANNOTATION="satt-nodeselector"

# Fail fast when there is no active session.
oc whoami >/dev/null 2>&1 || { echo "need to 'oc login' first" >&2 ; exit 1 ; }
#######################################
# Scale down every workload in a namespace that mounts a PVC, recording the
# commands needed to undo the scaledown in $WD/scaleback.<namespace>.sh.
# Globals:   WD, DRYRUN, REPLICAANNOTATION, NODESELECTORANNOTATION (read)
# Arguments: $1 - namespace to operate on
# Outputs:   progress messages on stdout, scaleback script under $WD
#######################################
scaledownEverythingInNamespace()
{
  namespace="${1}"
  echo "#!/bin/sh" > "$WD/scaleback.${namespace}.sh"
  # # special case: don't touch default
  # [ "$namespace" = "default" ] && return 0
  # special case: don't touch trident (the storage provisioner itself).
  # fix: the original used 'continue' here, which is invalid outside a loop -
  # a function must use 'return' to bail out early.
  [ "$namespace" = "trident" ] && return 0
  # operate on all types of deployment that support replicas...
  for type in deploymentconfig daemonset statefulset deployment; do
    # ... and all instances of that type
    for name in $(oc -n "$namespace" get "$type" --output=jsonpath='{range .items[*].metadata}{.name}{" "}')
    do
      # back up the full object before touching it
      oc -n "$namespace" get "$type" "$name" -o yaml > "$WD/$namespace.$type.$name.yaml"
      # statefulsets declare their PVCs via volumeClaimTemplates and therefore
      # need a different jsonpath than the pod-template based workloads
      case "${type}" in
        statefulset )
          DEPLOYMENTNAME="$(oc -n "$namespace" get "$type" "$name" --output=jsonpath='{.spec.volumeClaimTemplates[*].metadata.name}')"
          ;;
        deploymentconfig | deployment | daemonset )
          DEPLOYMENTNAME="$(oc -n "$namespace" get "$type" "$name" --output=jsonpath='{range .spec.template.spec.volumes[*]}{.persistentVolumeClaim.claimName}')"
          ;;
        * )
          echo "${type} unsupported!" >&2
          ;;
      esac
      # DEPLOYMENTNAME is non-empty only when the workload references a PVC
      if [ -n "${DEPLOYMENTNAME}" ]
      then
        case "${type}" in
          deploymentconfig | deployment | statefulset )
            replicacount="$(oc -n "$namespace" get "$type" "$name" --output=jsonpath='{.spec.replicas}')"
            # ${replicacount:-0} guards against an empty jsonpath result, which
            # would otherwise make the numeric test fail with a syntax error
            if [ "${replicacount:-0}" -gt 0 ]
            then
              # printf instead of 'echo -e': -e is a bashism under #!/bin/sh
              printf '\t\t%s: scaling down %s %s from %s to 0\n' "${namespace}" "${type}" "${name}" "${replicacount}"
              # remember the replica count in an annotation, then scale to 0
              ${DRYRUN} oc -n "${namespace}" annotate "${type}" "${name}" "${REPLICAANNOTATION}=${replicacount}" --overwrite
              ${DRYRUN} oc -n "${namespace}" scale "${type}" "${name}" --replicas=0
              # record the inverse operation (scale back up, drop the annotation)
              printf '%s\n\n' "${DRYRUN} oc -n ${namespace} scale ${type} ${name} --replicas=${replicacount} && ${DRYRUN} oc -n ${namespace} annotate ${type} ${name} ${REPLICAANNOTATION}-" >> "$WD/scaleback.${namespace}.sh"
            fi
            ;;
          daemonset )
            replicacount="$(oc -n "$namespace" get "$type" "$name" --output=jsonpath='{.status.desiredNumberScheduled}')"
            if [ "${replicacount:-0}" -gt 0 ]
            then
              printf '\t\t%s: scaling down %s %s from %s to 0\n' "${namespace}" "${type}" "${name}" "${replicacount}"
              # save the current nodeSelector in "key=value,key=value" notation
              nodeselector="$(oc -n "${namespace}" get "${type}" "${name}" --output=go-template='{{ range $key, $value := .spec.template.spec.nodeSelector}}{{$key}}={{$value}},{{end}}' | sed "s/,$//g" )"
              ${DRYRUN} oc -n "${namespace}" annotate "${type}" "${name}" "${NODESELECTORANNOTATION}=\"${nodeselector}\"" --overwrite
              # Daemonsets can't be scaled down like the other ones - so instead, we add a
              # never-matching nodeSelector, thereby effectively disabling scheduling
              ${DRYRUN} oc -n "${namespace}" patch "${type}" "${name}" -p '{"spec":{"template":{"spec":{"nodeSelector":{"force-scaledown":"true"}}}}}'
              # convert "k=v,k2=v2" into "k":"v","k2":"v2" for the JSON patch below
              nodeselectorjson="$(echo "\"$nodeselector\"" | sed -e "s/=/\":\"/g" -e "s/,/\",\"/g")"
              # NB: "oc patch" does a merge, so it's not possible to remove the temporary
              # nodeSelector by replacing it with the correct one - instead first delete it
              # altogether, then set it to the saved value, then drop the annotation.
              printf '%s\n\n' "${DRYRUN} oc -n ${namespace} patch ${type} ${name} -p '[{ \"op\": \"remove\", \"path\": \"/spec/template/spec/nodeSelector\" }]' --type json && ${DRYRUN} oc -n ${namespace} patch ${type} ${name} -p '{\"spec\":{\"template\":{\"spec\":{\"nodeSelector\":{${nodeselectorjson}}}}}}' && ${DRYRUN} oc -n ${namespace} annotate ${type} ${name} ${NODESELECTORANNOTATION}-" >> "$WD/scaleback.${namespace}.sh"
            fi
            ;;
          * )
            echo "${type} unsupported!" >&2
            ;;
        esac
        unset replicacount nodeselector
      fi
      unset DEPLOYMENTNAME
    done
  done
}
echo "==================================================== remove-deletionTimeStamp ===================================================="
#NAMESPACELIST="felix-testpvrestore1 felix-testpvrestore2"
# All namespaces owning a claim on a PV carrying a deletionTimestamp, minus trident.
# NOTE(review): the jsonpath filter compares against the literal string "null";
# this matches what the original did - verify it selects the intended PVs on your oc version.
NAMESPACELIST="$(oc get pv --no-headers -o jsonpath='{range .items[?(@.metadata.deletionTimestamp != "null" )]}{.spec.claimRef.namespace}{"\n"}' | grep -v trident | sort | uniq)"
for namespace in ${NAMESPACELIST}; do
  # interactive safety gate: require confirmation per namespace
  echo "considering to work on namespace $namespace. type a to skip this namespace, Enter to continue"
  read -r a
  [ "${a}" = "a" ] && continue
  for pv in $(oc get pvc -n "${namespace}" -o jsonpath='{range .items[*]}{.spec.volumeName}{"\n"}'); do
    # only act on PVs that actually carry a deletionTimestamp ("Terminating")
    if [ "$(oc get pv "${pv}" -o jsonpath='{.metadata.deletionTimestamp}')" != "" ]
    then
      claim="$(oc get pv "$pv" -o custom-columns=claim:.spec.claimRef.name --no-headers)"
      echo "working on pv $pv bound to claim $claim in $namespace"
      scaledownEverythingInNamespace "${namespace}"
      # back up the PV and PVC manifests before force-deleting the PV
      oc get --export -oyaml pv "$pv" > "$WD/pv.$pv.yaml"
      # fix: the original wrote to "$WD/pvc.$claimnamespace.$claim.yaml", but
      # $claimnamespace is never set anywhere - use $namespace instead
      oc get --export -oyaml -n "$namespace" pvc "$claim" > "$WD/pvc.$namespace.$claim.yaml"
      # make sure the underlying volume survives the deletion ...
      ${DRYRUN} oc patch pv "$pv" -p '{"spec":{"persistentVolumeReclaimPolicy": "Retain"}}'
      # ... then drop the finalizers so the pending deletion completes
      ${DRYRUN} oc patch pv "$pv" -p '{"metadata":{"finalizers": null}}'
      # re-create the PV from the backup and scale the workloads back up
      { ${DRYRUN} oc create -f "$WD/pv.$pv.yaml" && /bin/bash "$WD/scaleback.${namespace}.sh" ; } || { echo "something bad happened in $namespace, aborting" >&2 ; exit 1 ; }
    fi
  done
done
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment