# istio-federation-demo
# Charmed Kubernetes enforces its own CoreDNS config, which you cannot override; any edits get reverted.
# So I had to set the dns option to "none" on the kubernetes-master charm (see the juju example below).
# (You can apparently extend the CoreDNS config by using coredns-custom, but I didn't try it.)
# CoreDNS then has to be deployed manually using the manifests below:
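# For example, assuming the charm exposes the DNS setting as "dns-provider" (as on recent
# kubernetes-master charm revisions; verify the exact option name with `juju config kubernetes-master`):
juju config kubernetes-master dns-provider=none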
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    cdk-restart-on-ca-change: "true"
    k8s-app: kube-dns
    kubernetes.io/name: CoreDNS
  name: coredns
  namespace: kube-system
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kube-dns
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: k8s-app
                operator: In
                values:
                - kube-dns
            topologyKey: kubernetes.io/hostname
      containers:
      - args:
        - -conf
        - /etc/coredns/Corefile
        image: rocks.canonical.com:443/cdk/coredns/coredns-amd64:1.6.7
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        name: coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /etc/coredns
          name: config-volume
          readOnly: true
      dnsPolicy: Default
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: coredns
      serviceAccountName: coredns
      terminationGracePeriodSeconds: 30
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      volumes:
      - configMap:
          defaultMode: 420
          items:
          - key: Corefile
            path: Corefile
          name: coredns
        name: config-volume
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
# Once deployed, you can find the service IP with kubectl get svc (see the example below).
# The next step is to update the kubelet config on the worker nodes.
# Usually this is done by the kubernetes-worker charm; note the clusterDNS field,
# which is inherited by all pods:
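# For example, to read the ClusterIP of the kube-dns Service created above
# (this should match the clusterDNS value in the kubelet config below):
kubectl -n kube-system get svc kube-dns -o jsonpath='{.spec.clusterIP}'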
# Generated by kubernetes-worker charm, do not edit
address: 0.0.0.0
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  x509:
    clientCAFile: /root/cdk/ca.crt
clusterDNS:
- 10.152.183.64
clusterDomain: cluster.local
failSwapOn: false
kind: KubeletConfiguration
port: 10250
resolvConf: /run/systemd/resolve/resolv.conf
tlsCertFile: /root/cdk/server.crt
tlsPrivateKeyFile: /root/cdk/server.key
# Restart the kubernetes-worker service and the kubelet service on each worker, for example:
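# On a Charmed Kubernetes worker the relevant snap services are typically named as below
# (service names may vary by release; check with `systemctl list-units 'snap.kube*'`):
sudo systemctl restart snap.kubelet.daemon.service
sudo systemctl restart snap.kube-proxy.daemon.service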
# Detailed DNS debugging is covered in:
# https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/
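# A quick sanity check along the lines of that guide (the dnsutils example pod comes
# from the Kubernetes docs; any image with nslookup available will do):
kubectl apply -f https://k8s.io/examples/admin/dns/dnsutils.yaml
kubectl exec -i -t dnsutils -- nslookup kubernetes.default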
# CoreDNS config: the default cluster.local server block plus a global:53 stanza that
# forwards *.global queries to the istiocoredns service for cross-cluster resolution.
kubectl apply -f - <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
           pods insecure
           upstream
           fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
    global:53 {
        errors
        cache 30
        proxy . $(kubectl get svc -n istio-system istiocoredns -o jsonpath={.spec.clusterIP})
    }
EOF
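# Once CoreDNS reloads, names under .global should resolve via istiocoredns. A rough check
# (the istiocoredns service name matches the ConfigMap above; the .global hostname is only a
# placeholder for whatever remote service you have registered through Istio):
kubectl -n istio-system get svc istiocoredns
kubectl exec -i -t dnsutils -- nslookup httpbin.default.global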