Created March 6, 2020 13:59
Verbose output from kops run
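
For context, a command along the following lines would produce verbose output like the log below. This is a sketch reconstructed only from values visible in the log (state store, S3 endpoint, cluster name, zone, networking mode, SSH key path); the credential variables and the feature flag are assumptions based on the usual kops-on-DigitalOcean setup and do not appear in the log itself.

# Assumed environment for kops with DigitalOcean Spaces as the state store.
# Only KOPS_STATE_STORE and S3_ENDPOINT are confirmed by the log; the rest are placeholders.
export KOPS_FEATURE_FLAGS="AlphaAllowDO"                    # assumed: enables the DigitalOcean provider
export KOPS_STATE_STORE="do://test1.dev.fra1-state-store"   # from the log
export S3_ENDPOINT="https://fra1.digitaloceanspaces.com"    # from the log
export S3_ACCESS_KEY_ID="<spaces-access-key>"               # assumed placeholder
export S3_SECRET_ACCESS_KEY="<spaces-secret-key>"           # assumed placeholder
export DIGITALOCEAN_ACCESS_TOKEN="<do-api-token>"           # assumed placeholder

# A high -v level produces the glog output captured below.
kops create cluster \
  --cloud=digitalocean \
  --name=test1.dev.fra1.do.services.example.com \
  --zones=fra1 \
  --networking=calico \
  --ssh-public-key=/home/mirek/.ssh/id_rsa.pub \
  -v 10
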
I0306 13:52:07.020988 10006 create_cluster.go:1496] Using SSH public key: /home/mirek/.ssh/id_rsa.pub | |
I0306 13:52:07.021033 10006 factory.go:68] state store do://test1.dev.fra1-state-store | |
I0306 13:52:07.021105 10006 s3context.go:92] Found S3_ENDPOINT="https://fra1.digitaloceanspaces.com", using as non-AWS S3 backend | |
I0306 13:52:07.021331 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/config" | |
I0306 13:52:07.526214 10006 channel.go:99] resolving "stable" against default channel location "https://raw.githubusercontent.com/kubernetes/kops/master/channels/" | |
I0306 13:52:07.526272 10006 channel.go:104] Loading channel from "https://raw.githubusercontent.com/kubernetes/kops/master/channels/stable" | |
I0306 13:52:07.526299 10006 context.go:177] Performing HTTP request: GET https://raw.githubusercontent.com/kubernetes/kops/master/channels/stable | |
I0306 13:52:07.835530 10006 channel.go:113] Channel contents: spec: | |
images: | |
# We put the "legacy" version first, for kops versions that don't support versions ( < 1.5.0 ) | |
- name: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2017-07-28 | |
providerID: aws | |
kubernetesVersion: ">=1.4.0 <1.5.0" | |
- name: kope.io/k8s-1.5-debian-jessie-amd64-hvm-ebs-2018-08-17 | |
providerID: aws | |
kubernetesVersion: ">=1.5.0 <1.6.0" | |
- name: kope.io/k8s-1.6-debian-jessie-amd64-hvm-ebs-2018-08-17 | |
providerID: aws | |
kubernetesVersion: ">=1.6.0 <1.7.0" | |
- name: kope.io/k8s-1.7-debian-jessie-amd64-hvm-ebs-2018-08-17 | |
providerID: aws | |
kubernetesVersion: ">=1.7.0 <1.8.0" | |
- name: kope.io/k8s-1.8-debian-jessie-amd64-hvm-ebs-2018-08-17 | |
providerID: aws | |
kubernetesVersion: ">=1.8.0 <1.9.0" | |
- name: kope.io/k8s-1.9-debian-jessie-amd64-hvm-ebs-2018-08-17 | |
providerID: aws | |
kubernetesVersion: ">=1.9.0 <1.10.0" | |
- name: kope.io/k8s-1.10-debian-jessie-amd64-hvm-ebs-2018-08-17 | |
providerID: aws | |
kubernetesVersion: ">=1.10.0 <1.11.0" | |
# Stretch is the default for 1.11 (for nvme) | |
- name: kope.io/k8s-1.11-debian-stretch-amd64-hvm-ebs-2020-01-17 | |
providerID: aws | |
kubernetesVersion: ">=1.11.0 <1.12.0" | |
- name: kope.io/k8s-1.12-debian-stretch-amd64-hvm-ebs-2020-01-17 | |
providerID: aws | |
kubernetesVersion: ">=1.12.0 <1.13.0" | |
- name: kope.io/k8s-1.13-debian-stretch-amd64-hvm-ebs-2020-01-17 | |
providerID: aws | |
kubernetesVersion: ">=1.13.0 <1.14.0" | |
- name: kope.io/k8s-1.14-debian-stretch-amd64-hvm-ebs-2020-01-17 | |
providerID: aws | |
kubernetesVersion: ">=1.14.0 <1.15.0" | |
- name: kope.io/k8s-1.15-debian-stretch-amd64-hvm-ebs-2020-01-17 | |
providerID: aws | |
kubernetesVersion: ">=1.15.0 <1.16.0" | |
- name: kope.io/k8s-1.16-debian-stretch-amd64-hvm-ebs-2020-01-17 | |
providerID: aws | |
kubernetesVersion: ">=1.16.0 <1.17.0" | |
- name: kope.io/k8s-1.17-debian-stretch-amd64-hvm-ebs-2020-01-17 | |
providerID: aws | |
kubernetesVersion: ">=1.17.0" | |
- providerID: gce | |
kubernetesVersion: "<1.16.0-alpha.1" | |
name: "cos-cloud/cos-stable-65-10323-99-0" | |
- providerID: gce | |
kubernetesVersion: ">=1.16.0-alpha.1" | |
name: "cos-cloud/cos-stable-77-12371-114-0" | |
cluster: | |
kubernetesVersion: v1.5.8 | |
networking: | |
kubenet: {} | |
kubernetesVersions: | |
- range: ">=1.17.0" | |
recommendedVersion: 1.17.3 | |
requiredVersion: 1.17.0 | |
- range: ">=1.16.0" | |
recommendedVersion: 1.16.7 | |
requiredVersion: 1.16.0 | |
- range: ">=1.15.0" | |
recommendedVersion: 1.15.10 | |
requiredVersion: 1.15.0 | |
- range: ">=1.14.0" | |
recommendedVersion: 1.14.10 | |
requiredVersion: 1.14.0 | |
- range: ">=1.13.0" | |
recommendedVersion: 1.13.12 | |
requiredVersion: 1.13.0 | |
- range: ">=1.12.0" | |
recommendedVersion: 1.12.10 | |
requiredVersion: 1.12.0 | |
- range: ">=1.11.0" | |
recommendedVersion: 1.11.10 | |
requiredVersion: 1.11.0 | |
- range: "<1.11.0" | |
recommendedVersion: 1.11.10 | |
requiredVersion: 1.11.10 | |
kopsVersions: | |
- range: ">=1.17.0-alpha.1" | |
#recommendedVersion: "1.17.0" | |
#requiredVersion: 1.17.0 | |
kubernetesVersion: 1.17.3 | |
- range: ">=1.16.0-alpha.1" | |
#recommendedVersion: "1.16.0" | |
#requiredVersion: 1.16.0 | |
kubernetesVersion: 1.16.7 | |
- range: ">=1.15.0-alpha.1" | |
#recommendedVersion: "1.15.0" | |
#requiredVersion: 1.15.0 | |
kubernetesVersion: 1.15.10 | |
- range: ">=1.14.0-alpha.1" | |
#recommendedVersion: "1.14.0" | |
#requiredVersion: 1.14.0 | |
kubernetesVersion: 1.14.10 | |
- range: ">=1.13.0-alpha.1" | |
#recommendedVersion: "1.13.0" | |
#requiredVersion: 1.13.0 | |
kubernetesVersion: 1.13.12 | |
- range: ">=1.12.0-alpha.1" | |
recommendedVersion: "1.12.1" | |
#requiredVersion: 1.12.0 | |
kubernetesVersion: 1.12.10 | |
- range: ">=1.11.0-alpha.1" | |
recommendedVersion: "1.11.1" | |
#requiredVersion: 1.11.0 | |
kubernetesVersion: 1.11.10 | |
- range: "<1.11.0-alpha.1" | |
recommendedVersion: "1.11.1" | |
#requiredVersion: 1.10.0 | |
kubernetesVersion: 1.11.10 | |
I0306 13:52:07.835600 10006 channel.go:241] version range ">=1.17.0-alpha.1" does not apply to version "1.15.2"; skipping | |
I0306 13:52:07.835639 10006 channel.go:241] version range ">=1.16.0-alpha.1" does not apply to version "1.15.2"; skipping | |
I0306 13:52:07.835775 10006 create_cluster.go:1018] networking mode=calico => {"calico":{"majorVersion":"v3"}} | |
I0306 13:52:07.835869 10006 defaults.go:224] Not setting up Proxy Excludes | |
I0306 13:52:07.836436 10006 populate_cluster_spec.go:371] Defaulted KubeControllerManager.ClusterCIDR to 100.96.0.0/11 | |
I0306 13:52:07.836464 10006 populate_cluster_spec.go:378] Defaulted ServiceClusterIPRange to 100.64.0.0/13 | |
I0306 13:52:07.836500 10006 defaults.go:224] Not setting up Proxy Excludes | |
I0306 13:52:07.836562 10006 utils.go:167] Querying for all DNS zones to find match for "test1.dev.fra1.do.services.example.com" | |
I0306 13:52:08.871419 10006 populate_cluster_spec.go:253] Defaulting DNS zone to: test1.dev.fra1.do.services.example.com | |
I0306 13:52:08.871473 10006 tagbuilder.go:95] tags: [_do _k8s_1_6] | |
I0306 13:52:08.871934 10006 options_loader.go:130] executing builder *components.DefaultsOptionsBuilder | |
I0306 13:52:08.871967 10006 options_loader.go:130] executing builder *components.EtcdOptionsBuilder | |
I0306 13:52:08.872000 10006 options_loader.go:130] executing builder *etcdmanager.EtcdManagerOptionsBuilder | |
I0306 13:52:08.872025 10006 options_loader.go:130] executing builder *nodeauthorizer.OptionsBuilder | |
I0306 13:52:08.872043 10006 options_loader.go:130] executing builder *components.KubeAPIServerOptionsBuilder | |
I0306 13:52:08.872131 10006 options_loader.go:130] executing builder *components.DockerOptionsBuilder | |
I0306 13:52:08.872157 10006 options_loader.go:130] executing builder *components.NetworkingOptionsBuilder | |
I0306 13:52:08.872177 10006 options_loader.go:130] executing builder *components.KubeDnsOptionsBuilder | |
I0306 13:52:08.872205 10006 options_loader.go:130] executing builder *components.KubeletOptionsBuilder | |
I0306 13:52:08.872248 10006 kubelet.go:171] Cloud Provider: digitalocean | |
I0306 13:52:08.872280 10006 options_loader.go:130] executing builder *components.KubeControllerManagerOptionsBuilder | |
I0306 13:52:08.872314 10006 kubecontrollermanager.go:74] Kubernetes version "1.15.10" supports AttachDetachReconcileSyncPeriod; will configure | |
I0306 13:52:08.872353 10006 kubecontrollermanager.go:79] AttachDetachReconcileSyncPeriod is not set; will set to default 1m0s | |
I0306 13:52:08.872394 10006 options_loader.go:130] executing builder *components.KubeSchedulerOptionsBuilder | |
I0306 13:52:08.872421 10006 options_loader.go:130] executing builder *components.KubeProxyOptionsBuilder | |
I0306 13:52:08.873485 10006 options_loader.go:130] executing builder *components.DefaultsOptionsBuilder | |
I0306 13:52:08.873579 10006 options_loader.go:130] executing builder *components.EtcdOptionsBuilder | |
I0306 13:52:08.873640 10006 options_loader.go:130] executing builder *etcdmanager.EtcdManagerOptionsBuilder | |
I0306 13:52:08.873725 10006 options_loader.go:130] executing builder *nodeauthorizer.OptionsBuilder | |
I0306 13:52:08.873770 10006 options_loader.go:130] executing builder *components.KubeAPIServerOptionsBuilder | |
I0306 13:52:08.873884 10006 options_loader.go:130] executing builder *components.DockerOptionsBuilder | |
I0306 13:52:08.873940 10006 options_loader.go:130] executing builder *components.NetworkingOptionsBuilder | |
I0306 13:52:08.873987 10006 options_loader.go:130] executing builder *components.KubeDnsOptionsBuilder | |
I0306 13:52:08.874030 10006 options_loader.go:130] executing builder *components.KubeletOptionsBuilder | |
I0306 13:52:08.874080 10006 kubelet.go:171] Cloud Provider: digitalocean | |
I0306 13:52:08.874131 10006 options_loader.go:130] executing builder *components.KubeControllerManagerOptionsBuilder | |
I0306 13:52:08.874180 10006 kubecontrollermanager.go:74] Kubernetes version "1.15.10" supports AttachDetachReconcileSyncPeriod; will configure | |
I0306 13:52:08.874244 10006 options_loader.go:130] executing builder *components.KubeSchedulerOptionsBuilder | |
I0306 13:52:08.874292 10006 options_loader.go:130] executing builder *components.KubeProxyOptionsBuilder | |
I0306 13:52:08.875658 10006 spec_builder.go:49] options: { | |
"channel": "stable", | |
"configBase": "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com", | |
"cloudProvider": "digitalocean", | |
"kubernetesVersion": "1.15.10", | |
"subnets": [ | |
{ | |
"name": "fra1", | |
"zone": "fra1", | |
"region": "fra1", | |
"type": "Public" | |
} | |
], | |
"masterPublicName": "api.test1.dev.fra1.do.services.example.com", | |
"masterInternalName": "api.internal.test1.dev.fra1.do.services.example.com", | |
"topology": { | |
"masters": "public", | |
"nodes": "public", | |
"dns": { | |
"type": "Public" | |
} | |
}, | |
"secretStore": "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets", | |
"keyStore": "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki", | |
"configStore": "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com", | |
"dnsZone": "test1.dev.fra1.do.services.example.com", | |
"clusterDNSDomain": "cluster.local", | |
"serviceClusterIPRange": "100.64.0.0/13", | |
"nonMasqueradeCIDR": "100.64.0.0/10", | |
"sshAccess": [ | |
"0.0.0.0/0" | |
], | |
"kubernetesApiAccess": [ | |
"0.0.0.0/0" | |
], | |
"etcdClusters": [ | |
{ | |
"name": "main", | |
"provider": "Manager", | |
"etcdMembers": [ | |
{ | |
"name": "1", | |
"instanceGroup": "master-fra1" | |
} | |
], | |
"enableEtcdTLS": true, | |
"enableTLSAuth": true, | |
"version": "3.2.24", | |
"backups": { | |
"backupStore": "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/backups/etcd/main" | |
}, | |
"manager": {}, | |
"memoryRequest": "100Mi", | |
"cpuRequest": "200m" | |
}, | |
{ | |
"name": "events", | |
"provider": "Manager", | |
"etcdMembers": [ | |
{ | |
"name": "1", | |
"instanceGroup": "master-fra1" | |
} | |
], | |
"enableEtcdTLS": true, | |
"enableTLSAuth": true, | |
"version": "3.2.24", | |
"backups": { | |
"backupStore": "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/backups/etcd/events" | |
}, | |
"manager": {}, | |
"memoryRequest": "100Mi", | |
"cpuRequest": "100m" | |
} | |
], | |
"docker": { | |
"ipMasq": false, | |
"ipTables": false, | |
"logDriver": "json-file", | |
"logLevel": "warn", | |
"logOpt": [ | |
"max-size=10m", | |
"max-file=5" | |
], | |
"storage": "overlay2,overlay,aufs", | |
"version": "18.06.3" | |
}, | |
"kubeDNS": { | |
"cacheMaxSize": 1000, | |
"cacheMaxConcurrent": 150, | |
"domain": "cluster.local", | |
"replicas": 2, | |
"serverIP": "100.64.0.10", | |
"memoryRequest": "70Mi", | |
"cpuRequest": "100m", | |
"memoryLimit": "170Mi" | |
}, | |
"kubeAPIServer": { | |
"image": "k8s.gcr.io/kube-apiserver:v1.15.10", | |
"logLevel": 2, | |
"cloudProvider": "external", | |
"securePort": 443, | |
"insecurePort": 8080, | |
"bindAddress": "0.0.0.0", | |
"insecureBindAddress": "127.0.0.1", | |
"enableAdmissionPlugins": [ | |
"NamespaceLifecycle", | |
"LimitRanger", | |
"ServiceAccount", | |
"PersistentVolumeLabel", | |
"DefaultStorageClass", | |
"DefaultTolerationSeconds", | |
"MutatingAdmissionWebhook", | |
"ValidatingAdmissionWebhook", | |
"NodeRestriction", | |
"ResourceQuota" | |
], | |
"serviceClusterIPRange": "100.64.0.0/13", | |
"etcdServers": [ | |
"http://127.0.0.1:4001" | |
], | |
"etcdServersOverrides": [ | |
"/events#http://127.0.0.1:4002" | |
], | |
"allowPrivileged": true, | |
"apiServerCount": 1, | |
"anonymousAuth": false, | |
"kubeletPreferredAddressTypes": [ | |
"InternalIP", | |
"Hostname", | |
"ExternalIP" | |
], | |
"storageBackend": "etcd3", | |
"authorizationMode": "RBAC", | |
"requestheaderUsernameHeaders": [ | |
"X-Remote-User" | |
], | |
"requestheaderGroupHeaders": [ | |
"X-Remote-Group" | |
], | |
"requestheaderExtraHeaderPrefixes": [ | |
"X-Remote-Extra-" | |
], | |
"requestheaderAllowedNames": [ | |
"aggregator" | |
] | |
}, | |
"kubeControllerManager": { | |
"logLevel": 2, | |
"image": "k8s.gcr.io/kube-controller-manager:v1.15.10", | |
"cloudProvider": "external", | |
"clusterName": "test1.dev.fra1.do.services.example.com", | |
"clusterCIDR": "100.96.0.0/11", | |
"allocateNodeCIDRs": true, | |
"configureCloudRoutes": false, | |
"leaderElection": { | |
"leaderElect": true | |
}, | |
"attachDetachReconcileSyncPeriod": "1m0s", | |
"useServiceAccountCredentials": true | |
}, | |
"kubeScheduler": { | |
"logLevel": 2, | |
"image": "k8s.gcr.io/kube-scheduler:v1.15.10", | |
"leaderElection": { | |
"leaderElect": true | |
} | |
}, | |
"kubeProxy": { | |
"image": "k8s.gcr.io/kube-proxy:v1.15.10", | |
"cpuRequest": "100m", | |
"logLevel": 2, | |
"clusterCIDR": "100.96.0.0/11", | |
"hostnameOverride": "@digitalocean" | |
}, | |
"kubelet": { | |
"anonymousAuth": false, | |
"kubeconfigPath": "/var/lib/kubelet/kubeconfig", | |
"logLevel": 2, | |
"podManifestPath": "/etc/kubernetes/manifests", | |
"hostnameOverride": "@digitalocean", | |
"podInfraContainerImage": "k8s.gcr.io/pause-amd64:3.0", | |
"enableDebuggingHandlers": true, | |
"clusterDomain": "cluster.local", | |
"clusterDNS": "100.64.0.10", | |
"networkPluginName": "cni", | |
"cloudProvider": "external", | |
"cgroupRoot": "/", | |
"nonMasqueradeCIDR": "100.64.0.0/10", | |
"evictionHard": "memory.available\u003c100Mi,nodefs.available\u003c10%,nodefs.inodesFree\u003c5%,imagefs.available\u003c10%,imagefs.inodesFree\u003c5%", | |
"featureGates": { | |
"ExperimentalCriticalPodAnnotation": "true" | |
} | |
}, | |
"masterKubelet": { | |
"anonymousAuth": false, | |
"kubeconfigPath": "/var/lib/kubelet/kubeconfig", | |
"logLevel": 2, | |
"podManifestPath": "/etc/kubernetes/manifests", | |
"hostnameOverride": "@digitalocean", | |
"podInfraContainerImage": "k8s.gcr.io/pause-amd64:3.0", | |
"enableDebuggingHandlers": true, | |
"clusterDomain": "cluster.local", | |
"clusterDNS": "100.64.0.10", | |
"networkPluginName": "cni", | |
"cloudProvider": "external", | |
"cgroupRoot": "/", | |
"registerSchedulable": false, | |
"nonMasqueradeCIDR": "100.64.0.0/10", | |
"evictionHard": "memory.available\u003c100Mi,nodefs.available\u003c10%,nodefs.inodesFree\u003c5%,imagefs.available\u003c10%,imagefs.inodesFree\u003c5%", | |
"featureGates": { | |
"ExperimentalCriticalPodAnnotation": "true" | |
} | |
}, | |
"networking": { | |
"calico": { | |
"majorVersion": "v3" | |
} | |
}, | |
"api": { | |
"dns": {} | |
}, | |
"authorization": { | |
"rbac": {} | |
}, | |
"iam": { | |
"legacy": false, | |
"allowContainerRegistry": true | |
} | |
} | |
I0306 13:52:08.896870 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/config" | |
I0306 13:52:08.974991 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/config" | |
I0306 13:52:08.975043 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/config" SSE="-" ACL="" | |
I0306 13:52:09.152840 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/master-fra1" | |
I0306 13:52:09.231678 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/master-fra1" | |
I0306 13:52:09.231725 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/instancegroup/master-fra1" SSE="-" ACL="" | |
I0306 13:52:09.324331 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/nodes" | |
I0306 13:52:09.404550 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/nodes" | |
I0306 13:52:09.404597 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/instancegroup/nodes" SSE="-" ACL="" | |
I0306 13:52:09.545664 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/cluster.spec" | |
I0306 13:52:09.545724 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/cluster.spec" SSE="-" ACL="" | |
I0306 13:52:09.632408 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/ssh/public/admin/7109f8402327a178c4d6f5c17105959a" | |
I0306 13:52:09.632461 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/ssh/public/admin/7109f8402327a178c4d6f5c17105959a" SSE="-" ACL="" | |
I0306 13:52:09.778977 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/config" | |
I0306 13:52:09.880210 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/instancegroup/" | |
I0306 13:52:09.974512 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup: [do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/master-fra1 do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/nodes] | |
I0306 13:52:09.974577 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/master-fra1" | |
I0306 13:52:10.089530 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/nodes" | |
I0306 13:52:10.178938 10006 channel.go:99] resolving "stable" against default channel location "https://raw.githubusercontent.com/kubernetes/kops/master/channels/" | |
I0306 13:52:10.178997 10006 channel.go:104] Loading channel from "https://raw.githubusercontent.com/kubernetes/kops/master/channels/stable" | |
I0306 13:52:10.179023 10006 context.go:177] Performing HTTP request: GET https://raw.githubusercontent.com/kubernetes/kops/master/channels/stable | |
I0306 13:52:10.233424 10006 channel.go:113] Channel contents: spec: | |
images: | |
# We put the "legacy" version first, for kops versions that don't support versions ( < 1.5.0 ) | |
- name: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2017-07-28 | |
providerID: aws | |
kubernetesVersion: ">=1.4.0 <1.5.0" | |
- name: kope.io/k8s-1.5-debian-jessie-amd64-hvm-ebs-2018-08-17 | |
providerID: aws | |
kubernetesVersion: ">=1.5.0 <1.6.0" | |
- name: kope.io/k8s-1.6-debian-jessie-amd64-hvm-ebs-2018-08-17 | |
providerID: aws | |
kubernetesVersion: ">=1.6.0 <1.7.0" | |
- name: kope.io/k8s-1.7-debian-jessie-amd64-hvm-ebs-2018-08-17 | |
providerID: aws | |
kubernetesVersion: ">=1.7.0 <1.8.0" | |
- name: kope.io/k8s-1.8-debian-jessie-amd64-hvm-ebs-2018-08-17 | |
providerID: aws | |
kubernetesVersion: ">=1.8.0 <1.9.0" | |
- name: kope.io/k8s-1.9-debian-jessie-amd64-hvm-ebs-2018-08-17 | |
providerID: aws | |
kubernetesVersion: ">=1.9.0 <1.10.0" | |
- name: kope.io/k8s-1.10-debian-jessie-amd64-hvm-ebs-2018-08-17 | |
providerID: aws | |
kubernetesVersion: ">=1.10.0 <1.11.0" | |
# Stretch is the default for 1.11 (for nvme) | |
- name: kope.io/k8s-1.11-debian-stretch-amd64-hvm-ebs-2020-01-17 | |
providerID: aws | |
kubernetesVersion: ">=1.11.0 <1.12.0" | |
- name: kope.io/k8s-1.12-debian-stretch-amd64-hvm-ebs-2020-01-17 | |
providerID: aws | |
kubernetesVersion: ">=1.12.0 <1.13.0" | |
- name: kope.io/k8s-1.13-debian-stretch-amd64-hvm-ebs-2020-01-17 | |
providerID: aws | |
kubernetesVersion: ">=1.13.0 <1.14.0" | |
- name: kope.io/k8s-1.14-debian-stretch-amd64-hvm-ebs-2020-01-17 | |
providerID: aws | |
kubernetesVersion: ">=1.14.0 <1.15.0" | |
- name: kope.io/k8s-1.15-debian-stretch-amd64-hvm-ebs-2020-01-17 | |
providerID: aws | |
kubernetesVersion: ">=1.15.0 <1.16.0" | |
- name: kope.io/k8s-1.16-debian-stretch-amd64-hvm-ebs-2020-01-17 | |
providerID: aws | |
kubernetesVersion: ">=1.16.0 <1.17.0" | |
- name: kope.io/k8s-1.17-debian-stretch-amd64-hvm-ebs-2020-01-17 | |
providerID: aws | |
kubernetesVersion: ">=1.17.0" | |
- providerID: gce | |
kubernetesVersion: "<1.16.0-alpha.1" | |
name: "cos-cloud/cos-stable-65-10323-99-0" | |
- providerID: gce | |
kubernetesVersion: ">=1.16.0-alpha.1" | |
name: "cos-cloud/cos-stable-77-12371-114-0" | |
cluster: | |
kubernetesVersion: v1.5.8 | |
networking: | |
kubenet: {} | |
kubernetesVersions: | |
- range: ">=1.17.0" | |
recommendedVersion: 1.17.3 | |
requiredVersion: 1.17.0 | |
- range: ">=1.16.0" | |
recommendedVersion: 1.16.7 | |
requiredVersion: 1.16.0 | |
- range: ">=1.15.0" | |
recommendedVersion: 1.15.10 | |
requiredVersion: 1.15.0 | |
- range: ">=1.14.0" | |
recommendedVersion: 1.14.10 | |
requiredVersion: 1.14.0 | |
- range: ">=1.13.0" | |
recommendedVersion: 1.13.12 | |
requiredVersion: 1.13.0 | |
- range: ">=1.12.0" | |
recommendedVersion: 1.12.10 | |
requiredVersion: 1.12.0 | |
- range: ">=1.11.0" | |
recommendedVersion: 1.11.10 | |
requiredVersion: 1.11.0 | |
- range: "<1.11.0" | |
recommendedVersion: 1.11.10 | |
requiredVersion: 1.11.10 | |
kopsVersions: | |
- range: ">=1.17.0-alpha.1" | |
#recommendedVersion: "1.17.0" | |
#requiredVersion: 1.17.0 | |
kubernetesVersion: 1.17.3 | |
- range: ">=1.16.0-alpha.1" | |
#recommendedVersion: "1.16.0" | |
#requiredVersion: 1.16.0 | |
kubernetesVersion: 1.16.7 | |
- range: ">=1.15.0-alpha.1" | |
#recommendedVersion: "1.15.0" | |
#requiredVersion: 1.15.0 | |
kubernetesVersion: 1.15.10 | |
- range: ">=1.14.0-alpha.1" | |
#recommendedVersion: "1.14.0" | |
#requiredVersion: 1.14.0 | |
kubernetesVersion: 1.14.10 | |
- range: ">=1.13.0-alpha.1" | |
#recommendedVersion: "1.13.0" | |
#requiredVersion: 1.13.0 | |
kubernetesVersion: 1.13.12 | |
- range: ">=1.12.0-alpha.1" | |
recommendedVersion: "1.12.1" | |
#requiredVersion: 1.12.0 | |
kubernetesVersion: 1.12.10 | |
- range: ">=1.11.0-alpha.1" | |
recommendedVersion: "1.11.1" | |
#requiredVersion: 1.11.0 | |
kubernetesVersion: 1.11.10 | |
- range: "<1.11.0-alpha.1" | |
recommendedVersion: "1.11.1" | |
#requiredVersion: 1.10.0 | |
kubernetesVersion: 1.11.10 | |
I0306 13:52:10.233877 10006 populate_cluster_spec.go:371] Defaulted KubeControllerManager.ClusterCIDR to 100.96.0.0/11 | |
I0306 13:52:10.233902 10006 populate_cluster_spec.go:378] Defaulted ServiceClusterIPRange to 100.64.0.0/13 | |
I0306 13:52:10.233949 10006 defaults.go:224] Not setting up Proxy Excludes | |
I0306 13:52:10.234013 10006 utils.go:167] Querying for all DNS zones to find match for "test1.dev.fra1.do.services.example.com" | |
I0306 13:52:11.192343 10006 populate_cluster_spec.go:253] Defaulting DNS zone to: test1.dev.fra1.do.services.example.com | |
I0306 13:52:11.192397 10006 tagbuilder.go:95] tags: [_do _k8s_1_6] | |
I0306 13:52:11.192908 10006 options_loader.go:130] executing builder *components.DefaultsOptionsBuilder | |
I0306 13:52:11.192942 10006 options_loader.go:130] executing builder *components.EtcdOptionsBuilder | |
I0306 13:52:11.192973 10006 options_loader.go:130] executing builder *etcdmanager.EtcdManagerOptionsBuilder | |
I0306 13:52:11.193000 10006 options_loader.go:130] executing builder *nodeauthorizer.OptionsBuilder | |
I0306 13:52:11.193018 10006 options_loader.go:130] executing builder *components.KubeAPIServerOptionsBuilder | |
I0306 13:52:11.193116 10006 options_loader.go:130] executing builder *components.DockerOptionsBuilder | |
I0306 13:52:11.193153 10006 options_loader.go:130] executing builder *components.NetworkingOptionsBuilder | |
I0306 13:52:11.193173 10006 options_loader.go:130] executing builder *components.KubeDnsOptionsBuilder | |
I0306 13:52:11.193202 10006 options_loader.go:130] executing builder *components.KubeletOptionsBuilder | |
I0306 13:52:11.193232 10006 kubelet.go:171] Cloud Provider: digitalocean | |
I0306 13:52:11.193261 10006 options_loader.go:130] executing builder *components.KubeControllerManagerOptionsBuilder | |
I0306 13:52:11.193290 10006 kubecontrollermanager.go:74] Kubernetes version "1.15.10" supports AttachDetachReconcileSyncPeriod; will configure | |
I0306 13:52:11.193324 10006 kubecontrollermanager.go:79] AttachDetachReconcileSyncPeriod is not set; will set to default 1m0s | |
I0306 13:52:11.193361 10006 options_loader.go:130] executing builder *components.KubeSchedulerOptionsBuilder | |
I0306 13:52:11.193388 10006 options_loader.go:130] executing builder *components.KubeProxyOptionsBuilder | |
I0306 13:52:11.194418 10006 options_loader.go:130] executing builder *components.DefaultsOptionsBuilder | |
I0306 13:52:11.194453 10006 options_loader.go:130] executing builder *components.EtcdOptionsBuilder | |
I0306 13:52:11.194478 10006 options_loader.go:130] executing builder *etcdmanager.EtcdManagerOptionsBuilder | |
I0306 13:52:11.194497 10006 options_loader.go:130] executing builder *nodeauthorizer.OptionsBuilder | |
I0306 13:52:11.194515 10006 options_loader.go:130] executing builder *components.KubeAPIServerOptionsBuilder | |
I0306 13:52:11.194582 10006 options_loader.go:130] executing builder *components.DockerOptionsBuilder | |
I0306 13:52:11.194605 10006 options_loader.go:130] executing builder *components.NetworkingOptionsBuilder | |
I0306 13:52:11.194624 10006 options_loader.go:130] executing builder *components.KubeDnsOptionsBuilder | |
I0306 13:52:11.194643 10006 options_loader.go:130] executing builder *components.KubeletOptionsBuilder | |
I0306 13:52:11.194666 10006 kubelet.go:171] Cloud Provider: digitalocean | |
I0306 13:52:11.194689 10006 options_loader.go:130] executing builder *components.KubeControllerManagerOptionsBuilder | |
I0306 13:52:11.194711 10006 kubecontrollermanager.go:74] Kubernetes version "1.15.10" supports AttachDetachReconcileSyncPeriod; will configure | |
I0306 13:52:11.194745 10006 options_loader.go:130] executing builder *components.KubeSchedulerOptionsBuilder | |
I0306 13:52:11.194765 10006 options_loader.go:130] executing builder *components.KubeProxyOptionsBuilder | |
I0306 13:52:11.196093 10006 spec_builder.go:49] options: { | |
"channel": "stable", | |
"configBase": "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com", | |
"cloudProvider": "digitalocean", | |
"kubernetesVersion": "1.15.10", | |
"subnets": [ | |
{ | |
"name": "fra1", | |
"zone": "fra1", | |
"region": "fra1", | |
"type": "Public" | |
} | |
], | |
"masterPublicName": "api.test1.dev.fra1.do.services.example.com", | |
"masterInternalName": "api.internal.test1.dev.fra1.do.services.example.com", | |
"topology": { | |
"masters": "public", | |
"nodes": "public", | |
"dns": { | |
"type": "Public" | |
} | |
}, | |
"secretStore": "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets", | |
"keyStore": "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki", | |
"configStore": "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com", | |
"dnsZone": "test1.dev.fra1.do.services.example.com", | |
"clusterDNSDomain": "cluster.local", | |
"serviceClusterIPRange": "100.64.0.0/13", | |
"nonMasqueradeCIDR": "100.64.0.0/10", | |
"sshAccess": [ | |
"0.0.0.0/0" | |
], | |
"kubernetesApiAccess": [ | |
"0.0.0.0/0" | |
], | |
"etcdClusters": [ | |
{ | |
"name": "main", | |
"provider": "Manager", | |
"etcdMembers": [ | |
{ | |
"name": "1", | |
"instanceGroup": "master-fra1" | |
} | |
], | |
"enableEtcdTLS": true, | |
"enableTLSAuth": true, | |
"version": "3.2.24", | |
"backups": { | |
"backupStore": "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/backups/etcd/main" | |
}, | |
"manager": {}, | |
"memoryRequest": "100Mi", | |
"cpuRequest": "200m" | |
}, | |
{ | |
"name": "events", | |
"provider": "Manager", | |
"etcdMembers": [ | |
{ | |
"name": "1", | |
"instanceGroup": "master-fra1" | |
} | |
], | |
"enableEtcdTLS": true, | |
"enableTLSAuth": true, | |
"version": "3.2.24", | |
"backups": { | |
"backupStore": "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/backups/etcd/events" | |
}, | |
"manager": {}, | |
"memoryRequest": "100Mi", | |
"cpuRequest": "100m" | |
} | |
], | |
"docker": { | |
"ipMasq": false, | |
"ipTables": false, | |
"logDriver": "json-file", | |
"logLevel": "warn", | |
"logOpt": [ | |
"max-size=10m", | |
"max-file=5" | |
], | |
"storage": "overlay2,overlay,aufs", | |
"version": "18.06.3" | |
}, | |
"kubeDNS": { | |
"cacheMaxSize": 1000, | |
"cacheMaxConcurrent": 150, | |
"domain": "cluster.local", | |
"replicas": 2, | |
"serverIP": "100.64.0.10", | |
"memoryRequest": "70Mi", | |
"cpuRequest": "100m", | |
"memoryLimit": "170Mi" | |
}, | |
"kubeAPIServer": { | |
"image": "k8s.gcr.io/kube-apiserver:v1.15.10", | |
"logLevel": 2, | |
"cloudProvider": "external", | |
"securePort": 443, | |
"insecurePort": 8080, | |
"bindAddress": "0.0.0.0", | |
"insecureBindAddress": "127.0.0.1", | |
"enableAdmissionPlugins": [ | |
"NamespaceLifecycle", | |
"LimitRanger", | |
"ServiceAccount", | |
"PersistentVolumeLabel", | |
"DefaultStorageClass", | |
"DefaultTolerationSeconds", | |
"MutatingAdmissionWebhook", | |
"ValidatingAdmissionWebhook", | |
"NodeRestriction", | |
"ResourceQuota" | |
], | |
"serviceClusterIPRange": "100.64.0.0/13", | |
"etcdServers": [ | |
"http://127.0.0.1:4001" | |
], | |
"etcdServersOverrides": [ | |
"/events#http://127.0.0.1:4002" | |
], | |
"allowPrivileged": true, | |
"apiServerCount": 1, | |
"anonymousAuth": false, | |
"kubeletPreferredAddressTypes": [ | |
"InternalIP", | |
"Hostname", | |
"ExternalIP" | |
], | |
"storageBackend": "etcd3", | |
"authorizationMode": "RBAC", | |
"requestheaderUsernameHeaders": [ | |
"X-Remote-User" | |
], | |
"requestheaderGroupHeaders": [ | |
"X-Remote-Group" | |
], | |
"requestheaderExtraHeaderPrefixes": [ | |
"X-Remote-Extra-" | |
], | |
"requestheaderAllowedNames": [ | |
"aggregator" | |
] | |
}, | |
"kubeControllerManager": { | |
"logLevel": 2, | |
"image": "k8s.gcr.io/kube-controller-manager:v1.15.10", | |
"cloudProvider": "external", | |
"clusterName": "test1.dev.fra1.do.services.example.com", | |
"clusterCIDR": "100.96.0.0/11", | |
"allocateNodeCIDRs": true, | |
"configureCloudRoutes": false, | |
"leaderElection": { | |
"leaderElect": true | |
}, | |
"attachDetachReconcileSyncPeriod": "1m0s", | |
"useServiceAccountCredentials": true | |
}, | |
"kubeScheduler": { | |
"logLevel": 2, | |
"image": "k8s.gcr.io/kube-scheduler:v1.15.10", | |
"leaderElection": { | |
"leaderElect": true | |
} | |
}, | |
"kubeProxy": { | |
"image": "k8s.gcr.io/kube-proxy:v1.15.10", | |
"cpuRequest": "100m", | |
"logLevel": 2, | |
"clusterCIDR": "100.96.0.0/11", | |
"hostnameOverride": "@digitalocean" | |
}, | |
"kubelet": { | |
"anonymousAuth": false, | |
"kubeconfigPath": "/var/lib/kubelet/kubeconfig", | |
"logLevel": 2, | |
"podManifestPath": "/etc/kubernetes/manifests", | |
"hostnameOverride": "@digitalocean", | |
"podInfraContainerImage": "k8s.gcr.io/pause-amd64:3.0", | |
"enableDebuggingHandlers": true, | |
"clusterDomain": "cluster.local", | |
"clusterDNS": "100.64.0.10", | |
"networkPluginName": "cni", | |
"cloudProvider": "external", | |
"cgroupRoot": "/", | |
"nonMasqueradeCIDR": "100.64.0.0/10", | |
"evictionHard": "memory.available\u003c100Mi,nodefs.available\u003c10%,nodefs.inodesFree\u003c5%,imagefs.available\u003c10%,imagefs.inodesFree\u003c5%", | |
"featureGates": { | |
"ExperimentalCriticalPodAnnotation": "true" | |
} | |
}, | |
"masterKubelet": { | |
"anonymousAuth": false, | |
"kubeconfigPath": "/var/lib/kubelet/kubeconfig", | |
"logLevel": 2, | |
"podManifestPath": "/etc/kubernetes/manifests", | |
"hostnameOverride": "@digitalocean", | |
"podInfraContainerImage": "k8s.gcr.io/pause-amd64:3.0", | |
"enableDebuggingHandlers": true, | |
"clusterDomain": "cluster.local", | |
"clusterDNS": "100.64.0.10", | |
"networkPluginName": "cni", | |
"cloudProvider": "external", | |
"cgroupRoot": "/", | |
"registerSchedulable": false, | |
"nonMasqueradeCIDR": "100.64.0.0/10", | |
"evictionHard": "memory.available\u003c100Mi,nodefs.available\u003c10%,nodefs.inodesFree\u003c5%,imagefs.available\u003c10%,imagefs.inodesFree\u003c5%", | |
"featureGates": { | |
"ExperimentalCriticalPodAnnotation": "true" | |
} | |
}, | |
"networking": { | |
"calico": { | |
"majorVersion": "v3" | |
} | |
}, | |
"api": { | |
"dns": {} | |
}, | |
"authorization": { | |
"rbac": {} | |
}, | |
"iam": { | |
"legacy": false, | |
"allowContainerRegistry": true | |
} | |
} | |
I0306 13:52:11.196450 10006 channel.go:241] version range ">=1.17.0-alpha.1" does not apply to version "1.15.2"; skipping | |
I0306 13:52:11.196483 10006 channel.go:241] version range ">=1.16.0-alpha.1" does not apply to version "1.15.2"; skipping | |
I0306 13:52:11.196503 10006 channel.go:152] VersionRecommendationSpec does not specify RecommendedVersion | |
I0306 13:52:11.196512 10006 channel.go:192] VersionRecommendationSpec does not specify RequiredVersion | |
I0306 13:52:11.196542 10006 channel.go:220] version range ">=1.17.0" does not apply to version "1.15.10"; skipping | |
I0306 13:52:11.196558 10006 channel.go:220] version range ">=1.16.0" does not apply to version "1.15.10"; skipping | |
I0306 13:52:11.196581 10006 channel.go:144] RecommendedVersion="1.15.10", Have="1.15.10". No upgrade needed. | |
I0306 13:52:11.196601 10006 channel.go:184] RequiredVersion="1.15.0", Have="1.15.10". No upgrade needed. | |
I0306 13:52:11.196764 10006 context.go:177] Performing HTTP request: GET https://storage.googleapis.com/kubernetes-release/release/v1.15.10/bin/linux/amd64/kubelet.sha256 | |
I0306 13:52:11.466956 10006 builder.go:323] Found hash "33859bf393da38e36d2a7fc76f6f207d75763338f057d65bef56a19be9f87ca2" for "https://storage.googleapis.com/kubernetes-release/release/v1.15.10/bin/linux/amd64/kubelet" | |
I0306 13:52:11.467054 10006 builder.go:240] adding file: &{DownloadURL:https://storage.googleapis.com/kubernetes-release/release/v1.15.10/bin/linux/amd64/kubelet CanonicalURL:<nil> SHAValue:33859bf393da38e36d2a7fc76f6f207d75763338f057d65bef56a19be9f87ca2} | |
I0306 13:52:11.467156 10006 context.go:177] Performing HTTP request: GET https://storage.googleapis.com/kubernetes-release/release/v1.15.10/bin/linux/amd64/kubectl.sha256 | |
I0306 13:52:11.519418 10006 builder.go:323] Found hash "38a0f73464f1c39ca383fd43196f84bdbe6e553fe3e677b6e7012ef7ad5eaf2b" for "https://storage.googleapis.com/kubernetes-release/release/v1.15.10/bin/linux/amd64/kubectl" | |
I0306 13:52:11.519503 10006 builder.go:240] adding file: &{DownloadURL:https://storage.googleapis.com/kubernetes-release/release/v1.15.10/bin/linux/amd64/kubectl CanonicalURL:<nil> SHAValue:38a0f73464f1c39ca383fd43196f84bdbe6e553fe3e677b6e7012ef7ad5eaf2b} | |
I0306 13:52:11.519611 10006 networking.go:175] Adding default CNI asset for k8s >= 1.11: https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.6.0.tgz | |
I0306 13:52:11.519673 10006 urls.go:81] Using default base url: "https://kubeupv2.s3.amazonaws.com/kops/1.15.2/" | |
I0306 13:52:11.519715 10006 context.go:177] Performing HTTP request: GET https://kubeupv2.s3.amazonaws.com/kops/1.15.2/linux/amd64/utils.tar.gz.sha256 | |
I0306 13:52:12.444824 10006 builder.go:323] Found hash "d500fdf96df3d31897af94808e7d4fbe83d15b421e9b6b4ea8c0ce8859d7111e" for "https://kubeupv2.s3.amazonaws.com/kops/1.15.2/linux/amd64/utils.tar.gz" | |
I0306 13:52:12.444892 10006 builder.go:240] adding file: &{DownloadURL:https://kubeupv2.s3.amazonaws.com/kops/1.15.2/linux/amd64/utils.tar.gz CanonicalURL:<nil> SHAValue:d500fdf96df3d31897af94808e7d4fbe83d15b421e9b6b4ea8c0ce8859d7111e} | |
I0306 13:52:12.444991 10006 urls.go:73] Using cached kopsBaseUrl url: "https://kubeupv2.s3.amazonaws.com/kops/1.15.2/" | |
I0306 13:52:12.445034 10006 context.go:177] Performing HTTP request: GET https://kubeupv2.s3.amazonaws.com/kops/1.15.2/linux/amd64/nodeup.sha256 | |
I0306 13:52:12.703303 10006 builder.go:323] Found hash "c0ccfa76fc5c099896b85952f2fe126f11c1d9fbfb8f12a69bd5fcd0413addc8" for "https://kubeupv2.s3.amazonaws.com/kops/1.15.2/linux/amd64/nodeup" | |
I0306 13:52:12.703373 10006 builder.go:240] adding file: &{DownloadURL:https://kubeupv2.s3.amazonaws.com/kops/1.15.2/linux/amd64/nodeup CanonicalURL:<nil> SHAValue:c0ccfa76fc5c099896b85952f2fe126f11c1d9fbfb8f12a69bd5fcd0413addc8} | |
I0306 13:52:12.703463 10006 urls.go:137] Using default nodeup location: "https://kubeupv2.s3.amazonaws.com/kops/1.15.2/linux/amd64/nodeup" | |
I0306 13:52:12.703504 10006 urls.go:73] Using cached kopsBaseUrl url: "https://kubeupv2.s3.amazonaws.com/kops/1.15.2/" | |
I0306 13:52:12.703573 10006 context.go:177] Performing HTTP request: GET https://kubeupv2.s3.amazonaws.com/kops/1.15.2/images/protokube.tar.gz.sha256 | |
I0306 13:52:13.034733 10006 builder.go:323] Found hash "de137414209702df28e1e514c740c2ec82c4c40d20e051aad465247979414f12" for "https://kubeupv2.s3.amazonaws.com/kops/1.15.2/images/protokube.tar.gz" | |
I0306 13:52:13.034802 10006 builder.go:240] adding file: &{DownloadURL:https://kubeupv2.s3.amazonaws.com/kops/1.15.2/images/protokube.tar.gz CanonicalURL:<nil> SHAValue:de137414209702df28e1e514c740c2ec82c4c40d20e051aad465247979414f12} | |
I0306 13:52:13.034856 10006 urls.go:178] Using default protokube location: "https://kubeupv2.s3.amazonaws.com/kops/1.15.2/images/protokube.tar.gz" | |
I0306 13:52:13.034939 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/ssh/public/admin/" | |
I0306 13:52:13.125057 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/ssh/public/admin: [do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/ssh/public/admin/7109f8402327a178c4d6f5c17105959a] | |
I0306 13:52:13.125110 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/ssh/public/admin/7109f8402327a178c4d6f5c17105959a" | |
I0306 13:52:13.943160 10006 dns.go:104] Doing DNS lookup to verify NS records for "test1.dev.fra1.do.services.example.com" | |
I0306 13:52:13.992982 10006 dns.go:121] Found NS records for "test1.dev.fra1.do.services.example.com": [ns3.digitalocean.com. ns1.digitalocean.com. ns2.digitalocean.com.] | |
I0306 13:52:13.993061 10006 tagbuilder.go:95] tags: [_do _k8s_1_6] | |
I0306 13:52:13.993770 10006 templates.go:80] loading (templated) resource "addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:13.994009 10006 templates.go:80] loading (templated) resource "addons/dns-controller.addons.k8s.io/k8s-1.6.yaml" | |
I0306 13:52:13.994152 10006 templates.go:80] loading (templated) resource "addons/dns-controller.addons.k8s.io/pre-k8s-1.6.yaml" | |
I0306 13:52:13.994276 10006 templates.go:88] loading resource "addons/limit-range.addons.k8s.io/addon.yaml" | |
I0306 13:52:13.994372 10006 templates.go:88] loading resource "addons/limit-range.addons.k8s.io/v1.5.0.yaml" | |
I0306 13:52:13.995182 10006 templates.go:80] loading (templated) resource "addons/networking.cilium.io/k8s-1.12.yaml" | |
I0306 13:52:13.995947 10006 templates.go:80] loading (templated) resource "addons/networking.cilium.io/k8s-1.7.yaml" | |
I0306 13:52:13.996161 10006 templates.go:88] loading resource "addons/networking.kope.io/pre-k8s-1.6.yaml" | |
I0306 13:52:13.996328 10006 templates.go:88] loading resource "addons/networking.kope.io/k8s-1.12.yaml" | |
I0306 13:52:13.996517 10006 templates.go:88] loading resource "addons/networking.kope.io/k8s-1.6.yaml" | |
I0306 13:52:13.996701 10006 templates.go:80] loading (templated) resource "addons/podsecuritypolicy.addons.k8s.io/k8s-1.10.yaml" | |
I0306 13:52:13.996864 10006 templates.go:80] loading (templated) resource "addons/podsecuritypolicy.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:13.997039 10006 templates.go:80] loading (templated) resource "addons/podsecuritypolicy.addons.k8s.io/k8s-1.9.yaml" | |
I0306 13:52:13.997135 10006 templates.go:88] loading resource "addons/core.addons.k8s.io/addon.yaml" | |
I0306 13:52:13.997342 10006 templates.go:80] loading (templated) resource "addons/core.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:13.997588 10006 templates.go:80] loading (templated) resource "addons/core.addons.k8s.io/k8s-1.7.yaml" | |
I0306 13:52:13.997687 10006 templates.go:88] loading resource "addons/core.addons.k8s.io/v1.4.0.yaml" | |
I0306 13:52:13.997862 10006 templates.go:88] loading resource "addons/external-dns.addons.k8s.io/README.md" | |
I0306 13:52:13.998085 10006 templates.go:80] loading (templated) resource "addons/external-dns.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:13.998266 10006 templates.go:80] loading (templated) resource "addons/external-dns.addons.k8s.io/k8s-1.6.yaml" | |
I0306 13:52:13.998460 10006 templates.go:80] loading (templated) resource "addons/external-dns.addons.k8s.io/pre-k8s-1.6.yaml" | |
I0306 13:52:14.005778 10006 templates.go:80] loading (templated) resource "addons/networking.flannel/k8s-1.12.yaml" | |
I0306 13:52:14.006374 10006 templates.go:80] loading (templated) resource "addons/networking.flannel/k8s-1.6.yaml" | |
I0306 13:52:14.007023 10006 templates.go:80] loading (templated) resource "addons/networking.flannel/pre-k8s-1.6.yaml" | |
I0306 13:52:14.011064 10006 templates.go:88] loading resource "addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" | |
I0306 13:52:14.011573 10006 templates.go:80] loading (templated) resource "addons/networking.kuberouter/k8s-1.6.yaml" | |
I0306 13:52:14.011875 10006 templates.go:80] loading (templated) resource "addons/networking.kuberouter/k8s-1.12.yaml" | |
I0306 13:52:14.012142 10006 templates.go:88] loading resource "addons/storage-aws.addons.k8s.io/v1.7.0.yaml" | |
I0306 13:52:14.012669 10006 templates.go:88] loading resource "addons/storage-aws.addons.k8s.io/v1.15.0.yaml" | |
I0306 13:52:14.012966 10006 templates.go:88] loading resource "addons/storage-aws.addons.k8s.io/v1.6.0.yaml" | |
I0306 13:52:14.013283 10006 templates.go:80] loading (templated) resource "addons/node-authorizer.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:14.013552 10006 templates.go:80] loading (templated) resource "addons/node-authorizer.addons.k8s.io/k8s-1.10.yaml" | |
I0306 13:52:14.013810 10006 templates.go:88] loading resource "addons/rbac.addons.k8s.io/k8s-1.8.yaml" | |
I0306 13:52:14.013984 10006 templates.go:80] loading (templated) resource "addons/authentication.aws/k8s-1.10.yaml" | |
I0306 13:52:14.014142 10006 templates.go:80] loading (templated) resource "addons/authentication.aws/k8s-1.12.yaml" | |
I0306 13:52:14.014408 10006 templates.go:80] loading (templated) resource "addons/coredns.addons.k8s.io/k8s-1.6.yaml" | |
I0306 13:52:14.014725 10006 templates.go:80] loading (templated) resource "addons/coredns.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:14.015116 10006 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org.canal/pre-k8s-1.6.yaml" | |
I0306 13:52:14.015870 10006 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.12.yaml" | |
I0306 13:52:14.016342 10006 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.6.yaml" | |
I0306 13:52:14.016861 10006 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.8.yaml" | |
I0306 13:52:14.017495 10006 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.9.yaml" | |
I0306 13:52:14.017729 10006 templates.go:80] loading (templated) resource "addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.9.0.yaml" | |
I0306 13:52:14.017969 10006 templates.go:80] loading (templated) resource "addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.8.0.yaml" | |
I0306 13:52:14.018406 10006 templates.go:80] loading (templated) resource "addons/kube-dns.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:14.018852 10006 templates.go:80] loading (templated) resource "addons/kube-dns.addons.k8s.io/k8s-1.6.yaml" | |
I0306 13:52:14.019253 10006 templates.go:80] loading (templated) resource "addons/kube-dns.addons.k8s.io/pre-k8s-1.6.yaml" | |
I0306 13:52:14.019491 10006 templates.go:80] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.8.yaml" | |
I0306 13:52:14.019741 10006 templates.go:80] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.10.yaml" | |
I0306 13:52:14.019966 10006 templates.go:80] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.12.yaml" | |
I0306 13:52:14.020198 10006 templates.go:80] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.7.yaml" | |
I0306 13:52:14.020436 10006 templates.go:80] loading (templated) resource "addons/networking.weave/k8s-1.12.yaml" | |
I0306 13:52:14.020646 10006 templates.go:80] loading (templated) resource "addons/networking.weave/k8s-1.6.yaml" | |
I0306 13:52:14.020868 10006 templates.go:80] loading (templated) resource "addons/networking.weave/k8s-1.7.yaml" | |
I0306 13:52:14.021079 10006 templates.go:80] loading (templated) resource "addons/networking.weave/k8s-1.8.yaml" | |
I0306 13:52:14.021244 10006 templates.go:80] loading (templated) resource "addons/networking.weave/pre-k8s-1.6.yaml" | |
I0306 13:52:14.021364 10006 templates.go:88] loading resource "addons/scheduler.addons.k8s.io/v1.7.0.yaml" | |
I0306 13:52:14.021450 10006 templates.go:88] loading resource "addons/storage-gce.addons.k8s.io/v1.6.0.yaml" | |
I0306 13:52:14.021524 10006 templates.go:88] loading resource "addons/storage-gce.addons.k8s.io/v1.7.0.yaml" | |
I0306 13:52:14.021700 10006 templates.go:80] loading (templated) resource "addons/digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml" | |
I0306 13:52:14.022357 10006 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.7-v3.yaml" | |
I0306 13:52:14.022835 10006 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.7.yaml" | |
I0306 13:52:14.023212 10006 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org/pre-k8s-1.6.yaml" | |
I0306 13:52:14.024270 10006 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.12.yaml" | |
I0306 13:52:14.024912 10006 templates.go:80] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.6.yaml" | |
I0306 13:52:14.025291 10006 templates.go:80] loading (templated) resource "addons/networking.romana/k8s-1.12.yaml" | |
I0306 13:52:14.025622 10006 templates.go:80] loading (templated) resource "addons/networking.romana/k8s-1.7.yaml" | |
I0306 13:52:14.025855 10006 templates.go:88] loading resource "addons/authentication.kope.io/k8s-1.12.yaml" | |
I0306 13:52:14.026078 10006 templates.go:88] loading resource "addons/authentication.kope.io/k8s-1.8.yaml" | |
I0306 13:52:14.026280 10006 templates.go:80] loading (templated) resource "addons/openstack.addons.k8s.io/k8s-1.11.yaml" | |
I0306 13:52:14.026545 10006 tree_walker.go:98] visit "cloudup/resources" | |
I0306 13:52:14.026591 10006 tree_walker.go:98] visit "cloudup/resources/addons" | |
I0306 13:52:14.026694 10006 tree_walker.go:98] visit "cloudup/resources/addons/dns-controller.addons.k8s.io" | |
I0306 13:52:14.026738 10006 tree_walker.go:98] visit "cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml.template" | |
I0306 13:52:14.026955 10006 loader.go:354] loading (templated) resource "addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:14.026990 10006 tree_walker.go:98] visit "cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.6.yaml.template" | |
I0306 13:52:14.027192 10006 loader.go:354] loading (templated) resource "addons/dns-controller.addons.k8s.io/k8s-1.6.yaml" | |
I0306 13:52:14.027215 10006 tree_walker.go:98] visit "cloudup/resources/addons/dns-controller.addons.k8s.io/pre-k8s-1.6.yaml.template" | |
I0306 13:52:14.027366 10006 loader.go:354] loading (templated) resource "addons/dns-controller.addons.k8s.io/pre-k8s-1.6.yaml" | |
I0306 13:52:14.027389 10006 tree_walker.go:98] visit "cloudup/resources/addons/limit-range.addons.k8s.io" | |
I0306 13:52:14.027425 10006 tree_walker.go:98] visit "cloudup/resources/addons/limit-range.addons.k8s.io/addon.yaml" | |
I0306 13:52:14.027523 10006 loader.go:362] loading resource "addons/limit-range.addons.k8s.io/addon.yaml" | |
I0306 13:52:14.027544 10006 tree_walker.go:98] visit "cloudup/resources/addons/limit-range.addons.k8s.io/v1.5.0.yaml" | |
I0306 13:52:14.027707 10006 loader.go:362] loading resource "addons/limit-range.addons.k8s.io/v1.5.0.yaml" | |
I0306 13:52:14.027729 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.cilium.io" | |
I0306 13:52:14.027762 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.cilium.io/k8s-1.12.yaml.template" | |
I0306 13:52:14.028542 10006 loader.go:354] loading (templated) resource "addons/networking.cilium.io/k8s-1.12.yaml" | |
I0306 13:52:14.028590 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.cilium.io/k8s-1.7.yaml.template" | |
I0306 13:52:14.029314 10006 loader.go:354] loading (templated) resource "addons/networking.cilium.io/k8s-1.7.yaml" | |
I0306 13:52:14.029342 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.kope.io" | |
I0306 13:52:14.029380 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.kope.io/pre-k8s-1.6.yaml" | |
I0306 13:52:14.029550 10006 loader.go:362] loading resource "addons/networking.kope.io/pre-k8s-1.6.yaml" | |
I0306 13:52:14.029571 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.kope.io/k8s-1.12.yaml" | |
I0306 13:52:14.029768 10006 loader.go:362] loading resource "addons/networking.kope.io/k8s-1.12.yaml" | |
I0306 13:52:14.029795 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.kope.io/k8s-1.6.yaml" | |
I0306 13:52:14.029975 10006 loader.go:362] loading resource "addons/networking.kope.io/k8s-1.6.yaml" | |
I0306 13:52:14.029995 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.flannel" | |
I0306 13:52:14.030032 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.flannel/k8s-1.12.yaml.template" | |
I0306 13:52:14.030286 10006 loader.go:354] loading (templated) resource "addons/networking.flannel/k8s-1.12.yaml" | |
I0306 13:52:14.030322 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.flannel/k8s-1.6.yaml.template" | |
I0306 13:52:14.030530 10006 loader.go:354] loading (templated) resource "addons/networking.flannel/k8s-1.6.yaml" | |
I0306 13:52:14.030565 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.flannel/pre-k8s-1.6.yaml.template" | |
I0306 13:52:14.030756 10006 loader.go:354] loading (templated) resource "addons/networking.flannel/pre-k8s-1.6.yaml" | |
I0306 13:52:14.030780 10006 tree_walker.go:98] visit "cloudup/resources/addons/podsecuritypolicy.addons.k8s.io" | |
I0306 13:52:14.030818 10006 tree_walker.go:98] visit "cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.10.yaml.template" | |
I0306 13:52:14.030995 10006 loader.go:354] loading (templated) resource "addons/podsecuritypolicy.addons.k8s.io/k8s-1.10.yaml" | |
I0306 13:52:14.031023 10006 tree_walker.go:98] visit "cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.12.yaml.template" | |
I0306 13:52:14.031164 10006 loader.go:354] loading (templated) resource "addons/podsecuritypolicy.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:14.031187 10006 tree_walker.go:98] visit "cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.9.yaml.template" | |
I0306 13:52:14.031392 10006 loader.go:354] loading (templated) resource "addons/podsecuritypolicy.addons.k8s.io/k8s-1.9.yaml" | |
I0306 13:52:14.031414 10006 tree_walker.go:98] visit "cloudup/resources/addons/core.addons.k8s.io" | |
I0306 13:52:14.031452 10006 tree_walker.go:98] visit "cloudup/resources/addons/core.addons.k8s.io/v1.4.0.yaml" | |
I0306 13:52:14.031609 10006 loader.go:362] loading resource "addons/core.addons.k8s.io/v1.4.0.yaml" | |
I0306 13:52:14.031633 10006 tree_walker.go:98] visit "cloudup/resources/addons/core.addons.k8s.io/addon.yaml" | |
I0306 13:52:14.031733 10006 loader.go:362] loading resource "addons/core.addons.k8s.io/addon.yaml" | |
I0306 13:52:14.031755 10006 tree_walker.go:98] visit "cloudup/resources/addons/core.addons.k8s.io/k8s-1.12.yaml.template" | |
I0306 13:52:14.031981 10006 loader.go:354] loading (templated) resource "addons/core.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:14.032009 10006 tree_walker.go:98] visit "cloudup/resources/addons/core.addons.k8s.io/k8s-1.7.yaml.template" | |
I0306 13:52:14.032236 10006 loader.go:354] loading (templated) resource "addons/core.addons.k8s.io/k8s-1.7.yaml" | |
I0306 13:52:14.032264 10006 tree_walker.go:98] visit "cloudup/resources/addons/external-dns.addons.k8s.io" | |
I0306 13:52:14.032347 10006 tree_walker.go:98] visit "cloudup/resources/addons/external-dns.addons.k8s.io/README.md" | |
I0306 13:52:14.032600 10006 loader.go:362] loading resource "addons/external-dns.addons.k8s.io/README.md" | |
I0306 13:52:14.032627 10006 tree_walker.go:98] visit "cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.12.yaml.template" | |
I0306 13:52:14.032800 10006 loader.go:354] loading (templated) resource "addons/external-dns.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:14.032825 10006 tree_walker.go:98] visit "cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.6.yaml.template" | |
I0306 13:52:14.033025 10006 loader.go:354] loading (templated) resource "addons/external-dns.addons.k8s.io/k8s-1.6.yaml" | |
I0306 13:52:14.033048 10006 tree_walker.go:98] visit "cloudup/resources/addons/external-dns.addons.k8s.io/pre-k8s-1.6.yaml.template" | |
I0306 13:52:14.033190 10006 loader.go:354] loading (templated) resource "addons/external-dns.addons.k8s.io/pre-k8s-1.6.yaml" | |
I0306 13:52:14.033213 10006 tree_walker.go:98] visit "cloudup/resources/addons/storage-aws.addons.k8s.io" | |
I0306 13:52:14.033250 10006 tree_walker.go:98] visit "cloudup/resources/addons/storage-aws.addons.k8s.io/v1.15.0.yaml" | |
I0306 13:52:14.033488 10006 loader.go:362] loading resource "addons/storage-aws.addons.k8s.io/v1.15.0.yaml" | |
I0306 13:52:14.033510 10006 tree_walker.go:98] visit "cloudup/resources/addons/storage-aws.addons.k8s.io/v1.6.0.yaml" | |
I0306 13:52:14.033724 10006 loader.go:362] loading resource "addons/storage-aws.addons.k8s.io/v1.6.0.yaml" | |
I0306 13:52:14.033747 10006 tree_walker.go:98] visit "cloudup/resources/addons/storage-aws.addons.k8s.io/v1.7.0.yaml" | |
I0306 13:52:14.033858 10006 loader.go:362] loading resource "addons/storage-aws.addons.k8s.io/v1.7.0.yaml" | |
I0306 13:52:14.033886 10006 tree_walker.go:98] visit "cloudup/resources/addons/kubelet-api.rbac.addons.k8s.io" | |
I0306 13:52:14.033917 10006 tree_walker.go:98] visit "cloudup/resources/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" | |
I0306 13:52:14.034079 10006 loader.go:362] loading resource "addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" | |
I0306 13:52:14.034104 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.kuberouter" | |
I0306 13:52:14.034137 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.kuberouter/k8s-1.12.yaml.template" | |
I0306 13:52:14.034397 10006 loader.go:354] loading (templated) resource "addons/networking.kuberouter/k8s-1.12.yaml" | |
I0306 13:52:14.034419 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.kuberouter/k8s-1.6.yaml.template" | |
I0306 13:52:14.034663 10006 loader.go:354] loading (templated) resource "addons/networking.kuberouter/k8s-1.6.yaml" | |
I0306 13:52:14.034729 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org.canal" | |
I0306 13:52:14.034809 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org.canal/pre-k8s-1.6.yaml.template" | |
I0306 13:52:14.035242 10006 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org.canal/pre-k8s-1.6.yaml" | |
I0306 13:52:14.035308 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.12.yaml.template" | |
I0306 13:52:14.035983 10006 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.12.yaml" | |
I0306 13:52:14.036043 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.6.yaml.template" | |
I0306 13:52:14.036493 10006 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.6.yaml" | |
I0306 13:52:14.036547 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.8.yaml.template" | |
I0306 13:52:14.037078 10006 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.8.yaml" | |
I0306 13:52:14.037138 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.9.yaml.template" | |
I0306 13:52:14.037708 10006 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org.canal/k8s-1.9.yaml" | |
I0306 13:52:14.037804 10006 tree_walker.go:98] visit "cloudup/resources/addons/node-authorizer.addons.k8s.io" | |
I0306 13:52:14.037881 10006 tree_walker.go:98] visit "cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.10.yaml.template" | |
I0306 13:52:14.038212 10006 loader.go:354] loading (templated) resource "addons/node-authorizer.addons.k8s.io/k8s-1.10.yaml" | |
I0306 13:52:14.038271 10006 tree_walker.go:98] visit "cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.12.yaml.template" | |
I0306 13:52:14.038542 10006 loader.go:354] loading (templated) resource "addons/node-authorizer.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:14.038595 10006 tree_walker.go:98] visit "cloudup/resources/addons/rbac.addons.k8s.io" | |
I0306 13:52:14.038651 10006 tree_walker.go:98] visit "cloudup/resources/addons/rbac.addons.k8s.io/k8s-1.8.yaml" | |
I0306 13:52:14.038840 10006 loader.go:362] loading resource "addons/rbac.addons.k8s.io/k8s-1.8.yaml" | |
I0306 13:52:14.038893 10006 tree_walker.go:98] visit "cloudup/resources/addons/authentication.aws" | |
I0306 13:52:14.038953 10006 tree_walker.go:98] visit "cloudup/resources/addons/authentication.aws/k8s-1.10.yaml.template" | |
I0306 13:52:14.039161 10006 loader.go:354] loading (templated) resource "addons/authentication.aws/k8s-1.10.yaml" | |
I0306 13:52:14.039213 10006 tree_walker.go:98] visit "cloudup/resources/addons/authentication.aws/k8s-1.12.yaml.template" | |
I0306 13:52:14.039430 10006 loader.go:354] loading (templated) resource "addons/authentication.aws/k8s-1.12.yaml" | |
I0306 13:52:14.039481 10006 tree_walker.go:98] visit "cloudup/resources/addons/coredns.addons.k8s.io" | |
I0306 13:52:14.039543 10006 tree_walker.go:98] visit "cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.12.yaml.template" | |
I0306 13:52:14.039972 10006 loader.go:354] loading (templated) resource "addons/coredns.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:14.040039 10006 tree_walker.go:98] visit "cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template" | |
I0306 13:52:14.040345 10006 loader.go:354] loading (templated) resource "addons/coredns.addons.k8s.io/k8s-1.6.yaml" | |
I0306 13:52:14.040397 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.weave" | |
I0306 13:52:14.040470 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.weave/k8s-1.12.yaml.template" | |
I0306 13:52:14.040768 10006 loader.go:354] loading (templated) resource "addons/networking.weave/k8s-1.12.yaml" | |
I0306 13:52:14.040825 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.weave/k8s-1.6.yaml.template" | |
I0306 13:52:14.046950 10006 loader.go:354] loading (templated) resource "addons/networking.weave/k8s-1.6.yaml" | |
I0306 13:52:14.047104 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.weave/k8s-1.7.yaml.template" | |
I0306 13:52:14.047562 10006 loader.go:354] loading (templated) resource "addons/networking.weave/k8s-1.7.yaml" | |
I0306 13:52:14.047677 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.weave/k8s-1.8.yaml.template" | |
I0306 13:52:14.048087 10006 loader.go:354] loading (templated) resource "addons/networking.weave/k8s-1.8.yaml" | |
I0306 13:52:14.048186 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.weave/pre-k8s-1.6.yaml.template" | |
I0306 13:52:14.048488 10006 loader.go:354] loading (templated) resource "addons/networking.weave/pre-k8s-1.6.yaml" | |
I0306 13:52:14.048575 10006 tree_walker.go:98] visit "cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io" | |
I0306 13:52:14.048681 10006 tree_walker.go:98] visit "cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.8.0.yaml.template" | |
I0306 13:52:14.049009 10006 loader.go:354] loading (templated) resource "addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.8.0.yaml" | |
I0306 13:52:14.049102 10006 tree_walker.go:98] visit "cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.9.0.yaml.template" | |
I0306 13:52:14.049387 10006 loader.go:354] loading (templated) resource "addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.9.0.yaml" | |
I0306 13:52:14.049456 10006 tree_walker.go:98] visit "cloudup/resources/addons/kube-dns.addons.k8s.io" | |
I0306 13:52:14.049553 10006 tree_walker.go:98] visit "cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.12.yaml.template" | |
I0306 13:52:14.050050 10006 loader.go:354] loading (templated) resource "addons/kube-dns.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:14.050123 10006 tree_walker.go:98] visit "cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.6.yaml.template" | |
I0306 13:52:14.050640 10006 loader.go:354] loading (templated) resource "addons/kube-dns.addons.k8s.io/k8s-1.6.yaml" | |
I0306 13:52:14.050719 10006 tree_walker.go:98] visit "cloudup/resources/addons/kube-dns.addons.k8s.io/pre-k8s-1.6.yaml.template" | |
I0306 13:52:14.051100 10006 loader.go:354] loading (templated) resource "addons/kube-dns.addons.k8s.io/pre-k8s-1.6.yaml" | |
I0306 13:52:14.051167 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.amazon-vpc-routed-eni" | |
I0306 13:52:14.051264 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.7.yaml.template" | |
I0306 13:52:14.051580 10006 loader.go:354] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.7.yaml" | |
I0306 13:52:14.051689 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.8.yaml.template" | |
I0306 13:52:14.052012 10006 loader.go:354] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.8.yaml" | |
I0306 13:52:14.052075 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.10.yaml.template" | |
I0306 13:52:14.052379 10006 loader.go:354] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.10.yaml" | |
I0306 13:52:14.052441 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.12.yaml.template" | |
I0306 13:52:14.052737 10006 loader.go:354] loading (templated) resource "addons/networking.amazon-vpc-routed-eni/k8s-1.12.yaml" | |
I0306 13:52:14.052797 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.romana" | |
I0306 13:52:14.052867 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.romana/k8s-1.12.yaml.template" | |
I0306 13:52:14.053191 10006 loader.go:354] loading (templated) resource "addons/networking.romana/k8s-1.12.yaml" | |
I0306 13:52:14.053259 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.romana/k8s-1.7.yaml.template" | |
I0306 13:52:14.053565 10006 loader.go:354] loading (templated) resource "addons/networking.romana/k8s-1.7.yaml" | |
I0306 13:52:14.053635 10006 tree_walker.go:98] visit "cloudup/resources/addons/scheduler.addons.k8s.io" | |
I0306 13:52:14.053740 10006 tree_walker.go:98] visit "cloudup/resources/addons/scheduler.addons.k8s.io/v1.7.0.yaml" | |
I0306 13:52:14.053958 10006 loader.go:362] loading resource "addons/scheduler.addons.k8s.io/v1.7.0.yaml" | |
I0306 13:52:14.054011 10006 tree_walker.go:98] visit "cloudup/resources/addons/storage-gce.addons.k8s.io" | |
I0306 13:52:14.054079 10006 tree_walker.go:98] visit "cloudup/resources/addons/storage-gce.addons.k8s.io/v1.6.0.yaml" | |
I0306 13:52:14.054284 10006 loader.go:362] loading resource "addons/storage-gce.addons.k8s.io/v1.6.0.yaml" | |
I0306 13:52:14.054339 10006 tree_walker.go:98] visit "cloudup/resources/addons/storage-gce.addons.k8s.io/v1.7.0.yaml" | |
I0306 13:52:14.054495 10006 loader.go:362] loading resource "addons/storage-gce.addons.k8s.io/v1.7.0.yaml" | |
I0306 13:52:14.054538 10006 tree_walker.go:98] visit "cloudup/resources/addons/digitalocean-cloud-controller.addons.k8s.io" | |
I0306 13:52:14.054575 10006 tree_walker.go:98] visit "cloudup/resources/addons/digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml.template" | |
I0306 13:52:14.054898 10006 loader.go:354] loading (templated) resource "addons/digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml" | |
I0306 13:52:14.054928 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org" | |
I0306 13:52:14.054981 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org/k8s-1.12.yaml.template" | |
I0306 13:52:14.055926 10006 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.12.yaml" | |
I0306 13:52:14.056055 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org/k8s-1.6.yaml.template" | |
I0306 13:52:14.056762 10006 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.6.yaml" | |
I0306 13:52:14.056862 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org/k8s-1.7-v3.yaml.template" | |
I0306 13:52:14.057659 10006 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.7-v3.yaml" | |
I0306 13:52:14.057788 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org/k8s-1.7.yaml.template" | |
I0306 13:52:14.058430 10006 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org/k8s-1.7.yaml" | |
I0306 13:52:14.058506 10006 tree_walker.go:98] visit "cloudup/resources/addons/networking.projectcalico.org/pre-k8s-1.6.yaml.template" | |
I0306 13:52:14.059069 10006 loader.go:354] loading (templated) resource "addons/networking.projectcalico.org/pre-k8s-1.6.yaml" | |
I0306 13:52:14.059145 10006 tree_walker.go:98] visit "cloudup/resources/addons/authentication.kope.io" | |
I0306 13:52:14.059222 10006 tree_walker.go:98] visit "cloudup/resources/addons/authentication.kope.io/k8s-1.12.yaml" | |
I0306 13:52:14.059544 10006 loader.go:362] loading resource "addons/authentication.kope.io/k8s-1.12.yaml" | |
I0306 13:52:14.059602 10006 tree_walker.go:98] visit "cloudup/resources/addons/authentication.kope.io/k8s-1.8.yaml" | |
I0306 13:52:14.059886 10006 loader.go:362] loading resource "addons/authentication.kope.io/k8s-1.8.yaml" | |
I0306 13:52:14.059944 10006 tree_walker.go:98] visit "cloudup/resources/addons/openstack.addons.k8s.io" | |
I0306 13:52:14.060011 10006 tree_walker.go:98] visit "cloudup/resources/addons/openstack.addons.k8s.io/k8s-1.11.yaml.template" | |
I0306 13:52:14.060341 10006 loader.go:354] loading (templated) resource "addons/openstack.addons.k8s.io/k8s-1.11.yaml" | |
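The tree walk above loads every versioned variant of each addon (pre-k8s-1.6, k8s-1.6, k8s-1.12, ...); only the variant whose Kubernetes version range matches the cluster is ultimately applied, in the same spirit as the image version ranges listed at the top of this output. A minimal, hypothetical sketch of that selection, simplified to a major.minor comparison and not the actual kops code:

```go
// Hypothetical sketch (not the actual kops code): each addon ships several
// versioned manifests, and the one whose Kubernetes version floor matches the
// cluster is selected. Simplified here to a major.minor comparison.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// minorOf returns the minor component of a "major.minor.patch" version string.
func minorOf(version string) int {
	m, _ := strconv.Atoi(strings.Split(version, ".")[1])
	return m
}

func main() {
	clusterVersion := "1.15.10" // hypothetical cluster Kubernetes version

	// Variant name -> minimum minor version it targets (illustrative values).
	variants := map[string]int{
		"pre-k8s-1.6.yaml": 0,
		"k8s-1.6.yaml":     6,
		"k8s-1.12.yaml":    12,
	}

	selected, floor := "", -1
	for name, min := range variants {
		if minorOf(clusterVersion) >= min && min > floor {
			selected, floor = name, min
		}
	}
	fmt.Println("selected variant:", selected) // selected variant: k8s-1.12.yaml
}
```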
I0306 13:52:14.061064 10006 bootstrapchannelbuilder.go:76] Manifest apiVersion: v1 | |
kind: Namespace | |
metadata: | |
name: kube-system | |
I0306 13:52:14.061146 10006 bootstrapchannelbuilder.go:79] hash 3ffe9ac576f9eec72e2bdfbd2ea17d56d9b17b90 | |
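Each "Manifest ..." dump from bootstrapchannelbuilder.go is followed by a 40-hex-character hash (here 3ffe9ac5...). The length is consistent with a SHA-1 digest of the rendered manifest bytes; whether kops hashes exactly these bytes is an assumption made only for this sketch:

```go
// Minimal sketch, assuming the logged 40-character hash is a SHA-1 digest of
// the manifest text. The exact bytes fed into the hash (trailing newline,
// template output, etc.) are not visible in the log, so the digest printed
// here is illustrative rather than a reproduction of 3ffe9ac5...
package main

import (
	"crypto/sha1"
	"fmt"
)

func main() {
	manifest := "apiVersion: v1\nkind: Namespace\nmetadata:\n  name: kube-system\n"
	fmt.Printf("%x\n", sha1.Sum([]byte(manifest)))
}
```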
I0306 13:52:14.068121 10006 images.go:59] Consider image for re-mapping: "k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.0.0" | |
I0306 13:52:14.070019 10006 images.go:59] Consider image for re-mapping: "k8s.gcr.io/kubedns-amd64:1.9" | |
I0306 13:52:14.070170 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.failureThreshold: 5.000000 | |
I0306 13:52:14.070238 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.httpGet.port: 8080.000000 | |
I0306 13:52:14.070291 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.initialDelaySeconds: 60.000000 | |
I0306 13:52:14.070338 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.successThreshold: 1.000000 | |
I0306 13:52:14.070387 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.timeoutSeconds: 5.000000 | |
I0306 13:52:14.070453 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[0].containerPort: 10053.000000 | |
I0306 13:52:14.070514 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[1].containerPort: 10053.000000 | |
I0306 13:52:14.070575 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[2].containerPort: 10055.000000 | |
I0306 13:52:14.070646 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.initialDelaySeconds: 3.000000 | |
I0306 13:52:14.070701 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.timeoutSeconds: 5.000000 | |
I0306 13:52:14.070751 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.httpGet.port: 8081.000000 | |
I0306 13:52:14.070834 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.initialDelaySeconds: 60.000000 | |
I0306 13:52:14.070887 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.successThreshold: 1.000000 | |
I0306 13:52:14.070932 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.timeoutSeconds: 5.000000 | |
I0306 13:52:14.070977 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.failureThreshold: 5.000000 | |
I0306 13:52:14.071038 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.httpGet.port: 8080.000000 | |
I0306 13:52:14.071091 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].ports.[0].containerPort: 53.000000 | |
I0306 13:52:14.071143 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].ports.[1].containerPort: 53.000000 | |
I0306 13:52:14.071219 10006 images.go:59] Consider image for re-mapping: "k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.10" | |
I0306 13:52:14.071298 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].ports.[0].containerPort: 10054.000000 | |
I0306 13:52:14.071358 10006 images.go:59] Consider image for re-mapping: "k8s.gcr.io/dnsmasq-metrics-amd64:1.0" | |
I0306 13:52:14.071671 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.timeoutSeconds: 5.000000 | |
I0306 13:52:14.071752 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.failureThreshold: 5.000000 | |
I0306 13:52:14.071811 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.httpGet.port: 10054.000000 | |
I0306 13:52:14.071874 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.initialDelaySeconds: 60.000000 | |
I0306 13:52:14.071920 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.successThreshold: 1.000000 | |
I0306 13:52:14.071968 10006 images.go:59] Consider image for re-mapping: "k8s.gcr.io/exechealthz-amd64:1.2" | |
I0306 13:52:14.072025 10006 visitor.go:40] float64 value at spec.template.spec.containers.[3].ports.[0].containerPort: 8080.000000 | |
I0306 13:52:14.072099 10006 visitor.go:40] float64 value at spec.strategy.rollingUpdate.maxUnavailable: 0.000000 | |
I0306 13:52:14.075457 10006 visitor.go:40] float64 value at spec.ports.[0].port: 53.000000 | |
I0306 13:52:14.075507 10006 visitor.go:40] float64 value at spec.ports.[1].port: 53.000000 | |
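The "visitor.go:40] float64 value at ...: 8080.000000" lines show integer fields such as container ports arriving as float64, which is the usual result of decoding YAML/JSON into untyped Go values. A minimal standard-library sketch of the same behaviour:

```go
// Decoding JSON (or YAML converted to JSON) into interface{} yields float64
// for every number, which is why ports are logged as e.g. 8080.000000.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var obj map[string]interface{}
	if err := json.Unmarshal([]byte(`{"containerPort": 8080}`), &obj); err != nil {
		panic(err)
	}
	v := obj["containerPort"]
	fmt.Printf("%T %f\n", v, v) // float64 8080.000000
}
```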
I0306 13:52:14.076124 10006 bootstrapchannelbuilder.go:76] Manifest apiVersion: extensions/v1beta1 | |
kind: Deployment | |
metadata: | |
labels: | |
k8s-addon: kube-dns.addons.k8s.io | |
k8s-app: kube-dns-autoscaler | |
kubernetes.io/cluster-service: "true" | |
name: kube-dns-autoscaler | |
namespace: kube-system | |
spec: | |
template: | |
metadata: | |
annotations: | |
scheduler.alpha.kubernetes.io/critical-pod: "" | |
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", | |
"operator":"Exists"}]' | |
labels: | |
k8s-app: kube-dns-autoscaler | |
spec: | |
containers: | |
- command: | |
- /cluster-proportional-autoscaler | |
- --namespace=kube-system | |
- --configmap=kube-dns-autoscaler | |
- --mode=linear | |
- --target=Deployment/kube-dns | |
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"min":2}} | |
- --logtostderr=true | |
- --v=2 | |
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.0.0 | |
name: autoscaler | |
resources: | |
requests: | |
cpu: 20m | |
memory: 10Mi | |
--- | |
apiVersion: extensions/v1beta1 | |
kind: Deployment | |
metadata: | |
labels: | |
k8s-addon: kube-dns.addons.k8s.io | |
k8s-app: kube-dns | |
kubernetes.io/cluster-service: "true" | |
name: kube-dns | |
namespace: kube-system | |
spec: | |
selector: | |
matchLabels: | |
k8s-app: kube-dns | |
strategy: | |
rollingUpdate: | |
maxSurge: 10% | |
maxUnavailable: 0 | |
template: | |
metadata: | |
annotations: | |
scheduler.alpha.kubernetes.io/critical-pod: "" | |
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", | |
"operator":"Exists"}]' | |
labels: | |
k8s-app: kube-dns | |
spec: | |
containers: | |
- args: | |
- --domain=cluster.local. | |
- --dns-port=10053 | |
- --config-map=kube-dns | |
- --v=2 | |
env: | |
- name: PROMETHEUS_PORT | |
value: "10055" | |
image: k8s.gcr.io/kubedns-amd64:1.9 | |
livenessProbe: | |
failureThreshold: 5 | |
httpGet: | |
path: /healthz-kubedns | |
port: 8080 | |
scheme: HTTP | |
initialDelaySeconds: 60 | |
successThreshold: 1 | |
timeoutSeconds: 5 | |
name: kubedns | |
ports: | |
- containerPort: 10053 | |
name: dns-local | |
protocol: UDP | |
- containerPort: 10053 | |
name: dns-tcp-local | |
protocol: TCP | |
- containerPort: 10055 | |
name: metrics | |
protocol: TCP | |
readinessProbe: | |
httpGet: | |
path: /readiness | |
port: 8081 | |
scheme: HTTP | |
initialDelaySeconds: 3 | |
timeoutSeconds: 5 | |
resources: | |
limits: | |
memory: 170Mi | |
requests: | |
cpu: 100m | |
memory: 70Mi | |
- args: | |
- --cache-size=1000 | |
- --dns-forward-max=150 | |
- --no-resolv | |
- --server=127.0.0.1#10053 | |
- --log-facility=- | |
- --min-port=1024 | |
image: k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.10 | |
livenessProbe: | |
failureThreshold: 5 | |
httpGet: | |
path: /healthz-dnsmasq | |
port: 8080 | |
scheme: HTTP | |
initialDelaySeconds: 60 | |
successThreshold: 1 | |
timeoutSeconds: 5 | |
name: dnsmasq | |
ports: | |
- containerPort: 53 | |
name: dns | |
protocol: UDP | |
- containerPort: 53 | |
name: dns-tcp | |
protocol: TCP | |
resources: | |
requests: | |
cpu: 150m | |
memory: 10Mi | |
- args: | |
- --v=2 | |
- --logtostderr | |
image: k8s.gcr.io/dnsmasq-metrics-amd64:1.0 | |
livenessProbe: | |
failureThreshold: 5 | |
httpGet: | |
path: /metrics | |
port: 10054 | |
scheme: HTTP | |
initialDelaySeconds: 60 | |
successThreshold: 1 | |
timeoutSeconds: 5 | |
name: dnsmasq-metrics | |
ports: | |
- containerPort: 10054 | |
name: metrics | |
protocol: TCP | |
resources: | |
requests: | |
memory: 10Mi | |
- args: | |
- --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null | |
- --url=/healthz-dnsmasq | |
- --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 >/dev/null | |
- --url=/healthz-kubedns | |
- --port=8080 | |
- --quiet | |
image: k8s.gcr.io/exechealthz-amd64:1.2 | |
name: healthz | |
ports: | |
- containerPort: 8080 | |
protocol: TCP | |
resources: | |
limits: | |
memory: 50Mi | |
requests: | |
cpu: 10m | |
memory: 50Mi | |
dnsPolicy: Default | |
--- | |
apiVersion: v1 | |
kind: Service | |
metadata: | |
labels: | |
k8s-addon: kube-dns.addons.k8s.io | |
k8s-app: kube-dns | |
kubernetes.io/cluster-service: "true" | |
kubernetes.io/name: KubeDNS | |
name: kube-dns | |
namespace: kube-system | |
spec: | |
clusterIP: 100.64.0.10 | |
ports: | |
- name: dns | |
port: 53 | |
protocol: UDP | |
- name: dns-tcp | |
port: 53 | |
protocol: TCP | |
selector: | |
k8s-app: kube-dns | |
I0306 13:52:14.076205 10006 bootstrapchannelbuilder.go:79] hash 895c961cb9365cbedb22edd20a7648182ae7ed3f | |
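The interleaved "images.go:59] Consider image for re-mapping" lines refer to kops' option to rewrite upstream image references, for example when a private registry or mirror is configured for the cluster. A hypothetical sketch of such a rewrite; the registry name and helper below are illustrative, not the kops API:

```go
// Hypothetical helper: rewrite "k8s.gcr.io/<name>:<tag>" so it is pulled from
// a private registry instead. Illustrative only; not the kops implementation.
package main

import (
	"fmt"
	"strings"
)

func remapImage(image, registry string) string {
	if registry == "" {
		return image // nothing configured, keep the upstream reference
	}
	parts := strings.SplitN(image, "/", 2)
	if len(parts) == 2 {
		return registry + "/" + parts[1]
	}
	return registry + "/" + image
}

func main() {
	fmt.Println(remapImage("k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10", "registry.example.com"))
	// registry.example.com/k8s-dns-kube-dns-amd64:1.14.10
}
```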
I0306 13:52:14.081860 10006 images.go:59] Consider image for re-mapping: "k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2" | |
I0306 13:52:14.083275 10006 visitor.go:40] float64 value at spec.strategy.rollingUpdate.maxUnavailable: 0.000000 | |
I0306 13:52:14.083390 10006 images.go:59] Consider image for re-mapping: "k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10" | |
I0306 13:52:14.083448 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.failureThreshold: 5.000000 | |
I0306 13:52:14.083491 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.httpGet.port: 10054.000000 | |
I0306 13:52:14.083531 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.initialDelaySeconds: 60.000000 | |
I0306 13:52:14.083577 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.successThreshold: 1.000000 | |
I0306 13:52:14.083615 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.timeoutSeconds: 5.000000 | |
I0306 13:52:14.083654 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.initialDelaySeconds: 3.000000 | |
I0306 13:52:14.083691 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.timeoutSeconds: 5.000000 | |
I0306 13:52:14.083730 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.httpGet.port: 8081.000000 | |
I0306 13:52:14.083775 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[0].containerPort: 10053.000000 | |
I0306 13:52:14.083814 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[1].containerPort: 10053.000000 | |
I0306 13:52:14.083853 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[2].containerPort: 10055.000000 | |
I0306 13:52:14.083914 10006 images.go:59] Consider image for re-mapping: "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10" | |
I0306 13:52:14.083967 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.successThreshold: 1.000000 | |
I0306 13:52:14.084006 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.timeoutSeconds: 5.000000 | |
I0306 13:52:14.084043 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.failureThreshold: 5.000000 | |
I0306 13:52:14.084080 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.httpGet.port: 10054.000000 | |
I0306 13:52:14.084118 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.initialDelaySeconds: 60.000000 | |
I0306 13:52:14.084159 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].ports.[0].containerPort: 53.000000 | |
I0306 13:52:14.084201 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].ports.[1].containerPort: 53.000000 | |
I0306 13:52:14.084256 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].ports.[0].containerPort: 10054.000000 | |
I0306 13:52:14.084304 10006 images.go:59] Consider image for re-mapping: "k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10" | |
I0306 13:52:14.084351 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.timeoutSeconds: 5.000000 | |
I0306 13:52:14.084390 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.failureThreshold: 5.000000 | |
I0306 13:52:14.084429 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.httpGet.port: 10054.000000 | |
I0306 13:52:14.084465 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.initialDelaySeconds: 60.000000 | |
I0306 13:52:14.084501 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.successThreshold: 1.000000 | |
I0306 13:52:14.084545 10006 visitor.go:35] string value at spec.template.spec.volumes.[0].configMap.optional: true | |
I0306 13:52:14.087979 10006 visitor.go:40] float64 value at spec.ports.[0].port: 53.000000 | |
I0306 13:52:14.088077 10006 visitor.go:40] float64 value at spec.ports.[1].port: 53.000000 | |
I0306 13:52:14.089923 10006 bootstrapchannelbuilder.go:76] Manifest apiVersion: extensions/v1beta1 | |
kind: Deployment | |
metadata: | |
labels: | |
k8s-addon: kube-dns.addons.k8s.io | |
k8s-app: kube-dns-autoscaler | |
kubernetes.io/cluster-service: "true" | |
name: kube-dns-autoscaler | |
namespace: kube-system | |
spec: | |
template: | |
metadata: | |
annotations: | |
scheduler.alpha.kubernetes.io/critical-pod: "" | |
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", | |
"operator":"Exists"}]' | |
labels: | |
k8s-app: kube-dns-autoscaler | |
spec: | |
containers: | |
- command: | |
- /cluster-proportional-autoscaler | |
- --namespace=kube-system | |
- --configmap=kube-dns-autoscaler | |
- --target=Deployment/kube-dns | |
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} | |
- --logtostderr=true | |
- --v=2 | |
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2 | |
name: autoscaler | |
resources: | |
requests: | |
cpu: 20m | |
memory: 10Mi | |
serviceAccountName: kube-dns-autoscaler | |
tolerations: | |
- key: CriticalAddonsOnly | |
operator: Exists | |
--- | |
apiVersion: extensions/v1beta1 | |
kind: Deployment | |
metadata: | |
labels: | |
k8s-addon: kube-dns.addons.k8s.io | |
k8s-app: kube-dns | |
kubernetes.io/cluster-service: "true" | |
name: kube-dns | |
namespace: kube-system | |
spec: | |
selector: | |
matchLabels: | |
k8s-app: kube-dns | |
strategy: | |
rollingUpdate: | |
maxSurge: 10% | |
maxUnavailable: 0 | |
template: | |
metadata: | |
annotations: | |
prometheus.io/port: "10055" | |
prometheus.io/scrape: "true" | |
scheduler.alpha.kubernetes.io/critical-pod: "" | |
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", | |
"operator":"Exists"}]' | |
labels: | |
k8s-app: kube-dns | |
spec: | |
containers: | |
- args: | |
- --config-dir=/kube-dns-config | |
- --dns-port=10053 | |
- --domain=cluster.local. | |
- --v=2 | |
env: | |
- name: PROMETHEUS_PORT | |
value: "10055" | |
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10 | |
livenessProbe: | |
failureThreshold: 5 | |
httpGet: | |
path: /healthcheck/kubedns | |
port: 10054 | |
scheme: HTTP | |
initialDelaySeconds: 60 | |
successThreshold: 1 | |
timeoutSeconds: 5 | |
name: kubedns | |
ports: | |
- containerPort: 10053 | |
name: dns-local | |
protocol: UDP | |
- containerPort: 10053 | |
name: dns-tcp-local | |
protocol: TCP | |
- containerPort: 10055 | |
name: metrics | |
protocol: TCP | |
readinessProbe: | |
httpGet: | |
path: /readiness | |
port: 8081 | |
scheme: HTTP | |
initialDelaySeconds: 3 | |
timeoutSeconds: 5 | |
resources: | |
limits: | |
memory: 170Mi | |
requests: | |
cpu: 100m | |
memory: 70Mi | |
volumeMounts: | |
- mountPath: /kube-dns-config | |
name: kube-dns-config | |
- args: | |
- -v=2 | |
- -logtostderr | |
- -configDir=/etc/k8s/dns/dnsmasq-nanny | |
- -restartDnsmasq=true | |
- -- | |
- -k | |
- --cache-size=1000 | |
- --dns-forward-max=150 | |
- --no-negcache | |
- --log-facility=- | |
- --server=/cluster.local/127.0.0.1#10053 | |
- --server=/in-addr.arpa/127.0.0.1#10053 | |
- --server=/in6.arpa/127.0.0.1#10053 | |
- --min-port=1024 | |
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10 | |
livenessProbe: | |
failureThreshold: 5 | |
httpGet: | |
path: /healthcheck/dnsmasq | |
port: 10054 | |
scheme: HTTP | |
initialDelaySeconds: 60 | |
successThreshold: 1 | |
timeoutSeconds: 5 | |
name: dnsmasq | |
ports: | |
- containerPort: 53 | |
name: dns | |
protocol: UDP | |
- containerPort: 53 | |
name: dns-tcp | |
protocol: TCP | |
resources: | |
requests: | |
cpu: 150m | |
memory: 20Mi | |
volumeMounts: | |
- mountPath: /etc/k8s/dns/dnsmasq-nanny | |
name: kube-dns-config | |
- args: | |
- --v=2 | |
- --logtostderr | |
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A | |
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A | |
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10 | |
livenessProbe: | |
failureThreshold: 5 | |
httpGet: | |
path: /metrics | |
port: 10054 | |
scheme: HTTP | |
initialDelaySeconds: 60 | |
successThreshold: 1 | |
timeoutSeconds: 5 | |
name: sidecar | |
ports: | |
- containerPort: 10054 | |
name: metrics | |
protocol: TCP | |
resources: | |
requests: | |
cpu: 10m | |
memory: 20Mi | |
dnsPolicy: Default | |
serviceAccountName: kube-dns | |
volumes: | |
- configMap: | |
name: kube-dns | |
optional: true | |
name: kube-dns-config | |
--- | |
apiVersion: v1 | |
kind: Service | |
metadata: | |
labels: | |
k8s-addon: kube-dns.addons.k8s.io | |
k8s-app: kube-dns | |
kubernetes.io/cluster-service: "true" | |
kubernetes.io/name: KubeDNS | |
name: kube-dns | |
namespace: kube-system | |
spec: | |
clusterIP: 100.64.0.10 | |
ports: | |
- name: dns | |
port: 53 | |
protocol: UDP | |
- name: dns-tcp | |
port: 53 | |
protocol: TCP | |
selector: | |
k8s-app: kube-dns | |
--- | |
apiVersion: v1 | |
kind: ServiceAccount | |
metadata: | |
labels: | |
k8s-addon: kube-dns.addons.k8s.io | |
name: kube-dns-autoscaler | |
namespace: kube-system | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
kind: ClusterRole | |
metadata: | |
labels: | |
k8s-addon: kube-dns.addons.k8s.io | |
name: kube-dns-autoscaler | |
rules: | |
- apiGroups: | |
- "" | |
resources: | |
- nodes | |
verbs: | |
- list | |
- apiGroups: | |
- "" | |
resources: | |
- replicationcontrollers/scale | |
verbs: | |
- get | |
- update | |
- apiGroups: | |
- extensions | |
resources: | |
- deployments/scale | |
- replicasets/scale | |
verbs: | |
- get | |
- update | |
- apiGroups: | |
- "" | |
resources: | |
- configmaps | |
verbs: | |
- get | |
- create | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
kind: ClusterRoleBinding | |
metadata: | |
labels: | |
k8s-addon: kube-dns.addons.k8s.io | |
name: kube-dns-autoscaler | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: kube-dns-autoscaler | |
subjects: | |
- kind: ServiceAccount | |
name: kube-dns-autoscaler | |
namespace: kube-system | |
I0306 13:52:14.091054 10006 bootstrapchannelbuilder.go:79] hash 555f952a8b955ce7a5dd0bcd06a5be9e72bd2895 | |
I0306 13:52:14.096624 10006 images.go:59] Consider image for re-mapping: "k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0" | |
I0306 13:52:14.097919 10006 visitor.go:40] float64 value at spec.strategy.rollingUpdate.maxUnavailable: 0.000000 | |
I0306 13:52:14.098052 10006 visitor.go:35] string value at spec.template.spec.volumes.[0].configMap.optional: true | |
I0306 13:52:14.098103 10006 visitor.go:40] float64 value at spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.[0].weight: 1.000000 | |
I0306 13:52:14.098159 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[0].containerPort: 10053.000000 | |
I0306 13:52:14.098201 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[1].containerPort: 10053.000000 | |
I0306 13:52:14.098240 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].ports.[2].containerPort: 10055.000000 | |
I0306 13:52:14.098284 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.failureThreshold: 5.000000 | |
I0306 13:52:14.098322 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.httpGet.port: 10054.000000 | |
I0306 13:52:14.098361 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.initialDelaySeconds: 60.000000 | |
I0306 13:52:14.098413 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.successThreshold: 1.000000 | |
I0306 13:52:14.098450 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.timeoutSeconds: 5.000000 | |
I0306 13:52:14.098490 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.httpGet.port: 8081.000000 | |
I0306 13:52:14.098530 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.initialDelaySeconds: 3.000000 | |
I0306 13:52:14.098568 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.timeoutSeconds: 5.000000 | |
I0306 13:52:14.098619 10006 images.go:59] Consider image for re-mapping: "k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.13" | |
I0306 13:52:14.098674 10006 images.go:59] Consider image for re-mapping: "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.13" | |
I0306 13:52:14.098720 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.timeoutSeconds: 5.000000 | |
I0306 13:52:14.098758 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.failureThreshold: 5.000000 | |
I0306 13:52:14.098797 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.httpGet.port: 10054.000000 | |
I0306 13:52:14.098836 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.initialDelaySeconds: 60.000000 | |
I0306 13:52:14.098873 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].livenessProbe.successThreshold: 1.000000 | |
I0306 13:52:14.098912 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].ports.[0].containerPort: 53.000000 | |
I0306 13:52:14.098954 10006 visitor.go:40] float64 value at spec.template.spec.containers.[1].ports.[1].containerPort: 53.000000 | |
I0306 13:52:14.099019 10006 images.go:59] Consider image for re-mapping: "k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.13" | |
I0306 13:52:14.099065 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.failureThreshold: 5.000000 | |
I0306 13:52:14.099110 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.httpGet.port: 10054.000000 | |
I0306 13:52:14.099149 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.initialDelaySeconds: 60.000000 | |
I0306 13:52:14.099185 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.successThreshold: 1.000000 | |
I0306 13:52:14.099221 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].livenessProbe.timeoutSeconds: 5.000000 | |
I0306 13:52:14.099261 10006 visitor.go:40] float64 value at spec.template.spec.containers.[2].ports.[0].containerPort: 10054.000000 | |
I0306 13:52:14.102708 10006 visitor.go:40] float64 value at spec.ports.[0].port: 53.000000 | |
I0306 13:52:14.102803 10006 visitor.go:40] float64 value at spec.ports.[1].port: 53.000000 | |
I0306 13:52:14.104657 10006 visitor.go:40] float64 value at spec.minAvailable: 1.000000 | |
I0306 13:52:14.105018 10006 bootstrapchannelbuilder.go:76] Manifest apiVersion: apps/v1 | |
kind: Deployment | |
metadata: | |
labels: | |
k8s-addon: kube-dns.addons.k8s.io | |
k8s-app: kube-dns-autoscaler | |
kubernetes.io/cluster-service: "true" | |
name: kube-dns-autoscaler | |
namespace: kube-system | |
spec: | |
selector: | |
matchLabels: | |
k8s-app: kube-dns-autoscaler | |
template: | |
metadata: | |
annotations: | |
scheduler.alpha.kubernetes.io/critical-pod: "" | |
labels: | |
k8s-app: kube-dns-autoscaler | |
spec: | |
containers: | |
- command: | |
- /cluster-proportional-autoscaler | |
- --namespace=kube-system | |
- --configmap=kube-dns-autoscaler | |
- --target=Deployment/kube-dns | |
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} | |
- --logtostderr=true | |
- --v=2 | |
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0 | |
name: autoscaler | |
resources: | |
requests: | |
cpu: 20m | |
memory: 10Mi | |
serviceAccountName: kube-dns-autoscaler | |
tolerations: | |
- key: CriticalAddonsOnly | |
operator: Exists | |
--- | |
apiVersion: apps/v1 | |
kind: Deployment | |
metadata: | |
labels: | |
k8s-addon: kube-dns.addons.k8s.io | |
k8s-app: kube-dns | |
kubernetes.io/cluster-service: "true" | |
name: kube-dns | |
namespace: kube-system | |
spec: | |
selector: | |
matchLabels: | |
k8s-app: kube-dns | |
strategy: | |
rollingUpdate: | |
maxSurge: 10% | |
maxUnavailable: 0 | |
template: | |
metadata: | |
annotations: | |
prometheus.io/port: "10055" | |
prometheus.io/scrape: "true" | |
scheduler.alpha.kubernetes.io/critical-pod: "" | |
labels: | |
k8s-app: kube-dns | |
spec: | |
affinity: | |
podAntiAffinity: | |
preferredDuringSchedulingIgnoredDuringExecution: | |
- podAffinityTerm: | |
labelSelector: | |
matchExpressions: | |
- key: k8s-app | |
operator: In | |
values: | |
- kube-dns | |
topologyKey: kubernetes.io/hostname | |
weight: 1 | |
containers: | |
- args: | |
- --config-dir=/kube-dns-config | |
- --dns-port=10053 | |
- --domain=cluster.local. | |
- --v=2 | |
env: | |
- name: PROMETHEUS_PORT | |
value: "10055" | |
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.13 | |
livenessProbe: | |
failureThreshold: 5 | |
httpGet: | |
path: /healthcheck/kubedns | |
port: 10054 | |
scheme: HTTP | |
initialDelaySeconds: 60 | |
successThreshold: 1 | |
timeoutSeconds: 5 | |
name: kubedns | |
ports: | |
- containerPort: 10053 | |
name: dns-local | |
protocol: UDP | |
- containerPort: 10053 | |
name: dns-tcp-local | |
protocol: TCP | |
- containerPort: 10055 | |
name: metrics | |
protocol: TCP | |
readinessProbe: | |
httpGet: | |
path: /readiness | |
port: 8081 | |
scheme: HTTP | |
initialDelaySeconds: 3 | |
timeoutSeconds: 5 | |
resources: | |
limits: | |
memory: 170Mi | |
requests: | |
cpu: 100m | |
memory: 70Mi | |
volumeMounts: | |
- mountPath: /kube-dns-config | |
name: kube-dns-config | |
- args: | |
- -v=2 | |
- -logtostderr | |
- -configDir=/etc/k8s/dns/dnsmasq-nanny | |
- -restartDnsmasq=true | |
- -- | |
- -k | |
- --cache-size=1000 | |
- --dns-forward-max=150 | |
- --no-negcache | |
- --log-facility=- | |
- --server=/cluster.local/127.0.0.1#10053 | |
- --server=/in-addr.arpa/127.0.0.1#10053 | |
- --server=/in6.arpa/127.0.0.1#10053 | |
- --min-port=1024 | |
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.13 | |
livenessProbe: | |
failureThreshold: 5 | |
httpGet: | |
path: /healthcheck/dnsmasq | |
port: 10054 | |
scheme: HTTP | |
initialDelaySeconds: 60 | |
successThreshold: 1 | |
timeoutSeconds: 5 | |
name: dnsmasq | |
ports: | |
- containerPort: 53 | |
name: dns | |
protocol: UDP | |
- containerPort: 53 | |
name: dns-tcp | |
protocol: TCP | |
resources: | |
requests: | |
cpu: 150m | |
memory: 20Mi | |
volumeMounts: | |
- mountPath: /etc/k8s/dns/dnsmasq-nanny | |
name: kube-dns-config | |
- args: | |
- --v=2 | |
- --logtostderr | |
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A | |
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A | |
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.13 | |
livenessProbe: | |
failureThreshold: 5 | |
httpGet: | |
path: /metrics | |
port: 10054 | |
scheme: HTTP | |
initialDelaySeconds: 60 | |
successThreshold: 1 | |
timeoutSeconds: 5 | |
name: sidecar | |
ports: | |
- containerPort: 10054 | |
name: metrics | |
protocol: TCP | |
resources: | |
requests: | |
cpu: 10m | |
memory: 20Mi | |
dnsPolicy: Default | |
serviceAccountName: kube-dns | |
volumes: | |
- configMap: | |
name: kube-dns | |
optional: true | |
name: kube-dns-config | |
--- | |
apiVersion: v1 | |
kind: Service | |
metadata: | |
labels: | |
k8s-addon: kube-dns.addons.k8s.io | |
k8s-app: kube-dns | |
kubernetes.io/cluster-service: "true" | |
kubernetes.io/name: KubeDNS | |
name: kube-dns | |
namespace: kube-system | |
spec: | |
clusterIP: 100.64.0.10 | |
ports: | |
- name: dns | |
port: 53 | |
protocol: UDP | |
- name: dns-tcp | |
port: 53 | |
protocol: TCP | |
selector: | |
k8s-app: kube-dns | |
--- | |
apiVersion: v1 | |
kind: ServiceAccount | |
metadata: | |
labels: | |
k8s-addon: kube-dns.addons.k8s.io | |
name: kube-dns-autoscaler | |
namespace: kube-system | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRole | |
metadata: | |
labels: | |
k8s-addon: kube-dns.addons.k8s.io | |
name: kube-dns-autoscaler | |
rules: | |
- apiGroups: | |
- "" | |
resources: | |
- nodes | |
verbs: | |
- list | |
- watch | |
- apiGroups: | |
- "" | |
resources: | |
- replicationcontrollers/scale | |
verbs: | |
- get | |
- update | |
- apiGroups: | |
- extensions | |
- apps | |
resources: | |
- deployments/scale | |
- replicasets/scale | |
verbs: | |
- get | |
- update | |
- apiGroups: | |
- "" | |
resources: | |
- configmaps | |
verbs: | |
- get | |
- create | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRoleBinding | |
metadata: | |
labels: | |
k8s-addon: kube-dns.addons.k8s.io | |
name: kube-dns-autoscaler | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: kube-dns-autoscaler | |
subjects: | |
- kind: ServiceAccount | |
name: kube-dns-autoscaler | |
namespace: kube-system | |
--- | |
apiVersion: policy/v1beta1 | |
kind: PodDisruptionBudget | |
metadata: | |
name: kube-dns | |
namespace: kube-system | |
spec: | |
minAvailable: 1 | |
selector: | |
matchLabels: | |
k8s-app: kube-dns | |
I0306 13:52:14.106096 10006 bootstrapchannelbuilder.go:79] hash b4dff071aa340fd71650c96f213fdf4b4f799c71 | |
I0306 13:52:14.107099 10006 bootstrapchannelbuilder.go:76] Manifest apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRoleBinding | |
metadata: | |
labels: | |
addonmanager.kubernetes.io/mode: Reconcile | |
k8s-addon: rbac.addons.k8s.io | |
kubernetes.io/cluster-service: "true" | |
name: kubelet-cluster-admin | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: system:node | |
subjects: | |
- apiGroup: rbac.authorization.k8s.io | |
kind: User | |
name: kubelet | |
I0306 13:52:14.107236 10006 bootstrapchannelbuilder.go:79] hash 5d53ce7b920cd1e8d65d2306d80a041420711914 | |
I0306 13:52:14.107917 10006 bootstrapchannelbuilder.go:76] Manifest apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRoleBinding | |
metadata: | |
name: kops:system:kubelet-api-admin | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: system:kubelet-api-admin | |
subjects: | |
- apiGroup: rbac.authorization.k8s.io | |
kind: User | |
name: kubelet-api | |
I0306 13:52:14.108020 10006 bootstrapchannelbuilder.go:79] hash e1508d77cb4e527d7a2939babe36dc350dd83745 | |
I0306 13:52:14.108585 10006 bootstrapchannelbuilder.go:76] Manifest apiVersion: v1 | |
kind: LimitRange | |
metadata: | |
name: limits | |
namespace: default | |
spec: | |
limits: | |
- defaultRequest: | |
cpu: 100m | |
type: Container | |
I0306 13:52:14.108662 10006 bootstrapchannelbuilder.go:79] hash 2ea50e23f1a5aa41df3724630ac25173738cc90c | |
I0306 13:52:14.108984 10006 template_functions.go:194] watch-ingress=false set on dns-controller | |
I0306 13:52:14.109944 10006 visitor.go:40] float64 value at spec.replicas: 1.000000 | |
I0306 13:52:14.110036 10006 visitor.go:35] string value at spec.template.spec.hostNetwork: true | |
I0306 13:52:14.110099 10006 images.go:59] Consider image for re-mapping: "kope/dns-controller:1.15.2" | |
I0306 13:52:14.111208 10006 bootstrapchannelbuilder.go:76] Manifest apiVersion: extensions/v1beta1 | |
kind: Deployment | |
metadata: | |
labels: | |
k8s-addon: dns-controller.addons.k8s.io | |
k8s-app: dns-controller | |
version: v1.15.2 | |
name: dns-controller | |
namespace: kube-system | |
spec: | |
replicas: 1 | |
selector: | |
matchLabels: | |
k8s-app: dns-controller | |
template: | |
metadata: | |
annotations: | |
scheduler.alpha.kubernetes.io/critical-pod: "" | |
scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value": | |
"master"}]' | |
labels: | |
k8s-addon: dns-controller.addons.k8s.io | |
k8s-app: dns-controller | |
version: v1.15.2 | |
spec: | |
containers: | |
- command: | |
- /usr/bin/dns-controller | |
- --watch-ingress=false | |
- --dns=digitalocean | |
- --zone=test1.dev.fra1.do.services.example.com | |
- --zone=*/* | |
- -v=2 | |
image: kope/dns-controller:1.15.2 | |
name: dns-controller | |
resources: | |
requests: | |
cpu: 50m | |
memory: 50Mi | |
dnsPolicy: Default | |
hostNetwork: true | |
nodeSelector: | |
kubernetes.io/role: master | |
I0306 13:52:14.111418 10006 bootstrapchannelbuilder.go:79] hash eeb67e0b1e593f14d3c0b9e21cb1987cfd45d43f | |
I0306 13:52:14.111889 10006 template_functions.go:194] watch-ingress=false set on dns-controller | |
I0306 13:52:14.114014 10006 visitor.go:35] string value at spec.template.spec.hostNetwork: true | |
I0306 13:52:14.114082 10006 images.go:59] Consider image for re-mapping: "kope/dns-controller:1.15.2" | |
I0306 13:52:14.114115 10006 visitor.go:40] float64 value at spec.replicas: 1.000000 | |
I0306 13:52:14.116881 10006 bootstrapchannelbuilder.go:76] Manifest apiVersion: extensions/v1beta1 | |
kind: Deployment | |
metadata: | |
labels: | |
k8s-addon: dns-controller.addons.k8s.io | |
k8s-app: dns-controller | |
version: v1.15.2 | |
name: dns-controller | |
namespace: kube-system | |
spec: | |
replicas: 1 | |
selector: | |
matchLabels: | |
k8s-app: dns-controller | |
template: | |
metadata: | |
annotations: | |
scheduler.alpha.kubernetes.io/critical-pod: "" | |
scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value": | |
"master"}]' | |
labels: | |
k8s-addon: dns-controller.addons.k8s.io | |
k8s-app: dns-controller | |
version: v1.15.2 | |
spec: | |
containers: | |
- command: | |
- /usr/bin/dns-controller | |
- --watch-ingress=false | |
- --dns=digitalocean | |
- --zone=test1.dev.fra1.do.services.example.com | |
- --zone=*/* | |
- -v=2 | |
env: | |
- name: DIGITALOCEAN_ACCESS_TOKEN | |
valueFrom: | |
secretKeyRef: | |
key: access-token | |
name: digitalocean | |
image: kope/dns-controller:1.15.2 | |
name: dns-controller | |
resources: | |
requests: | |
cpu: 50m | |
memory: 50Mi | |
dnsPolicy: Default | |
hostNetwork: true | |
nodeSelector: | |
node-role.kubernetes.io/master: "" | |
serviceAccount: dns-controller | |
tolerations: | |
- effect: NoSchedule | |
key: node-role.kubernetes.io/master | |
--- | |
apiVersion: v1 | |
kind: ServiceAccount | |
metadata: | |
labels: | |
k8s-addon: dns-controller.addons.k8s.io | |
name: dns-controller | |
namespace: kube-system | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
kind: ClusterRole | |
metadata: | |
labels: | |
k8s-addon: dns-controller.addons.k8s.io | |
name: kops:dns-controller | |
rules: | |
- apiGroups: | |
- "" | |
resources: | |
- endpoints | |
- services | |
- pods | |
- ingress | |
- nodes | |
verbs: | |
- get | |
- list | |
- watch | |
- apiGroups: | |
- extensions | |
resources: | |
- ingresses | |
verbs: | |
- get | |
- list | |
- watch | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
kind: ClusterRoleBinding | |
metadata: | |
labels: | |
k8s-addon: dns-controller.addons.k8s.io | |
name: kops:dns-controller | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: kops:dns-controller | |
subjects: | |
- apiGroup: rbac.authorization.k8s.io | |
kind: User | |
name: system:serviceaccount:kube-system:dns-controller | |
I0306 13:52:14.116947 10006 bootstrapchannelbuilder.go:79] hash bcd517488f62492e9f43af6950d0663b3fb0b09d | |
I0306 13:52:14.117534 10006 template_functions.go:194] watch-ingress=false set on dns-controller | |
I0306 13:52:14.119788 10006 visitor.go:40] float64 value at spec.replicas: 1.000000 | |
I0306 13:52:14.119857 10006 images.go:59] Consider image for re-mapping: "kope/dns-controller:1.15.2" | |
I0306 13:52:14.119911 10006 visitor.go:35] string value at spec.template.spec.hostNetwork: true | |
I0306 13:52:14.122333 10006 bootstrapchannelbuilder.go:76] Manifest apiVersion: apps/v1 | |
kind: Deployment | |
metadata: | |
labels: | |
k8s-addon: dns-controller.addons.k8s.io | |
k8s-app: dns-controller | |
version: v1.15.2 | |
name: dns-controller | |
namespace: kube-system | |
spec: | |
replicas: 1 | |
selector: | |
matchLabels: | |
k8s-app: dns-controller | |
template: | |
metadata: | |
annotations: | |
scheduler.alpha.kubernetes.io/critical-pod: "" | |
labels: | |
k8s-addon: dns-controller.addons.k8s.io | |
k8s-app: dns-controller | |
version: v1.15.2 | |
spec: | |
containers: | |
- command: | |
- /usr/bin/dns-controller | |
- --watch-ingress=false | |
- --dns=digitalocean | |
- --zone=test1.dev.fra1.do.services.example.com | |
- --zone=*/* | |
- -v=2 | |
env: | |
- name: DIGITALOCEAN_ACCESS_TOKEN | |
valueFrom: | |
secretKeyRef: | |
key: access-token | |
name: digitalocean | |
image: kope/dns-controller:1.15.2 | |
name: dns-controller | |
resources: | |
requests: | |
cpu: 50m | |
memory: 50Mi | |
dnsPolicy: Default | |
hostNetwork: true | |
nodeSelector: | |
node-role.kubernetes.io/master: "" | |
serviceAccount: dns-controller | |
tolerations: | |
- effect: NoSchedule | |
key: node-role.kubernetes.io/master | |
--- | |
apiVersion: v1 | |
kind: ServiceAccount | |
metadata: | |
labels: | |
k8s-addon: dns-controller.addons.k8s.io | |
name: dns-controller | |
namespace: kube-system | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRole | |
metadata: | |
labels: | |
k8s-addon: dns-controller.addons.k8s.io | |
name: kops:dns-controller | |
rules: | |
- apiGroups: | |
- "" | |
resources: | |
- endpoints | |
- services | |
- pods | |
- ingress | |
- nodes | |
verbs: | |
- get | |
- list | |
- watch | |
- apiGroups: | |
- extensions | |
resources: | |
- ingresses | |
verbs: | |
- get | |
- list | |
- watch | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRoleBinding | |
metadata: | |
labels: | |
k8s-addon: dns-controller.addons.k8s.io | |
name: kops:dns-controller | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: kops:dns-controller | |
subjects: | |
- apiGroup: rbac.authorization.k8s.io | |
kind: User | |
name: system:serviceaccount:kube-system:dns-controller | |
I0306 13:52:14.122391 10006 bootstrapchannelbuilder.go:79] hash e72c6307f1b240b011845e9fbacd860b72fe7e73 | |
I0306 13:52:14.125129 10006 images.go:59] Consider image for re-mapping: "digitalocean/digitalocean-cloud-controller-manager:v0.1.20" | |
I0306 13:52:14.125173 10006 visitor.go:35] string value at spec.template.spec.hostNetwork: true | |
I0306 13:52:14.125199 10006 visitor.go:40] float64 value at spec.template.spec.tolerations.[3].tolerationSeconds: 300.000000 | |
I0306 13:52:14.125215 10006 visitor.go:40] float64 value at spec.template.spec.tolerations.[4].tolerationSeconds: 300.000000 | |
I0306 13:52:14.129456 10006 bootstrapchannelbuilder.go:76] Manifest apiVersion: v1 | |
kind: Secret | |
metadata: | |
name: digitalocean | |
namespace: kube-system | |
stringData: | |
access-token: a9828e09a9e2bbc9ccc1aa5f2f34556bb1b740c6ae0d015e23635ed1f4dbce38 | |
--- | |
apiVersion: apps/v1 | |
kind: DaemonSet | |
metadata: | |
name: digitalocean-cloud-controller-manager | |
namespace: kube-system | |
spec: | |
selector: | |
matchLabels: | |
k8s-app: digitalocean-cloud-controller-manager | |
template: | |
metadata: | |
annotations: | |
scheduler.alpha.kubernetes.io/critical-pod: "" | |
labels: | |
k8s-app: digitalocean-cloud-controller-manager | |
spec: | |
containers: | |
- command: | |
- /bin/digitalocean-cloud-controller-manager | |
- --leader-elect=true | |
env: | |
- name: KUBERNETES_SERVICE_HOST | |
value: 127.0.0.1 | |
- name: KUBERNETES_SERVICE_PORT | |
value: "443" | |
- name: DO_ACCESS_TOKEN | |
valueFrom: | |
secretKeyRef: | |
key: access-token | |
name: digitalocean | |
image: digitalocean/digitalocean-cloud-controller-manager:v0.1.20 | |
name: digitalocean-cloud-controller-manager | |
resources: | |
requests: | |
cpu: 100m | |
memory: 50Mi | |
dnsPolicy: Default | |
hostNetwork: true | |
nodeSelector: | |
node-role.kubernetes.io/master: "" | |
serviceAccountName: cloud-controller-manager | |
tolerations: | |
- effect: NoSchedule | |
key: node.cloudprovider.kubernetes.io/uninitialized | |
value: "true" | |
- key: CriticalAddonsOnly | |
operator: Exists | |
- effect: NoSchedule | |
key: node-role.kubernetes.io/master | |
- effect: NoExecute | |
key: node.kubernetes.io/not-ready | |
operator: Exists | |
tolerationSeconds: 300 | |
- effect: NoExecute | |
key: node.kubernetes.io/unreachable | |
operator: Exists | |
tolerationSeconds: 300 | |
--- | |
apiVersion: v1 | |
kind: ServiceAccount | |
metadata: | |
name: cloud-controller-manager | |
namespace: kube-system | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRole | |
metadata: | |
annotations: | |
rbac.authorization.kubernetes.io/autoupdate: "true" | |
name: system:cloud-controller-manager | |
rules: | |
- apiGroups: | |
- "" | |
resources: | |
- events | |
verbs: | |
- create | |
- patch | |
- update | |
- apiGroups: | |
- "" | |
resources: | |
- nodes | |
verbs: | |
- '*' | |
- apiGroups: | |
- "" | |
resources: | |
- nodes/status | |
verbs: | |
- patch | |
- apiGroups: | |
- "" | |
resources: | |
- services | |
verbs: | |
- list | |
- patch | |
- update | |
- watch | |
- apiGroups: | |
- "" | |
resources: | |
- services/status | |
verbs: | |
- list | |
- patch | |
- update | |
- watch | |
- apiGroups: | |
- "" | |
resources: | |
- serviceaccounts | |
verbs: | |
- create | |
- apiGroups: | |
- "" | |
resources: | |
- persistentvolumes | |
verbs: | |
- get | |
- list | |
- update | |
- watch | |
- apiGroups: | |
- "" | |
resources: | |
- endpoints | |
verbs: | |
- create | |
- get | |
- list | |
- watch | |
- update | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRoleBinding | |
metadata: | |
name: system:cloud-controller-manager | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: system:cloud-controller-manager | |
subjects: | |
- kind: ServiceAccount | |
name: cloud-controller-manager | |
namespace: kube-system | |
I0306 13:52:14.129835 10006 bootstrapchannelbuilder.go:79] hash 1950d0270f8c17a620cce69abf3dfe9f1296ca1c | |
I0306 13:52:14.160833 10006 visitor.go:40] float64 value at spec.template.spec.terminationGracePeriodSeconds: 0.000000 | |
I0306 13:52:14.160920 10006 visitor.go:35] string value at spec.template.spec.containers.[0].securityContext.privileged: true | |
I0306 13:52:14.160937 10006 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[0].readOnly: true | |
I0306 13:52:14.160950 10006 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[1].readOnly: false | |
I0306 13:52:14.160961 10006 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[2].readOnly: false | |
I0306 13:52:14.160972 10006 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[3].readOnly: false | |
I0306 13:52:14.161036 10006 images.go:59] Consider image for re-mapping: "calico/node:v3.9.3" | |
I0306 13:52:14.161066 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.failureThreshold: 6.000000 | |
I0306 13:52:14.161080 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.initialDelaySeconds: 10.000000 | |
I0306 13:52:14.161091 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.periodSeconds: 10.000000 | |
I0306 13:52:14.161103 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.periodSeconds: 10.000000 | |
I0306 13:52:14.161118 10006 visitor.go:35] string value at spec.template.spec.hostNetwork: true | |
I0306 13:52:14.161149 10006 images.go:59] Consider image for re-mapping: "calico/cni:v3.9.3" | |
I0306 13:52:14.161191 10006 images.go:59] Consider image for re-mapping: "calico/cni:v3.9.3" | |
I0306 13:52:14.161208 10006 images.go:59] Consider image for re-mapping: "calico/pod2daemon-flexvol:v3.9.3" | |
I0306 13:52:14.161234 10006 visitor.go:40] float64 value at spec.updateStrategy.rollingUpdate.maxUnavailable: 1.000000 | |
I0306 13:52:14.166605 10006 images.go:59] Consider image for re-mapping: "calico/kube-controllers:v3.9.3" | |
I0306 13:52:14.166660 10006 visitor.go:40] float64 value at spec.replicas: 1.000000 | |
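The "Consider image for re-mapping" lines above show kops checking each addon image against a registry override in the cluster spec; with no override configured they leave the images (calico/node:v3.9.3 and friends) untouched. A hedged sketch of how to check whether such an override is set, assuming kops 1.15-era behaviour where the override lives under spec.assets.containerRegistry and that the same Spaces credentials and S3_ENDPOINT used for this run are exported:

  # State store and cluster name taken from this run; prints nothing if no override is configured
  export KOPS_STATE_STORE=do://test1.dev.fra1-state-store
  kops get cluster test1.dev.fra1.do.services.example.com -o yaml | grep -A2 'assets:'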
I0306 13:52:14.168270 10006 bootstrapchannelbuilder.go:76] Manifest apiVersion: v1 | |
data: | |
calico_backend: bird | |
cni_network_config: |- | |
{ | |
"name": "k8s-pod-network", | |
"cniVersion": "0.3.1", | |
"plugins": [ | |
{ | |
"type": "calico", | |
"log_level": "info", | |
"datastore_type": "kubernetes", | |
"nodename": "__KUBERNETES_NODE_NAME__", | |
"mtu": __CNI_MTU__, | |
"ipam": { | |
"type": "calico-ipam" | |
}, | |
"policy": { | |
"type": "k8s" | |
}, | |
"kubernetes": { | |
"kubeconfig": "__KUBECONFIG_FILEPATH__" | |
} | |
}, | |
{ | |
"type": "portmap", | |
"snat": true, | |
"capabilities": {"portMappings": true} | |
} | |
] | |
} | |
typha_service_name: none | |
veth_mtu: "1440" | |
kind: ConfigMap | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-config | |
namespace: kube-system | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: felixconfigurations.crd.projectcalico.org | |
spec: | |
group: crd.projectcalico.org | |
names: | |
kind: FelixConfiguration | |
plural: felixconfigurations | |
singular: felixconfiguration | |
scope: Cluster | |
version: v1 | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: ipamblocks.crd.projectcalico.org | |
spec: | |
group: crd.projectcalico.org | |
names: | |
kind: IPAMBlock | |
plural: ipamblocks | |
singular: ipamblock | |
scope: Cluster | |
version: v1 | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: blockaffinities.crd.projectcalico.org | |
spec: | |
group: crd.projectcalico.org | |
names: | |
kind: BlockAffinity | |
plural: blockaffinities | |
singular: blockaffinity | |
scope: Cluster | |
version: v1 | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: ipamhandles.crd.projectcalico.org | |
spec: | |
group: crd.projectcalico.org | |
names: | |
kind: IPAMHandle | |
plural: ipamhandles | |
singular: ipamhandle | |
scope: Cluster | |
version: v1 | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: ipamconfigs.crd.projectcalico.org | |
spec: | |
group: crd.projectcalico.org | |
names: | |
kind: IPAMConfig | |
plural: ipamconfigs | |
singular: ipamconfig | |
scope: Cluster | |
version: v1 | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: bgppeers.crd.projectcalico.org | |
spec: | |
group: crd.projectcalico.org | |
names: | |
kind: BGPPeer | |
plural: bgppeers | |
singular: bgppeer | |
scope: Cluster | |
version: v1 | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: bgpconfigurations.crd.projectcalico.org | |
spec: | |
group: crd.projectcalico.org | |
names: | |
kind: BGPConfiguration | |
plural: bgpconfigurations | |
singular: bgpconfiguration | |
scope: Cluster | |
version: v1 | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: ippools.crd.projectcalico.org | |
spec: | |
group: crd.projectcalico.org | |
names: | |
kind: IPPool | |
plural: ippools | |
singular: ippool | |
scope: Cluster | |
version: v1 | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: hostendpoints.crd.projectcalico.org | |
spec: | |
group: crd.projectcalico.org | |
names: | |
kind: HostEndpoint | |
plural: hostendpoints | |
singular: hostendpoint | |
scope: Cluster | |
version: v1 | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: clusterinformations.crd.projectcalico.org | |
spec: | |
group: crd.projectcalico.org | |
names: | |
kind: ClusterInformation | |
plural: clusterinformations | |
singular: clusterinformation | |
scope: Cluster | |
version: v1 | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: globalnetworkpolicies.crd.projectcalico.org | |
spec: | |
group: crd.projectcalico.org | |
names: | |
kind: GlobalNetworkPolicy | |
plural: globalnetworkpolicies | |
singular: globalnetworkpolicy | |
scope: Cluster | |
version: v1 | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: globalnetworksets.crd.projectcalico.org | |
spec: | |
group: crd.projectcalico.org | |
names: | |
kind: GlobalNetworkSet | |
plural: globalnetworksets | |
singular: globalnetworkset | |
scope: Cluster | |
version: v1 | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: networkpolicies.crd.projectcalico.org | |
spec: | |
group: crd.projectcalico.org | |
names: | |
kind: NetworkPolicy | |
plural: networkpolicies | |
singular: networkpolicy | |
scope: Namespaced | |
version: v1 | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: networksets.crd.projectcalico.org | |
spec: | |
group: crd.projectcalico.org | |
names: | |
kind: NetworkSet | |
plural: networksets | |
singular: networkset | |
scope: Namespaced | |
version: v1 | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRole | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-kube-controllers | |
rules: | |
- apiGroups: | |
- "" | |
resources: | |
- nodes | |
verbs: | |
- watch | |
- list | |
- get | |
- apiGroups: | |
- "" | |
resources: | |
- pods | |
verbs: | |
- get | |
- apiGroups: | |
- crd.projectcalico.org | |
resources: | |
- ippools | |
verbs: | |
- list | |
- apiGroups: | |
- crd.projectcalico.org | |
resources: | |
- blockaffinities | |
- ipamblocks | |
- ipamhandles | |
verbs: | |
- get | |
- list | |
- create | |
- update | |
- delete | |
- apiGroups: | |
- crd.projectcalico.org | |
resources: | |
- clusterinformations | |
verbs: | |
- get | |
- create | |
- update | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRoleBinding | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-kube-controllers | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: calico-kube-controllers | |
subjects: | |
- kind: ServiceAccount | |
name: calico-kube-controllers | |
namespace: kube-system | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRole | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-node | |
rules: | |
- apiGroups: | |
- "" | |
resources: | |
- pods | |
- nodes | |
- namespaces | |
verbs: | |
- get | |
- apiGroups: | |
- "" | |
resources: | |
- endpoints | |
- services | |
verbs: | |
- watch | |
- list | |
- get | |
- apiGroups: | |
- "" | |
resources: | |
- nodes/status | |
verbs: | |
- patch | |
- update | |
- apiGroups: | |
- networking.k8s.io | |
resources: | |
- networkpolicies | |
verbs: | |
- watch | |
- list | |
- apiGroups: | |
- "" | |
resources: | |
- pods | |
- namespaces | |
- serviceaccounts | |
verbs: | |
- list | |
- watch | |
- apiGroups: | |
- "" | |
resources: | |
- pods/status | |
verbs: | |
- patch | |
- apiGroups: | |
- crd.projectcalico.org | |
resources: | |
- globalfelixconfigs | |
- felixconfigurations | |
- bgppeers | |
- globalbgpconfigs | |
- bgpconfigurations | |
- ippools | |
- ipamblocks | |
- globalnetworkpolicies | |
- globalnetworksets | |
- networkpolicies | |
- networksets | |
- clusterinformations | |
- hostendpoints | |
- blockaffinities | |
verbs: | |
- get | |
- list | |
- watch | |
- apiGroups: | |
- crd.projectcalico.org | |
resources: | |
- ippools | |
- felixconfigurations | |
- clusterinformations | |
verbs: | |
- create | |
- update | |
- apiGroups: | |
- "" | |
resources: | |
- nodes | |
verbs: | |
- get | |
- list | |
- watch | |
- apiGroups: | |
- crd.projectcalico.org | |
resources: | |
- bgpconfigurations | |
- bgppeers | |
verbs: | |
- create | |
- update | |
- apiGroups: | |
- crd.projectcalico.org | |
resources: | |
- blockaffinities | |
- ipamblocks | |
- ipamhandles | |
verbs: | |
- get | |
- list | |
- create | |
- update | |
- delete | |
- apiGroups: | |
- crd.projectcalico.org | |
resources: | |
- ipamconfigs | |
verbs: | |
- get | |
- apiGroups: | |
- crd.projectcalico.org | |
resources: | |
- blockaffinities | |
verbs: | |
- watch | |
- apiGroups: | |
- apps | |
resources: | |
- daemonsets | |
verbs: | |
- get | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRoleBinding | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-node | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: calico-node | |
subjects: | |
- kind: ServiceAccount | |
name: calico-node | |
namespace: kube-system | |
--- | |
apiVersion: apps/v1 | |
kind: DaemonSet | |
metadata: | |
labels: | |
k8s-app: calico-node | |
role.kubernetes.io/networking: "1" | |
name: calico-node | |
namespace: kube-system | |
spec: | |
selector: | |
matchLabels: | |
k8s-app: calico-node | |
template: | |
metadata: | |
annotations: | |
scheduler.alpha.kubernetes.io/critical-pod: "" | |
labels: | |
k8s-app: calico-node | |
role.kubernetes.io/networking: "1" | |
spec: | |
containers: | |
- env: | |
- name: DATASTORE_TYPE | |
value: kubernetes | |
- name: FELIX_TYPHAK8SSERVICENAME | |
valueFrom: | |
configMapKeyRef: | |
key: typha_service_name | |
name: calico-config | |
- name: WAIT_FOR_DATASTORE | |
value: "true" | |
- name: NODENAME | |
valueFrom: | |
fieldRef: | |
fieldPath: spec.nodeName | |
- name: CALICO_NETWORKING_BACKEND | |
valueFrom: | |
configMapKeyRef: | |
key: calico_backend | |
name: calico-config | |
- name: CLUSTER_TYPE | |
value: kops,bgp | |
- name: IP | |
value: autodetect | |
- name: CALICO_IPV4POOL_IPIP | |
value: always | |
- name: FELIX_IPINIPMTU | |
valueFrom: | |
configMapKeyRef: | |
key: veth_mtu | |
name: calico-config | |
- name: CALICO_IPV4POOL_CIDR | |
value: 100.96.0.0/11 | |
- name: CALICO_DISABLE_FILE_LOGGING | |
value: "true" | |
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION | |
value: ACCEPT | |
- name: FELIX_IPV6SUPPORT | |
value: "false" | |
- name: FELIX_LOGSEVERITYSCREEN | |
value: info | |
- name: FELIX_HEALTHENABLED | |
value: "true" | |
- name: FELIX_PROMETHEUSMETRICSENABLED | |
value: "false" | |
- name: FELIX_PROMETHEUSMETRICSPORT | |
value: "9091" | |
- name: FELIX_PROMETHEUSGOMETRICSENABLED | |
value: "true" | |
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED | |
value: "true" | |
image: calico/node:v3.9.3 | |
livenessProbe: | |
exec: | |
command: | |
- /bin/calico-node | |
- -felix-live | |
failureThreshold: 6 | |
initialDelaySeconds: 10 | |
periodSeconds: 10 | |
name: calico-node | |
readinessProbe: | |
exec: | |
command: | |
- /bin/calico-node | |
- -felix-ready | |
- -bird-ready | |
periodSeconds: 10 | |
resources: | |
requests: | |
cpu: 90m | |
securityContext: | |
privileged: true | |
volumeMounts: | |
- mountPath: /lib/modules | |
name: lib-modules | |
readOnly: true | |
- mountPath: /run/xtables.lock | |
name: xtables-lock | |
readOnly: false | |
- mountPath: /var/run/calico | |
name: var-run-calico | |
readOnly: false | |
- mountPath: /var/lib/calico | |
name: var-lib-calico | |
readOnly: false | |
- mountPath: /var/run/nodeagent | |
name: policysync | |
hostNetwork: true | |
initContainers: | |
- command: | |
- /opt/cni/bin/calico-ipam | |
- -upgrade | |
env: | |
- name: KUBERNETES_NODE_NAME | |
valueFrom: | |
fieldRef: | |
fieldPath: spec.nodeName | |
- name: CALICO_NETWORKING_BACKEND | |
valueFrom: | |
configMapKeyRef: | |
key: calico_backend | |
name: calico-config | |
image: calico/cni:v3.9.3 | |
name: upgrade-ipam | |
volumeMounts: | |
- mountPath: /var/lib/cni/networks | |
name: host-local-net-dir | |
- mountPath: /host/opt/cni/bin | |
name: cni-bin-dir | |
- command: | |
- /install-cni.sh | |
env: | |
- name: CNI_CONF_NAME | |
value: 10-calico.conflist | |
- name: CNI_NETWORK_CONFIG | |
valueFrom: | |
configMapKeyRef: | |
key: cni_network_config | |
name: calico-config | |
- name: KUBERNETES_NODE_NAME | |
valueFrom: | |
fieldRef: | |
fieldPath: spec.nodeName | |
- name: CNI_MTU | |
valueFrom: | |
configMapKeyRef: | |
key: veth_mtu | |
name: calico-config | |
- name: SLEEP | |
value: "false" | |
image: calico/cni:v3.9.3 | |
name: install-cni | |
volumeMounts: | |
- mountPath: /host/opt/cni/bin | |
name: cni-bin-dir | |
- mountPath: /host/etc/cni/net.d | |
name: cni-net-dir | |
- image: calico/pod2daemon-flexvol:v3.9.3 | |
name: flexvol-driver | |
volumeMounts: | |
- mountPath: /host/driver | |
name: flexvol-driver-host | |
nodeSelector: | |
beta.kubernetes.io/os: linux | |
priorityClassName: system-node-critical | |
serviceAccountName: calico-node | |
terminationGracePeriodSeconds: 0 | |
tolerations: | |
- effect: NoSchedule | |
operator: Exists | |
- key: CriticalAddonsOnly | |
operator: Exists | |
- effect: NoExecute | |
operator: Exists | |
volumes: | |
- hostPath: | |
path: /lib/modules | |
name: lib-modules | |
- hostPath: | |
path: /var/run/calico | |
name: var-run-calico | |
- hostPath: | |
path: /var/lib/calico | |
name: var-lib-calico | |
- hostPath: | |
path: /run/xtables.lock | |
type: FileOrCreate | |
name: xtables-lock | |
- hostPath: | |
path: /opt/cni/bin | |
name: cni-bin-dir | |
- hostPath: | |
path: /etc/cni/net.d | |
name: cni-net-dir | |
- hostPath: | |
path: /var/lib/cni/networks | |
name: host-local-net-dir | |
- hostPath: | |
path: /var/run/nodeagent | |
type: DirectoryOrCreate | |
name: policysync | |
- hostPath: | |
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds | |
type: DirectoryOrCreate | |
name: flexvol-driver-host | |
updateStrategy: | |
rollingUpdate: | |
maxUnavailable: 1 | |
type: RollingUpdate | |
--- | |
apiVersion: v1 | |
kind: ServiceAccount | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-node | |
namespace: kube-system | |
--- | |
apiVersion: apps/v1 | |
kind: Deployment | |
metadata: | |
labels: | |
k8s-app: calico-kube-controllers | |
role.kubernetes.io/networking: "1" | |
name: calico-kube-controllers | |
namespace: kube-system | |
spec: | |
replicas: 1 | |
selector: | |
matchLabels: | |
k8s-app: calico-kube-controllers | |
strategy: | |
type: Recreate | |
template: | |
metadata: | |
annotations: | |
scheduler.alpha.kubernetes.io/critical-pod: "" | |
labels: | |
k8s-app: calico-kube-controllers | |
role.kubernetes.io/networking: "1" | |
name: calico-kube-controllers | |
namespace: kube-system | |
spec: | |
containers: | |
- env: | |
- name: ENABLED_CONTROLLERS | |
value: node | |
- name: DATASTORE_TYPE | |
value: kubernetes | |
image: calico/kube-controllers:v3.9.3 | |
name: calico-kube-controllers | |
readinessProbe: | |
exec: | |
command: | |
- /usr/bin/check-status | |
- -r | |
nodeSelector: | |
beta.kubernetes.io/os: linux | |
priorityClassName: system-cluster-critical | |
serviceAccountName: calico-kube-controllers | |
tolerations: | |
- key: CriticalAddonsOnly | |
operator: Exists | |
- effect: NoSchedule | |
key: node-role.kubernetes.io/master | |
--- | |
apiVersion: v1 | |
kind: ServiceAccount | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-kube-controllers | |
namespace: kube-system | |
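The Calico manifest above is the variant that keeps its data in the Kubernetes API (Calico v3.9.3, CRD-based). A minimal rollout check, assuming this variant is the one selected for the cluster and kubectl access is available; names and labels are taken from the manifests above:

  kubectl -n kube-system rollout status daemonset/calico-node
  kubectl -n kube-system get pods -l k8s-app=calico-node -o wide
  kubectl -n kube-system get deployment calico-kube-controllers
  # The CRDs registered above should all be present
  kubectl get crd | grep projectcalico.org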
I0306 13:52:14.168429 10006 bootstrapchannelbuilder.go:79] hash 9684a0594a8a6a93a36ea6acf17a5a33ec23ce1d | |
I0306 13:52:14.181588 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.failureThreshold: 6.000000 | |
I0306 13:52:14.181662 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.httpGet.port: 9099.000000 | |
I0306 13:52:14.181679 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.initialDelaySeconds: 10.000000 | |
I0306 13:52:14.181690 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].livenessProbe.periodSeconds: 10.000000 | |
I0306 13:52:14.181708 10006 visitor.go:40] float64 value at spec.template.spec.containers.[0].readinessProbe.periodSeconds: 10.000000 | |
I0306 13:52:14.181722 10006 visitor.go:35] string value at spec.template.spec.containers.[0].securityContext.privileged: true | |
I0306 13:52:14.181736 10006 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[0].readOnly: true | |
I0306 13:52:14.181747 10006 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[1].readOnly: false | |
I0306 13:52:14.181760 10006 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[2].readOnly: false | |
I0306 13:52:14.181773 10006 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[3].readOnly: true | |
I0306 13:52:14.181784 10006 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[4].readOnly: true | |
I0306 13:52:14.181851 10006 images.go:59] Consider image for re-mapping: "calico/node:v3.8.0" | |
I0306 13:52:14.181899 10006 images.go:59] Consider image for re-mapping: "calico/cni:v3.8.0" | |
I0306 13:52:14.181930 10006 visitor.go:35] string value at spec.template.spec.containers.[1].volumeMounts.[2].readOnly: true | |
I0306 13:52:14.181944 10006 visitor.go:35] string value at spec.template.spec.hostNetwork: true | |
I0306 13:52:14.181958 10006 visitor.go:35] string value at spec.template.spec.initContainers.[0].volumeMounts.[0].readOnly: true | |
I0306 13:52:14.181971 10006 visitor.go:35] string value at spec.template.spec.initContainers.[0].volumeMounts.[1].readOnly: true | |
I0306 13:52:14.182009 10006 images.go:59] Consider image for re-mapping: "calico/upgrade:v1.0.5" | |
I0306 13:52:14.182026 10006 visitor.go:40] float64 value at spec.template.spec.terminationGracePeriodSeconds: 0.000000 | |
I0306 13:52:14.182041 10006 visitor.go:40] float64 value at spec.updateStrategy.rollingUpdate.maxUnavailable: 1.000000 | |
I0306 13:52:14.187310 10006 visitor.go:40] float64 value at spec.replicas: 1.000000 | |
I0306 13:52:14.187386 10006 images.go:59] Consider image for re-mapping: "calico/kube-controllers:v3.8.0" | |
I0306 13:52:14.187421 10006 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[0].readOnly: true | |
I0306 13:52:14.187432 10006 visitor.go:35] string value at spec.template.spec.hostNetwork: true | |
I0306 13:52:14.187472 10006 images.go:59] Consider image for re-mapping: "calico/upgrade:v1.0.5" | |
I0306 13:52:14.187498 10006 visitor.go:35] string value at spec.template.spec.initContainers.[0].volumeMounts.[0].readOnly: true | |
I0306 13:52:14.187510 10006 visitor.go:35] string value at spec.template.spec.initContainers.[0].volumeMounts.[1].readOnly: true | |
I0306 13:52:14.192778 10006 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[0].readOnly: true | |
I0306 13:52:14.192840 10006 visitor.go:35] string value at spec.template.spec.containers.[0].volumeMounts.[1].readOnly: true | |
I0306 13:52:14.192924 10006 images.go:59] Consider image for re-mapping: "calico/upgrade:v1.0.5" | |
I0306 13:52:14.192950 10006 visitor.go:35] string value at spec.template.spec.hostNetwork: true | |
I0306 13:52:14.195713 10006 bootstrapchannelbuilder.go:76] Manifest apiVersion: v1 | |
data: | |
calico_backend: bird | |
cni_network_config: |- | |
{ | |
"name": "k8s-pod-network", | |
"cniVersion": "0.3.1", | |
"plugins": [ | |
{ | |
"type": "calico", | |
"etcd_endpoints": "__ETCD_ENDPOINTS__", | |
"etcd_ca_cert_file": "/srv/kubernetes/calico/ca.pem", | |
"etcd_cert_file": "/srv/kubernetes/calico/calico-client.pem", | |
"etcd_key_file": "/srv/kubernetes/calico/calico-client-key.pem", | |
"etcd_scheme": "https", | |
"log_level": "info", | |
"ipam": { | |
"type": "calico-ipam" | |
}, | |
"policy": { | |
"type": "k8s" | |
}, | |
"kubernetes": { | |
"kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" | |
} | |
}, | |
{ | |
"type": "portmap", | |
"snat": true, | |
"capabilities": {"portMappings": true} | |
} | |
] | |
} | |
etcd_endpoints: https://etcd-1.internal.test1.dev.fra1.do.services.example.com:4001 | |
kind: ConfigMap | |
metadata: | |
name: calico-config | |
namespace: kube-system | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
kind: ClusterRole | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-node | |
rules: | |
- apiGroups: | |
- "" | |
resources: | |
- pods | |
- nodes | |
- namespaces | |
verbs: | |
- get | |
--- | |
apiVersion: v1 | |
kind: ServiceAccount | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-node | |
namespace: kube-system | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
kind: ClusterRoleBinding | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-node | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: calico-node | |
subjects: | |
- kind: ServiceAccount | |
name: calico-node | |
namespace: kube-system | |
--- | |
apiVersion: v1 | |
kind: ServiceAccount | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-kube-controllers | |
namespace: kube-system | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
kind: ClusterRole | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-kube-controllers | |
rules: | |
- apiGroups: | |
- "" | |
- extensions | |
resources: | |
- pods | |
- namespaces | |
- networkpolicies | |
- nodes | |
verbs: | |
- watch | |
- list | |
- apiGroups: | |
- networking.k8s.io | |
resources: | |
- networkpolicies | |
verbs: | |
- watch | |
- list | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
kind: ClusterRoleBinding | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-kube-controllers | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: calico-kube-controllers | |
subjects: | |
- kind: ServiceAccount | |
name: calico-kube-controllers | |
namespace: kube-system | |
--- | |
apiVersion: extensions/v1beta1 | |
kind: DaemonSet | |
metadata: | |
labels: | |
k8s-app: calico-node | |
role.kubernetes.io/networking: "1" | |
name: calico-node | |
namespace: kube-system | |
spec: | |
selector: | |
matchLabels: | |
k8s-app: calico-node | |
template: | |
metadata: | |
annotations: | |
scheduler.alpha.kubernetes.io/critical-pod: "" | |
labels: | |
k8s-app: calico-node | |
role.kubernetes.io/networking: "1" | |
spec: | |
containers: | |
- env: | |
- name: ETCD_ENDPOINTS | |
valueFrom: | |
configMapKeyRef: | |
key: etcd_endpoints | |
name: calico-config | |
- name: ETCD_CERT_FILE | |
value: /certs/calico-client.pem | |
- name: ETCD_KEY_FILE | |
value: /certs/calico-client-key.pem | |
- name: ETCD_CA_CERT_FILE | |
value: /certs/ca.pem | |
- name: CALICO_NETWORKING_BACKEND | |
valueFrom: | |
configMapKeyRef: | |
key: calico_backend | |
name: calico-config | |
- name: CLUSTER_TYPE | |
value: kops,bgp | |
- name: CALICO_DISABLE_FILE_LOGGING | |
value: "true" | |
- name: CALICO_K8S_NODE_REF | |
valueFrom: | |
fieldRef: | |
fieldPath: spec.nodeName | |
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION | |
value: ACCEPT | |
- name: CALICO_IPV4POOL_CIDR | |
value: 100.96.0.0/11 | |
- name: CALICO_IPV4POOL_IPIP | |
value: always | |
- name: FELIX_IPV6SUPPORT | |
value: "false" | |
- name: FELIX_LOGSEVERITYSCREEN | |
value: info | |
- name: FELIX_PROMETHEUSMETRICSENABLED | |
value: "false" | |
- name: FELIX_PROMETHEUSMETRICSPORT | |
value: "9091" | |
- name: FELIX_PROMETHEUSGOMETRICSENABLED | |
value: "true" | |
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED | |
value: "true" | |
- name: IP | |
value: autodetect | |
- name: FELIX_HEALTHENABLED | |
value: "true" | |
image: calico/node:v3.8.0 | |
livenessProbe: | |
failureThreshold: 6 | |
httpGet: | |
host: localhost | |
path: /liveness | |
port: 9099 | |
initialDelaySeconds: 10 | |
periodSeconds: 10 | |
name: calico-node | |
readinessProbe: | |
exec: | |
command: | |
- /bin/calico-node | |
- -bird-ready | |
- -felix-ready | |
periodSeconds: 10 | |
resources: | |
requests: | |
cpu: 10m | |
securityContext: | |
privileged: true | |
volumeMounts: | |
- mountPath: /lib/modules | |
name: lib-modules | |
readOnly: true | |
- mountPath: /var/run/calico | |
name: var-run-calico | |
readOnly: false | |
- mountPath: /var/lib/calico | |
name: var-lib-calico | |
readOnly: false | |
- mountPath: /etc/hosts | |
name: etc-hosts | |
readOnly: true | |
- mountPath: /certs | |
name: calico | |
readOnly: true | |
- command: | |
- /install-cni.sh | |
env: | |
- name: CNI_CONF_NAME | |
value: 10-calico.conflist | |
- name: ETCD_ENDPOINTS | |
valueFrom: | |
configMapKeyRef: | |
key: etcd_endpoints | |
name: calico-config | |
- name: CNI_NETWORK_CONFIG | |
valueFrom: | |
configMapKeyRef: | |
key: cni_network_config | |
name: calico-config | |
image: calico/cni:v3.8.0 | |
name: install-cni | |
resources: | |
requests: | |
cpu: 10m | |
volumeMounts: | |
- mountPath: /host/opt/cni/bin | |
name: cni-bin-dir | |
- mountPath: /host/etc/cni/net.d | |
name: cni-net-dir | |
- mountPath: /etc/hosts | |
name: etc-hosts | |
readOnly: true | |
hostNetwork: true | |
initContainers: | |
- command: | |
- /bin/sh | |
- -c | |
- /node-init-container.sh | |
env: | |
- name: CALICO_ETCD_ENDPOINTS | |
valueFrom: | |
configMapKeyRef: | |
key: etcd_endpoints | |
name: calico-config | |
- name: CALICO_APIV1_DATASTORE_TYPE | |
value: etcdv2 | |
- name: CALICO_APIV1_ETCD_ENDPOINTS | |
valueFrom: | |
configMapKeyRef: | |
key: etcd_endpoints | |
name: calico-config | |
- name: CALICO_ETCD_CERT_FILE | |
value: /certs/calico-client.pem | |
- name: CALICO_ETCD_KEY_FILE | |
value: /certs/calico-client-key.pem | |
- name: CALICO_ETCD_CA_CERT_FILE | |
value: /certs/ca.pem | |
- name: CALICO_APIV1_ETCD_CERT_FILE | |
value: /certs/calico-client.pem | |
- name: CALICO_APIV1_ETCD_KEY_FILE | |
value: /certs/calico-client-key.pem | |
- name: CALICO_APIV1_ETCD_CA_CERT_FILE | |
value: /certs/ca.pem | |
image: calico/upgrade:v1.0.5 | |
name: migrate | |
volumeMounts: | |
- mountPath: /etc/hosts | |
name: etc-hosts | |
readOnly: true | |
- mountPath: /certs | |
name: calico | |
readOnly: true | |
serviceAccountName: calico-node | |
terminationGracePeriodSeconds: 0 | |
tolerations: | |
- effect: NoSchedule | |
operator: Exists | |
- key: CriticalAddonsOnly | |
operator: Exists | |
- effect: NoExecute | |
operator: Exists | |
volumes: | |
- hostPath: | |
path: /lib/modules | |
name: lib-modules | |
- hostPath: | |
path: /var/run/calico | |
name: var-run-calico | |
- hostPath: | |
path: /var/lib/calico | |
name: var-lib-calico | |
- hostPath: | |
path: /opt/cni/bin | |
name: cni-bin-dir | |
- hostPath: | |
path: /etc/cni/net.d | |
name: cni-net-dir | |
- hostPath: | |
path: /etc/hosts | |
name: etc-hosts | |
- hostPath: | |
path: /srv/kubernetes/calico | |
name: calico | |
updateStrategy: | |
rollingUpdate: | |
maxUnavailable: 1 | |
type: RollingUpdate | |
--- | |
apiVersion: extensions/v1beta1 | |
kind: Deployment | |
metadata: | |
annotations: | |
scheduler.alpha.kubernetes.io/critical-pod: "" | |
labels: | |
k8s-app: calico-kube-controllers | |
role.kubernetes.io/networking: "1" | |
name: calico-kube-controllers | |
namespace: kube-system | |
spec: | |
replicas: 1 | |
strategy: | |
type: Recreate | |
template: | |
metadata: | |
labels: | |
k8s-app: calico-kube-controllers | |
role.kubernetes.io/networking: "1" | |
name: calico-kube-controllers | |
namespace: kube-system | |
spec: | |
containers: | |
- env: | |
- name: ETCD_ENDPOINTS | |
valueFrom: | |
configMapKeyRef: | |
key: etcd_endpoints | |
name: calico-config | |
- name: ENABLED_CONTROLLERS | |
value: policy,profile,workloadendpoint,node | |
- name: ETCD_CERT_FILE | |
value: /certs/calico-client.pem | |
- name: ETCD_KEY_FILE | |
value: /certs/calico-client-key.pem | |
- name: ETCD_CA_CERT_FILE | |
value: /certs/ca.pem | |
image: calico/kube-controllers:v3.8.0 | |
name: calico-kube-controllers | |
readinessProbe: | |
exec: | |
command: | |
- /usr/bin/check-status | |
- -r | |
resources: | |
requests: | |
cpu: 10m | |
volumeMounts: | |
- mountPath: /certs | |
name: calico | |
readOnly: true | |
hostNetwork: true | |
initContainers: | |
- command: | |
- /bin/sh | |
- -c | |
- /controller-init.sh | |
env: | |
- name: CALICO_ETCD_ENDPOINTS | |
valueFrom: | |
configMapKeyRef: | |
key: etcd_endpoints | |
name: calico-config | |
- name: CALICO_APIV1_DATASTORE_TYPE | |
value: etcdv2 | |
- name: CALICO_APIV1_ETCD_ENDPOINTS | |
valueFrom: | |
configMapKeyRef: | |
key: etcd_endpoints | |
name: calico-config | |
- name: CALICO_ETCD_CERT_FILE | |
value: /certs/calico-client.pem | |
- name: CALICO_ETCD_KEY_FILE | |
value: /certs/calico-client-key.pem | |
- name: CALICO_ETCD_CA_CERT_FILE | |
value: /certs/ca.pem | |
- name: CALICO_APIV1_ETCD_CERT_FILE | |
value: /certs/calico-client.pem | |
- name: CALICO_APIV1_ETCD_KEY_FILE | |
value: /certs/calico-client-key.pem | |
- name: CALICO_APIV1_ETCD_CA_CERT_FILE | |
value: /certs/ca.pem | |
image: calico/upgrade:v1.0.5 | |
name: migrate | |
volumeMounts: | |
- mountPath: /etc/hosts | |
name: etc-hosts | |
readOnly: true | |
- mountPath: /certs | |
name: calico | |
readOnly: true | |
serviceAccountName: calico-kube-controllers | |
tolerations: | |
- key: CriticalAddonsOnly | |
operator: Exists | |
- effect: NoSchedule | |
key: node-role.kubernetes.io/master | |
volumes: | |
- hostPath: | |
path: /etc/hosts | |
name: etc-hosts | |
- hostPath: | |
path: /srv/kubernetes/calico | |
name: calico | |
--- | |
apiVersion: v1 | |
kind: ServiceAccount | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-upgrade-job | |
namespace: kube-system | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
kind: ClusterRole | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-upgrade-job | |
rules: | |
- apiGroups: | |
- extensions | |
resources: | |
- daemonsets | |
- daemonsets/status | |
verbs: | |
- get | |
- list | |
- watch | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
kind: ClusterRoleBinding | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-upgrade-job | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: calico-upgrade-job | |
subjects: | |
- kind: ServiceAccount | |
name: calico-upgrade-job | |
namespace: kube-system | |
--- | |
apiVersion: batch/v1 | |
kind: Job | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
name: calico-complete-upgrade-v331 | |
namespace: kube-system | |
spec: | |
template: | |
metadata: | |
labels: | |
role.kubernetes.io/networking: "1" | |
spec: | |
containers: | |
- command: | |
- /bin/sh | |
- -c | |
- /completion-job.sh | |
env: | |
- name: EXPECTED_NODE_IMAGE | |
value: quay.io/calico/node:v3.7.4 | |
- name: CALICO_ETCD_ENDPOINTS | |
valueFrom: | |
configMapKeyRef: | |
key: etcd_endpoints | |
name: calico-config | |
- name: CALICO_APIV1_DATASTORE_TYPE | |
value: etcdv2 | |
- name: CALICO_APIV1_ETCD_ENDPOINTS | |
valueFrom: | |
configMapKeyRef: | |
key: etcd_endpoints | |
name: calico-config | |
- name: CALICO_ETCD_CERT_FILE | |
value: /certs/calico-client.pem | |
- name: CALICO_ETCD_KEY_FILE | |
value: /certs/calico-client-key.pem | |
- name: CALICO_ETCD_CA_CERT_FILE | |
value: /certs/ca.pem | |
- name: CALICO_APIV1_ETCD_CERT_FILE | |
value: /certs/calico-client.pem | |
- name: CALICO_APIV1_ETCD_KEY_FILE | |
value: /certs/calico-client-key.pem | |
- name: CALICO_APIV1_ETCD_CA_CERT_FILE | |
value: /certs/ca.pem | |
image: calico/upgrade:v1.0.5 | |
name: migrate-completion | |
volumeMounts: | |
- mountPath: /etc/hosts | |
name: etc-hosts | |
readOnly: true | |
- mountPath: /certs | |
name: calico | |
readOnly: true | |
hostNetwork: true | |
restartPolicy: OnFailure | |
serviceAccountName: calico-upgrade-job | |
volumes: | |
- hostPath: | |
path: /etc/hosts | |
name: etc-hosts | |
- hostPath: | |
path: /srv/kubernetes/calico | |
name: calico | |
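The second Calico manifest above is the legacy variant that keeps Calico's data in etcd (Calico v3.8.0, plus the calico/upgrade v1.0.5 migration init containers and the calico-complete-upgrade-v331 Job). Which of the two variants actually gets applied is chosen by the addon channel according to the cluster's Kubernetes version, so the sketch below is only relevant if this etcd-datastore variant is the one installed; names come from the manifest above:

  kubectl -n kube-system get job calico-complete-upgrade-v331
  kubectl -n kube-system logs job/calico-complete-upgrade-v331 --tail=50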
I0306 13:52:14.195888 10006 bootstrapchannelbuilder.go:79] hash 5ced6edba94381bdb4c014b07eb645533c8bd4e0 | |
I0306 13:52:14.199302 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.199344 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.199357 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.199369 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.199380 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.199391 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.199404 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.199416 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.199428 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.199439 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.199450 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.199471 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.199493 10006 task.go:103] testing task "Secret" | |
I0306 13:52:14.199512 10006 task.go:103] testing task "Secret" | |
I0306 13:52:14.199527 10006 task.go:103] testing task "Secret" | |
I0306 13:52:14.199538 10006 task.go:103] testing task "Secret" | |
I0306 13:52:14.199549 10006 task.go:103] testing task "Secret" | |
I0306 13:52:14.199560 10006 task.go:103] testing task "Secret" | |
I0306 13:52:14.199572 10006 task.go:103] testing task "Secret" | |
I0306 13:52:14.199583 10006 task.go:103] testing task "Secret" | |
I0306 13:52:14.199594 10006 task.go:103] testing task "Secret" | |
I0306 13:52:14.199625 10006 task.go:103] testing task "MirrorSecrets" | |
I0306 13:52:14.199652 10006 task.go:103] testing task "MirrorKeystore" | |
I0306 13:52:14.214276 10006 build_flags.go:50] ignoring non-field: | |
I0306 13:52:14.214347 10006 build_flags.go:50] ignoring non-field: | |
I0306 13:52:14.214458 10006 proxy.go:30] proxies is == nil, returning empty list | |
I0306 13:52:14.227951 10006 task.go:103] testing task "ManagedFile" | |
I0306 13:52:14.228085 10006 task.go:103] testing task "ManagedFile" | |
I0306 13:52:14.228105 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.228119 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.228131 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.229097 10006 build_flags.go:50] ignoring non-field: | |
I0306 13:52:14.229165 10006 build_flags.go:50] ignoring non-field: | |
I0306 13:52:14.229285 10006 proxy.go:30] proxies is == nil, returning empty list | |
I0306 13:52:14.231283 10006 task.go:103] testing task "ManagedFile" | |
I0306 13:52:14.231353 10006 task.go:103] testing task "ManagedFile" | |
I0306 13:52:14.231370 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.231384 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.231397 10006 task.go:103] testing task "Keypair" | |
I0306 13:52:14.231426 10006 task.go:75] EnsureTask ignoring identical | |
I0306 13:52:14.231464 10006 task.go:103] testing task "Volume" | |
I0306 13:52:14.231488 10006 task.go:103] testing task "Volume" | |
I0306 13:52:14.231730 10006 task.go:103] testing task "Droplet" | |
I0306 13:52:14.231865 10006 task.go:103] testing task "Droplet" | |
I0306 13:52:14.236763 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/cluster.spec" | |
I0306 13:52:14.236814 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/cluster.spec" SSE="-" ACL="" | |
I0306 13:52:14.338434 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/master-fra1" | |
I0306 13:52:14.429416 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/master-fra1" | |
I0306 13:52:14.513585 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/master-fra1" | |
I0306 13:52:14.513635 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/instancegroup/master-fra1" SSE="-" ACL="" | |
I0306 13:52:14.649793 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/master-fra1" | |
I0306 13:52:14.649844 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/instancegroup/master-fra1" SSE="-" ACL="" | |
I0306 13:52:14.758099 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/nodes" | |
I0306 13:52:14.840703 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/nodes" | |
I0306 13:52:14.920122 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/nodes" | |
I0306 13:52:14.920172 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/instancegroup/nodes" SSE="-" ACL="" | |
I0306 13:52:15.026569 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/instancegroup/nodes" | |
I0306 13:52:15.026621 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/instancegroup/nodes" SSE="-" ACL="" | |
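The s3fs lines above show kops persisting the cluster spec and the instance-group definitions (master-fra1 and nodes) to the DigitalOcean Spaces bucket backing the state store. A small sketch of how those objects could be listed directly, assuming the aws CLI is installed and the same Spaces credentials used for this run are exported; the endpoint and bucket name are taken from the log:

  # DigitalOcean Spaces speaks the S3 API, so a custom endpoint is enough
  aws s3 ls s3://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/ \
      --endpoint-url https://fra1.digitaloceanspaces.com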
I0306 13:52:15.131458 10006 topological_sort.go:64] Dependencies: | |
I0306 13:52:15.131489 10006 topological_sort.go:66] Keypair/etcd-manager-ca-events: [] | |
I0306 13:52:15.131504 10006 topological_sort.go:66] Keypair/kube-controller-manager: [Keypair/ca] | |
I0306 13:52:15.131546 10006 topological_sort.go:66] Secret/kube-proxy: [] | |
I0306 13:52:15.131558 10006 topological_sort.go:66] Secret/kube: [] | |
I0306 13:52:15.131605 10006 topological_sort.go:66] test1.dev.fra1.do.services.example.com-addons-kube-dns.addons.k8s.io-k8s-1.6: [] | |
I0306 13:52:15.131622 10006 topological_sort.go:66] Keypair/apiserver-aggregator: [Keypair/apiserver-aggregator-ca] | |
I0306 13:52:15.131633 10006 topological_sort.go:66] ManagedFile/etcd-cluster-spec-main: [] | |
I0306 13:52:15.131644 10006 topological_sort.go:66] Keypair/etcd-manager-ca-main: [] | |
I0306 13:52:15.131654 10006 topological_sort.go:66] MirrorSecrets/mirror-secrets: [Secret/system:logging Secret/kube Secret/admin Secret/system:monitoring Secret/system:scheduler Secret/kubelet Secret/system:dns Secret/system:controller_manager Secret/kube-proxy] | |
I0306 13:52:15.131670 10006 topological_sort.go:66] Keypair/etcd-clients-ca: [] | |
I0306 13:52:15.131684 10006 topological_sort.go:66] Keypair/kops: [Keypair/ca] | |
I0306 13:52:15.131695 10006 topological_sort.go:66] Secret/system:logging: [] | |
I0306 13:52:15.131704 10006 topological_sort.go:66] test1.dev.fra1.do.services.example.com-addons-dns-controller.addons.k8s.io-k8s-1.6: [] | |
I0306 13:52:15.131714 10006 topological_sort.go:66] test1.dev.fra1.do.services.example.com-addons-kube-dns.addons.k8s.io-pre-k8s-1.6: [] | |
I0306 13:52:15.131724 10006 topological_sort.go:66] Keypair/kube-scheduler: [Keypair/ca] | |
I0306 13:52:15.131734 10006 topological_sort.go:66] test1.dev.fra1.do.services.example.com-addons-dns-controller.addons.k8s.io-pre-k8s-1.6: [] | |
I0306 13:52:15.131745 10006 topological_sort.go:66] Keypair/etcd-peers-ca-events: [] | |
I0306 13:52:15.131754 10006 topological_sort.go:66] Secret/system:dns: [] | |
I0306 13:52:15.131764 10006 topological_sort.go:66] test1.dev.fra1.do.services.example.com-addons-limit-range.addons.k8s.io: [] | |
I0306 13:52:15.131774 10006 topological_sort.go:66] Keypair/apiserver-proxy-client: [Keypair/ca] | |
I0306 13:52:15.131785 10006 topological_sort.go:66] Keypair/kubecfg: [Keypair/ca] | |
I0306 13:52:15.131795 10006 topological_sort.go:66] Keypair/ca: [] | |
I0306 13:52:15.131805 10006 topological_sort.go:66] Keypair/kube-proxy: [Keypair/ca] | |
I0306 13:52:15.131815 10006 topological_sort.go:66] Secret/system:scheduler: [] | |
I0306 13:52:15.131825 10006 topological_sort.go:66] MirrorKeystore/mirror-keystore: [Secret/system:scheduler Secret/system:dns Secret/kubelet Secret/system:controller_manager Secret/kube-proxy Secret/kube Secret/admin Secret/system:logging Secret/system:monitoring] | |
I0306 13:52:15.131843 10006 topological_sort.go:66] Keypair/apiserver-aggregator-ca: [] | |
I0306 13:52:15.131856 10006 topological_sort.go:66] test1.dev.fra1.do.services.example.com-addons-networking.projectcalico.org-k8s-1.7-v3: [] | |
I0306 13:52:15.131866 10006 topological_sort.go:66] test1.dev.fra1.do.services.example.com-addons-bootstrap: [] | |
I0306 13:52:15.131876 10006 topological_sort.go:66] test1.dev.fra1.do.services.example.com-addons-digitalocean-cloud-controller.addons.k8s.io-k8s-1.8: [] | |
I0306 13:52:15.131885 10006 topological_sort.go:66] ManagedFile/etcd-cluster-spec-events: [] | |
I0306 13:52:15.131896 10006 topological_sort.go:66] Secret/system:monitoring: [] | |
I0306 13:52:15.131906 10006 topological_sort.go:66] test1.dev.fra1.do.services.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9: [] | |
I0306 13:52:15.131916 10006 topological_sort.go:66] Secret/kubelet: [] | |
I0306 13:52:15.131926 10006 topological_sort.go:66] Droplet/master-fra1.masters.test1.dev.fra1.do.services.example.com: [] | |
I0306 13:52:15.131935 10006 topological_sort.go:66] test1.dev.fra1.do.services.example.com-addons-kube-dns.addons.k8s.io-k8s-1.12: [] | |
I0306 13:52:15.131945 10006 topological_sort.go:66] Droplet/nodes.test1.dev.fra1.do.services.example.com: [] | |
I0306 13:52:15.131955 10006 topological_sort.go:66] Keypair/etcd-peers-ca-main: [] | |
I0306 13:52:15.131964 10006 topological_sort.go:66] ManagedFile/manifests-etcdmanager-main: [] | |
I0306 13:52:15.131974 10006 topological_sort.go:66] Secret/admin: [] | |
I0306 13:52:15.131984 10006 topological_sort.go:66] Keypair/master: [Keypair/ca] | |
I0306 13:52:15.131995 10006 topological_sort.go:66] test1.dev.fra1.do.services.example.com-addons-networking.projectcalico.org-k8s-1.12: [] | |
I0306 13:52:15.132004 10006 topological_sort.go:66] Keypair/kubelet: [Keypair/ca] | |
I0306 13:52:15.132015 10006 topological_sort.go:66] test1.dev.fra1.do.services.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12: [] | |
I0306 13:52:15.132025 10006 topological_sort.go:66] Keypair/kubelet-api: [Keypair/ca] | |
I0306 13:52:15.132035 10006 topological_sort.go:66] test1.dev.fra1.do.services.example.com-addons-core.addons.k8s.io: [] | |
I0306 13:52:15.132045 10006 topological_sort.go:66] ManagedFile/manifests-etcdmanager-events: [] | |
I0306 13:52:15.132055 10006 topological_sort.go:66] Secret/system:controller_manager: [] | |
I0306 13:52:15.132065 10006 topological_sort.go:66] test1.dev.fra1.do.services.example.com-addons-rbac.addons.k8s.io-k8s-1.8: [] | |
I0306 13:52:15.132074 10006 topological_sort.go:66] Volume/kops-1-etcd-main-test1-dev-fra1-do-services-example-com: [] | |
I0306 13:52:15.132084 10006 topological_sort.go:66] Volume/kops-1-etcd-events-test1-dev-fra1-do-services-example-com: [] | |
I0306 13:52:15.132204 10006 executor.go:103] Tasks: 0 done / 50 total; 38 can run | |
I0306 13:52:15.132310 10006 executor.go:178] Executing task "Secret/kubelet": *fitasks.Secret {"Name":"kubelet","Lifecycle":"Sync"} | |
I0306 13:52:15.132422 10006 executor.go:178] Executing task "Keypair/etcd-manager-ca-main": *fitasks.Keypair {"Name":"etcd-manager-ca-main","alternateNames":null,"alternateNameTasks":null,"Lifecycle":null,"Signer":null,"subject":"cn=etcd-manager-ca-main","type":"ca","format":"v1alpha2"} | |
I0306 13:52:15.132632 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kubelet" | |
I0306 13:52:15.132335 10006 executor.go:178] Executing task "ManagedFile/manifests-etcdmanager-events": *fitasks.ManagedFile {"Name":"manifests-etcdmanager-events","Lifecycle":"Sync","Location":"manifests/etcd/events.yaml","Contents":{"Name":"","Resource":"apiVersion: v1\nkind: Pod\nmetadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n creationTimestamp: null\n labels:\n k8s-app: etcd-manager-events\n name: etcd-manager-events\n namespace: kube-system\nspec:\n containers:\n - command:\n - /bin/sh\n - -c\n - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log \u003c /tmp/pipe \u0026 ) ; exec /etcd-manager\n --backup-store=do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/backups/etcd/events\n --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true\n --dns-suffix=.internal.test1.dev.fra1.do.services.example.com --etcd-insecure=false\n --grpc-port=3997 --insecure=false --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995\n --v=6 --volume-name-tag=etcdCluster-events --volume-provider=do --volume-tag=KubernetesCluster=test1-dev-fra1-do-services-example-com\n --volume-tag=k8s-index \u003e /tmp/pipe 2\u003e\u00261\n env:\n - name: DIGITALOCEAN_ACCESS_TOKEN\n value: a9828e09a9e2bbc9ccc1aa5f2f34556bb1b740c6ae0d015e23635ed1f4dbce38\n - name: S3_ACCESS_KEY_ID\n value: 626COYMFSWVJD7CEDB5M\n - name: S3_ENDPOINT\n value: https://fra1.digitaloceanspaces.com\n - name: S3_SECRET_ACCESS_KEY\n value: 3nw4SmWyR/ZNxgmg4C4XjDMaVrNOQo65lsZlMhEKjqY\n image: kopeio/etcd-manager:3.0.20190930\n name: etcd-manager\n resources:\n requests:\n cpu: 100m\n memory: 100Mi\n securityContext:\n privileged: true\n volumeMounts:\n - mountPath: /rootfs\n name: rootfs\n - mountPath: /etc/hosts\n name: hosts\n - mountPath: /etc/kubernetes/pki/etcd-manager\n name: pki\n - mountPath: /var/log/etcd.log\n name: varlogetcd\n hostNetwork: true\n hostPID: true\n priorityClassName: system-cluster-critical\n tolerations:\n - key: CriticalAddonsOnly\n operator: Exists\n volumes:\n - hostPath:\n path: /\n type: Directory\n name: rootfs\n - hostPath:\n path: /etc/hosts\n type: File\n name: hosts\n - hostPath:\n path: /etc/kubernetes/pki/etcd-manager-events\n type: DirectoryOrCreate\n name: pki\n - hostPath:\n path: /var/log/etcd-events.log\n type: FileOrCreate\n name: varlogetcd\nstatus: {}\n"}} | |
I0306 13:52:15.132506 10006 executor.go:178] Executing task "Droplet/master-fra1.masters.test1.dev.fra1.do.services.example.com": *dotasks.Droplet {"Name":"master-fra1.masters.test1.dev.fra1.do.services.example.com","Lifecycle":null,"Region":"fra1","Size":"s-2vcpu-2gb","Image":"ubuntu-18-04-x64","SSHKey":"71:09:f8:40:23:27:a1:78:c4:d6:f5:c1:71:05:95:9a","Tags":["KubernetesCluster:test1-dev-fra1-do-services-example-com","k8s-index:1"],"Count":1,"UserData":{"Name":"","Resource":{}}} | |
I0306 13:52:15.132780 10006 executor.go:178] Executing task "Keypair/apiserver-aggregator-ca": *fitasks.Keypair {"Name":"apiserver-aggregator-ca","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":null,"subject":"cn=apiserver-aggregator-ca","type":"ca","format":"v1alpha2"} | |
I0306 13:52:15.132802 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-main/keyset.yaml" | |
I0306 13:52:15.132885 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca/keyset.yaml" | |
I0306 13:52:15.132416 10006 executor.go:178] Executing task "Keypair/etcd-clients-ca": *fitasks.Keypair {"Name":"etcd-clients-ca","alternateNames":null,"alternateNameTasks":null,"Lifecycle":null,"Signer":null,"subject":"cn=etcd-clients-ca","type":"ca","format":"v1alpha2"} | |
I0306 13:52:15.133145 10006 executor.go:178] Executing task "test1.dev.fra1.do.services.example.com-addons-dns-controller.addons.k8s.io-pre-k8s-1.6": *fitasks.ManagedFile {"Name":"test1.dev.fra1.do.services.example.com-addons-dns-controller.addons.k8s.io-pre-k8s-1.6","Lifecycle":"Sync","Location":"addons/dns-controller.addons.k8s.io/pre-k8s-1.6.yaml","Contents":{"Name":"","Resource":"apiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n labels:\n k8s-addon: dns-controller.addons.k8s.io\n k8s-app: dns-controller\n version: v1.15.2\n name: dns-controller\n namespace: kube-system\nspec:\n replicas: 1\n selector:\n matchLabels:\n k8s-app: dns-controller\n template:\n metadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n scheduler.alpha.kubernetes.io/tolerations: '[{\"key\": \"dedicated\", \"value\":\n \"master\"}]'\n labels:\n k8s-addon: dns-controller.addons.k8s.io\n k8s-app: dns-controller\n version: v1.15.2\n spec:\n containers:\n - command:\n - /usr/bin/dns-controller\n - --watch-ingress=false\n - --dns=digitalocean\n - --zone=test1.dev.fra1.do.services.example.com\n - --zone=*/*\n - -v=2\n image: kope/dns-controller:1.15.2\n name: dns-controller\n resources:\n requests:\n cpu: 50m\n memory: 50Mi\n dnsPolicy: Default\n hostNetwork: true\n nodeSelector:\n kubernetes.io/role: master"}} | |
I0306 13:52:15.133340 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/dns-controller.addons.k8s.io/pre-k8s-1.6.yaml" | |
I0306 13:52:15.133363 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-clients-ca/keyset.yaml" | |
I0306 13:52:15.133418 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/manifests/etcd/events.yaml" | |
I0306 13:52:15.133770 10006 executor.go:178] Executing task "Secret/system:logging": *fitasks.Secret {"Name":"system:logging","Lifecycle":"Sync"} | |
I0306 13:52:15.133834 10006 executor.go:178] Executing task "Keypair/etcd-peers-ca-events": *fitasks.Keypair {"Name":"etcd-peers-ca-events","alternateNames":null,"alternateNameTasks":null,"Lifecycle":null,"Signer":null,"subject":"cn=etcd-peers-ca-events","type":"ca","format":"v1alpha2"} | |
I0306 13:52:15.133860 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:logging" | |
I0306 13:52:15.133893 10006 executor.go:178] Executing task "Secret/admin": *fitasks.Secret {"Name":"admin","Lifecycle":"Sync"} | |
I0306 13:52:15.133938 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-events/keyset.yaml" | |
I0306 13:52:15.133886 10006 executor.go:178] Executing task "test1.dev.fra1.do.services.example.com-addons-core.addons.k8s.io": *fitasks.ManagedFile {"Name":"test1.dev.fra1.do.services.example.com-addons-core.addons.k8s.io","Lifecycle":"Sync","Location":"addons/core.addons.k8s.io/v1.4.0.yaml","Contents":{"Name":"","Resource":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: kube-system"}} | |
I0306 13:52:15.133991 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/admin" | |
I0306 13:52:15.134012 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/core.addons.k8s.io/v1.4.0.yaml" | |
I0306 13:52:15.134205 10006 executor.go:178] Executing task "Secret/kube": *fitasks.Secret {"Name":"kube","Lifecycle":"Sync"} | |
I0306 13:52:15.134286 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kube" | |
I0306 13:52:15.134283 10006 executor.go:178] Executing task "test1.dev.fra1.do.services.example.com-addons-rbac.addons.k8s.io-k8s-1.8": *fitasks.ManagedFile {"Name":"test1.dev.fra1.do.services.example.com-addons-rbac.addons.k8s.io-k8s-1.8","Lifecycle":"Sync","Location":"addons/rbac.addons.k8s.io/k8s-1.8.yaml","Contents":{"Name":"","Resource":"apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n addonmanager.kubernetes.io/mode: Reconcile\n k8s-addon: rbac.addons.k8s.io\n kubernetes.io/cluster-service: \"true\"\n name: kubelet-cluster-admin\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:node\nsubjects:\n- apiGroup: rbac.authorization.k8s.io\n kind: User\n name: kubelet"}} | |
I0306 13:52:15.134366 10006 executor.go:178] Executing task "Secret/system:scheduler": *fitasks.Secret {"Name":"system:scheduler","Lifecycle":"Sync"} | |
I0306 13:52:15.134328 10006 executor.go:178] Executing task "test1.dev.fra1.do.services.example.com-addons-limit-range.addons.k8s.io": *fitasks.ManagedFile {"Name":"test1.dev.fra1.do.services.example.com-addons-limit-range.addons.k8s.io","Lifecycle":"Sync","Location":"addons/limit-range.addons.k8s.io/v1.5.0.yaml","Contents":{"Name":"","Resource":"apiVersion: v1\nkind: LimitRange\nmetadata:\n name: limits\n namespace: default\nspec:\n limits:\n - defaultRequest:\n cpu: 100m\n type: Container"}} | |
I0306 13:52:15.134415 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/rbac.addons.k8s.io/k8s-1.8.yaml" | |
I0306 13:52:15.134455 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:scheduler" | |
I0306 13:52:15.134457 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml" | |
I0306 13:52:15.134750 10006 executor.go:178] Executing task "Droplet/nodes.test1.dev.fra1.do.services.example.com": *dotasks.Droplet {"Name":"nodes.test1.dev.fra1.do.services.example.com","Lifecycle":null,"Region":"fra1","Size":"s-2vcpu-4gb","Image":"ubuntu-18-04-x64","SSHKey":"71:09:f8:40:23:27:a1:78:c4:d6:f5:c1:71:05:95:9a","Tags":["KubernetesCluster:test1-dev-fra1-do-services-example-com"],"Count":1,"UserData":{"Name":"","Resource":{}}} | |
I0306 13:52:15.134790 10006 executor.go:178] Executing task "Keypair/ca": *fitasks.Keypair {"Name":"ca","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":null,"subject":"cn=kubernetes","type":"ca","format":"v1alpha2"} | |
I0306 13:52:15.134884 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca/keyset.yaml" | |
I0306 13:52:15.134789 10006 executor.go:178] Executing task "test1.dev.fra1.do.services.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12": *fitasks.ManagedFile {"Name":"test1.dev.fra1.do.services.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12","Lifecycle":"Sync","Location":"addons/dns-controller.addons.k8s.io/k8s-1.12.yaml","Contents":{"Name":"","Resource":"apiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n k8s-addon: dns-controller.addons.k8s.io\n k8s-app: dns-controller\n version: v1.15.2\n name: dns-controller\n namespace: kube-system\nspec:\n replicas: 1\n selector:\n matchLabels:\n k8s-app: dns-controller\n template:\n metadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n labels:\n k8s-addon: dns-controller.addons.k8s.io\n k8s-app: dns-controller\n version: v1.15.2\n spec:\n containers:\n - command:\n - /usr/bin/dns-controller\n - --watch-ingress=false\n - --dns=digitalocean\n - --zone=test1.dev.fra1.do.services.example.com\n - --zone=*/*\n - -v=2\n env:\n - name: DIGITALOCEAN_ACCESS_TOKEN\n valueFrom:\n secretKeyRef:\n key: access-token\n name: digitalocean\n image: kope/dns-controller:1.15.2\n name: dns-controller\n resources:\n requests:\n cpu: 50m\n memory: 50Mi\n dnsPolicy: Default\n hostNetwork: true\n nodeSelector:\n node-role.kubernetes.io/master: \"\"\n serviceAccount: dns-controller\n tolerations:\n - effect: NoSchedule\n key: node-role.kubernetes.io/master\n\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n k8s-addon: dns-controller.addons.k8s.io\n name: dns-controller\n namespace: kube-system\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n k8s-addon: dns-controller.addons.k8s.io\n name: kops:dns-controller\nrules:\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n - services\n - pods\n - ingress\n - nodes\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - extensions\n resources:\n - ingresses\n verbs:\n - get\n - list\n - watch\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n k8s-addon: dns-controller.addons.k8s.io\n name: kops:dns-controller\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: kops:dns-controller\nsubjects:\n- apiGroup: rbac.authorization.k8s.io\n kind: User\n name: system:serviceaccount:kube-system:dns-controller"}} | |
I0306 13:52:15.135024 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" | |
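Each ManagedFile task carries a whole multi-document YAML manifest (here a Deployment, ServiceAccount, ClusterRole and ClusterRoleBinding for dns-controller) as one escaped Resource string. A short sketch of splitting such a manifest back into objects with PyYAML, assuming it has been saved locally as k8s-1.12.yaml (a hypothetical filename):

    import yaml

    with open("k8s-1.12.yaml") as f:          # local copy of the Resource text above
        docs = [d for d in yaml.safe_load_all(f) if d]

    for doc in docs:
        print(doc["kind"], doc["metadata"]["name"])
    # Deployment dns-controller, ServiceAccount dns-controller,
    # ClusterRole kops:dns-controller, ClusterRoleBinding kops:dns-controller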
I0306 13:52:15.135262 10006 executor.go:178] Executing task "Keypair/etcd-peers-ca-main": *fitasks.Keypair {"Name":"etcd-peers-ca-main","alternateNames":null,"alternateNameTasks":null,"Lifecycle":null,"Signer":null,"subject":"cn=etcd-peers-ca-main","type":"ca","format":"v1alpha2"} | |
I0306 13:52:15.135347 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-main/keyset.yaml" | |
I0306 13:52:15.135357 10006 executor.go:178] Executing task "Secret/system:controller_manager": *fitasks.Secret {"Name":"system:controller_manager","Lifecycle":"Sync"} | |
I0306 13:52:15.134604 10006 executor.go:178] Executing task "test1.dev.fra1.do.services.example.com-addons-networking.projectcalico.org-k8s-1.7-v3": *fitasks.ManagedFile {"Name":"test1.dev.fra1.do.services.example.com-addons-networking.projectcalico.org-k8s-1.7-v3","Lifecycle":"Sync","Location":"addons/networking.projectcalico.org/k8s-1.7-v3.yaml","Contents":{"Name":"","Resource":"apiVersion: v1\ndata:\n calico_backend: bird\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"etcd_endpoints\": \"__ETCD_ENDPOINTS__\",\n \"etcd_ca_cert_file\": \"/srv/kubernetes/calico/ca.pem\",\n \"etcd_cert_file\": \"/srv/kubernetes/calico/calico-client.pem\",\n \"etcd_key_file\": \"/srv/kubernetes/calico/calico-client-key.pem\",\n \"etcd_scheme\": \"https\",\n \"log_level\": \"info\",\n \"ipam\": {\n \"type\": \"calico-ipam\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"/etc/cni/net.d/__KUBECONFIG_FILENAME__\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n etcd_endpoints: https://etcd-1.internal.test1.dev.fra1.do.services.example.com:4001\nkind: ConfigMap\nmetadata:\n name: calico-config\n namespace: kube-system\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-node\nrules:\n- apiGroups:\n - \"\"\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-node\n namespace: kube-system\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-kube-controllers\n namespace: kube-system\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-kube-controllers\nrules:\n- apiGroups:\n - \"\"\n - extensions\n resources:\n - pods\n - namespaces\n - networkpolicies\n - nodes\n verbs:\n - watch\n - list\n- apiGroups:\n - networking.k8s.io\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-kube-controllers\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-kube-controllers\nsubjects:\n- kind: ServiceAccount\n name: calico-kube-controllers\n namespace: kube-system\n\n---\n\napiVersion: extensions/v1beta1\nkind: DaemonSet\nmetadata:\n labels:\n k8s-app: calico-node\n role.kubernetes.io/networking: \"1\"\n name: calico-node\n namespace: kube-system\nspec:\n selector:\n matchLabels:\n k8s-app: calico-node\n template:\n metadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n labels:\n k8s-app: calico-node\n role.kubernetes.io/networking: \"1\"\n spec:\n containers:\n - env:\n - name: ETCD_ENDPOINTS\n valueFrom:\n configMapKeyRef:\n key: etcd_endpoints\n name: calico-config\n - name: ETCD_CERT_FILE\n value: 
/certs/calico-client.pem\n - name: ETCD_KEY_FILE\n value: /certs/calico-client-key.pem\n - name: ETCD_CA_CERT_FILE\n value: /certs/ca.pem\n - name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n key: calico_backend\n name: calico-config\n - name: CLUSTER_TYPE\n value: kops,bgp\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n - name: CALICO_K8S_NODE_REF\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: ACCEPT\n - name: CALICO_IPV4POOL_CIDR\n value: 100.96.0.0/11\n - name: CALICO_IPV4POOL_IPIP\n value: always\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n - name: FELIX_LOGSEVERITYSCREEN\n value: info\n - name: FELIX_PROMETHEUSMETRICSENABLED\n value: \"false\"\n - name: FELIX_PROMETHEUSMETRICSPORT\n value: \"9091\"\n - name: FELIX_PROMETHEUSGOMETRICSENABLED\n value: \"true\"\n - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED\n value: \"true\"\n - name: IP\n value: autodetect\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n image: calico/node:v3.8.0\n livenessProbe:\n failureThreshold: 6\n httpGet:\n host: localhost\n path: /liveness\n port: 9099\n initialDelaySeconds: 10\n periodSeconds: 10\n name: calico-node\n readinessProbe:\n exec:\n command:\n - /bin/calico-node\n - -bird-ready\n - -felix-ready\n periodSeconds: 10\n resources:\n requests:\n cpu: 10m\n securityContext:\n privileged: true\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - mountPath: /etc/hosts\n name: etc-hosts\n readOnly: true\n - mountPath: /certs\n name: calico\n readOnly: true\n - command:\n - /install-cni.sh\n env:\n - name: CNI_CONF_NAME\n value: 10-calico.conflist\n - name: ETCD_ENDPOINTS\n valueFrom:\n configMapKeyRef:\n key: etcd_endpoints\n name: calico-config\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n key: cni_network_config\n name: calico-config\n image: calico/cni:v3.8.0\n name: install-cni\n resources:\n requests:\n cpu: 10m\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n - mountPath: /etc/hosts\n name: etc-hosts\n readOnly: true\n hostNetwork: true\n initContainers:\n - command:\n - /bin/sh\n - -c\n - /node-init-container.sh\n env:\n - name: CALICO_ETCD_ENDPOINTS\n valueFrom:\n configMapKeyRef:\n key: etcd_endpoints\n name: calico-config\n - name: CALICO_APIV1_DATASTORE_TYPE\n value: etcdv2\n - name: CALICO_APIV1_ETCD_ENDPOINTS\n valueFrom:\n configMapKeyRef:\n key: etcd_endpoints\n name: calico-config\n - name: CALICO_ETCD_CERT_FILE\n value: /certs/calico-client.pem\n - name: CALICO_ETCD_KEY_FILE\n value: /certs/calico-client-key.pem\n - name: CALICO_ETCD_CA_CERT_FILE\n value: /certs/ca.pem\n - name: CALICO_APIV1_ETCD_CERT_FILE\n value: /certs/calico-client.pem\n - name: CALICO_APIV1_ETCD_KEY_FILE\n value: /certs/calico-client-key.pem\n - name: CALICO_APIV1_ETCD_CA_CERT_FILE\n value: /certs/ca.pem\n image: calico/upgrade:v1.0.5\n name: migrate\n volumeMounts:\n - mountPath: /etc/hosts\n name: etc-hosts\n readOnly: true\n - mountPath: /certs\n name: calico\n readOnly: true\n serviceAccountName: calico-node\n terminationGracePeriodSeconds: 0\n tolerations:\n - effect: NoSchedule\n operator: Exists\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n volumes:\n - hostPath:\n path: /lib/modules\n name: lib-modules\n - 
hostPath:\n path: /var/run/calico\n name: var-run-calico\n - hostPath:\n path: /var/lib/calico\n name: var-lib-calico\n - hostPath:\n path: /opt/cni/bin\n name: cni-bin-dir\n - hostPath:\n path: /etc/cni/net.d\n name: cni-net-dir\n - hostPath:\n path: /etc/hosts\n name: etc-hosts\n - hostPath:\n path: /srv/kubernetes/calico\n name: calico\n updateStrategy:\n rollingUpdate:\n maxUnavailable: 1\n type: RollingUpdate\n\n---\n\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n labels:\n k8s-app: calico-kube-controllers\n role.kubernetes.io/networking: \"1\"\n name: calico-kube-controllers\n namespace: kube-system\nspec:\n replicas: 1\n strategy:\n type: Recreate\n template:\n metadata:\n labels:\n k8s-app: calico-kube-controllers\n role.kubernetes.io/networking: \"1\"\n name: calico-kube-controllers\n namespace: kube-system\n spec:\n containers:\n - env:\n - name: ETCD_ENDPOINTS\n valueFrom:\n configMapKeyRef:\n key: etcd_endpoints\n name: calico-config\n - name: ENABLED_CONTROLLERS\n value: policy,profile,workloadendpoint,node\n - name: ETCD_CERT_FILE\n value: /certs/calico-client.pem\n - name: ETCD_KEY_FILE\n value: /certs/calico-client-key.pem\n - name: ETCD_CA_CERT_FILE\n value: /certs/ca.pem\n image: calico/kube-controllers:v3.8.0\n name: calico-kube-controllers\n readinessProbe:\n exec:\n command:\n - /usr/bin/check-status\n - -r\n resources:\n requests:\n cpu: 10m\n volumeMounts:\n - mountPath: /certs\n name: calico\n readOnly: true\n hostNetwork: true\n initContainers:\n - command:\n - /bin/sh\n - -c\n - /controller-init.sh\n env:\n - name: CALICO_ETCD_ENDPOINTS\n valueFrom:\n configMapKeyRef:\n key: etcd_endpoints\n name: calico-config\n - name: CALICO_APIV1_DATASTORE_TYPE\n value: etcdv2\n - name: CALICO_APIV1_ETCD_ENDPOINTS\n valueFrom:\n configMapKeyRef:\n key: etcd_endpoints\n name: calico-config\n - name: CALICO_ETCD_CERT_FILE\n value: /certs/calico-client.pem\n - name: CALICO_ETCD_KEY_FILE\n value: /certs/calico-client-key.pem\n - name: CALICO_ETCD_CA_CERT_FILE\n value: /certs/ca.pem\n - name: CALICO_APIV1_ETCD_CERT_FILE\n value: /certs/calico-client.pem\n - name: CALICO_APIV1_ETCD_KEY_FILE\n value: /certs/calico-client-key.pem\n - name: CALICO_APIV1_ETCD_CA_CERT_FILE\n value: /certs/ca.pem\n image: calico/upgrade:v1.0.5\n name: migrate\n volumeMounts:\n - mountPath: /etc/hosts\n name: etc-hosts\n readOnly: true\n - mountPath: /certs\n name: calico\n readOnly: true\n serviceAccountName: calico-kube-controllers\n tolerations:\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoSchedule\n key: node-role.kubernetes.io/master\n volumes:\n - hostPath:\n path: /etc/hosts\n name: etc-hosts\n - hostPath:\n path: /srv/kubernetes/calico\n name: calico\n\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-upgrade-job\n namespace: kube-system\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-upgrade-job\nrules:\n- apiGroups:\n - extensions\n resources:\n - daemonsets\n - daemonsets/status\n verbs:\n - get\n - list\n - watch\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-upgrade-job\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-upgrade-job\nsubjects:\n- kind: ServiceAccount\n name: 
calico-upgrade-job\n namespace: kube-system\n\n---\n\napiVersion: batch/v1\nkind: Job\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-complete-upgrade-v331\n namespace: kube-system\nspec:\n template:\n metadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n spec:\n containers:\n - command:\n - /bin/sh\n - -c\n - /completion-job.sh\n env:\n - name: EXPECTED_NODE_IMAGE\n value: quay.io/calico/node:v3.7.4\n - name: CALICO_ETCD_ENDPOINTS\n valueFrom:\n configMapKeyRef:\n key: etcd_endpoints\n name: calico-config\n - name: CALICO_APIV1_DATASTORE_TYPE\n value: etcdv2\n - name: CALICO_APIV1_ETCD_ENDPOINTS\n valueFrom:\n configMapKeyRef:\n key: etcd_endpoints\n name: calico-config\n - name: CALICO_ETCD_CERT_FILE\n value: /certs/calico-client.pem\n - name: CALICO_ETCD_KEY_FILE\n value: /certs/calico-client-key.pem\n - name: CALICO_ETCD_CA_CERT_FILE\n value: /certs/ca.pem\n - name: CALICO_APIV1_ETCD_CERT_FILE\n value: /certs/calico-client.pem\n - name: CALICO_APIV1_ETCD_KEY_FILE\n value: /certs/calico-client-key.pem\n - name: CALICO_APIV1_ETCD_CA_CERT_FILE\n value: /certs/ca.pem\n image: calico/upgrade:v1.0.5\n name: migrate-completion\n volumeMounts:\n - mountPath: /etc/hosts\n name: etc-hosts\n readOnly: true\n - mountPath: /certs\n name: calico\n readOnly: true\n hostNetwork: true\n restartPolicy: OnFailure\n serviceAccountName: calico-upgrade-job\n volumes:\n - hostPath:\n path: /etc/hosts\n name: etc-hosts\n - hostPath:\n path: /srv/kubernetes/calico\n name: calico"}} | |
I0306 13:52:15.135440 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:controller_manager" | |
I0306 13:52:15.135467 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/networking.projectcalico.org/k8s-1.7-v3.yaml" | |
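The calico v3.8 variant above still uses the etcd datastore: the CNI config and every calico container point at https://etcd-1.internal.test1.dev.fra1.do.services.example.com:4001 with client certificates from /srv/kubernetes/calico. A quick connectivity check of that endpoint from a master, using requests with the same cert, key and CA paths (a sketch; assumes the files exist where the DaemonSet mounts them and that etcd's /health endpoint is reachable):

    import requests

    ETCD = "https://etcd-1.internal.test1.dev.fra1.do.services.example.com:4001"
    CERTS = "/srv/kubernetes/calico"

    resp = requests.get(
        f"{ETCD}/health",
        cert=(f"{CERTS}/calico-client.pem", f"{CERTS}/calico-client-key.pem"),
        verify=f"{CERTS}/ca.pem",
        timeout=5,
    )
    print(resp.status_code, resp.text)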
I0306 13:52:15.135707 10006 executor.go:178] Executing task "Keypair/etcd-manager-ca-events": *fitasks.Keypair {"Name":"etcd-manager-ca-events","alternateNames":null,"alternateNameTasks":null,"Lifecycle":null,"Signer":null,"subject":"cn=etcd-manager-ca-events","type":"ca","format":"v1alpha2"} | |
I0306 13:52:15.135864 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-events/keyset.yaml" | |
I0306 13:52:15.135821 10006 executor.go:178] Executing task "test1.dev.fra1.do.services.example.com-addons-digitalocean-cloud-controller.addons.k8s.io-k8s-1.8": *fitasks.ManagedFile {"Name":"test1.dev.fra1.do.services.example.com-addons-digitalocean-cloud-controller.addons.k8s.io-k8s-1.8","Lifecycle":"Sync","Location":"addons/digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml","Contents":{"Name":"","Resource":"apiVersion: v1\nkind: Secret\nmetadata:\n name: digitalocean\n namespace: kube-system\nstringData:\n access-token: a9828e09a9e2bbc9ccc1aa5f2f34556bb1b740c6ae0d015e23635ed1f4dbce38\n\n---\n\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: digitalocean-cloud-controller-manager\n namespace: kube-system\nspec:\n selector:\n matchLabels:\n k8s-app: digitalocean-cloud-controller-manager\n template:\n metadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n labels:\n k8s-app: digitalocean-cloud-controller-manager\n spec:\n containers:\n - command:\n - /bin/digitalocean-cloud-controller-manager\n - --leader-elect=true\n env:\n - name: KUBERNETES_SERVICE_HOST\n value: 127.0.0.1\n - name: KUBERNETES_SERVICE_PORT\n value: \"443\"\n - name: DO_ACCESS_TOKEN\n valueFrom:\n secretKeyRef:\n key: access-token\n name: digitalocean\n image: digitalocean/digitalocean-cloud-controller-manager:v0.1.20\n name: digitalocean-cloud-controller-manager\n resources:\n requests:\n cpu: 100m\n memory: 50Mi\n dnsPolicy: Default\n hostNetwork: true\n nodeSelector:\n node-role.kubernetes.io/master: \"\"\n serviceAccountName: cloud-controller-manager\n tolerations:\n - effect: NoSchedule\n key: node.cloudprovider.kubernetes.io/uninitialized\n value: \"true\"\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoSchedule\n key: node-role.kubernetes.io/master\n - effect: NoExecute\n key: node.kubernetes.io/not-ready\n operator: Exists\n tolerationSeconds: 300\n - effect: NoExecute\n key: node.kubernetes.io/unreachable\n operator: Exists\n tolerationSeconds: 300\n\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: cloud-controller-manager\n namespace: kube-system\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n annotations:\n rbac.authorization.kubernetes.io/autoupdate: \"true\"\n name: system:cloud-controller-manager\nrules:\n- apiGroups:\n - \"\"\n resources:\n - events\n verbs:\n - create\n - patch\n - update\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - '*'\n- apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n- apiGroups:\n - \"\"\n resources:\n - services\n verbs:\n - list\n - patch\n - update\n - watch\n- apiGroups:\n - \"\"\n resources:\n - services/status\n verbs:\n - list\n - patch\n - update\n - watch\n- apiGroups:\n - \"\"\n resources:\n - serviceaccounts\n verbs:\n - create\n- apiGroups:\n - \"\"\n resources:\n - persistentvolumes\n verbs:\n - get\n - list\n - update\n - watch\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n verbs:\n - create\n - get\n - list\n - watch\n - update\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: system:cloud-controller-manager\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:cloud-controller-manager\nsubjects:\n- kind: ServiceAccount\n name: cloud-controller-manager\n namespace: kube-system"}} | |
I0306 13:52:15.136118 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml" | |
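The cloud-controller addon above embeds the DigitalOcean access token as a kube-system Secret named digitalocean (stringData: access-token); both digitalocean-cloud-controller-manager and dns-controller consume it through a secretKeyRef. A sketch of reading that Secret back once the cluster is up, using the kubernetes Python client (assumes a working kubeconfig):

    import base64
    from kubernetes import client, config

    config.load_kube_config()
    v1 = client.CoreV1Api()

    sec = v1.read_namespaced_secret("digitalocean", "kube-system")
    # stringData is persisted as base64-encoded data.
    token = base64.b64decode(sec.data["access-token"]).decode()
    print(f"access-token is {len(token)} characters long")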
I0306 13:52:15.136356 10006 executor.go:178] Executing task "Secret/kube-proxy": *fitasks.Secret {"Name":"kube-proxy","Lifecycle":"Sync"} | |
I0306 13:52:15.136441 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kube-proxy" | |
I0306 13:52:15.139355 10006 executor.go:178] Executing task "Secret/system:monitoring": *fitasks.Secret {"Name":"system:monitoring","Lifecycle":"Sync"} | |
I0306 13:52:15.139477 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:monitoring" | |
I0306 13:52:15.137687 10006 executor.go:178] Executing task "test1.dev.fra1.do.services.example.com-addons-kube-dns.addons.k8s.io-k8s-1.6": *fitasks.ManagedFile {"Name":"test1.dev.fra1.do.services.example.com-addons-kube-dns.addons.k8s.io-k8s-1.6","Lifecycle":"Sync","Location":"addons/kube-dns.addons.k8s.io/k8s-1.6.yaml","Contents":{"Name":"","Resource":"apiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n labels:\n k8s-addon: kube-dns.addons.k8s.io\n k8s-app: kube-dns-autoscaler\n kubernetes.io/cluster-service: \"true\"\n name: kube-dns-autoscaler\n namespace: kube-system\nspec:\n template:\n metadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n scheduler.alpha.kubernetes.io/tolerations: '[{\"key\":\"CriticalAddonsOnly\",\n \"operator\":\"Exists\"}]'\n labels:\n k8s-app: kube-dns-autoscaler\n spec:\n containers:\n - command:\n - /cluster-proportional-autoscaler\n - --namespace=kube-system\n - --configmap=kube-dns-autoscaler\n - --target=Deployment/kube-dns\n - --default-params={\"linear\":{\"coresPerReplica\":256,\"nodesPerReplica\":16,\"preventSinglePointFailure\":true}}\n - --logtostderr=true\n - --v=2\n image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2\n name: autoscaler\n resources:\n requests:\n cpu: 20m\n memory: 10Mi\n serviceAccountName: kube-dns-autoscaler\n tolerations:\n - key: CriticalAddonsOnly\n operator: Exists\n\n---\n\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n labels:\n k8s-addon: kube-dns.addons.k8s.io\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n name: kube-dns\n namespace: kube-system\nspec:\n selector:\n matchLabels:\n k8s-app: kube-dns\n strategy:\n rollingUpdate:\n maxSurge: 10%\n maxUnavailable: 0\n template:\n metadata:\n annotations:\n prometheus.io/port: \"10055\"\n prometheus.io/scrape: \"true\"\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n scheduler.alpha.kubernetes.io/tolerations: '[{\"key\":\"CriticalAddonsOnly\",\n \"operator\":\"Exists\"}]'\n labels:\n k8s-app: kube-dns\n spec:\n containers:\n - args:\n - --config-dir=/kube-dns-config\n - --dns-port=10053\n - --domain=cluster.local.\n - --v=2\n env:\n - name: PROMETHEUS_PORT\n value: \"10055\"\n image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10\n livenessProbe:\n failureThreshold: 5\n httpGet:\n path: /healthcheck/kubedns\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n successThreshold: 1\n timeoutSeconds: 5\n name: kubedns\n ports:\n - containerPort: 10053\n name: dns-local\n protocol: UDP\n - containerPort: 10053\n name: dns-tcp-local\n protocol: TCP\n - containerPort: 10055\n name: metrics\n protocol: TCP\n readinessProbe:\n httpGet:\n path: /readiness\n port: 8081\n scheme: HTTP\n initialDelaySeconds: 3\n timeoutSeconds: 5\n resources:\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n volumeMounts:\n - mountPath: /kube-dns-config\n name: kube-dns-config\n - args:\n - -v=2\n - -logtostderr\n - -configDir=/etc/k8s/dns/dnsmasq-nanny\n - -restartDnsmasq=true\n - --\n - -k\n - --cache-size=1000\n - --dns-forward-max=150\n - --no-negcache\n - --log-facility=-\n - --server=/cluster.local/127.0.0.1#10053\n - --server=/in-addr.arpa/127.0.0.1#10053\n - --server=/in6.arpa/127.0.0.1#10053\n - --min-port=1024\n image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10\n livenessProbe:\n failureThreshold: 5\n httpGet:\n path: /healthcheck/dnsmasq\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n successThreshold: 1\n timeoutSeconds: 5\n name: dnsmasq\n ports:\n - containerPort: 53\n name: dns\n 
protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n resources:\n requests:\n cpu: 150m\n memory: 20Mi\n volumeMounts:\n - mountPath: /etc/k8s/dns/dnsmasq-nanny\n name: kube-dns-config\n - args:\n - --v=2\n - --logtostderr\n - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A\n - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A\n image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10\n livenessProbe:\n failureThreshold: 5\n httpGet:\n path: /metrics\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n successThreshold: 1\n timeoutSeconds: 5\n name: sidecar\n ports:\n - containerPort: 10054\n name: metrics\n protocol: TCP\n resources:\n requests:\n cpu: 10m\n memory: 20Mi\n dnsPolicy: Default\n serviceAccountName: kube-dns\n volumes:\n - configMap:\n name: kube-dns\n optional: true\n name: kube-dns-config\n\n---\n\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n k8s-addon: kube-dns.addons.k8s.io\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: KubeDNS\n name: kube-dns\n namespace: kube-system\nspec:\n clusterIP: 100.64.0.10\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n selector:\n k8s-app: kube-dns\n\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n k8s-addon: kube-dns.addons.k8s.io\n name: kube-dns-autoscaler\n namespace: kube-system\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n labels:\n k8s-addon: kube-dns.addons.k8s.io\n name: kube-dns-autoscaler\nrules:\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n- apiGroups:\n - \"\"\n resources:\n - replicationcontrollers/scale\n verbs:\n - get\n - update\n- apiGroups:\n - extensions\n resources:\n - deployments/scale\n - replicasets/scale\n verbs:\n - get\n - update\n- apiGroups:\n - \"\"\n resources:\n - configmaps\n verbs:\n - get\n - create\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n k8s-addon: kube-dns.addons.k8s.io\n name: kube-dns-autoscaler\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: kube-dns-autoscaler\nsubjects:\n- kind: ServiceAccount\n name: kube-dns-autoscaler\n namespace: kube-system"}} | |
I0306 13:52:15.140145 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/kube-dns.addons.k8s.io/k8s-1.6.yaml" | |
I0306 13:52:15.136526 10006 executor.go:178] Executing task "test1.dev.fra1.do.services.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9": *fitasks.ManagedFile {"Name":"test1.dev.fra1.do.services.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9","Lifecycle":"Sync","Location":"addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml","Contents":{"Name":"","Resource":"apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: kops:system:kubelet-api-admin\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:kubelet-api-admin\nsubjects:\n- apiGroup: rbac.authorization.k8s.io\n kind: User\n name: kubelet-api"}} | |
I0306 13:52:15.140781 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" | |
I0306 13:52:15.142857 10006 executor.go:178] Executing task "ManagedFile/etcd-cluster-spec-events": *fitasks.ManagedFile {"Name":"etcd-cluster-spec-events","Lifecycle":"Sync","Location":"backups/etcd/events/control/etcd-cluster-spec","Contents":{"Name":"","Resource":"{\n \"memberCount\": 1,\n \"etcdVersion\": \"3.2.24\"\n}"}} | |
I0306 13:52:15.143015 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/backups/etcd/events/control/etcd-cluster-spec" | |
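The etcd-cluster-spec control files are tiny JSON documents that tell etcd-manager the expected member count and etcd version (1 and 3.2.24 here, matching the single-master layout). Parsing one is a one-liner; a sketch with the exact content shown above:

    import json

    spec = json.loads('{\n  "memberCount": 1,\n  "etcdVersion": "3.2.24"\n}')
    print(spec["memberCount"], spec["etcdVersion"])   # 1 3.2.24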
I0306 13:52:15.144508 10006 executor.go:178] Executing task "test1.dev.fra1.do.services.example.com-addons-dns-controller.addons.k8s.io-k8s-1.6": *fitasks.ManagedFile {"Name":"test1.dev.fra1.do.services.example.com-addons-dns-controller.addons.k8s.io-k8s-1.6","Lifecycle":"Sync","Location":"addons/dns-controller.addons.k8s.io/k8s-1.6.yaml","Contents":{"Name":"","Resource":"apiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n labels:\n k8s-addon: dns-controller.addons.k8s.io\n k8s-app: dns-controller\n version: v1.15.2\n name: dns-controller\n namespace: kube-system\nspec:\n replicas: 1\n selector:\n matchLabels:\n k8s-app: dns-controller\n template:\n metadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n scheduler.alpha.kubernetes.io/tolerations: '[{\"key\": \"dedicated\", \"value\":\n \"master\"}]'\n labels:\n k8s-addon: dns-controller.addons.k8s.io\n k8s-app: dns-controller\n version: v1.15.2\n spec:\n containers:\n - command:\n - /usr/bin/dns-controller\n - --watch-ingress=false\n - --dns=digitalocean\n - --zone=test1.dev.fra1.do.services.example.com\n - --zone=*/*\n - -v=2\n env:\n - name: DIGITALOCEAN_ACCESS_TOKEN\n valueFrom:\n secretKeyRef:\n key: access-token\n name: digitalocean\n image: kope/dns-controller:1.15.2\n name: dns-controller\n resources:\n requests:\n cpu: 50m\n memory: 50Mi\n dnsPolicy: Default\n hostNetwork: true\n nodeSelector:\n node-role.kubernetes.io/master: \"\"\n serviceAccount: dns-controller\n tolerations:\n - effect: NoSchedule\n key: node-role.kubernetes.io/master\n\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n k8s-addon: dns-controller.addons.k8s.io\n name: dns-controller\n namespace: kube-system\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n labels:\n k8s-addon: dns-controller.addons.k8s.io\n name: kops:dns-controller\nrules:\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n - services\n - pods\n - ingress\n - nodes\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - extensions\n resources:\n - ingresses\n verbs:\n - get\n - list\n - watch\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n k8s-addon: dns-controller.addons.k8s.io\n name: kops:dns-controller\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: kops:dns-controller\nsubjects:\n- apiGroup: rbac.authorization.k8s.io\n kind: User\n name: system:serviceaccount:kube-system:dns-controller"}} | |
I0306 13:52:15.145111 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/dns-controller.addons.k8s.io/k8s-1.6.yaml" | |
I0306 13:52:15.146366 10006 executor.go:178] Executing task "test1.dev.fra1.do.services.example.com-addons-kube-dns.addons.k8s.io-pre-k8s-1.6": *fitasks.ManagedFile {"Name":"test1.dev.fra1.do.services.example.com-addons-kube-dns.addons.k8s.io-pre-k8s-1.6","Lifecycle":"Sync","Location":"addons/kube-dns.addons.k8s.io/pre-k8s-1.6.yaml","Contents":{"Name":"","Resource":"apiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n labels:\n k8s-addon: kube-dns.addons.k8s.io\n k8s-app: kube-dns-autoscaler\n kubernetes.io/cluster-service: \"true\"\n name: kube-dns-autoscaler\n namespace: kube-system\nspec:\n template:\n metadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n scheduler.alpha.kubernetes.io/tolerations: '[{\"key\":\"CriticalAddonsOnly\",\n \"operator\":\"Exists\"}]'\n labels:\n k8s-app: kube-dns-autoscaler\n spec:\n containers:\n - command:\n - /cluster-proportional-autoscaler\n - --namespace=kube-system\n - --configmap=kube-dns-autoscaler\n - --mode=linear\n - --target=Deployment/kube-dns\n - --default-params={\"linear\":{\"coresPerReplica\":256,\"nodesPerReplica\":16,\"min\":2}}\n - --logtostderr=true\n - --v=2\n image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.0.0\n name: autoscaler\n resources:\n requests:\n cpu: 20m\n memory: 10Mi\n\n---\n\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n labels:\n k8s-addon: kube-dns.addons.k8s.io\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n name: kube-dns\n namespace: kube-system\nspec:\n selector:\n matchLabels:\n k8s-app: kube-dns\n strategy:\n rollingUpdate:\n maxSurge: 10%\n maxUnavailable: 0\n template:\n metadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n scheduler.alpha.kubernetes.io/tolerations: '[{\"key\":\"CriticalAddonsOnly\",\n \"operator\":\"Exists\"}]'\n labels:\n k8s-app: kube-dns\n spec:\n containers:\n - args:\n - --domain=cluster.local.\n - --dns-port=10053\n - --config-map=kube-dns\n - --v=2\n env:\n - name: PROMETHEUS_PORT\n value: \"10055\"\n image: k8s.gcr.io/kubedns-amd64:1.9\n livenessProbe:\n failureThreshold: 5\n httpGet:\n path: /healthz-kubedns\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 60\n successThreshold: 1\n timeoutSeconds: 5\n name: kubedns\n ports:\n - containerPort: 10053\n name: dns-local\n protocol: UDP\n - containerPort: 10053\n name: dns-tcp-local\n protocol: TCP\n - containerPort: 10055\n name: metrics\n protocol: TCP\n readinessProbe:\n httpGet:\n path: /readiness\n port: 8081\n scheme: HTTP\n initialDelaySeconds: 3\n timeoutSeconds: 5\n resources:\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n - args:\n - --cache-size=1000\n - --dns-forward-max=150\n - --no-resolv\n - --server=127.0.0.1#10053\n - --log-facility=-\n - --min-port=1024\n image: k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.10\n livenessProbe:\n failureThreshold: 5\n httpGet:\n path: /healthz-dnsmasq\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 60\n successThreshold: 1\n timeoutSeconds: 5\n name: dnsmasq\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n resources:\n requests:\n cpu: 150m\n memory: 10Mi\n - args:\n - --v=2\n - --logtostderr\n image: k8s.gcr.io/dnsmasq-metrics-amd64:1.0\n livenessProbe:\n failureThreshold: 5\n httpGet:\n path: /metrics\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n successThreshold: 1\n timeoutSeconds: 5\n name: dnsmasq-metrics\n ports:\n - containerPort: 10054\n name: metrics\n protocol: TCP\n resources:\n requests:\n 
memory: 10Mi\n - args:\n - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 \u003e/dev/null\n - --url=/healthz-dnsmasq\n - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 \u003e/dev/null\n - --url=/healthz-kubedns\n - --port=8080\n - --quiet\n image: k8s.gcr.io/exechealthz-amd64:1.2\n name: healthz\n ports:\n - containerPort: 8080\n protocol: TCP\n resources:\n limits:\n memory: 50Mi\n requests:\n cpu: 10m\n memory: 50Mi\n dnsPolicy: Default\n\n---\n\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n k8s-addon: kube-dns.addons.k8s.io\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: KubeDNS\n name: kube-dns\n namespace: kube-system\nspec:\n clusterIP: 100.64.0.10\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n selector:\n k8s-app: kube-dns"}} | |
I0306 13:52:15.147066 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/kube-dns.addons.k8s.io/pre-k8s-1.6.yaml" | |
I0306 13:52:15.148391 10006 executor.go:178] Executing task "test1.dev.fra1.do.services.example.com-addons-kube-dns.addons.k8s.io-k8s-1.12": *fitasks.ManagedFile {"Name":"test1.dev.fra1.do.services.example.com-addons-kube-dns.addons.k8s.io-k8s-1.12","Lifecycle":"Sync","Location":"addons/kube-dns.addons.k8s.io/k8s-1.12.yaml","Contents":{"Name":"","Resource":"apiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n k8s-addon: kube-dns.addons.k8s.io\n k8s-app: kube-dns-autoscaler\n kubernetes.io/cluster-service: \"true\"\n name: kube-dns-autoscaler\n namespace: kube-system\nspec:\n selector:\n matchLabels:\n k8s-app: kube-dns-autoscaler\n template:\n metadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n labels:\n k8s-app: kube-dns-autoscaler\n spec:\n containers:\n - command:\n - /cluster-proportional-autoscaler\n - --namespace=kube-system\n - --configmap=kube-dns-autoscaler\n - --target=Deployment/kube-dns\n - --default-params={\"linear\":{\"coresPerReplica\":256,\"nodesPerReplica\":16,\"preventSinglePointFailure\":true}}\n - --logtostderr=true\n - --v=2\n image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0\n name: autoscaler\n resources:\n requests:\n cpu: 20m\n memory: 10Mi\n serviceAccountName: kube-dns-autoscaler\n tolerations:\n - key: CriticalAddonsOnly\n operator: Exists\n\n---\n\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n k8s-addon: kube-dns.addons.k8s.io\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n name: kube-dns\n namespace: kube-system\nspec:\n selector:\n matchLabels:\n k8s-app: kube-dns\n strategy:\n rollingUpdate:\n maxSurge: 10%\n maxUnavailable: 0\n template:\n metadata:\n annotations:\n prometheus.io/port: \"10055\"\n prometheus.io/scrape: \"true\"\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n labels:\n k8s-app: kube-dns\n spec:\n affinity:\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - podAffinityTerm:\n labelSelector:\n matchExpressions:\n - key: k8s-app\n operator: In\n values:\n - kube-dns\n topologyKey: kubernetes.io/hostname\n weight: 1\n containers:\n - args:\n - --config-dir=/kube-dns-config\n - --dns-port=10053\n - --domain=cluster.local.\n - --v=2\n env:\n - name: PROMETHEUS_PORT\n value: \"10055\"\n image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.13\n livenessProbe:\n failureThreshold: 5\n httpGet:\n path: /healthcheck/kubedns\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n successThreshold: 1\n timeoutSeconds: 5\n name: kubedns\n ports:\n - containerPort: 10053\n name: dns-local\n protocol: UDP\n - containerPort: 10053\n name: dns-tcp-local\n protocol: TCP\n - containerPort: 10055\n name: metrics\n protocol: TCP\n readinessProbe:\n httpGet:\n path: /readiness\n port: 8081\n scheme: HTTP\n initialDelaySeconds: 3\n timeoutSeconds: 5\n resources:\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n volumeMounts:\n - mountPath: /kube-dns-config\n name: kube-dns-config\n - args:\n - -v=2\n - -logtostderr\n - -configDir=/etc/k8s/dns/dnsmasq-nanny\n - -restartDnsmasq=true\n - --\n - -k\n - --cache-size=1000\n - --dns-forward-max=150\n - --no-negcache\n - --log-facility=-\n - --server=/cluster.local/127.0.0.1#10053\n - --server=/in-addr.arpa/127.0.0.1#10053\n - --server=/in6.arpa/127.0.0.1#10053\n - --min-port=1024\n image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.13\n livenessProbe:\n failureThreshold: 5\n httpGet:\n path: /healthcheck/dnsmasq\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n successThreshold: 1\n timeoutSeconds: 
5\n name: dnsmasq\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n resources:\n requests:\n cpu: 150m\n memory: 20Mi\n volumeMounts:\n - mountPath: /etc/k8s/dns/dnsmasq-nanny\n name: kube-dns-config\n - args:\n - --v=2\n - --logtostderr\n - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A\n - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A\n image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.13\n livenessProbe:\n failureThreshold: 5\n httpGet:\n path: /metrics\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n successThreshold: 1\n timeoutSeconds: 5\n name: sidecar\n ports:\n - containerPort: 10054\n name: metrics\n protocol: TCP\n resources:\n requests:\n cpu: 10m\n memory: 20Mi\n dnsPolicy: Default\n serviceAccountName: kube-dns\n volumes:\n - configMap:\n name: kube-dns\n optional: true\n name: kube-dns-config\n\n---\n\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n k8s-addon: kube-dns.addons.k8s.io\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: KubeDNS\n name: kube-dns\n namespace: kube-system\nspec:\n clusterIP: 100.64.0.10\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n selector:\n k8s-app: kube-dns\n\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n k8s-addon: kube-dns.addons.k8s.io\n name: kube-dns-autoscaler\n namespace: kube-system\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n k8s-addon: kube-dns.addons.k8s.io\n name: kube-dns-autoscaler\nrules:\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - replicationcontrollers/scale\n verbs:\n - get\n - update\n- apiGroups:\n - extensions\n - apps\n resources:\n - deployments/scale\n - replicasets/scale\n verbs:\n - get\n - update\n- apiGroups:\n - \"\"\n resources:\n - configmaps\n verbs:\n - get\n - create\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n k8s-addon: kube-dns.addons.k8s.io\n name: kube-dns-autoscaler\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: kube-dns-autoscaler\nsubjects:\n- kind: ServiceAccount\n name: kube-dns-autoscaler\n namespace: kube-system\n\n---\n\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: kube-dns\n namespace: kube-system\nspec:\n minAvailable: 1\n selector:\n matchLabels:\n k8s-app: kube-dns"}} | |
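Three kube-dns manifests are rendered (pre-k8s-1.6, k8s-1.6 and k8s-1.12), and the bootstrap channel further down picks between them with kubernetesVersion ranges such as '>=1.6.0 <1.12.0'. A sketch of that selection rule using the packaging library (an approximation of the channel semantics, not kops' own code):

    from packaging.version import Version

    ADDON_IDS = {
        "pre-k8s-1.6": lambda v: v < Version("1.6.0"),
        "k8s-1.6":     lambda v: Version("1.6.0") <= v < Version("1.12.0"),
        "k8s-1.12":    lambda v: v >= Version("1.12.0"),
    }

    def pick(kubernetes_version: str) -> str:
        v = Version(kubernetes_version)
        return next(name for name, matches in ADDON_IDS.items() if matches(v))

    print(pick("1.15.2"))   # k8s-1.12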
I0306 13:52:15.150609 10006 executor.go:178] Executing task "ManagedFile/etcd-cluster-spec-main": *fitasks.ManagedFile {"Name":"etcd-cluster-spec-main","Lifecycle":"Sync","Location":"backups/etcd/main/control/etcd-cluster-spec","Contents":{"Name":"","Resource":"{\n \"memberCount\": 1,\n \"etcdVersion\": \"3.2.24\"\n}"}} | |
I0306 13:52:15.150763 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/backups/etcd/main/control/etcd-cluster-spec" | |
I0306 13:52:15.151354 10006 executor.go:178] Executing task "Secret/system:dns": *fitasks.Secret {"Name":"system:dns","Lifecycle":"Sync"} | |
I0306 13:52:15.151458 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:dns" | |
I0306 13:52:15.151914 10006 executor.go:178] Executing task "ManagedFile/manifests-etcdmanager-main": *fitasks.ManagedFile {"Name":"manifests-etcdmanager-main","Lifecycle":"Sync","Location":"manifests/etcd/main.yaml","Contents":{"Name":"","Resource":"apiVersion: v1\nkind: Pod\nmetadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n creationTimestamp: null\n labels:\n k8s-app: etcd-manager-main\n name: etcd-manager-main\n namespace: kube-system\nspec:\n containers:\n - command:\n - /bin/sh\n - -c\n - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log \u003c /tmp/pipe \u0026 ) ; exec /etcd-manager\n --backup-store=do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/backups/etcd/main\n --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true\n --dns-suffix=.internal.test1.dev.fra1.do.services.example.com --etcd-insecure=false\n --grpc-port=3996 --insecure=false --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994\n --v=6 --volume-name-tag=etcdCluster-main --volume-provider=do --volume-tag=KubernetesCluster=test1-dev-fra1-do-services-example-com\n --volume-tag=k8s-index \u003e /tmp/pipe 2\u003e\u00261\n env:\n - name: DIGITALOCEAN_ACCESS_TOKEN\n value: a9828e09a9e2bbc9ccc1aa5f2f34556bb1b740c6ae0d015e23635ed1f4dbce38\n - name: S3_ACCESS_KEY_ID\n value: 626COYMFSWVJD7CEDB5M\n - name: S3_ENDPOINT\n value: https://fra1.digitaloceanspaces.com\n - name: S3_SECRET_ACCESS_KEY\n value: 3nw4SmWyR/ZNxgmg4C4XjDMaVrNOQo65lsZlMhEKjqY\n image: kopeio/etcd-manager:3.0.20190930\n name: etcd-manager\n resources:\n requests:\n cpu: 200m\n memory: 100Mi\n securityContext:\n privileged: true\n volumeMounts:\n - mountPath: /rootfs\n name: rootfs\n - mountPath: /etc/hosts\n name: hosts\n - mountPath: /etc/kubernetes/pki/etcd-manager\n name: pki\n - mountPath: /var/log/etcd.log\n name: varlogetcd\n hostNetwork: true\n hostPID: true\n priorityClassName: system-cluster-critical\n tolerations:\n - key: CriticalAddonsOnly\n operator: Exists\n volumes:\n - hostPath:\n path: /\n type: Directory\n name: rootfs\n - hostPath:\n path: /etc/hosts\n type: File\n name: hosts\n - hostPath:\n path: /etc/kubernetes/pki/etcd-manager-main\n type: DirectoryOrCreate\n name: pki\n - hostPath:\n path: /var/log/etcd.log\n type: FileOrCreate\n name: varlogetcd\nstatus: {}\n"}} | |
I0306 13:52:15.152167 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/manifests/etcd/main.yaml" | |
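The etcd-manager pod above is started with --backup-store=do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/backups/etcd/main and the Spaces credentials injected as S3_* environment variables, so etcd backups land in the same bucket as the cluster state. A sketch of splitting such a do:// URL into the bucket and key prefix a Spaces client would need (illustrative helper, not part of kops):

    from urllib.parse import urlparse

    def split_do_url(url: str) -> tuple:
        # Return (bucket, key_prefix) for a do:// state-store URL.
        parsed = urlparse(url)
        return parsed.netloc, parsed.path.lstrip("/")

    bucket, prefix = split_do_url(
        "do://test1.dev.fra1-state-store/"
        "test1.dev.fra1.do.services.example.com/backups/etcd/main"
    )
    print(bucket)   # test1.dev.fra1-state-store
    print(prefix)   # test1.dev.fra1.do.services.example.com/backups/etcd/main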
I0306 13:52:15.152676 10006 executor.go:178] Executing task "Volume/kops-1-etcd-main-test1-dev-fra1-do-services-example-com": *dotasks.Volume {"Name":"kops-1-etcd-main-test1-dev-fra1-do-services-example-com","ID":null,"Lifecycle":"Sync","SizeGB":20,"Region":"fra1","Tags":{"KubernetesCluster":"test1-dev-fra1-do-services-example-com","etcdCluster-main":"1","k8s-index":"1"}} | |
I0306 13:52:15.153117 10006 executor.go:178] Executing task "Volume/kops-1-etcd-events-test1-dev-fra1-do-services-example-com": *dotasks.Volume {"Name":"kops-1-etcd-events-test1-dev-fra1-do-services-example-com","ID":null,"Lifecycle":"Sync","SizeGB":20,"Region":"fra1","Tags":{"KubernetesCluster":"test1-dev-fra1-do-services-example-com","etcdCluster-events":"1","k8s-index":"1"}} | |
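The two Volume tasks request 20 GB block-storage volumes in fra1, tagged so that etcd-manager's --volume-tag and --volume-name-tag flags (see the manifest above) can locate them. A sketch of the equivalent volume-create call against the DigitalOcean API (illustrative; placeholder token, and the tag map is rendered as key:value strings the same way the droplet tags appear above, which is an assumption):

    import requests

    TOKEN = "DO_API_TOKEN"  # placeholder

    payload = {
        "name": "kops-1-etcd-main-test1-dev-fra1-do-services-example-com",
        "region": "fra1",
        "size_gigabytes": 20,
        "tags": [
            "KubernetesCluster:test1-dev-fra1-do-services-example-com",
            "etcdCluster-main:1",
            "k8s-index:1",
        ],
    }

    resp = requests.post("https://api.digitalocean.com/v2/volumes",
                         json=payload,
                         headers={"Authorization": f"Bearer {TOKEN}"})
    resp.raise_for_status()
    print(resp.json()["volume"]["id"])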
I0306 13:52:15.154608 10006 executor.go:178] Executing task "test1.dev.fra1.do.services.example.com-addons-bootstrap": *fitasks.ManagedFile {"Name":"test1.dev.fra1.do.services.example.com-addons-bootstrap","Lifecycle":"Sync","Location":"addons/bootstrap-channel.yaml","Contents":{"Name":"","Resource":"kind: Addons\nmetadata:\n creationTimestamp: null\n name: bootstrap\nspec:\n addons:\n - manifest: core.addons.k8s.io/v1.4.0.yaml\n manifestHash: 3ffe9ac576f9eec72e2bdfbd2ea17d56d9b17b90\n name: core.addons.k8s.io\n selector:\n k8s-addon: core.addons.k8s.io\n version: 1.4.0\n - id: pre-k8s-1.6\n kubernetesVersion: \u003c1.6.0\n manifest: kube-dns.addons.k8s.io/pre-k8s-1.6.yaml\n manifestHash: 895c961cb9365cbedb22edd20a7648182ae7ed3f\n name: kube-dns.addons.k8s.io\n selector:\n k8s-addon: kube-dns.addons.k8s.io\n version: 1.14.13-kops.1\n - id: k8s-1.6\n kubernetesVersion: '\u003e=1.6.0 \u003c1.12.0'\n manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml\n manifestHash: 555f952a8b955ce7a5dd0bcd06a5be9e72bd2895\n name: kube-dns.addons.k8s.io\n selector:\n k8s-addon: kube-dns.addons.k8s.io\n version: 1.14.13-kops.1\n - id: k8s-1.12\n kubernetesVersion: '\u003e=1.12.0'\n manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml\n manifestHash: b4dff071aa340fd71650c96f213fdf4b4f799c71\n name: kube-dns.addons.k8s.io\n selector:\n k8s-addon: kube-dns.addons.k8s.io\n version: 1.14.13-kops.1\n - id: k8s-1.8\n kubernetesVersion: '\u003e=1.8.0'\n manifest: rbac.addons.k8s.io/k8s-1.8.yaml\n manifestHash: 5d53ce7b920cd1e8d65d2306d80a041420711914\n name: rbac.addons.k8s.io\n selector:\n k8s-addon: rbac.addons.k8s.io\n version: 1.8.0\n - id: k8s-1.9\n kubernetesVersion: '\u003e=1.9.0'\n manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml\n manifestHash: e1508d77cb4e527d7a2939babe36dc350dd83745\n name: kubelet-api.rbac.addons.k8s.io\n selector:\n k8s-addon: kubelet-api.rbac.addons.k8s.io\n version: v0.0.1\n - manifest: limit-range.addons.k8s.io/v1.5.0.yaml\n manifestHash: 2ea50e23f1a5aa41df3724630ac25173738cc90c\n name: limit-range.addons.k8s.io\n selector:\n k8s-addon: limit-range.addons.k8s.io\n version: 1.5.0\n - id: pre-k8s-1.6\n kubernetesVersion: \u003c1.6.0\n manifest: dns-controller.addons.k8s.io/pre-k8s-1.6.yaml\n manifestHash: eeb67e0b1e593f14d3c0b9e21cb1987cfd45d43f\n name: dns-controller.addons.k8s.io\n selector:\n k8s-addon: dns-controller.addons.k8s.io\n version: 1.15.2\n - id: k8s-1.6\n kubernetesVersion: '\u003e=1.6.0 \u003c1.12.0'\n manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml\n manifestHash: bcd517488f62492e9f43af6950d0663b3fb0b09d\n name: dns-controller.addons.k8s.io\n selector:\n k8s-addon: dns-controller.addons.k8s.io\n version: 1.15.2\n - id: k8s-1.12\n kubernetesVersion: '\u003e=1.12.0'\n manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml\n manifestHash: e72c6307f1b240b011845e9fbacd860b72fe7e73\n name: dns-controller.addons.k8s.io\n selector:\n k8s-addon: dns-controller.addons.k8s.io\n version: 1.15.2\n - id: k8s-1.8\n kubernetesVersion: '\u003e=1.8.0'\n manifest: digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml\n manifestHash: 1950d0270f8c17a620cce69abf3dfe9f1296ca1c\n name: digitalocean-cloud-controller.addons.k8s.io\n selector:\n k8s-addon: digitalocean-cloud-controller.addons.k8s.io\n version: \"1.8\"\n - id: k8s-1.12\n kubernetesVersion: '\u003e=1.12.0'\n manifest: networking.projectcalico.org/k8s-1.12.yaml\n manifestHash: 9684a0594a8a6a93a36ea6acf17a5a33ec23ce1d\n name: networking.projectcalico.org\n selector:\n role.kubernetes.io/networking: \"1\"\n version: 3.9.3-kops.1\n - id: 
k8s-1.7-v3\n kubernetesVersion: '\u003e=1.7.0 \u003c1.12.0'\n manifest: networking.projectcalico.org/k8s-1.7-v3.yaml\n manifestHash: 5ced6edba94381bdb4c014b07eb645533c8bd4e0\n name: networking.projectcalico.org\n selector:\n role.kubernetes.io/networking: \"1\"\n version: 3.8.0-kops.1\n"}} | |
I0306 13:52:15.155055 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/bootstrap-channel.yaml" | |
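bootstrap-channel.yaml is the index the in-cluster channels tool consumes: each entry names an addon manifest, a version, an optional kubernetesVersion range and a manifestHash. The 40-hex-character hashes look like SHA-1 digests of the manifest files; a sketch of recomputing one under that assumption (worth verifying against kops itself before relying on it):

    import hashlib

    def manifest_hash(path: str) -> str:
        # Assumed: plain SHA-1 over the manifest bytes.
        with open(path, "rb") as f:
            return hashlib.sha1(f.read()).hexdigest()

    # e.g. a local copy of kube-dns.addons.k8s.io/k8s-1.12.yaml
    print(manifest_hash("k8s-1.12.yaml"))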
I0306 13:52:15.136447 10006 executor.go:178] Executing task "test1.dev.fra1.do.services.example.com-addons-networking.projectcalico.org-k8s-1.12": *fitasks.ManagedFile {"Name":"test1.dev.fra1.do.services.example.com-addons-networking.projectcalico.org-k8s-1.12","Lifecycle":"Sync","Location":"addons/networking.projectcalico.org/k8s-1.12.yaml","Contents":{"Name":"","Resource":"apiVersion: v1\ndata:\n calico_backend: bird\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"info\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"calico-ipam\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"__KUBECONFIG_FILEPATH__\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n typha_service_name: none\n veth_mtu: \"1440\"\nkind: ConfigMap\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-config\n namespace: kube-system\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: felixconfigurations.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n scope: Cluster\n version: v1\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: ipamblocks.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n names:\n kind: IPAMBlock\n plural: ipamblocks\n singular: ipamblock\n scope: Cluster\n version: v1\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: blockaffinities.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n names:\n kind: BlockAffinity\n plural: blockaffinities\n singular: blockaffinity\n scope: Cluster\n version: v1\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: ipamhandles.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n names:\n kind: IPAMHandle\n plural: ipamhandles\n singular: ipamhandle\n scope: Cluster\n version: v1\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: ipamconfigs.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n names:\n kind: IPAMConfig\n plural: ipamconfigs\n singular: ipamconfig\n scope: Cluster\n version: v1\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: bgppeers.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n scope: Cluster\n version: v1\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n scope: Cluster\n version: v1\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: ippools.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n scope: Cluster\n version: v1\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: hostendpoints.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n scope: Cluster\n version: v1\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: clusterinformations.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n scope: Cluster\n version: v1\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n scope: Cluster\n version: v1\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: globalnetworksets.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n scope: Cluster\n version: v1\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: networkpolicies.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n scope: Namespaced\n version: v1\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: networksets.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n scope: Namespaced\n version: v1\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-kube-controllers\nrules:\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - watch\n - list\n - get\n- apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n- apiGroups:\n - crd.projectcalico.org\n resources:\n - ippools\n verbs:\n - list\n- apiGroups:\n - crd.projectcalico.org\n resources:\n - blockaffinities\n - ipamblocks\n - ipamhandles\n verbs:\n - get\n - list\n - create\n - update\n - delete\n- apiGroups:\n - crd.projectcalico.org\n resources:\n - clusterinformations\n verbs:\n - get\n - create\n - update\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-kube-controllers\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-kube-controllers\nsubjects:\n- kind: ServiceAccount\n name: calico-kube-controllers\n namespace: kube-system\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-node\nrules:\n- apiGroups:\n - 
\"\"\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n - services\n verbs:\n - watch\n - list\n - get\n- apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n - update\n- apiGroups:\n - networking.k8s.io\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n- apiGroups:\n - \"\"\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - pods/status\n verbs:\n - patch\n- apiGroups:\n - crd.projectcalico.org\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - crd.projectcalico.org\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - crd.projectcalico.org\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n- apiGroups:\n - crd.projectcalico.org\n resources:\n - blockaffinities\n - ipamblocks\n - ipamhandles\n verbs:\n - get\n - list\n - create\n - update\n - delete\n- apiGroups:\n - crd.projectcalico.org\n resources:\n - ipamconfigs\n verbs:\n - get\n- apiGroups:\n - crd.projectcalico.org\n resources:\n - blockaffinities\n verbs:\n - watch\n- apiGroups:\n - apps\n resources:\n - daemonsets\n verbs:\n - get\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n\n---\n\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n labels:\n k8s-app: calico-node\n role.kubernetes.io/networking: \"1\"\n name: calico-node\n namespace: kube-system\nspec:\n selector:\n matchLabels:\n k8s-app: calico-node\n template:\n metadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n labels:\n k8s-app: calico-node\n role.kubernetes.io/networking: \"1\"\n spec:\n containers:\n - env:\n - name: DATASTORE_TYPE\n value: kubernetes\n - name: FELIX_TYPHAK8SSERVICENAME\n valueFrom:\n configMapKeyRef:\n key: typha_service_name\n name: calico-config\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n key: calico_backend\n name: calico-config\n - name: CLUSTER_TYPE\n value: kops,bgp\n - name: IP\n value: autodetect\n - name: CALICO_IPV4POOL_IPIP\n value: always\n - name: FELIX_IPINIPMTU\n valueFrom:\n configMapKeyRef:\n key: veth_mtu\n name: calico-config\n - name: CALICO_IPV4POOL_CIDR\n value: 100.96.0.0/11\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: ACCEPT\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n - name: FELIX_LOGSEVERITYSCREEN\n value: info\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n - name: FELIX_PROMETHEUSMETRICSENABLED\n value: \"false\"\n - name: FELIX_PROMETHEUSMETRICSPORT\n value: \"9091\"\n - name: FELIX_PROMETHEUSGOMETRICSENABLED\n value: \"true\"\n - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED\n value: 
\"true\"\n image: calico/node:v3.9.3\n livenessProbe:\n exec:\n command:\n - /bin/calico-node\n - -felix-live\n failureThreshold: 6\n initialDelaySeconds: 10\n periodSeconds: 10\n name: calico-node\n readinessProbe:\n exec:\n command:\n - /bin/calico-node\n - -felix-ready\n - -bird-ready\n periodSeconds: 10\n resources:\n requests:\n cpu: 90m\n securityContext:\n privileged: true\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - mountPath: /var/run/nodeagent\n name: policysync\n hostNetwork: true\n initContainers:\n - command:\n - /opt/cni/bin/calico-ipam\n - -upgrade\n env:\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n key: calico_backend\n name: calico-config\n image: calico/cni:v3.9.3\n name: upgrade-ipam\n volumeMounts:\n - mountPath: /var/lib/cni/networks\n name: host-local-net-dir\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - command:\n - /install-cni.sh\n env:\n - name: CNI_CONF_NAME\n value: 10-calico.conflist\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n key: cni_network_config\n name: calico-config\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: CNI_MTU\n valueFrom:\n configMapKeyRef:\n key: veth_mtu\n name: calico-config\n - name: SLEEP\n value: \"false\"\n image: calico/cni:v3.9.3\n name: install-cni\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n - image: calico/pod2daemon-flexvol:v3.9.3\n name: flexvol-driver\n volumeMounts:\n - mountPath: /host/driver\n name: flexvol-driver-host\n nodeSelector:\n beta.kubernetes.io/os: linux\n priorityClassName: system-node-critical\n serviceAccountName: calico-node\n terminationGracePeriodSeconds: 0\n tolerations:\n - effect: NoSchedule\n operator: Exists\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n volumes:\n - hostPath:\n path: /lib/modules\n name: lib-modules\n - hostPath:\n path: /var/run/calico\n name: var-run-calico\n - hostPath:\n path: /var/lib/calico\n name: var-lib-calico\n - hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n name: xtables-lock\n - hostPath:\n path: /opt/cni/bin\n name: cni-bin-dir\n - hostPath:\n path: /etc/cni/net.d\n name: cni-net-dir\n - hostPath:\n path: /var/lib/cni/networks\n name: host-local-net-dir\n - hostPath:\n path: /var/run/nodeagent\n type: DirectoryOrCreate\n name: policysync\n - hostPath:\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n type: DirectoryOrCreate\n name: flexvol-driver-host\n updateStrategy:\n rollingUpdate:\n maxUnavailable: 1\n type: RollingUpdate\n\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-node\n namespace: kube-system\n\n---\n\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n k8s-app: calico-kube-controllers\n role.kubernetes.io/networking: \"1\"\n name: calico-kube-controllers\n namespace: kube-system\nspec:\n replicas: 1\n selector:\n matchLabels:\n k8s-app: calico-kube-controllers\n strategy:\n type: Recreate\n template:\n metadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n labels:\n 
k8s-app: calico-kube-controllers\n role.kubernetes.io/networking: \"1\"\n name: calico-kube-controllers\n namespace: kube-system\n spec:\n containers:\n - env:\n - name: ENABLED_CONTROLLERS\n value: node\n - name: DATASTORE_TYPE\n value: kubernetes\n image: calico/kube-controllers:v3.9.3\n name: calico-kube-controllers\n readinessProbe:\n exec:\n command:\n - /usr/bin/check-status\n - -r\n nodeSelector:\n beta.kubernetes.io/os: linux\n priorityClassName: system-cluster-critical\n serviceAccountName: calico-kube-controllers\n tolerations:\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoSchedule\n key: node-role.kubernetes.io/master\n\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n role.kubernetes.io/networking: \"1\"\n name: calico-kube-controllers\n namespace: kube-system"}} | |
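
The block above is the rendered Calico addon manifest (CRDs, RBAC, the calico-node DaemonSet and the calico-kube-controllers Deployment) embedded in a single task-dump log line, so every newline and quote is JSON-escaped. Below is a minimal Go sketch, not part of kops, that undoes those escapes so the manifest can be read as ordinary YAML; the only assumption is that the escaped blob is pasted on stdin exactly as it appears in the log.

// decode_manifest.go - standalone helper sketch, not kops code.
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"os"
	"strings"
)

func main() {
	raw, err := io.ReadAll(os.Stdin) // paste the \n-escaped blob on stdin
	if err != nil {
		log.Fatal(err)
	}
	// Wrap it in quotes so it becomes a JSON string literal, then let the
	// JSON decoder resolve the \n and \" escapes.
	quoted := `"` + strings.TrimSpace(string(raw)) + `"`
	var decoded string
	if err := json.Unmarshal([]byte(quoted), &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded) // multi-document Calico YAML, one manifest per "---"
}

Usage would be along the lines of: go run decode_manifest.go < blob.txt
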
I0306 13:52:15.155301 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/networking.projectcalico.org/k8s-1.12.yaml" | |
I0306 13:52:15.149630 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/kube-dns.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:15.215657 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kubelet" | |
I0306 13:52:15.304195 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-main/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.304249 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-main/" | |
I0306 13:52:15.387563 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.387622 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca/" | |
I0306 13:52:15.485895 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/dns-controller.addons.k8s.io/pre-k8s-1.6.yaml" | |
I0306 13:52:15.485947 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/addons/dns-controller.addons.k8s.io/pre-k8s-1.6.yaml" SSE="-" ACL="" | |
I0306 13:52:15.512822 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-events/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.512879 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-events/" | |
I0306 13:52:15.515610 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/networking.projectcalico.org/k8s-1.12.yaml" | |
I0306 13:52:15.515783 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/addons/networking.projectcalico.org/k8s-1.12.yaml" SSE="-" ACL="" | |
I0306 13:52:15.520808 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.521614 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/kube-dns.addons.k8s.io/pre-k8s-1.6.yaml" | |
I0306 13:52:15.521773 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/addons/kube-dns.addons.k8s.io/pre-k8s-1.6.yaml" SSE="-" ACL="" | |
I0306 13:52:15.520865 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/ca/" | |
I0306 13:52:15.536114 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-main/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.536181 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-main/" | |
I0306 13:52:15.536676 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/manifests/etcd/main.yaml" | |
I0306 13:52:15.536724 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/manifests/etcd/main.yaml" SSE="-" ACL="" | |
I0306 13:52:15.537491 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:logging" | |
I0306 13:52:15.537581 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:dns" | |
I0306 13:52:15.539484 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/manifests/etcd/events.yaml" | |
I0306 13:52:15.539537 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/manifests/etcd/events.yaml" SSE="-" ACL="" | |
I0306 13:52:15.541855 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/rbac.addons.k8s.io/k8s-1.8.yaml" | |
I0306 13:52:15.541919 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/addons/rbac.addons.k8s.io/k8s-1.8.yaml" SSE="-" ACL="" | |
I0306 13:52:15.543297 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kube-proxy" | |
I0306 13:52:15.544235 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/networking.projectcalico.org/k8s-1.7-v3.yaml" | |
I0306 13:52:15.544278 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/addons/networking.projectcalico.org/k8s-1.7-v3.yaml" SSE="-" ACL="" | |
I0306 13:52:15.545231 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-events/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.545351 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-events/" | |
I0306 13:52:15.547901 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml" | |
I0306 13:52:15.547950 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml" SSE="-" ACL="" | |
I0306 13:52:15.555272 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-clients-ca/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.555330 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/etcd-clients-ca/" | |
I0306 13:52:15.558194 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/core.addons.k8s.io/v1.4.0.yaml" | |
I0306 13:52:15.558259 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/addons/core.addons.k8s.io/v1.4.0.yaml" SSE="-" ACL="" | |
I0306 13:52:15.558923 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/backups/etcd/events/control/etcd-cluster-spec" | |
I0306 13:52:15.560063 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/backups/etcd/events/control/etcd-cluster-spec" SSE="-" ACL="" | |
I0306 13:52:15.559893 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:controller_manager" | |
I0306 13:52:15.561016 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/admin" | |
I0306 13:52:15.572421 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/kube-dns.addons.k8s.io/k8s-1.6.yaml" | |
I0306 13:52:15.572477 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/addons/kube-dns.addons.k8s.io/k8s-1.6.yaml" SSE="-" ACL="" | |
I0306 13:52:15.572589 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/kube-dns.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:15.572641 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/addons/kube-dns.addons.k8s.io/k8s-1.12.yaml" SSE="-" ACL="" | |
I0306 13:52:15.576670 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:monitoring" | |
I0306 13:52:15.578604 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" | |
I0306 13:52:15.578659 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" SSE="-" ACL="" | |
I0306 13:52:15.579980 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:scheduler" | |
I0306 13:52:15.580822 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kube" | |
I0306 13:52:15.598983 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/backups/etcd/main/control/etcd-cluster-spec" | |
I0306 13:52:15.599040 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/backups/etcd/main/control/etcd-cluster-spec" SSE="-" ACL="" | |
I0306 13:52:15.600016 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/bootstrap-channel.yaml" | |
I0306 13:52:15.600057 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/addons/bootstrap-channel.yaml" SSE="-" ACL="" | |
I0306 13:52:15.603310 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kubelet" | |
I0306 13:52:15.607249 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" | |
I0306 13:52:15.611396 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" SSE="-" ACL="" | |
I0306 13:52:15.607316 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/dns-controller.addons.k8s.io/k8s-1.6.yaml" | |
I0306 13:52:15.612613 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/addons/dns-controller.addons.k8s.io/k8s-1.6.yaml" SSE="-" ACL="" | |
I0306 13:52:15.613109 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-main: [] | |
I0306 13:52:15.613171 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-main/keyset.yaml" | |
I0306 13:52:15.609139 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/addons/digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml" | |
I0306 13:52:15.613787 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/addons/digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml" SSE="-" ACL="" | |
I0306 13:52:15.624708 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca: [] | |
I0306 13:52:15.624866 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator-ca/keyset.yaml" | |
I0306 13:52:15.627270 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-events: [] | |
I0306 13:52:15.627410 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-events/keyset.yaml" | |
I0306 13:52:15.640321 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-main: [] | |
I0306 13:52:15.640389 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-main/keyset.yaml" | |
I0306 13:52:15.649960 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca: [] | |
I0306 13:52:15.650029 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca/keyset.yaml" | |
I0306 13:52:15.661442 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-events: [] | |
I0306 13:52:15.661516 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-events/keyset.yaml" | |
I0306 13:52:15.669130 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-clients-ca: [] | |
I0306 13:52:15.669189 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-clients-ca/keyset.yaml" | |
I0306 13:52:15.710157 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kubelet" | |
I0306 13:52:15.710210 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/secrets/kubelet" SSE="-" ACL="" | |
I0306 13:52:15.724041 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-main/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.724097 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-main/" | |
I0306 13:52:15.736933 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-events/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.736988 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-events/" | |
I0306 13:52:15.737107 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator-ca/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.737159 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator-ca/" | |
I0306 13:52:15.740603 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.740662 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/ca/" | |
I0306 13:52:15.753368 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-events/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.753522 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-events/" | |
I0306 13:52:15.753596 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-main/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.753675 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-main/" | |
I0306 13:52:15.787116 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-clients-ca/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.787181 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/etcd-clients-ca/" | |
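
Each "no certificate bundle ... falling back to directory-list method" line above is followed by a prefix listing against the DigitalOcean Spaces bucket that backs the state store. The following is a rough sketch of that listing step only, assuming the aws-sdk-go v1 client with credentials in the usual AWS_* environment variables rather than whatever kops' s3fs.go does internally; the region string is arbitrary, since Spaces only cares about the endpoint.

// list_keyset.go - illustrative sketch of the prefix listing, not kops' s3fs.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// DigitalOcean Spaces speaks the S3 API; only the endpoint differs from AWS.
	sess, err := session.NewSession(&aws.Config{
		Region:   aws.String("us-east-1"), // placeholder; ignored by Spaces
		Endpoint: aws.String("https://fra1.digitaloceanspaces.com"),
	})
	if err != nil {
		log.Fatal(err)
	}
	out, err := s3.New(sess).ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket: aws.String("test1.dev.fra1-state-store"),
		Prefix: aws.String("test1.dev.fra1.do.services.example.com/pki/issued/ca/"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, obj := range out.Contents {
		fmt.Println(*obj.Key) // empty on a fresh cluster, matching "Listed files ...: []"
	}
}
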
I0306 13:52:15.799687 10006 volume.go:118] DO - Join the volume tag - etcdCluster-events:1 | |
I0306 13:52:15.799719 10006 volume.go:118] DO - Join the volume tag - k8s-index:1 | |
I0306 13:52:15.799732 10006 volume.go:118] DO - Join the volume tag - KubernetesCluster:test1-dev-fra1-do-services-example-com | |
I0306 13:52:15.805164 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kubelet" | |
I0306 13:52:15.805714 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:dns" | |
I0306 13:52:15.811097 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-main: [] | |
I0306 13:52:15.811412 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:15.811438 10006 keypair.go:201] Creating PKI keypair "etcd-manager-ca-main" | |
I0306 13:52:15.811473 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-main/keyset.yaml" | |
I0306 13:52:15.815601 10006 tagbuilder.go:95] tags: [_do _k8s_1_6] | |
I0306 13:52:15.815672 10006 urls.go:168] Using cached protokube location: "https://kubeupv2.s3.amazonaws.com/kops/1.15.2/images/protokube.tar.gz" | |
I0306 13:52:15.822593 10006 volume.go:118] DO - Join the volume tag - etcdCluster-main:1 | |
I0306 13:52:15.822634 10006 volume.go:118] DO - Join the volume tag - k8s-index:1 | |
I0306 13:52:15.822647 10006 volume.go:118] DO - Join the volume tag - KubernetesCluster:test1-dev-fra1-do-services-example-com | |
I0306 13:52:15.835728 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca: [] | |
I0306 13:52:15.835733 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-events: [] | |
I0306 13:52:15.835894 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:15.835896 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:15.835911 10006 keypair.go:201] Creating PKI keypair "ca" | |
I0306 13:52:15.835919 10006 keypair.go:201] Creating PKI keypair "etcd-peers-ca-events" | |
I0306 13:52:15.835944 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca/keyset.yaml" | |
I0306 13:52:15.835950 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-events/keyset.yaml" | |
I0306 13:52:15.839301 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator-ca: [] | |
I0306 13:52:15.839454 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:15.839471 10006 keypair.go:201] Creating PKI keypair "apiserver-aggregator-ca" | |
I0306 13:52:15.839502 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca/keyset.yaml" | |
I0306 13:52:15.844922 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-events: [] | |
I0306 13:52:15.845082 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:15.845105 10006 keypair.go:201] Creating PKI keypair "etcd-manager-ca-events" | |
I0306 13:52:15.845137 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-events/keyset.yaml" | |
I0306 13:52:15.846032 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-main: [] | |
I0306 13:52:15.846178 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:15.846275 10006 keypair.go:201] Creating PKI keypair "etcd-peers-ca-main" | |
I0306 13:52:15.846341 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-main/keyset.yaml" | |
I0306 13:52:15.873832 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-clients-ca: [] | |
I0306 13:52:15.874000 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:15.874022 10006 keypair.go:201] Creating PKI keypair "etcd-clients-ca" | |
I0306 13:52:15.874056 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-clients-ca/keyset.yaml" | |
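
The "creating brand new certificate" / "Creating PKI keypair" pairs above are the etcd, aggregator and cluster CAs being generated because nothing was found under pki/ in the state store. As an illustration only (kops' keypair.go additionally maintains keyset.yaml entries and large random serials), generating a comparable self-signed CA with the Go standard library looks roughly like this; the common name and ten-year validity here are assumptions for the example.

// newca.go - illustrative CA keypair sketch, not kops' implementation.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"log"
	"math/big"
	"os"
	"time"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1), // kops uses a much larger random serial
		Subject:               pkix.Name{CommonName: "etcd-manager-ca-main"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().AddDate(10, 0, 0),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
		BasicConstraintsValid: true,
	}
	// Self-signed: the template is both subject and issuer.
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		log.Fatal(err)
	}
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
	pem.Encode(os.Stdout, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
}
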
I0306 13:52:15.885385 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:dns" | |
I0306 13:52:15.885437 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/secrets/system:dns" SSE="-" ACL="" | |
I0306 13:52:15.919245 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-main/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.919305 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-main/" | |
I0306 13:52:15.923478 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.923537 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/ca/" | |
I0306 13:52:15.930413 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-events/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.930473 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-events/" | |
I0306 13:52:15.933404 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.933466 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca/" | |
I0306 13:52:15.933597 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-main/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.933666 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-main/" | |
I0306 13:52:15.940192 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-events/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.940250 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-events/" | |
I0306 13:52:15.954537 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-clients-ca/keyset.yaml", falling back to directory-list method | |
I0306 13:52:15.954593 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/etcd-clients-ca/" | |
I0306 13:52:16.005313 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-main: [] | |
I0306 13:52:16.005375 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-main/keyset.yaml" | |
I0306 13:52:16.006248 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca: [] | |
I0306 13:52:16.006309 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca/keyset.yaml" | |
I0306 13:52:16.019109 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-events: [] | |
I0306 13:52:16.019169 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-events/keyset.yaml" | |
I0306 13:52:16.021083 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-main: [] | |
I0306 13:52:16.021152 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-main/keyset.yaml" | |
I0306 13:52:16.025075 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca: [] | |
I0306 13:52:16.025129 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator-ca/keyset.yaml" | |
I0306 13:52:16.029312 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-events: [] | |
I0306 13:52:16.029401 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-events/keyset.yaml" | |
I0306 13:52:16.034797 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-clients-ca: [] | |
I0306 13:52:16.034859 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-clients-ca/keyset.yaml" | |
I0306 13:52:16.075355 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:dns" | |
I0306 13:52:16.075402 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kube-proxy" | |
I0306 13:52:16.086546 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca/keyset.yaml", falling back to directory-list method | |
I0306 13:52:16.086620 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/ca/" | |
I0306 13:52:16.089554 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-main/keyset.yaml", falling back to directory-list method | |
I0306 13:52:16.089609 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-main/" | |
I0306 13:52:16.112641 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-clients-ca/keyset.yaml", falling back to directory-list method | |
I0306 13:52:16.112700 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/etcd-clients-ca/" | |
I0306 13:52:16.112774 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-events/keyset.yaml", falling back to directory-list method | |
I0306 13:52:16.113071 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-events/" | |
I0306 13:52:16.115062 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-events/keyset.yaml", falling back to directory-list method | |
I0306 13:52:16.115113 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-events/" | |
I0306 13:52:16.124617 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator-ca/keyset.yaml", falling back to directory-list method | |
I0306 13:52:16.124677 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator-ca/" | |
I0306 13:52:16.130344 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-main/keyset.yaml", falling back to directory-list method | |
I0306 13:52:16.130402 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-main/" | |
I0306 13:52:16.162425 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kube-proxy" | |
I0306 13:52:16.162482 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/secrets/kube-proxy" SSE="-" ACL="" | |
I0306 13:52:16.179608 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca: [] | |
I0306 13:52:16.179660 10006 keypair.go:212] Creating privateKey "ca" | |
I0306 13:52:16.203551 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-events: [] | |
I0306 13:52:16.203599 10006 keypair.go:212] Creating privateKey "etcd-manager-ca-events" | |
I0306 13:52:16.221108 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-main: [] | |
I0306 13:52:16.221166 10006 keypair.go:212] Creating privateKey "etcd-manager-ca-main" | |
I0306 13:52:16.238887 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-main: [] | |
I0306 13:52:16.238944 10006 keypair.go:212] Creating privateKey "etcd-peers-ca-main" | |
I0306 13:52:16.258234 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator-ca: [] | |
I0306 13:52:16.276802 10006 keypair.go:212] Creating privateKey "apiserver-aggregator-ca" | |
I0306 13:52:16.258534 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kube-proxy" | |
I0306 13:52:16.290058 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-clients-ca: [] | |
I0306 13:52:16.290214 10006 keypair.go:212] Creating privateKey "etcd-clients-ca" | |
I0306 13:52:16.258569 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:logging" | |
I0306 13:52:16.258755 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-events: [] | |
I0306 13:52:16.314457 10006 keypair.go:212] Creating privateKey "etcd-peers-ca-events" | |
I0306 13:52:16.404450 10006 tagbuilder.go:95] tags: [_do _k8s_1_6] | |
I0306 13:52:16.404618 10006 urls.go:168] Using cached protokube location: "https://kubeupv2.s3.amazonaws.com/kops/1.15.2/images/protokube.tar.gz" | |
I0306 13:52:16.441305 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:logging" | |
I0306 13:52:16.441458 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/secrets/system:logging" SSE="-" ACL="" | |
I0306 13:52:16.562033 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:logging" | |
I0306 13:52:16.562702 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:controller_manager" | |
I0306 13:52:16.686239 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:controller_manager" | |
I0306 13:52:16.686379 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/secrets/system:controller_manager" SSE="-" ACL="" | |
I0306 13:52:16.799895 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:controller_manager" | |
I0306 13:52:16.800963 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/admin" | |
I0306 13:52:16.907907 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/admin" | |
I0306 13:52:16.907958 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/secrets/admin" SSE="-" ACL="" | |
I0306 13:52:17.030122 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/admin" | |
I0306 13:52:17.030811 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:monitoring" | |
I0306 13:52:17.120826 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:monitoring" | |
I0306 13:52:17.120892 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/secrets/system:monitoring" SSE="-" ACL="" | |
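
The write side is the mirror image of the listing sketch earlier: each "Writing file" line becomes a PutObject call with no server-side encryption and no ACL, which is what SSE="-" ACL="" records. Below is a hedged sketch of one such write, again using aws-sdk-go v1 against the Spaces endpoint; the local file name secret.json is made up for the example.

// put_secret.go - illustrative write-path sketch, not kops' s3fs.
package main

import (
	"bytes"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess, err := session.NewSession(&aws.Config{
		Region:   aws.String("us-east-1"), // placeholder; Spaces keys off the endpoint
		Endpoint: aws.String("https://fra1.digitaloceanspaces.com"),
	})
	if err != nil {
		log.Fatal(err)
	}
	body, err := os.ReadFile("secret.json") // hypothetical local payload
	if err != nil {
		log.Fatal(err)
	}
	_, err = s3.New(sess).PutObject(&s3.PutObjectInput{
		Bucket: aws.String("test1.dev.fra1-state-store"),
		Key:    aws.String("test1.dev.fra1.do.services.example.com/secrets/system:monitoring"),
		Body:   bytes.NewReader(body), // no SSE or ACL set, matching SSE="-" ACL=""
	})
	if err != nil {
		log.Fatal(err)
	}
}
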
I0306 13:52:17.308079 10006 vfs_castore.go:729] Issuing new certificate: "etcd-manager-ca-main" | |
I0306 13:52:17.322690 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:monitoring" | |
I0306 13:52:17.326847 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kube" | |
I0306 13:52:17.327690 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-main/" | |
I0306 13:52:17.443373 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kube" | |
I0306 13:52:17.443512 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/secrets/kube" SSE="-" ACL="" | |
I0306 13:52:17.512363 10006 vfs_castore.go:729] Issuing new certificate: "ca" | |
I0306 13:52:17.520252 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/ca/" | |
I0306 13:52:17.533931 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kube" | |
I0306 13:52:17.548735 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-main: [] | |
I0306 13:52:17.557799 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-main/keyset.yaml" | |
I0306 13:52:17.570731 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-main/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:17.577133 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:scheduler" | |
I0306 13:52:17.668285 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/ca: [] | |
I0306 13:52:17.670161 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/ca/keyset.yaml" | |
I0306 13:52:17.670210 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/ca/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:17.759808 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-main/6801092469864564949182816984.key" | |
I0306 13:52:17.759989 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-main/6801092469864564949182816984.key" SSE="-" ACL="" | |
I0306 13:52:17.797140 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:scheduler" | |
I0306 13:52:17.797419 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/secrets/system:scheduler" SSE="-" ACL="" | |
I0306 13:52:17.801603 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/ca/6801092470741949892137852812.key" | |
I0306 13:52:17.801844 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/ca/6801092470741949892137852812.key" SSE="-" ACL="" | |
I0306 13:52:17.884688 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-main/" | |
I0306 13:52:17.904593 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/system:scheduler" | |
I0306 13:52:17.914711 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/ca/" | |
I0306 13:52:17.925530 10006 vfs_castore.go:729] Issuing new certificate: "etcd-clients-ca" | |
I0306 13:52:17.941143 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/etcd-clients-ca/" | |
I0306 13:52:17.985438 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-main: [] | |
I0306 13:52:17.987388 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-main/keyset.yaml" | |
I0306 13:52:17.987538 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-main/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:18.007290 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca: [] | |
I0306 13:52:18.008664 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca/keyset.yaml" | |
I0306 13:52:18.008867 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/ca/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:18.048509 10006 vfs_castore.go:729] Issuing new certificate: "etcd-peers-ca-main" | |
I0306 13:52:18.050327 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-clients-ca: [] | |
I0306 13:52:18.061802 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-clients-ca/keyset.yaml" | |
I0306 13:52:18.067807 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/etcd-clients-ca/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:18.067596 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-main/" | |
I0306 13:52:18.077937 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-main/6801092469864564949182816984.crt" | |
I0306 13:52:18.078080 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-main/6801092469864564949182816984.crt" SSE="-" ACL="" | |
I0306 13:52:18.103433 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca/6801092470741949892137852812.crt" | |
I0306 13:52:18.103488 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/ca/6801092470741949892137852812.crt" SSE="-" ACL="" | |
I0306 13:52:18.155377 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-main: [] | |
I0306 13:52:18.157119 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-main/keyset.yaml" | |
I0306 13:52:18.157159 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-main/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:18.193748 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-main/6801092469864564949182816984.crt" | |
I0306 13:52:18.194524 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-clients-ca/6801092472516510488001847046.key" | |
I0306 13:52:18.194562 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/etcd-clients-ca/6801092472516510488001847046.key" SSE="-" ACL="" | |
I0306 13:52:18.243700 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca/6801092470741949892137852812.crt" | |
I0306 13:52:18.277806 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:18.278011 10006 keypair.go:230] created certificate with cn=etcd-manager-ca-main | |
I0306 13:52:18.292153 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/etcd-clients-ca/" | |
I0306 13:52:18.294554 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-main/6801092473044668113421723099.key" | |
I0306 13:52:18.294603 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-main/6801092473044668113421723099.key" SSE="-" ACL="" | |
I0306 13:52:18.321766 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:18.321966 10006 keypair.go:230] created certificate with cn=kubernetes | |
I0306 13:52:18.383914 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-main/" | |
I0306 13:52:18.391074 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-clients-ca: [] | |
I0306 13:52:18.395873 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-clients-ca/keyset.yaml" | |
I0306 13:52:18.399745 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/etcd-clients-ca/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:18.454839 10006 vfs_castore.go:729] Issuing new certificate: "etcd-manager-ca-events" | |
I0306 13:52:18.462667 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-events/" | |
I0306 13:52:18.465132 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-main: [] | |
I0306 13:52:18.466081 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-main/keyset.yaml" | |
I0306 13:52:18.466118 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-main/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:18.499809 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-clients-ca/6801092472516510488001847046.crt" | |
I0306 13:52:18.499866 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/etcd-clients-ca/6801092472516510488001847046.crt" SSE="-" ACL="" | |
I0306 13:52:18.582694 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-events: [] | |
I0306 13:52:18.584345 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-events/keyset.yaml" | |
I0306 13:52:18.584392 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-events/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:18.587819 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-main/6801092473044668113421723099.crt" | |
I0306 13:52:18.587866 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-main/6801092473044668113421723099.crt" SSE="-" ACL="" | |
I0306 13:52:18.664458 10006 vfs_castore.go:729] Issuing new certificate: "etcd-peers-ca-events" | |
I0306 13:52:18.672202 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-events/" | |
I0306 13:52:18.684113 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-main/6801092473044668113421723099.crt" | |
I0306 13:52:18.702229 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-events/6801092474789852924273824582.key" | |
I0306 13:52:18.702286 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/etcd-manager-ca-events/6801092474789852924273824582.key" SSE="-" ACL="" | |
I0306 13:52:18.719464 10006 vfs_castore.go:729] Issuing new certificate: "apiserver-aggregator-ca" | |
I0306 13:52:18.727445 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator-ca/" | |
I0306 13:52:18.763467 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:18.763776 10006 keypair.go:230] created certificate with cn=etcd-peers-ca-main | |
I0306 13:52:18.774537 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-events: [] | |
I0306 13:52:18.776368 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-events/keyset.yaml" | |
I0306 13:52:18.776417 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-events/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:18.790585 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-clients-ca/6801092472516510488001847046.crt" | |
I0306 13:52:18.801473 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-events/" | |
I0306 13:52:18.858486 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator-ca: [] | |
I0306 13:52:18.866976 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator-ca/keyset.yaml" | |
I0306 13:52:18.867145 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator-ca/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:18.874198 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:18.874440 10006 keypair.go:230] created certificate with cn=etcd-clients-ca | |
I0306 13:52:18.888001 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-events: [] | |
I0306 13:52:18.888974 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-events/keyset.yaml" | |
I0306 13:52:18.889079 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-events/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:18.927730 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-events/6801092475690146253835421205.key" | |
I0306 13:52:18.927860 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/etcd-peers-ca-events/6801092475690146253835421205.key" SSE="-" ACL="" | |
I0306 13:52:19.049407 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-events/6801092474789852924273824582.crt" | |
I0306 13:52:19.049465 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-events/6801092474789852924273824582.crt" SSE="-" ACL="" | |
I0306 13:52:19.090402 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-events/" | |
I0306 13:52:19.145961 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-manager-ca-events/6801092474789852924273824582.crt" | |
I0306 13:52:19.149101 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator-ca/6801092475926411310981713148.key" | |
I0306 13:52:19.149151 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator-ca/6801092475926411310981713148.key" SSE="-" ACL="" | |
I0306 13:52:19.212845 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-events: [] | |
I0306 13:52:19.213782 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-events/keyset.yaml" | |
I0306 13:52:19.213826 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-events/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:19.232736 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:19.232943 10006 keypair.go:230] created certificate with cn=etcd-manager-ca-events | |
I0306 13:52:19.237104 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca/" | |
I0306 13:52:19.321268 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-events/6801092475690146253835421205.crt" | |
I0306 13:52:19.321318 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-events/6801092475690146253835421205.crt" SSE="-" ACL="" | |
I0306 13:52:19.331862 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca: [] | |
I0306 13:52:19.332830 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca/keyset.yaml" | |
I0306 13:52:19.332879 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:19.428532 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca/6801092475926411310981713148.crt" | |
I0306 13:52:19.428582 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca/6801092475926411310981713148.crt" SSE="-" ACL="" | |
I0306 13:52:19.439241 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/etcd-peers-ca-events/6801092475690146253835421205.crt" | |
I0306 13:52:19.533338 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca/6801092475926411310981713148.crt" | |
I0306 13:52:19.534706 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:19.534913 10006 keypair.go:230] created certificate with cn=etcd-peers-ca-events | |
I0306 13:52:19.617981 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:19.618215 10006 keypair.go:230] created certificate with cn=apiserver-aggregator-ca | |
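At this point the run has finished generating the etcd and apiserver-aggregator CA keypairs and has pushed each one into the do:// state store as a serial-numbered .crt/.key pair plus a keyset.yaml bundle. A minimal sketch for spot-checking that those objects actually landed in the bucket, assuming boto3 is available and that you supply your own S3-compatible endpoint and credentials (the endpoint URL below is a placeholder, not taken from this run; only the bucket name and key prefix come from the log above):

```python
# Sketch: list the PKI objects kops wrote to the S3-compatible state store.
# Assumptions: boto3 is installed and credentials are configured; the endpoint
# URL is a placeholder. Bucket name and prefix are taken from the log above.
import boto3

SPACES_ENDPOINT = "https://<region>.digitaloceanspaces.com"  # placeholder endpoint
BUCKET = "test1.dev.fra1-state-store"
PREFIX = "test1.dev.fra1.do.services.example.com/pki/"

s3 = boto3.client("s3", endpoint_url=SPACES_ENDPOINT)

paginator = s3.get_paginator("list_objects_v2")
for page in paginator.paginate(Bucket=BUCKET, Prefix=PREFIX):
    for obj in page.get("Contents", []):
        print(obj["Key"], obj["Size"])
```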
I0306 13:52:19.618291 10006 executor.go:103] Tasks: 38 done / 50 total; 12 can run | |
I0306 13:52:19.618366 10006 executor.go:178] Executing task "Keypair/kubelet-api": *fitasks.Keypair {"Name":"kubelet-api","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":{"Name":"ca","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":null,"subject":"cn=kubernetes","type":"ca","format":"v1alpha2"},"subject":"cn=kubelet-api","type":"client","format":"v1alpha2"} | |
I0306 13:52:19.618393 10006 executor.go:178] Executing task "Keypair/kops": *fitasks.Keypair {"Name":"kops","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":{"Name":"ca","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":null,"subject":"cn=kubernetes","type":"ca","format":"v1alpha2"},"subject":"o=system:masters,cn=kops","type":"client","format":"v1alpha2"} | |
I0306 13:52:19.618424 10006 executor.go:178] Executing task "Keypair/kube-controller-manager": *fitasks.Keypair {"Name":"kube-controller-manager","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":{"Name":"ca","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":null,"subject":"cn=kubernetes","type":"ca","format":"v1alpha2"},"subject":"cn=system:kube-controller-manager","type":"client","format":"v1alpha2"} | |
I0306 13:52:19.618542 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet-api/keyset.yaml" | |
I0306 13:52:19.618560 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kops/keyset.yaml" | |
I0306 13:52:19.618611 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-controller-manager/keyset.yaml" | |
I0306 13:52:19.618893 10006 executor.go:178] Executing task "Keypair/apiserver-proxy-client": *fitasks.Keypair {"Name":"apiserver-proxy-client","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":{"Name":"ca","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":null,"subject":"cn=kubernetes","type":"ca","format":"v1alpha2"},"subject":"cn=apiserver-proxy-client","type":"client","format":"v1alpha2"} | |
I0306 13:52:19.619059 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-proxy-client/keyset.yaml" | |
I0306 13:52:19.619070 10006 executor.go:178] Executing task "Keypair/kubecfg": *fitasks.Keypair {"Name":"kubecfg","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":{"Name":"ca","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":null,"subject":"cn=kubernetes","type":"ca","format":"v1alpha2"},"subject":"o=system:masters,cn=kubecfg","type":"client","format":"v1alpha2"} | |
I0306 13:52:19.619171 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubecfg/keyset.yaml" | |
I0306 13:52:19.619244 10006 executor.go:178] Executing task "Keypair/kubelet": *fitasks.Keypair {"Name":"kubelet","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":{"Name":"ca","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":null,"subject":"cn=kubernetes","type":"ca","format":"v1alpha2"},"subject":"o=system:nodes,cn=kubelet","type":"client","format":"v1alpha2"} | |
I0306 13:52:19.619353 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet/keyset.yaml" | |
I0306 13:52:19.619527 10006 executor.go:178] Executing task "Keypair/apiserver-aggregator": *fitasks.Keypair {"Name":"apiserver-aggregator","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":{"Name":"apiserver-aggregator-ca","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":null,"subject":"cn=apiserver-aggregator-ca","type":"ca","format":"v1alpha2"},"subject":"cn=aggregator","type":"client","format":"v1alpha2"} | |
I0306 13:52:19.619626 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator/keyset.yaml" | |
I0306 13:52:19.619575 10006 executor.go:178] Executing task "MirrorSecrets/mirror-secrets": *fitasks.MirrorSecrets {"Name":"mirror-secrets","Lifecycle":null,"MirrorPath":{}} | |
I0306 13:52:19.619760 10006 executor.go:178] Executing task "Keypair/master": *fitasks.Keypair {"Name":"master","alternateNames":["kubernetes","kubernetes.default","kubernetes.default.svc","kubernetes.default.svc.cluster.local","api.test1.dev.fra1.do.services.example.com","api.internal.test1.dev.fra1.do.services.example.com","100.64.0.1","127.0.0.1"],"alternateNameTasks":null,"Lifecycle":"Sync","Signer":{"Name":"ca","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":null,"subject":"cn=kubernetes","type":"ca","format":"v1alpha2"},"subject":"cn=kubernetes-master","type":"server","format":"v1alpha2"} | |
I0306 13:52:19.619833 10006 executor.go:178] Executing task "Keypair/kube-proxy": *fitasks.Keypair {"Name":"kube-proxy","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":{"Name":"ca","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":null,"subject":"cn=kubernetes","type":"ca","format":"v1alpha2"},"subject":"cn=system:kube-proxy","type":"client","format":"v1alpha2"} | |
I0306 13:52:19.619863 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/master/keyset.yaml" | |
I0306 13:52:19.619873 10006 executor.go:178] Executing task "Keypair/kube-scheduler": *fitasks.Keypair {"Name":"kube-scheduler","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":{"Name":"ca","alternateNames":null,"alternateNameTasks":null,"Lifecycle":"Sync","Signer":null,"subject":"cn=kubernetes","type":"ca","format":"v1alpha2"},"subject":"cn=system:kube-scheduler","type":"client","format":"v1alpha2"} | |
I0306 13:52:19.619943 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-proxy/keyset.yaml" | |
I0306 13:52:19.619964 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-scheduler/keyset.yaml" | |
I0306 13:52:19.620013 10006 executor.go:178] Executing task "MirrorKeystore/mirror-keystore": *fitasks.MirrorKeystore {"Name":"mirror-keystore","Lifecycle":null,"MirrorPath":{}} | |
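The executor has now dispatched the remaining Keypair tasks, and the rest of the output is dominated by keyset.yaml reads, prefix listings, and PutObject calls. If you save this verbose output to a file, a small script can pull out every state-store object the run wrote; this is a sketch that only relies on the "Calling S3 PutObject" line format visible above (the log file name is a placeholder):

```python
# Sketch: extract every state-store object this run wrote by scanning the
# 'Calling S3 PutObject' lines of a saved copy of this verbose output.
# The file name is a placeholder; the regex matches the line format above.
import re

put_key = re.compile(r'Calling S3 PutObject Bucket="([^"]+)" Key="([^"]+)"')

with open("kops-verbose.log") as f:  # placeholder path
    for line in f:
        m = put_key.search(line)
        if m:
            bucket, key = m.groups()
            print(f"{bucket}/{key}")
```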
I0306 13:52:19.702519 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet-api/keyset.yaml", falling back to directory-list method | |
I0306 13:52:19.702575 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kubelet-api/" | |
I0306 13:52:19.704430 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kops/keyset.yaml", falling back to directory-list method | |
I0306 13:52:19.704481 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kops/" | |
I0306 13:52:19.785459 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-controller-manager/keyset.yaml", falling back to directory-list method | |
I0306 13:52:19.785515 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kube-controller-manager/" | |
I0306 13:52:19.786466 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-proxy-client/keyset.yaml", falling back to directory-list method | |
I0306 13:52:19.786511 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/apiserver-proxy-client/" | |
I0306 13:52:19.868315 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubecfg/keyset.yaml", falling back to directory-list method | |
I0306 13:52:19.868369 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kubecfg/" | |
I0306 13:52:19.872326 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet/keyset.yaml", falling back to directory-list method | |
I0306 13:52:19.872450 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kubelet/" | |
I0306 13:52:19.959896 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/master/keyset.yaml", falling back to directory-list method | |
I0306 13:52:19.959950 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/master/" | |
I0306 13:52:19.960971 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator/keyset.yaml", falling back to directory-list method | |
I0306 13:52:19.961024 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator/" | |
I0306 13:52:19.986230 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-proxy/keyset.yaml", falling back to directory-list method | |
I0306 13:52:19.986287 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kube-proxy/" | |
I0306 13:52:19.988317 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet-api: [] | |
I0306 13:52:19.988375 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet-api/keyset.yaml" | |
I0306 13:52:19.990329 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-scheduler/keyset.yaml", falling back to directory-list method | |
I0306 13:52:19.990387 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kube-scheduler/" | |
I0306 13:52:19.991334 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-proxy-client: [] | |
I0306 13:52:19.991390 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-proxy-client/keyset.yaml" | |
I0306 13:52:19.994677 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kops: [] | |
I0306 13:52:19.994736 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kops/keyset.yaml" | |
I0306 13:52:20.013735 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-controller-manager: [] | |
I0306 13:52:20.013805 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-controller-manager/keyset.yaml" | |
I0306 13:52:20.019041 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet: [] | |
I0306 13:52:20.019130 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet/keyset.yaml" | |
I0306 13:52:20.025227 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubecfg: [] | |
I0306 13:52:20.025284 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubecfg/keyset.yaml" | |
I0306 13:52:20.051482 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/master: [] | |
I0306 13:52:20.051550 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/master/keyset.yaml" | |
I0306 13:52:20.053601 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator: [] | |
I0306 13:52:20.053684 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator/keyset.yaml" | |
I0306 13:52:20.066167 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet-api/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.066227 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kubelet-api/" | |
I0306 13:52:20.074886 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kops/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.074956 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kops/" | |
I0306 13:52:20.089129 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-proxy-client/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.089191 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/apiserver-proxy-client/" | |
I0306 13:52:20.092417 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-scheduler: [] | |
I0306 13:52:20.092478 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-scheduler/keyset.yaml" | |
I0306 13:52:20.102757 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.102818 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kubelet/" | |
I0306 13:52:20.107094 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubecfg/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.107147 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kubecfg/" | |
I0306 13:52:20.115552 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-controller-manager/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.115605 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kube-controller-manager/" | |
I0306 13:52:20.118488 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-proxy: [] | |
I0306 13:52:20.118547 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-proxy/keyset.yaml" | |
I0306 13:52:20.138790 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/master/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.138950 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/master/" | |
I0306 13:52:20.145229 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.145286 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator/" | |
I0306 13:52:20.151422 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet-api: [] | |
I0306 13:52:20.151758 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:20.151784 10006 keypair.go:201] Creating PKI keypair "kubelet-api" | |
I0306 13:52:20.151845 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet-api/keyset.yaml" | |
I0306 13:52:20.156598 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kops: [] | |
I0306 13:52:20.156826 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:20.156852 10006 keypair.go:201] Creating PKI keypair "kops" | |
I0306 13:52:20.156913 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kops/keyset.yaml" | |
I0306 13:52:20.169448 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-scheduler/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.169514 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kube-scheduler/" | |
I0306 13:52:20.192082 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet: [] | |
I0306 13:52:20.192245 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubecfg: [] | |
I0306 13:52:20.192273 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:20.192291 10006 keypair.go:201] Creating PKI keypair "kubelet" | |
I0306 13:52:20.192326 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet/keyset.yaml" | |
I0306 13:52:20.192397 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:20.192416 10006 keypair.go:201] Creating PKI keypair "kubecfg" | |
I0306 13:52:20.192446 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubecfg/keyset.yaml" | |
I0306 13:52:20.216974 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-proxy-client: [] | |
I0306 13:52:20.216975 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-controller-manager: [] | |
I0306 13:52:20.217150 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:20.217175 10006 keypair.go:201] Creating PKI keypair "apiserver-proxy-client" | |
I0306 13:52:20.217211 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-proxy-client/keyset.yaml" | |
I0306 13:52:20.217235 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:20.217253 10006 keypair.go:201] Creating PKI keypair "kube-controller-manager" | |
I0306 13:52:20.217284 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-controller-manager/keyset.yaml" | |
I0306 13:52:20.223323 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-proxy/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.223398 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kube-proxy/" | |
I0306 13:52:20.230920 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet-api/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.230979 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kubelet-api/" | |
I0306 13:52:20.231025 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator: [] | |
I0306 13:52:20.231179 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:20.231205 10006 keypair.go:201] Creating PKI keypair "apiserver-aggregator" | |
I0306 13:52:20.231240 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator/keyset.yaml" | |
I0306 13:52:20.243727 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kops/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.243783 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kops/" | |
I0306 13:52:20.249759 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/master: [] | |
I0306 13:52:20.249937 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:20.249954 10006 keypair.go:201] Creating PKI keypair "master" | |
I0306 13:52:20.249987 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/master/keyset.yaml" | |
I0306 13:52:20.263144 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-scheduler: [] | |
I0306 13:52:20.263317 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:20.263335 10006 keypair.go:201] Creating PKI keypair "kube-scheduler" | |
I0306 13:52:20.263368 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-scheduler/keyset.yaml" | |
I0306 13:52:20.272592 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubecfg/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.272649 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kubecfg/" | |
I0306 13:52:20.282857 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.282909 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kubelet/" | |
I0306 13:52:20.302008 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-proxy-client/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.302064 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/apiserver-proxy-client/" | |
I0306 13:52:20.306637 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-controller-manager/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.306692 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kube-controller-manager/" | |
I0306 13:52:20.309678 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.309742 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator/" | |
I0306 13:52:20.316824 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet-api: [] | |
I0306 13:52:20.316882 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet-api/keyset.yaml" | |
I0306 13:52:20.326582 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kops: [] | |
I0306 13:52:20.326651 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kops/keyset.yaml" | |
I0306 13:52:20.333909 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/master/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.333969 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/master/" | |
I0306 13:52:20.339061 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-proxy: [] | |
I0306 13:52:20.339225 10006 keypair.go:181] creating brand new certificate | |
I0306 13:52:20.339253 10006 keypair.go:201] Creating PKI keypair "kube-proxy" | |
I0306 13:52:20.339297 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-proxy/keyset.yaml" | |
I0306 13:52:20.347588 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-scheduler/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.347645 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kube-scheduler/" | |
I0306 13:52:20.353941 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubecfg: [] | |
I0306 13:52:20.354007 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubecfg/keyset.yaml" | |
I0306 13:52:20.393864 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-controller-manager: [] | |
I0306 13:52:20.393937 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-controller-manager/keyset.yaml" | |
I0306 13:52:20.405312 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kops/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.405365 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kops/" | |
I0306 13:52:20.414811 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet-api/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.414869 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kubelet-api/" | |
I0306 13:52:20.425012 10006 vfs_castore.go:377] no certificate bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-proxy/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.425079 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kube-proxy/" | |
I0306 13:52:20.425553 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet: [] | |
I0306 13:52:20.425617 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet/keyset.yaml" | |
I0306 13:52:20.426157 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-proxy-client: [] | |
I0306 13:52:20.426157 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator: [] | |
I0306 13:52:20.426208 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-proxy-client/keyset.yaml" | |
I0306 13:52:20.426232 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator/keyset.yaml" | |
I0306 13:52:20.427433 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/master: [] | |
I0306 13:52:20.427497 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/master/keyset.yaml" | |
I0306 13:52:20.433137 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubecfg/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.433197 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kubecfg/" | |
I0306 13:52:20.435106 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-scheduler: [] | |
I0306 13:52:20.435161 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-scheduler/keyset.yaml" | |
I0306 13:52:20.489437 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-controller-manager/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.489499 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kube-controller-manager/" | |
I0306 13:52:20.503043 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.503106 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kubelet/" | |
I0306 13:52:20.510495 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.510544 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator/" | |
I0306 13:52:20.516888 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-proxy-client/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.517426 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/apiserver-proxy-client/" | |
I0306 13:52:20.529337 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-proxy: [] | |
I0306 13:52:20.529395 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-proxy/keyset.yaml" | |
I0306 13:52:20.529404 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet-api: [] | |
I0306 13:52:20.529440 10006 keypair.go:212] Creating privateKey "kubelet-api" | |
I0306 13:52:20.531243 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kops: [] | |
I0306 13:52:20.531291 10006 keypair.go:212] Creating privateKey "kops" | |
I0306 13:52:20.534218 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/master/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.534270 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/master/" | |
I0306 13:52:20.544640 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubecfg: [] | |
I0306 13:52:20.544704 10006 keypair.go:212] Creating privateKey "kubecfg" | |
I0306 13:52:20.574696 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-scheduler/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.574757 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kube-scheduler/" | |
I0306 13:52:20.596914 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet: [] | |
I0306 13:52:20.596965 10006 keypair.go:212] Creating privateKey "kubelet" | |
I0306 13:52:20.612117 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator: [] | |
I0306 13:52:20.612268 10006 keypair.go:212] Creating privateKey "apiserver-aggregator" | |
I0306 13:52:20.642358 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-proxy-client: [] | |
I0306 13:52:20.642649 10006 keypair.go:212] Creating privateKey "apiserver-proxy-client" | |
I0306 13:52:20.655226 10006 vfs_castore.go:831] no private key bundle "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-proxy/keyset.yaml", falling back to directory-list method | |
I0306 13:52:20.655290 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kube-proxy/" | |
I0306 13:52:20.656051 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-controller-manager: [] | |
I0306 13:52:20.656212 10006 keypair.go:212] Creating privateKey "kube-controller-manager" | |
I0306 13:52:20.668794 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/master: [] | |
I0306 13:52:20.668952 10006 keypair.go:212] Creating privateKey "master" | |
I0306 13:52:20.693202 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-scheduler: [] | |
I0306 13:52:20.693374 10006 keypair.go:212] Creating privateKey "kube-scheduler" | |
I0306 13:52:20.834269 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-proxy: [] | |
I0306 13:52:20.838856 10006 keypair.go:212] Creating privateKey "kube-proxy" | |
I0306 13:52:21.238418 10006 vfs_castore.go:729] Issuing new certificate: "kubecfg" | |
I0306 13:52:21.238577 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca/keyset.yaml" | |
I0306 13:52:21.363771 10006 vfs_castore.go:729] Issuing new certificate: "apiserver-aggregator" | |
I0306 13:52:21.396150 10006 vfs_castore.go:729] Issuing new certificate: "apiserver-proxy-client" | |
I0306 13:52:21.400836 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:21.423403 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/ca/keyset.yaml" | |
I0306 13:52:21.545376 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:21.545925 10006 privatekey.go:176] Parsing pem block: "RSA PRIVATE KEY" | |
I0306 13:52:21.553871 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kubecfg/" | |
I0306 13:52:21.554695 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator-ca/keyset.yaml" | |
I0306 13:52:21.665712 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:21.669904 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator-ca/keyset.yaml" | |
I0306 13:52:21.752070 10006 vfs_castore.go:729] Issuing new certificate: "kube-proxy" | |
I0306 13:52:21.767129 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:21.770934 10006 privatekey.go:176] Parsing pem block: "RSA PRIVATE KEY" | |
I0306 13:52:21.805186 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kube-proxy/" | |
I0306 13:52:21.818856 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator/" | |
I0306 13:52:21.822029 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/apiserver-proxy-client/" | |
I0306 13:52:22.096626 10006 vfs_castore.go:729] Issuing new certificate: "kops" | |
I0306 13:52:22.104581 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kops/" | |
I0306 13:52:22.338434 10006 vfs_castore.go:729] Issuing new certificate: "kube-scheduler" | |
I0306 13:52:22.341629 10006 vfs_castore.go:729] Issuing new certificate: "master" | |
I0306 13:52:22.346085 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kube-scheduler/" | |
I0306 13:52:22.365389 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/master/" | |
I0306 13:52:22.385284 10006 vfs_castore.go:729] Issuing new certificate: "kubelet-api" | |
I0306 13:52:22.393523 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kubelet-api/" | |
I0306 13:52:22.440236 10006 vfs_castore.go:729] Issuing new certificate: "kubelet" | |
I0306 13:52:22.448012 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kubelet/" | |
I0306 13:52:22.463046 10006 vfs_castore.go:729] Issuing new certificate: "kube-controller-manager" | |
I0306 13:52:22.470713 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/private/kube-controller-manager/" | |
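The "Issuing new certificate" lines above are where the leaf keypairs (kubecfg, kube-proxy, master, kubelet, and so on) are signed by the cluster CA that was read back from pki/issued/ca/keyset.yaml and pki/private/ca/keyset.yaml. Purely as an illustration of that signing step, and not of kops's actual code path, a client certificate with a subject such as o=system:masters,cn=kubecfg could be produced with the Python cryptography package roughly as follows; ca_cert and ca_key are assumed to be already-loaded CA objects:

```python
# Sketch: issue a client certificate signed by an existing CA, mirroring the
# "Issuing new certificate" step in the log. ca_cert / ca_key are assumed to be
# already-loaded x509.Certificate and RSA private-key objects; this is an
# illustration only, not kops's implementation.
import datetime
from cryptography import x509
from cryptography.x509.oid import NameOID, ExtendedKeyUsageOID
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa

def issue_client_cert(ca_cert, ca_key, common_name, organization=None, days=365):
    # Generate the leaf private key (cf. 'Creating privateKey "..."' above).
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    name_attrs = [x509.NameAttribute(NameOID.COMMON_NAME, common_name)]
    if organization:
        name_attrs.append(x509.NameAttribute(NameOID.ORGANIZATION_NAME, organization))
    now = datetime.datetime.utcnow()
    cert = (
        x509.CertificateBuilder()
        .subject_name(x509.Name(name_attrs))
        .issuer_name(ca_cert.subject)
        .public_key(key.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(now)
        .not_valid_after(now + datetime.timedelta(days=days))
        .add_extension(
            x509.ExtendedKeyUsage([ExtendedKeyUsageOID.CLIENT_AUTH]), critical=False
        )
        .sign(ca_key, hashes.SHA256())
    )
    return key, cert

# Example (hypothetical): key, cert = issue_client_cert(ca_cert, ca_key, "kubecfg", "system:masters")
```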
I0306 13:52:24.461538 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubecfg: [] | |
I0306 13:52:24.463353 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubecfg/keyset.yaml" | |
I0306 13:52:24.463401 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/kubecfg/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.477739 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator: [] | |
I0306 13:52:24.479423 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator/keyset.yaml" | |
I0306 13:52:24.479469 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.481212 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-proxy: [] | |
I0306 13:52:24.482858 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-proxy/keyset.yaml" | |
I0306 13:52:24.482903 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/kube-proxy/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.486902 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet-api: [] | |
I0306 13:52:24.488549 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet-api/keyset.yaml" | |
I0306 13:52:24.488599 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/kubelet-api/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.489048 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-controller-manager: [] | |
I0306 13:52:24.490806 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-controller-manager/keyset.yaml" | |
I0306 13:52:24.490855 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/kube-controller-manager/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.491630 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-proxy-client: [] | |
I0306 13:52:24.493393 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-proxy-client/keyset.yaml" | |
I0306 13:52:24.493438 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/apiserver-proxy-client/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.495421 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kops: [] | |
I0306 13:52:24.497495 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-scheduler: [] | |
I0306 13:52:24.497918 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kops/keyset.yaml" | |
I0306 13:52:24.498020 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/kops/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.499096 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet: [] | |
I0306 13:52:24.499452 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-scheduler/keyset.yaml" | |
I0306 13:52:24.499518 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/kube-scheduler/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.500910 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/master: [] | |
I0306 13:52:24.501022 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet/keyset.yaml" | |
I0306 13:52:24.501068 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/kubelet/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.502865 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/master/keyset.yaml" | |
I0306 13:52:24.502916 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/master/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.578040 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet-api/6801092491670949150423528169.key" | |
I0306 13:52:24.578093 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/kubelet-api/6801092491670949150423528169.key" SSE="-" ACL="" | |
I0306 13:52:24.589947 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-proxy/6801092488951346165475934608.key" | |
I0306 13:52:24.590000 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/kube-proxy/6801092488951346165475934608.key" SSE="-" ACL="" | |
I0306 13:52:24.596860 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-scheduler/6801092491469748979477226152.key" | |
I0306 13:52:24.596916 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/kube-scheduler/6801092491469748979477226152.key" SSE="-" ACL="" | |
I0306 13:52:24.602872 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubecfg/6801092486745260066320596157.key" | |
I0306 13:52:24.602928 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/kubecfg/6801092486745260066320596157.key" SSE="-" ACL="" | |
I0306 13:52:24.607204 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kube-controller-manager/6801092492004982993591548241.key" | |
I0306 13:52:24.607260 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/kube-controller-manager/6801092492004982993591548241.key" SSE="-" ACL="" | |
I0306 13:52:24.615509 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/master/6801092491483496327516416590.key" | |
I0306 13:52:24.615564 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/master/6801092491483496327516416590.key" SSE="-" ACL="" | |
I0306 13:52:24.616690 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator/6801092487283635027535772703.key" | |
I0306 13:52:24.616737 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/apiserver-aggregator/6801092487283635027535772703.key" SSE="-" ACL="" | |
I0306 13:52:24.640948 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/apiserver-proxy-client/6801092487422639087152138270.key" | |
I0306 13:52:24.641002 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/apiserver-proxy-client/6801092487422639087152138270.key" SSE="-" ACL="" | |
I0306 13:52:24.644218 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kops/6801092490431227699204540905.key" | |
I0306 13:52:24.644269 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/kops/6801092490431227699204540905.key" SSE="-" ACL="" | |
I0306 13:52:24.666643 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kubelet-api/" | |
I0306 13:52:24.693093 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kube-proxy/" | |
I0306 13:52:24.702509 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kube-controller-manager/" | |
I0306 13:52:24.704709 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kube-scheduler/" | |
I0306 13:52:24.708929 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kubecfg/" | |
I0306 13:52:24.714261 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/master/" | |
I0306 13:52:24.722013 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator/" | |
I0306 13:52:24.725745 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/apiserver-proxy-client/" | |
I0306 13:52:24.746138 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet-api: [] | |
I0306 13:52:24.747044 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet-api/keyset.yaml" | |
I0306 13:52:24.747095 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/kubelet-api/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.758167 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kops/" | |
I0306 13:52:24.778483 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-proxy: [] | |
I0306 13:52:24.779399 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-proxy/keyset.yaml" | |
I0306 13:52:24.779444 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/kube-proxy/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.788434 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-controller-manager: [] | |
I0306 13:52:24.789375 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-controller-manager/keyset.yaml" | |
I0306 13:52:24.789412 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/kube-controller-manager/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.790248 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-scheduler: [] | |
I0306 13:52:24.790248 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubecfg: [] | |
I0306 13:52:24.792340 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubecfg/keyset.yaml" | |
I0306 13:52:24.792398 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/kubecfg/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.795777 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-scheduler/keyset.yaml" | |
I0306 13:52:24.795922 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/kube-scheduler/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.801795 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubelet/6801092491907001064332572421.key" | |
I0306 13:52:24.802143 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/private/kubelet/6801092491907001064332572421.key" SSE="-" ACL="" | |
I0306 13:52:24.803157 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/master: [] | |
I0306 13:52:24.803431 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator: [] | |
I0306 13:52:24.804857 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator/keyset.yaml" | |
I0306 13:52:24.804986 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.806987 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/master/keyset.yaml" | |
I0306 13:52:24.807137 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/master/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.832980 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet-api/6801092491670949150423528169.crt" | |
I0306 13:52:24.833034 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/kubelet-api/6801092491670949150423528169.crt" SSE="-" ACL="" | |
I0306 13:52:24.839462 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kops: [] | |
I0306 13:52:24.840473 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kops/keyset.yaml" | |
I0306 13:52:24.840949 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/kops/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.840885 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-proxy-client: [] | |
I0306 13:52:24.842810 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-proxy-client/keyset.yaml" | |
I0306 13:52:24.842941 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/apiserver-proxy-client/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:24.882716 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-controller-manager/6801092492004982993591548241.crt" | |
I0306 13:52:24.882777 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/kube-controller-manager/6801092492004982993591548241.crt" SSE="-" ACL="" | |
I0306 13:52:24.887044 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-scheduler/6801092491469748979477226152.crt" | |
I0306 13:52:24.887100 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/kube-scheduler/6801092491469748979477226152.crt" SSE="-" ACL="" | |
I0306 13:52:24.891147 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-proxy/6801092488951346165475934608.crt" | |
I0306 13:52:24.891196 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/kube-proxy/6801092488951346165475934608.crt" SSE="-" ACL="" | |
I0306 13:52:24.896502 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubecfg/6801092486745260066320596157.crt" | |
I0306 13:52:24.896554 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/kubecfg/6801092486745260066320596157.crt" SSE="-" ACL="" | |
I0306 13:52:24.897842 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator/6801092487283635027535772703.crt" | |
I0306 13:52:24.897891 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator/6801092487283635027535772703.crt" SSE="-" ACL="" | |
I0306 13:52:24.920382 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet-api/6801092491670949150423528169.crt" | |
I0306 13:52:24.928749 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/master/6801092491483496327516416590.crt" | |
I0306 13:52:24.928801 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/master/6801092491483496327516416590.crt" SSE="-" ACL="" | |
I0306 13:52:24.931408 10006 s3fs.go:257] Listing objects in S3 bucket "test1.dev.fra1-state-store" with prefix "test1.dev.fra1.do.services.example.com/pki/issued/kubelet/" | |
I0306 13:52:24.937975 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-proxy-client/6801092487422639087152138270.crt" | |
I0306 13:52:24.938034 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/apiserver-proxy-client/6801092487422639087152138270.crt" SSE="-" ACL="" | |
I0306 13:52:24.944232 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kops/6801092490431227699204540905.crt" | |
I0306 13:52:24.944283 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/kops/6801092490431227699204540905.crt" SSE="-" ACL="" | |
I0306 13:52:24.983825 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubecfg/6801092486745260066320596157.crt" | |
I0306 13:52:24.993172 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-proxy/6801092488951346165475934608.crt" | |
I0306 13:52:25.002899 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:25.003155 10006 keypair.go:230] created certificate with cn=kubelet-api | |
I0306 13:52:25.025991 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-proxy-client/6801092487422639087152138270.crt" | |
I0306 13:52:25.028971 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/apiserver-aggregator/6801092487283635027535772703.crt" | |
I0306 13:52:25.038567 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kops/6801092490431227699204540905.crt" | |
I0306 13:52:25.065927 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:25.066155 10006 keypair.go:230] created certificate with cn=kubecfg | |
I0306 13:52:25.076713 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:25.076942 10006 keypair.go:230] created certificate with cn=system:kube-proxy | |
I0306 13:52:25.090982 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/master/6801092491483496327516416590.crt" | |
I0306 13:52:25.104320 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:25.104535 10006 keypair.go:230] created certificate with cn=apiserver-proxy-client | |
I0306 13:52:25.116877 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:25.117111 10006 keypair.go:230] created certificate with cn=kops | |
I0306 13:52:25.117139 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:25.117397 10006 keypair.go:230] created certificate with cn=aggregator | |
I0306 13:52:25.171844 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:25.172141 10006 keypair.go:230] created certificate with cn=kubernetes-master | |
I0306 13:52:25.231212 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-scheduler/6801092491469748979477226152.crt" | |
I0306 13:52:25.309090 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:25.309358 10006 keypair.go:230] created certificate with cn=system:kube-scheduler | |
I0306 13:52:25.674540 10006 s3fs.go:285] Listed files in do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet: [] | |
I0306 13:52:25.675424 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet/keyset.yaml" | |
I0306 13:52:25.675469 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/kubelet/keyset.yaml" SSE="-" ACL="" | |
I0306 13:52:25.689970 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kube-controller-manager/6801092492004982993591548241.crt" | |
I0306 13:52:25.766293 10006 s3fs.go:128] Writing file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet/6801092491907001064332572421.crt" | |
I0306 13:52:25.766346 10006 s3fs.go:166] Calling S3 PutObject Bucket="test1.dev.fra1-state-store" Key="test1.dev.fra1.do.services.example.com/pki/issued/kubelet/6801092491907001064332572421.crt" SSE="-" ACL="" | |
I0306 13:52:25.767150 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:25.767369 10006 keypair.go:230] created certificate with cn=system:kube-controller-manager | |
I0306 13:52:25.858193 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubelet/6801092491907001064332572421.crt" | |
I0306 13:52:25.941434 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:25.941695 10006 keypair.go:230] created certificate with cn=kubelet | |
I0306 13:52:25.941761 10006 executor.go:103] Tasks: 50 done / 50 total; 0 can run | |
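All 50 tasks are reported done here, so every key and keyset.yaml written above has landed in the Spaces-backed state store. The uploaded PKI tree can be inspected with any S3-compatible client pointed at the Spaces endpoint; a minimal sketch with the AWS CLI follows (the bucket name and prefix come from the log above, while the fra1 endpoint URL and the exported Spaces access/secret keys are assumptions):

# List everything kops wrote under the cluster's PKI prefix in the state store.
# --endpoint-url points the AWS CLI at DigitalOcean Spaces instead of AWS S3.
aws s3 ls --recursive \
  --endpoint-url https://fra1.digitaloceanspaces.com \
  s3://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/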
I0306 13:52:25.941792 10006 dns.go:155] Pre-creating DNS records | |
I0306 13:52:27.485946 10006 dns.go:231] Pre-creating DNS record api.test1.dev.fra1.do.services.example.com. => 203.0.113.123 | |
I0306 13:52:27.485983 10006 dns.go:231] Pre-creating DNS record api.internal.test1.dev.fra1.do.services.example.com. => 203.0.113.123 | |
I0306 13:52:27.486001 10006 dns.go:317] applying changes in record change set | |
I0306 13:52:29.823393 10006 dns.go:331] record change set additions complete | |
I0306 13:52:29.823427 10006 dns.go:365] record change sets successfully applied | |
I0306 13:52:29.823446 10006 dns.go:247] Pre-created DNS names: [api.test1.dev.fra1.do.services.example.com. api.internal.test1.dev.fra1.do.services.example.com.] | |
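With the record change set applied, the two pre-created API names should resolve shortly. A minimal check from the workstation, assuming the test1.dev.fra1.do.services.example.com zone is delegated and publicly resolvable:

# Both names should return the master address shown in the log above.
dig +short api.test1.dev.fra1.do.services.example.com
dig +short api.internal.test1.dev.fra1.do.services.example.com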
I0306 13:52:29.823466 10006 context.go:91] deleting temp dir: "/tmp/deploy716689444" | |
I0306 13:52:29.823732 10006 kubectl.go:131] Running command: kubectl config view --output json | |
I0306 13:52:30.063482 10006 kubectl.go:83] config = "{\n \"kind\": \"Config\",\n \"apiVersion\": \"v1\",\n \"preferences\": {},\n \"clusters\": [\n {\n \"name\": \"test1.dev.sfo1.do.services.example.com\",\n \"cluster\": {\n \"server\": \"https://api.test1.dev.sfo1.do.services.example.com\",\n \"certificate-authority-data\": \"DATA+OMITTED\"\n }\n }\n ],\n \"users\": [\n {\n \"name\": \"test1.dev.sfo1.do.services.example.com\",\n \"user\": {\n \"client-certificate-data\": \"REDACTED\",\n \"client-key-data\": \"REDACTED\",\n \"username\": \"admin\",\n \"password\": \"REDACTED\"\n }\n },\n {\n \"name\": \"test1.dev.sfo1.do.services.example.com-basic-auth\",\n \"user\": {\n \"username\": \"admin\",\n \"password\": \"REDACTED\"\n }\n }\n ],\n \"contexts\": [\n {\n \"name\": \"test1.dev.sfo1.do.services.example.com\",\n \"context\": {\n \"cluster\": \"test1.dev.sfo1.do.services.example.com\",\n \"user\": \"test1.dev.sfo1.do.services.example.com\"\n }\n }\n ],\n \"current-context\": \"\"\n}" | |
I0306 13:52:30.063995 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubecfg/keyset.yaml" | |
I0306 13:52:30.173046 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:30.173291 10006 update_cluster.go:305] Exporting kubecfg for cluster | |
I0306 13:52:30.173352 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca/keyset.yaml" | |
I0306 13:52:30.257349 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:30.257573 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/ca/keyset.yaml" | |
I0306 13:52:30.374604 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:30.374829 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/ca/keyset.yaml" | |
I0306 13:52:30.457672 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:30.457897 10006 privatekey.go:176] Parsing pem block: "RSA PRIVATE KEY" | |
I0306 13:52:30.458166 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/issued/kubecfg/keyset.yaml" | |
I0306 13:52:30.537517 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:30.537790 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/pki/private/kubecfg/keyset.yaml" | |
I0306 13:52:30.620306 10006 certificate.go:103] Parsing pem block: "CERTIFICATE" | |
I0306 13:52:30.620551 10006 privatekey.go:176] Parsing pem block: "RSA PRIVATE KEY" | |
I0306 13:52:30.620885 10006 s3fs.go:220] Reading file "do://test1.dev.fra1-state-store/test1.dev.fra1.do.services.example.com/secrets/kube" | |
I0306 13:52:30.727412 10006 loader.go:359] Config loaded from file: /home/mirek/.kube/config | |
I0306 13:52:30.729281 10006 loader.go:359] Config loaded from file: /home/mirek/.kube/config | |
I0306 13:52:30.731028 10006 loader.go:359] Config loaded from file: /home/mirek/.kube/config | |
I0306 13:52:30.732707 10006 loader.go:359] Config loaded from file: /home/mirek/.kube/config | |
I0306 13:52:30.738747 10006 loader.go:359] Config loaded from file: /home/mirek/.kube/config | |
I0306 13:52:30.744116 10006 loader.go:359] Config loaded from file: /home/mirek/.kube/config | |
I0306 13:52:30.753621 10006 loader.go:359] Config loaded from file: /home/mirek/.kube/config | |
I0306 13:52:30.761576 10006 loader.go:359] Config loaded from file: /home/mirek/.kube/config | |
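At this point kops has merged credentials for the new cluster into ~/.kube/config and, as the next line reports, switched the current context to it. A quick sanity check of the exported kubeconfig (a minimal sketch; the API server will refuse connections until the master has finished booting, which is expected):

# Confirm the context kops just created and selected.
kubectl config get-contexts
kubectl config current-context

# This fails with a connection error until the master is up; that is expected.
kubectl --context test1.dev.fra1.do.services.example.com get nodes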
kops has set your kubectl context to test1.dev.fra1.do.services.example.com | |
Cluster is starting. It should be ready in a few minutes. | |
Suggestions: | |
* validate cluster: kops validate cluster | |
* list nodes: kubectl get nodes --show-labels | |
* ssh to the master: ssh -i ~/.ssh/id_rsa admin@api.test1.dev.fra1.do.services.example.com | |
* the admin user is specific to Debian. If not using Debian please use the appropriate user based on your OS. | |
* read about installing addons at: https://github.com/kubernetes/kops/blob/master/docs/addons.md. |
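Because the state store lives in DigitalOcean Spaces rather than AWS S3, the suggested follow-up commands need the same S3-compatible environment the create run used. A minimal sketch of the validation loop (KOPS_STATE_STORE and S3_ENDPOINT mirror the values from this run; the Spaces access/secret keys are assumed to still be exported):

# Re-point kops at the Spaces-backed state store, then validate until the
# master and nodes report Ready (this can take several minutes after create).
export KOPS_STATE_STORE=do://test1.dev.fra1-state-store
export S3_ENDPOINT=https://fra1.digitaloceanspaces.com
kops validate cluster --name test1.dev.fra1.do.services.example.com

# Once validation passes, inspect the nodes and log in to the master.
kubectl get nodes --show-labels
ssh -i ~/.ssh/id_rsa admin@api.test1.dev.fra1.do.services.example.com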