# Gist by @ams0, created February 16, 2025 22:52
# This file can be regenerated by running the following commands:
# cd examples/omni/apps/kube-system/cilium/ && helm dependency build
# helm template cilium . --namespace kube-system | yq -i 'with(.cluster.inlineManifests.[] | select(.name=="cilium"); .contents=load_str("/dev/stdin"))' ../../../infra/patches/cilium.yaml
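#
# The yq expression above reads the rendered chart from stdin (load_str) and
# writes it, in place (-i), into the .contents field of the inlineManifests
# entry named "cilium" in ../../../infra/patches/cilium.yaml.
#
# The patch populates .cluster.inlineManifests in an Omni cluster template, so
# the rendered Cilium manifests are applied when the Talos cluster comes up.
# A minimal usage sketch, assuming omnictl is configured and a template file
# (name illustrative) pulls in this patch:
#
#   omnictl cluster template sync --file cluster-template.yaml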
cluster:
  inlineManifests:
    - name: cilium
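      # contents below is the entire `helm template` output stored as a single
      # double-quoted string. To extract it back out as plain YAML for
      # inspection, a sketch assuming yq v4:
      #   yq '.cluster.inlineManifests.[] | select(.name == "cilium") | .contents' cilium.yaml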
contents: "---\n# Source: cilium/charts/cilium/templates/cilium-secrets-namespace.yaml\napiVersion: v1\nkind: Namespace\nmetadata:\n name: \"cilium-secrets\"\n labels:\n app.kubernetes.io/part-of: cilium\n---\n# Source: cilium/charts/cilium/templates/cilium-agent/serviceaccount.yaml\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: \"cilium\"\n namespace: kube-system\n---\n# Source: cilium/charts/cilium/templates/cilium-envoy/serviceaccount.yaml\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: \"cilium-envoy\"\n namespace: kube-system\n---\n# Source: cilium/charts/cilium/templates/cilium-operator/serviceaccount.yaml\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: \"cilium-operator\"\n namespace: kube-system\n---\n# Source: cilium/charts/cilium/templates/hubble-relay/serviceaccount.yaml\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: \"hubble-relay\"\n namespace: kube-system\nautomountServiceAccountToken: false\n---\n# Source: cilium/charts/cilium/templates/hubble-ui/serviceaccount.yaml\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: \"hubble-ui\"\n namespace: kube-system\n---\n# Source: cilium/charts/cilium/templates/cilium-ca-secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: cilium-ca\n namespace: kube-system\ndata:\n ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGRENDQWZ5Z0F3SUJBZ0lSQU12MTBmOWQwUHZ4MGd5Z1BQa3dIV1V3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSlEybHNhWFZ0SUVOQk1CNFhEVEkxTURJeE5qSXlORGN6T0ZvWERUSTRNREl4TmpJeQpORGN6T0Zvd0ZERVNNQkFHQTFVRUF4TUpRMmxzYVhWdElFTkJNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUEvQTV6a2FhS3JnUmhFeko4UVo4L0JXSXZURllYN3BLV2o5YjNIOUxQM2RDcEhDV3AKRVpwNm5DTHQ2TG9qZXNzSkM4bmI5bWdGTWwxcVh0OS82VG1IWktHY0VrV29JOWo1SXR2SWFjUXo4QVd6R2JWZAp4ZlZWekVEWHJWVVZaMTV6NXlkZ0FZZVhxQW1wbFplK3o0MXVJakgxSkgvKzlCUUdQcmNYNXZsb3ZaUlVGTm1MCjM5cy9Lc2lneERwUmRvMjlhVWZadWpwWjVjYmRhM013MXB0eTJFQWFmRUN5QS9DTW5sdmNoenppTncxeVB2dlcKMG9FaXduckY3WWYxVVVRcElOSVlRb0l2Rk5YdU1FaGljdzYrdWVUazNPTmluN2xVdU1UQTlDaUFnZUFjajlYNQpnTmFCcCtyY2QzYzNOUkFENndMbTVSRFgxbklFWFcwMHRFWGdSUUlEQVFBQm8yRXdYekFPQmdOVkhROEJBZjhFCkJBTUNBcVF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUYKTUFNQkFmOHdIUVlEVlIwT0JCWUVGUHFmSkhoN1JoS2Z6QmxFM0lGUXhaVXJOMkZwTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRRFVRK0sydzRORkdld2lUSkYraGtqVkprUnRCMDdPVWdER00ramF4bnMrbFNWeldaaXp1NDZ2ClRENnFvMFJ1NVQvV3hHRkhaMkZ3Y2dmcHVRdkdQVjQwYTN2TUVzcXNPTUlheGpyc05sNzRhdS9leU5paWI0QXoKZDRUWmRWZm15WDJBRWc4ckZmdE13YXIySWlkSURxYmhXU1l5ZnVkU1ErQzhqd0xWY2JGSEs0MEIwbGEyUmxmVwoxMDRTbUVWTjNSOXdiTVRraDdHTjRRV1dvSVN0dE8xdC9zbFE1bi9CTS9iSW9ha21GcVIxL3lZNmYrZXhhb1hyCnlybnVGU0szSnZNOE9vTVY3KzVYcThHWHlGenU4UjV6djhldVpqYTVuVEhHSm1iaFVtZWdoeE1ROGlVNTMwVFUKeUtBb1IwSXJCNm1Tdm1WNi9jZ3JNUlVnSG5rNGoxVG4KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n ca.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBL0E1emthYUtyZ1JoRXpKOFFaOC9CV0l2VEZZWDdwS1dqOWIzSDlMUDNkQ3BIQ1dwCkVacDZuQ0x0NkxvamVzc0pDOG5iOW1nRk1sMXFYdDkvNlRtSFpLR2NFa1dvSTlqNUl0dklhY1F6OEFXekdiVmQKeGZWVnpFRFhyVlVWWjE1ejV5ZGdBWWVYcUFtcGxaZSt6NDF1SWpIMUpILys5QlFHUHJjWDV2bG92WlJVRk5tTAozOXMvS3NpZ3hEcFJkbzI5YVVmWnVqcFo1Y2JkYTNNdzFwdHkyRUFhZkVDeUEvQ01ubHZjaHp6aU53MXlQdnZXCjBvRWl3bnJGN1lmMVVVUXBJTklZUW9JdkZOWHVNRWhpY3c2K3VlVGszT05pbjdsVXVNVEE5Q2lBZ2VBY2o5WDUKZ05hQnArcmNkM2MzTlJBRDZ3TG01UkRYMW5JRVhXMDB0RVhnUlFJREFRQUJBb0lCQVFDdThwamtqMlBTUk5mYwpLejV6amNTQVNCWHNOaFdHVWRpUEhmV0k4Um0yTEE0MHF5OGVPTXJCZEwwTXU2WEVwUWd5Vm5tR0hjejltRGtzCmdKU1huSWFOSWw3aGo5ZVVpaFRpVFFwZDN3RHRxQWNYNFBxbFhCT2hJYUNSZm16TXdwSlA3dFNFTzBqWUtWeTcKcUFlWXArRzVlbE5jY2pyVHp0SkcxVzZSc3Fzd2FWTVNGa3J1bTI4M1NTWVYwV3g0eDdZYzBlOEJ0VU9wUGQ2ZQpQUzdtQmFqSy96OEhvc0hyY0kwWXFoUjUyRUZVejJJN3RZVWx4QlovQnEvam1UNzZRbklQS0FWNFNyMXY3UDI4CnFSNXJrbjhwdGFlNkF2VFVGTnFIb1F6SnV0eVVSclhqYytJMXhCbXpOeklnbk1aOVZEU3oyTkp5eVM0MXJiaHAKVENjbC9FNUJBb0dCQVA2ZUw3MVlaaUJXSGRMZDVqR0NldEhRMlhWbm5oWWRuMGZwdklGZ2VXb3lYZzB3UmMxYwphelVHaWVaNzYyTVZYOUhUUEJaM3VEMmxpbUQvaXFaNUZTZ1V4a3JsbWhQM3hIdVBpQjljbFkzclp4c3NDQmFlCjZPSjRkZnl2d1hGcnR1V0tOR0ZuNitxQUIvTDVEL3FJWnB0dmNjbkVJYmFEYnBQeEJ5aVJQZCtUQW9HQkFQMXMKdEtFSFNWN1dndGE2NG1KWmlDcURDNHVQNkZLODQrcGFCdTZuMEFKQVJDVTEwWmsweWVpeDVKM2VDekt6QWd6RwpCWEVrRXpRbjJiVjJjb2gwT1k5bWZENUhTMlRrR2I3QWxyQkpieUtaeVprb2Q1OXZteVdrWDVTU2hPc3B4YjlMClJZa1liajJFNWdpYkMwS21rWWphbERyZVV5cFYwK2ZmNXM5dkdiZkhBb0dBTUlWaW9abWI5S09Va1YrYUEydFQKRG5MWFRlbE9GZmJWS0xZaEp5S0VZTGx5ZE43ekhseHpUYXFPVXU3Z2Y0Uk9PMFVlMlVVOVJmb3E4YXNnOS9KeApGcTlHem5VTXRLdHR0UUk0TlluR01rSmU1cTJ3YU9jbGlDcmVpU1hSSGxVaGlJS2Ewc2trQ3c4VTlMZUQxUUZ6ClZFR2JsSkNjd0JReDJWMkFPTG5jNmFNQ2dZQXMzeGFNZkpITkxRWTFweGwyZUl2WG15UnNMYXJoRzNqdmdjbEEKY0k0d0Y5ajVEYkp6QWhuT0RkR0E4bFBoUWpqV3NpazhnenF6UFdzYTZrSVZUeDd4K1I5K2wza09BQ1ZlMVJMWgpXK2RGcFIxQTlSMVpLWlc2OElQTDlyVjZQZ2M1ZThad0k5ZEJKVXJsc3k0bk92OEIwRkgycW03blRCTWROMVBTCjNzVWErUUtCZ1FEb05Cemx2blJCeU4zRnhjS2NsNFBJODJTSERnUTkzcDRiS3FQYU9HM0lvcms0d240WWNLSzMKaVRSdE1GbDZIYXBmdVJFejNmL1IwL3dDWStyZzN6TUczQXduRHFMRWU4dDV6V283Q0ZsNnpXcmpkSW1QN29kaQpVVTNXWlRMRzdOQXJ3Tjc4c3FOMVRUaFhRLzdhSWtDMEZ3NWloU1RmdENjUC8rSmJTT1lRSnc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=\n---\n# Source: cilium/charts/cilium/templates/hubble/tls-helm/relay-client-secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: hubble-relay-client-certs\n namespace: kube-system\ntype: kubernetes.io/tls\ndata:\n ca.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGRENDQWZ5Z0F3SUJBZ0lSQU12MTBmOWQwUHZ4MGd5Z1BQa3dIV1V3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSlEybHNhWFZ0SUVOQk1CNFhEVEkxTURJeE5qSXlORGN6T0ZvWERUSTRNREl4TmpJeQpORGN6T0Zvd0ZERVNNQkFHQTFVRUF4TUpRMmxzYVhWdElFTkJNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUEvQTV6a2FhS3JnUmhFeko4UVo4L0JXSXZURllYN3BLV2o5YjNIOUxQM2RDcEhDV3AKRVpwNm5DTHQ2TG9qZXNzSkM4bmI5bWdGTWwxcVh0OS82VG1IWktHY0VrV29JOWo1SXR2SWFjUXo4QVd6R2JWZAp4ZlZWekVEWHJWVVZaMTV6NXlkZ0FZZVhxQW1wbFplK3o0MXVJakgxSkgvKzlCUUdQcmNYNXZsb3ZaUlVGTm1MCjM5cy9Lc2lneERwUmRvMjlhVWZadWpwWjVjYmRhM013MXB0eTJFQWFmRUN5QS9DTW5sdmNoenppTncxeVB2dlcKMG9FaXduckY3WWYxVVVRcElOSVlRb0l2Rk5YdU1FaGljdzYrdWVUazNPTmluN2xVdU1UQTlDaUFnZUFjajlYNQpnTmFCcCtyY2QzYzNOUkFENndMbTVSRFgxbklFWFcwMHRFWGdSUUlEQVFBQm8yRXdYekFPQmdOVkhROEJBZjhFCkJBTUNBcVF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUYKTUFNQkFmOHdIUVlEVlIwT0JCWUVGUHFmSkhoN1JoS2Z6QmxFM0lGUXhaVXJOMkZwTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRRFVRK0sydzRORkdld2lUSkYraGtqVkprUnRCMDdPVWdER00ramF4bnMrbFNWeldaaXp1NDZ2ClRENnFvMFJ1NVQvV3hHRkhaMkZ3Y2dmcHVRdkdQVjQwYTN2TUVzcXNPTUlheGpyc05sNzRhdS9leU5paWI0QXoKZDRUWmRWZm15WDJBRWc4ckZmdE13YXIySWlkSURxYmhXU1l5ZnVkU1ErQzhqd0xWY2JGSEs0MEIwbGEyUmxmVwoxMDRTbUVWTjNSOXdiTVRraDdHTjRRV1dvSVN0dE8xdC9zbFE1bi9CTS9iSW9ha21GcVIxL3lZNmYrZXhhb1hyCnlybnVGU0szSnZNOE9vTVY3KzVYcThHWHlGenU4UjV6djhldVpqYTVuVEhHSm1iaFVtZWdoeE1ROGlVNTMwVFUKeUtBb1IwSXJCNm1Tdm1WNi9jZ3JNUlVnSG5rNGoxVG4KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURTVENDQWpHZ0F3SUJBZ0lSQUxFek1PRVErUVMrM1lCSVNxUUZGbk13RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSlEybHNhWFZ0SUVOQk1CNFhEVEkxTURJeE5qSXlORGN6T1ZvWERUSTJNREl4TmpJeQpORGN6T1Zvd0l6RWhNQjhHQTFVRUF3d1lLaTVvZFdKaWJHVXRjbVZzWVhrdVkybHNhWFZ0TG1sdk1JSUJJakFOCkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXhTMWlYdExBS2I5M3VCMTM3U2pVR1JIN2xSSm0KS0JpRENQV0x1UkdVR2d2MTRkU2hxZExLS09ydGF3V2d6MEJCd25KdURCeC9sWUFUZzRabXpuUWx1Z0pjOW1laApLN2Nia096Tk01cFduSlJKQzZmUytKWG9oS3BkMTNBMFE0Q2lOZ0FVUGh3UGd5bzZqV3hFWWYwTUlqQW84ZU4wCkt5dytqdDdLV2VocFkyd1lTRW5LRjBxaGZmM2syUjY0UmtLMCtvTVBrZTBzNnBBZXVhTkF2d1pkMFFFTXVwWmgKUE00YW9iMG1UTmdLci9GQUFxSWhZa1FVb21OL3RaQ1dLTmgyeUlKUW9FSjhqZlFpMHBCYlY3VUpRU1NWdlM3aQo0akU1UmFWYXVpYlQwVndRUVp1ZFJlZWp3bUlFeFQ4VVYwdjAraHBGdmJvblNqNzI2aE8wakpWYkN3SURBUUFCCm80R0dNSUdETUE0R0ExVWREd0VCL3dRRUF3SUZvREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUIKQlFVSEF3SXdEQVlEVlIwVEFRSC9CQUl3QURBZkJnTlZIU01FR0RBV2dCVDZueVI0ZTBZU244d1pSTnlCVU1XVgpLemRoYVRBakJnTlZIUkVFSERBYWdoZ3FMbWgxWW1Kc1pTMXlaV3hoZVM1amFXeHBkVzB1YVc4d0RRWUpLb1pJCmh2Y05BUUVMQlFBRGdnRUJBQjRudVBzQXR4OGpLTGdkZXlmSDdnTndZRVJldGJuK1Naa0tHUG5GOHJGdG9aY3MKSnFpamFIR2dyNStGTklpcG5jSlRJeDZ6Tkw5eUpSYW1LR2FTbHFPOExpc0hCcDBTenJWNEx2TVdDVEd5QUwzbwp4Tk5VRFVsU29vSVVIcHdoZldvVDNQYUZkNndDSGp5dTVOM09TTjQ1K2xtblVrSTVxcHdZcTY4UVJvRldUUkxTCm0zcENKd2NpcW00VCtrcmE2SmtwVWRlWWUzUVFvS1RKU29jV2Q5QmxJNThsVjZ4Mk5DYkF3UkIwTmJ2c2poam8KUjkxMlNlZkt1U0VXemdoZUJwcWxNUlhPdDlQSEppZDMxQk4wZTlXYnlHVUJaS0hnSjVRSWxPMGZVbGJjZm9JSwpqc3NIcGVEVUhtZExsRzFwbjNIZFJSNXI3b3dRRTBhVWRReCtTRVE9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K\n tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeFMxaVh0TEFLYjkzdUIxMzdTalVHUkg3bFJKbUtCaURDUFdMdVJHVUdndjE0ZFNoCnFkTEtLT3J0YXdXZ3owQkJ3bkp1REJ4L2xZQVRnNFptem5RbHVnSmM5bWVoSzdjYmtPek5NNXBXbkpSSkM2ZlMKK0pYb2hLcGQxM0EwUTRDaU5nQVVQaHdQZ3lvNmpXeEVZZjBNSWpBbzhlTjBLeXcranQ3S1dlaHBZMndZU0VuSwpGMHFoZmYzazJSNjRSa0swK29NUGtlMHM2cEFldWFOQXZ3WmQwUUVNdXBaaFBNNGFvYjBtVE5nS3IvRkFBcUloCllrUVVvbU4vdFpDV0tOaDJ5SUpRb0VKOGpmUWkwcEJiVjdVSlFTU1Z2UzdpNGpFNVJhVmF1aWJUMFZ3UVFadWQKUmVlandtSUV4VDhVVjB2MCtocEZ2Ym9uU2o3MjZoTzBqSlZiQ3dJREFRQUJBb0lCQVFDZDRtcGJYVXdMNGNtKwpQckp4ejNpT0hvMGgzOXEzRG5HV01lL1FvQmZFQjRHNmMrV0RNMHMzR201RGZQMDRGczdSSVltbTN3YjJNZGp1ClB5ZUE5eFhpYk5QOVpodHpBcXJyRUFKNzZJQ2oza1RqTlYzRU9WMDNKYWdBdzFMR0huRkU1aWxQVm03QmE0WDUKQ2hGem1Ra25Nd01HTlVnbTdjV2w1eFdHaW9nZzE5eUJaNWxjUjU5ZnBqTm95VGxuM2VrNlUwaUMwSFkySmpkeAp1ZGNmNXBsNEQrWUtiTnpHMGswQ242RjZsNnQ0WHdKcnA1WW1tQjVQelV3RThraS9SR2FvN2w3TUl0d0k0cmhJCnpKVmN6THdOS0dNNVpJUEtZRFJ3SWQ4V3pJSmNGMmwyTTdGZ2RqWjRtb2R5cHl6N09rclRPZmdhcjViT2ZZQVQKb0dxUzBxQ0pBb0dCQU40VXc1eStYVjdrVE5TelVoSERCTWVCRWNlZlBqMnFYTzJpQmNHbHNldXNVajlYK3dmegpjaVdyOEpUREdTUm1icnU4blN5YnhNckxHd1ZnK0psRXFwQW9pZzAwakVVRSt1bWZ2UnFJdXl6V2UxQU1WYy8zCjdsamdUSHB4YnhGTUF2Tmw3MlJUN3Z3dXVONTd6RVp6YytkbGNvcTUvWXcyMEQ4KzBTdU42SmJkQW9HQkFPTksKNVV2QksvOUVzdGtuZ2pUTEpGd2Ztd2tUVzhkS0YxUW9ieTZ2bEMzdWFOMjNXOFgvSzFJZCtCQXNhYTREODhtZwpIZjRnVWcvYVI4TDQ4VzI1YjBhVWVLNkpYbXA4VFFORUhwa2Z0ajBFajRESE4ySzlYSE1jSlhlYlJCV01EcEcxCjUyc0tQUFRWOVF2Y01MMGxSODkyWUYwbHdudFR6aTcrQkxFTXN2Y0hBb0dBVTVabkplajB4ZEdTSllDMUJDQnQKL1JWVWR0azJHUW0xWG0vY1VNMm4wUGVVSVdsZy85WWVnWlZjd1BGSjFreURvei9Udk1XSVpYZmEzTVJwN1c1Tgo4R0tCeTRTYXd5T2p1U2s1aVBJcjJTMkZUbFRHTzdlcXkyTHBCakVKeCszZk1uT01qaXVWU08vZy8wU0RWNFQrCisvcFFDVzBGeEsrZzVCMGxwNG9MSURVQ2dZRUFvR05yWm9lTHFuZ3AxSDdDck1wa2YvVjFpM1B6b2Erd3dwdzkKeUszeFhIQU40OWJObGV3My81eFNQZWQ1TTJocWtXN0JlMkFIU0ZSNXF5WE1CK2VBbEVlVTJXVHFFUzVoa2RweAoxUS9CbzFFaWluY0pxRTJEZlY3eE9YT0JHY3RaUkkwZUhqUjlTeS9uMlZTcHQrOGt1MHNIT1BPNlJ4aWVPQjdiCjVLdlhaZVVDZ1lBUnRQeksvYUNFV3NDdHpreGlGTFg2bTJLWENrdWEwVUZTSmNtT2N0S1N6aGlTMy9TRGF2Y0QKNmdVL08weXBMQ1FxVDdWU2tMaElRbUFNSS8vaFNoR0p4b25LT2NicDJzckEvelhoai8rMkVGT0R0enV4RHJZRApsakxDLytyVXBWOU56ci96R1NwOWU3ellramxQRHBLUjdpVm1KSkQ3RXE0VUpkMS9GYi9jbWc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=\n---\n# Source: cilium/charts/cilium/templates/hubble/tls-helm/server-secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: hubble-server-certs\n namespace: kube-system\ntype: kubernetes.io/tls\ndata:\n ca.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGRENDQWZ5Z0F3SUJBZ0lSQU12MTBmOWQwUHZ4MGd5Z1BQa3dIV1V3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSlEybHNhWFZ0SUVOQk1CNFhEVEkxTURJeE5qSXlORGN6T0ZvWERUSTRNREl4TmpJeQpORGN6T0Zvd0ZERVNNQkFHQTFVRUF4TUpRMmxzYVhWdElFTkJNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUEvQTV6a2FhS3JnUmhFeko4UVo4L0JXSXZURllYN3BLV2o5YjNIOUxQM2RDcEhDV3AKRVpwNm5DTHQ2TG9qZXNzSkM4bmI5bWdGTWwxcVh0OS82VG1IWktHY0VrV29JOWo1SXR2SWFjUXo4QVd6R2JWZAp4ZlZWekVEWHJWVVZaMTV6NXlkZ0FZZVhxQW1wbFplK3o0MXVJakgxSkgvKzlCUUdQcmNYNXZsb3ZaUlVGTm1MCjM5cy9Lc2lneERwUmRvMjlhVWZadWpwWjVjYmRhM013MXB0eTJFQWFmRUN5QS9DTW5sdmNoenppTncxeVB2dlcKMG9FaXduckY3WWYxVVVRcElOSVlRb0l2Rk5YdU1FaGljdzYrdWVUazNPTmluN2xVdU1UQTlDaUFnZUFjajlYNQpnTmFCcCtyY2QzYzNOUkFENndMbTVSRFgxbklFWFcwMHRFWGdSUUlEQVFBQm8yRXdYekFPQmdOVkhROEJBZjhFCkJBTUNBcVF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUYKTUFNQkFmOHdIUVlEVlIwT0JCWUVGUHFmSkhoN1JoS2Z6QmxFM0lGUXhaVXJOMkZwTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRRFVRK0sydzRORkdld2lUSkYraGtqVkprUnRCMDdPVWdER00ramF4bnMrbFNWeldaaXp1NDZ2ClRENnFvMFJ1NVQvV3hHRkhaMkZ3Y2dmcHVRdkdQVjQwYTN2TUVzcXNPTUlheGpyc05sNzRhdS9leU5paWI0QXoKZDRUWmRWZm15WDJBRWc4ckZmdE13YXIySWlkSURxYmhXU1l5ZnVkU1ErQzhqd0xWY2JGSEs0MEIwbGEyUmxmVwoxMDRTbUVWTjNSOXdiTVRraDdHTjRRV1dvSVN0dE8xdC9zbFE1bi9CTS9iSW9ha21GcVIxL3lZNmYrZXhhb1hyCnlybnVGU0szSnZNOE9vTVY3KzVYcThHWHlGenU4UjV6djhldVpqYTVuVEhHSm1iaFVtZWdoeE1ROGlVNTMwVFUKeUtBb1IwSXJCNm1Tdm1WNi9jZ3JNUlVnSG5rNGoxVG4KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURWakNDQWo2Z0F3SUJBZ0lRUFByNEJHME9teWV4aVZsZjM1Nnc5ekFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV3TWpFMk1qSTBOek01V2hjTk1qWXdNakUyTWpJMApOek01V2pBcU1TZ3dKZ1lEVlFRRERCOHFMbVJsWm1GMWJIUXVhSFZpWW14bExXZHljR011WTJsc2FYVnRMbWx2Ck1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBczRQNGJxZEFGeXczaHlGOG5ubncKTW1HQlVtWmJNeTIycXZ2dUdrR2ZuS2ZpbjV4d1BxNlQwOE9DK1VzZWt0NkhjTjExSXQ5azFGZXdPVVBMWXZUbgpCVGlUdGhKY1owMi81akdESmhmcDAzTElXYTVPUGkya28zdDhrZVVyVVNhVmJVWGtGdHBKbERSSEs4eXdMOHpsCndiUGVLZ2dodWpqeFc1a2ozQkVMaDczajN5OE9hNzFWaEFJZ2JSclVHQ093aERrdU5jdkhrS0ZsTGNTN3A0aEYKUkhFTCticFhFSjZ1WDlWU2tHTEJqT2ZUMW9qM0wxVDAwbENRNDA3QlJ5Z2E3dUZKNWxtZXlSa203UE5vcFkrbQo2TEQveTVQREsvVlpYLzBHekI0MWc5bVFsOGVaTERhd1RFSmpLUU9sekZqelNOZkkyZjN3bk1weWsxK0lqZ2RwCnd3SURBUUFCbzRHTk1JR0tNQTRHQTFVZER3RUIvd1FFQXdJRm9EQWRCZ05WSFNVRUZqQVVCZ2dyQmdFRkJRY0QKQVFZSUt3WUJCUVVIQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JUNm55UjRlMFlTbjh3WgpSTnlCVU1XVkt6ZGhhVEFxQmdOVkhSRUVJekFoZ2g4cUxtUmxabUYxYkhRdWFIVmlZbXhsTFdkeWNHTXVZMmxzCmFYVnRMbWx2TUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFCN1ZBQmZveFlTN2tRNXgrQ3pjZENCQmJhczBxRFcKVWhhL3JLLyt6cjRXT0ZSam1QYllxa2k5akpCTDhwZ2JIMWI1TS9WS1VWTG5OOG5kRHpHd1BtOWNJTitGRUhOTwpLQ1Q2aEFOYWU5cHpET1ZDeHNJZzhMTjhLQmhSYkE2UStDZUxVNnNCN2FQMUR6VU5FNzRCakE5RTlWdUtNS2RJCjh2VGRaSFdIaHBHUm9NK3BlMXpLZkxCN1lDbXQzQmhERTExZmlvQ3EycnJKeDlsdEpiUDhmK1pMSmVOekVSNVkKaG9nSmFqTnpnRExQakdsV2l6RDJZTHhyYUJRd3pEN3hBTktmdStoOUFGYWVPQjg4UGJRTUJXMEU4M1pKcHlHdwp4U1NxMUt1N2VZTHRhbDJ2UWNTWGpxczJOMTY2V1V3cWxJWlZ3ZGhUWEU0aDVNdExVcmZHTlZROQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==\n tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb1FJQkFBS0NBUUVBczRQNGJxZEFGeXczaHlGOG5ubndNbUdCVW1aYk15MjJxdnZ1R2tHZm5LZmluNXh3ClBxNlQwOE9DK1VzZWt0NkhjTjExSXQ5azFGZXdPVVBMWXZUbkJUaVR0aEpjWjAyLzVqR0RKaGZwMDNMSVdhNU8KUGkya28zdDhrZVVyVVNhVmJVWGtGdHBKbERSSEs4eXdMOHpsd2JQZUtnZ2h1amp4VzVrajNCRUxoNzNqM3k4TwphNzFWaEFJZ2JSclVHQ093aERrdU5jdkhrS0ZsTGNTN3A0aEZSSEVMK2JwWEVKNnVYOVZTa0dMQmpPZlQxb2ozCkwxVDAwbENRNDA3QlJ5Z2E3dUZKNWxtZXlSa203UE5vcFkrbTZMRC95NVBESy9WWlgvMEd6QjQxZzltUWw4ZVoKTERhd1RFSmpLUU9sekZqelNOZkkyZjN3bk1weWsxK0lqZ2Rwd3dJREFRQUJBb0lCQURlR2QxNUtqekZidE9VdwpQMnFKVHZsV1hYak9QaUZMWDNmdHFzMG1SU2RnYW1GeU11bkFkK3NGUzBxdXFJdEhGMSthc0RPVUk0em1mcFNICjJxNEdFRHIyTUFJZUZjVENNTHdUem53RzQ0Vkt1SnU4cFdqb0YwZVYxNGNNK21RRWFpMjljM3A2RkU0cUNXV0wKejJkSm1qdGExam5yLy9xb3lFYkx6R0t5UnYyNkFvMWhES3lsMkQwbWQ4K0FPM0xybGNYeTRsaFkyQTAwbjNJTApOOStXTGNSWm1LeXZRMnhxTHp3VFROSEk4bUFMeFg3WGVYY3ZESUt6QzhIT3BJRGVxM2pwakNJelk4TWwzVW8yCjlrMmJrNVV0Z0xCcVB6N2s5RlBMb21pVW1MNzVSeU5nd2h1ZHROZktEUVVwWUR1V0NlMHJrV3BnWGhsZzRRVmQKZVRvNkJwRUNnWUVBNVBZNjdhT3BNL0k4WGQ4VVE0Yk84L3FObis0a2ZoL3J6MHRERUZibjlmSEczQnJjK2FnQQpXY1Vqc0ZINkkrQ1hvOUZ4TFAxZnFrUFBlNjIxNGdNa2RXRkNtMkNodUtXRjdOczY2eml4eGdNTlhoeFZCaFFMCi80NW9ja3Nya1NYYmdxYWZ5VVZweS81MStESXlTN2dFeDNwcFllTml3dnhscEdLMDlKRGJPcnNDZ1lFQXlMYnMKVENjbnFDU1B5TjZmWXkxMkhsUTkzc2s4YXBDd2s0UElFNFN1aWNNL0pQRnExUXZOR1lLdC9EK3NxOEVmcjVPUwpnMjFlM0NJSEczemVsNTJXdEtabHdBcXhuM1lDTGJyVXJ3WTRnRDBiRUlkOWU3TUVVc1poQ2dBRkFCVmdSSkQyCmNJKzZBWDdnSy9zeDY0QmRqRU5iRmRMT1dDMHJER3dRK21tMjhKa0NnWUVBZ2VId1VjaEtmczVHVWxYdXo5T1UKYUxEVVVFb3VaMkhDU2loK2xiRkhDcXlweTBodDNDM2liUW81RVplZlYyVGJnTEdhQ3hSOGhVRE1YWE9RUHZMYgpacjdTb3dPeEdlYkR6ellaK1QrREVUbkFCb2Y1NzJYRjNLV3pPRXJPSHh1anB6elFYY1BEblZYdkkrbU53SWhXCkpiYWFPMDViUXhNY3NhRFExLytvYldzQ2Z4OWNkWTl3anNHMjlNQnVDSGwyb1A0Y0s0Nmo0c3pEenRNOXhBVkkKdGZ6UWhRMUFxM0NlWjIyVS9acHNXSTU5bm5YUW1SUHdBdUppYnJuY3lPcm50S1NGVzJDbVN4cWZWYmZQZVd2ZgpqUFI4bDczOWJaM21Xc2FVT0pWWGEzQ1g1bkxRTTdGaXc2MTRKWTY1c1JNYSs0THZBMHhsbzlnak5wYTZTK3pVCmw4RUNnWUFZanRUeGkrb21sazVZcE01aHZDdHpJL0ZZOStWY2pLS1hIOVo1OWRtU0JJN0twNlFkQU15NHVTSE8KN3l3S0ppTlhkRTkxbzdzZkQ5UE5mUU1HbUVVeE91eTBJWTN4UGxZVGdqbmxhTnRjRE5tQlZFUUM2SEFyUG9segp3V0VtS0t5NmJ0SzZGZW9HVmljOXR6NnRrdEJGMENRUk5FSURScEtOQUl5T01PTzhOUT09Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==\n---\n# Source: cilium/charts/cilium/templates/cilium-configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: cilium-config\n namespace: kube-system\ndata:\n\n # Identity allocation mode selects how identities are shared between cilium\n # nodes by setting how they are stored. The options are \"crd\", \"kvstore\" or\n # \"doublewrite-readkvstore\" / \"doublewrite-readcrd\".\n # - \"crd\" stores identities in kubernetes as CRDs (custom resource definition).\n # These can be queried with:\n # kubectl get ciliumid\n # - \"kvstore\" stores identities in an etcd kvstore, that is\n # configured below. Cilium versions before 1.6 supported only the kvstore\n # backend. Upgrades from these older cilium versions should continue using\n # the kvstore by commenting out the identity-allocation-mode below, or\n # setting it to \"kvstore\".\n # - \"doublewrite\" modes store identities in both the kvstore and CRDs. This is useful\n # for seamless migrations from the kvstore mode to the crd mode. 
Consult the\n # documentation for more information on how to perform the migration.\n identity-allocation-mode: crd\n\n identity-heartbeat-timeout: \"30m0s\"\n identity-gc-interval: \"15m0s\"\n cilium-endpoint-gc-interval: \"5m0s\"\n nodes-gc-interval: \"5m0s\"\n\n # If you want to run cilium in debug mode change this value to true\n debug: \"false\"\n # The agent can be put into the following three policy enforcement modes\n # default, always and never.\n # https://docs.cilium.io/en/latest/security/policy/intro/#policy-enforcement-modes\n enable-policy: \"default\"\n # If you want metrics enabled in cilium-operator, set the port for\n # which the Cilium Operator will have their metrics exposed.\n # NOTE that this will open the port on the nodes where Cilium operator pod\n # is scheduled.\n operator-prometheus-serve-addr: \":9963\"\n enable-metrics: \"true\"\n enable-envoy-config: \"true\"\n envoy-config-retry-interval: \"15s\"\n enable-gateway-api: \"true\"\n enable-gateway-api-secrets-sync: \"true\"\n enable-gateway-api-proxy-protocol: \"false\"\n enable-gateway-api-app-protocol: \"true\"\n enable-gateway-api-alpn: \"true\"\n gateway-api-xff-num-trusted-hops: \"0\"\n gateway-api-service-externaltrafficpolicy: \"Cluster\"\n gateway-api-secrets-namespace: \"cilium-secrets\"\n gateway-api-hostnetwork-enabled: \"false\"\n gateway-api-hostnetwork-nodelabelselector: \"\"\n enable-policy-secrets-sync: \"true\"\n policy-secrets-only-from-secrets-namespace: \"true\"\n policy-secrets-namespace: \"cilium-secrets\"\n\n # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4\n # address.\n enable-ipv4: \"true\"\n\n # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6\n # address.\n enable-ipv6: \"false\"\n # Users who wish to specify their own custom CNI configuration file must set\n # custom-cni-conf to \"true\", otherwise Cilium may overwrite the configuration.\n custom-cni-conf: \"false\"\n enable-bpf-clock-probe: \"false\"\n # If you want cilium monitor to aggregate tracing for packets, set this level\n # to \"low\", \"medium\", or \"maximum\". 
The higher the level, the less packets\n # that will be seen in monitor output.\n monitor-aggregation: medium\n\n # The monitor aggregation interval governs the typical time between monitor\n # notification events for each allowed connection.\n #\n # Only effective when monitor aggregation is set to \"medium\" or higher.\n monitor-aggregation-interval: \"5s\"\n\n # The monitor aggregation flags determine which TCP flags which, upon the\n # first observation, cause monitor notifications to be generated.\n #\n # Only effective when monitor aggregation is set to \"medium\" or higher.\n monitor-aggregation-flags: all\n # Specifies the ratio (0.0-1.0] of total system memory to use for dynamic\n # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.\n bpf-map-dynamic-size-ratio: \"0.0025\"\n # bpf-policy-map-max specifies the maximum number of entries in endpoint\n # policy map (per endpoint)\n bpf-policy-map-max: \"16384\"\n # bpf-lb-map-max specifies the maximum number of entries in bpf lb service,\n # backend and affinity maps.\n bpf-lb-map-max: \"65536\"\n bpf-lb-external-clusterip: \"false\"\n bpf-lb-source-range-all-types: \"false\"\n bpf-lb-algorithm-annotation: \"false\"\n bpf-lb-mode-annotation: \"false\"\n\n bpf-events-drop-enabled: \"true\"\n bpf-events-policy-verdict-enabled: \"true\"\n bpf-events-trace-enabled: \"true\"\n\n # Pre-allocation of map entries allows per-packet latency to be reduced, at\n # the expense of up-front memory allocation for the entries in the maps. The\n # default value below will minimize memory usage in the default installation;\n # users who are sensitive to latency may consider setting this to \"true\".\n #\n # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore\n # this option and behave as though it is set to \"true\".\n #\n # If this value is modified, then during the next Cilium startup the restore\n # of existing endpoints and tracking of ongoing connections may be disrupted.\n # As a result, reply packets may be dropped and the load-balancing decisions\n # for established connections may change.\n #\n # If this option is set to \"false\" during an upgrade from 1.3 or earlier to\n # 1.4 or later, then it may cause one-time disruptions during the upgrade.\n preallocate-bpf-maps: \"false\"\n\n # Name of the cluster. Only relevant when building a mesh of clusters.\n cluster-name: default\n # Unique ID of the cluster. Must be unique across all conneted clusters and\n # in the range of 1 and 255. 
Only relevant when building a mesh of clusters.\n cluster-id: \"0\"\n\n # Encapsulation mode for communication between nodes\n # Possible values:\n # - disabled\n # - vxlan (default)\n # - geneve\n\n routing-mode: \"tunnel\"\n tunnel-protocol: \"vxlan\"\n service-no-backend-response: \"reject\"\n\n\n # Enables L7 proxy for L7 policy enforcement and visibility\n enable-l7-proxy: \"true\"\n\n enable-ipv4-masquerade: \"true\"\n enable-ipv4-big-tcp: \"false\"\n enable-ipv6-big-tcp: \"false\"\n enable-ipv6-masquerade: \"true\"\n enable-tcx: \"true\"\n datapath-mode: \"veth\"\n enable-masquerade-to-route-source: \"false\"\n\n enable-xt-socket-fallback: \"true\"\n install-no-conntrack-iptables-rules: \"false\"\n iptables-random-fully: \"false\"\n\n auto-direct-node-routes: \"false\"\n direct-routing-skip-unreachable: \"false\"\n enable-local-redirect-policy: \"false\"\n enable-runtime-device-detection: \"true\"\n\n kube-proxy-replacement: \"true\"\n kube-proxy-replacement-healthz-bind-address: \"\"\n bpf-lb-sock: \"false\"\n enable-health-check-nodeport: \"true\"\n enable-health-check-loadbalancer-ip: \"false\"\n node-port-bind-protection: \"true\"\n enable-auto-protect-node-port-range: \"true\"\n bpf-lb-acceleration: \"disabled\"\n enable-experimental-lb: \"false\"\n enable-svc-source-range-check: \"true\"\n enable-l2-neigh-discovery: \"true\"\n arping-refresh-period: \"30s\"\n k8s-require-ipv4-pod-cidr: \"false\"\n k8s-require-ipv6-pod-cidr: \"false\"\n enable-k8s-networkpolicy: \"true\"\n enable-endpoint-lockdown-on-policy-overflow: \"false\"\n # Tell the agent to generate and write a CNI configuration file\n write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist\n cni-exclusive: \"true\"\n cni-log-file: \"/var/run/cilium/cilium-cni.log\"\n enable-endpoint-health-checking: \"true\"\n enable-health-checking: \"true\"\n health-check-icmp-failure-threshold: \"3\"\n enable-well-known-identities: \"false\"\n enable-node-selector-labels: \"false\"\n synchronize-k8s-nodes: \"true\"\n operator-api-serve-addr: \"127.0.0.1:9234\"\n # Enable Hubble gRPC service.\n enable-hubble: \"true\"\n # UNIX domain socket for Hubble server to listen to.\n hubble-socket-path: \"/var/run/cilium/hubble.sock\"\n hubble-export-file-max-size-mb: \"10\"\n hubble-export-file-max-backups: \"5\"\n # An additional address for Hubble server to listen to (e.g. 
\":4244\").\n hubble-listen-address: \":4244\"\n hubble-disable-tls: \"false\"\n hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt\n hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key\n hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt\n ipam: \"kubernetes\"\n ipam-multi-pool-pre-allocation: \n ipam-cilium-node-update-rate: \"15s\"\n\n default-lb-service-ipam: \"lbipam\"\n egress-gateway-reconciliation-trigger-interval: \"1s\"\n enable-vtep: \"false\"\n vtep-endpoint: \"\"\n vtep-cidr: \"\"\n vtep-mask: \"\"\n vtep-mac: \"\"\n enable-bgp-control-plane: \"true\"\n bgp-secrets-namespace: \"kube-system\"\n enable-bgp-control-plane-status-report: \"true\"\n procfs: \"/host/proc\"\n bpf-root: \"/sys/fs/bpf\"\n cgroup-root: \"/sys/fs/cgroup\"\n enable-k8s-terminating-endpoint: \"true\"\n enable-sctp: \"false\"\n remove-cilium-node-taints: \"true\"\n set-cilium-node-taints: \"true\"\n set-cilium-is-up-condition: \"true\"\n unmanaged-pod-watcher-interval: \"15\"\n # default DNS proxy to transparent mode in non-chaining modes\n dnsproxy-enable-transparent-mode: \"true\"\n dnsproxy-socket-linger-timeout: \"10\"\n tofqdns-dns-reject-response-code: \"refused\"\n tofqdns-enable-dns-compression: \"true\"\n tofqdns-endpoint-max-ip-per-hostname: \"1000\"\n tofqdns-idle-connection-grace-period: \"0s\"\n tofqdns-max-deferred-connection-deletes: \"10000\"\n tofqdns-proxy-response-max-delay: \"100ms\"\n agent-not-ready-taint-key: \"node.cilium.io/agent-not-ready\"\n\n mesh-auth-enabled: \"true\"\n mesh-auth-queue-size: \"1024\"\n mesh-auth-rotated-identities-queue-size: \"1024\"\n mesh-auth-gc-interval: \"5m0s\"\n\n proxy-xff-num-trusted-hops-ingress: \"0\"\n proxy-xff-num-trusted-hops-egress: \"0\"\n proxy-connect-timeout: \"2\"\n proxy-initial-fetch-timeout: \"30\"\n proxy-max-requests-per-connection: \"0\"\n proxy-max-connection-duration-seconds: \"0\"\n proxy-idle-timeout-seconds: \"60\"\n proxy-max-concurrent-retries: \"128\"\n http-retry-count: \"3\"\n\n external-envoy-proxy: \"true\"\n envoy-base-id: \"0\"\n envoy-access-log-buffer-size: \"4096\"\n envoy-keep-cap-netbindservice: \"false\"\n max-connected-clusters: \"255\"\n clustermesh-enable-endpoint-sync: \"false\"\n clustermesh-enable-mcs-api: \"false\"\n\n nat-map-stats-entries: \"32\"\n nat-map-stats-interval: \"30s\"\n enable-internal-traffic-policy: \"true\"\n enable-lb-ipam: \"true\"\n enable-non-default-deny-policies: \"true\"\n enable-source-ip-verification: \"true\"\n\n# Extra config allows adding arbitrary properties to the cilium config.\n# By putting it at the end of the ConfigMap, it's also possible to override existing properties.\n---\n# Source: cilium/charts/cilium/templates/cilium-envoy/configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: cilium-envoy-config\n namespace: kube-system\ndata:\n # Keep the key name as bootstrap-config.json to avoid breaking changes\n bootstrap-config.json: |\n {\"admin\":{\"address\":{\"pipe\":{\"path\":\"/var/run/cilium/envoy/sockets/admin.sock\"}}},\"applicationLogConfig\":{\"logFormat\":{\"textFormat\":\"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] 
%v\"}},\"bootstrapExtensions\":[{\"name\":\"envoy.bootstrap.internal_listener\",\"typedConfig\":{\"@type\":\"type.googleapis.com/envoy.extensions.bootstrap.internal_listener.v3.InternalListener\"}}],\"dynamicResources\":{\"cdsConfig\":{\"apiConfigSource\":{\"apiType\":\"GRPC\",\"grpcServices\":[{\"envoyGrpc\":{\"clusterName\":\"xds-grpc-cilium\"}}],\"setNodeOnFirstMessageOnly\":true,\"transportApiVersion\":\"V3\"},\"initialFetchTimeout\":\"30s\",\"resourceApiVersion\":\"V3\"},\"ldsConfig\":{\"apiConfigSource\":{\"apiType\":\"GRPC\",\"grpcServices\":[{\"envoyGrpc\":{\"clusterName\":\"xds-grpc-cilium\"}}],\"setNodeOnFirstMessageOnly\":true,\"transportApiVersion\":\"V3\"},\"initialFetchTimeout\":\"30s\",\"resourceApiVersion\":\"V3\"}},\"node\":{\"cluster\":\"ingress-cluster\",\"id\":\"host~127.0.0.1~no-id~localdomain\"},\"overloadManager\":{\"resourceMonitors\":[{\"name\":\"envoy.resource_monitors.global_downstream_max_connections\",\"typedConfig\":{\"@type\":\"type.googleapis.com/envoy.extensions.resource_monitors.downstream_connections.v3.DownstreamConnectionsConfig\",\"max_active_downstream_connections\":\"50000\"}}]},\"staticResources\":{\"clusters\":[{\"circuitBreakers\":{\"thresholds\":[{\"maxRetries\":128}]},\"cleanupInterval\":\"2.500s\",\"connectTimeout\":\"2s\",\"lbPolicy\":\"CLUSTER_PROVIDED\",\"name\":\"ingress-cluster\",\"type\":\"ORIGINAL_DST\",\"typedExtensionProtocolOptions\":{\"envoy.extensions.upstreams.http.v3.HttpProtocolOptions\":{\"@type\":\"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions\",\"commonHttpProtocolOptions\":{\"idleTimeout\":\"60s\",\"maxConnectionDuration\":\"0s\",\"maxRequestsPerConnection\":0},\"useDownstreamProtocolConfig\":{}}}},{\"circuitBreakers\":{\"thresholds\":[{\"maxRetries\":128}]},\"cleanupInterval\":\"2.500s\",\"connectTimeout\":\"2s\",\"lbPolicy\":\"CLUSTER_PROVIDED\",\"name\":\"egress-cluster-tls\",\"transportSocket\":{\"name\":\"cilium.tls_wrapper\",\"typedConfig\":{\"@type\":\"type.googleapis.com/cilium.UpstreamTlsWrapperContext\"}},\"type\":\"ORIGINAL_DST\",\"typedExtensionProtocolOptions\":{\"envoy.extensions.upstreams.http.v3.HttpProtocolOptions\":{\"@type\":\"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions\",\"commonHttpProtocolOptions\":{\"idleTimeout\":\"60s\",\"maxConnectionDuration\":\"0s\",\"maxRequestsPerConnection\":0},\"upstreamHttpProtocolOptions\":{},\"useDownstreamProtocolConfig\":{}}}},{\"circuitBreakers\":{\"thresholds\":[{\"maxRetries\":128}]},\"cleanupInterval\":\"2.500s\",\"connectTimeout\":\"2s\",\"lbPolicy\":\"CLUSTER_PROVIDED\",\"name\":\"egress-cluster\",\"type\":\"ORIGINAL_DST\",\"typedExtensionProtocolOptions\":{\"envoy.extensions.upstreams.http.v3.HttpProtocolOptions\":{\"@type\":\"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions\",\"commonHttpProtocolOptions\":{\"idleTimeout\":\"60s\",\"maxConnectionDuration\":\"0s\",\"maxRequestsPerConnection\":0},\"useDownstreamProtocolConfig\":{}}}},{\"circuitBreakers\":{\"thresholds\":[{\"maxRetries\":128}]},\"cleanupInterval\":\"2.500s\",\"connectTimeout\":\"2s\",\"lbPolicy\":\"CLUSTER_PROVIDED\",\"name\":\"ingress-cluster-tls\",\"transportSocket\":{\"name\":\"cilium.tls_wrapper\",\"typedConfig\":{\"@type\":\"type.googleapis.com/cilium.UpstreamTlsWrapperContext\"}},\"type\":\"ORIGINAL_DST\",\"typedExtensionProtocolOptions\":{\"envoy.extensions.upstreams.http.v3.HttpProtocolOptions\":{\"@type\":\"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions\",\"commonHttpProtocolOptions
\":{\"idleTimeout\":\"60s\",\"maxConnectionDuration\":\"0s\",\"maxRequestsPerConnection\":0},\"upstreamHttpProtocolOptions\":{},\"useDownstreamProtocolConfig\":{}}}},{\"connectTimeout\":\"2s\",\"loadAssignment\":{\"clusterName\":\"xds-grpc-cilium\",\"endpoints\":[{\"lbEndpoints\":[{\"endpoint\":{\"address\":{\"pipe\":{\"path\":\"/var/run/cilium/envoy/sockets/xds.sock\"}}}}]}]},\"name\":\"xds-grpc-cilium\",\"type\":\"STATIC\",\"typedExtensionProtocolOptions\":{\"envoy.extensions.upstreams.http.v3.HttpProtocolOptions\":{\"@type\":\"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions\",\"explicitHttpConfig\":{\"http2ProtocolOptions\":{}}}}},{\"connectTimeout\":\"2s\",\"loadAssignment\":{\"clusterName\":\"/envoy-admin\",\"endpoints\":[{\"lbEndpoints\":[{\"endpoint\":{\"address\":{\"pipe\":{\"path\":\"/var/run/cilium/envoy/sockets/admin.sock\"}}}}]}]},\"name\":\"/envoy-admin\",\"type\":\"STATIC\"}],\"listeners\":[{\"address\":{\"socketAddress\":{\"address\":\"0.0.0.0\",\"portValue\":9964}},\"filterChains\":[{\"filters\":[{\"name\":\"envoy.filters.network.http_connection_manager\",\"typedConfig\":{\"@type\":\"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\",\"httpFilters\":[{\"name\":\"envoy.filters.http.router\",\"typedConfig\":{\"@type\":\"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\"}}],\"internalAddressConfig\":{\"cidrRanges\":[{\"addressPrefix\":\"10.0.0.0\",\"prefixLen\":8},{\"addressPrefix\":\"172.16.0.0\",\"prefixLen\":12},{\"addressPrefix\":\"192.168.0.0\",\"prefixLen\":16},{\"addressPrefix\":\"127.0.0.1\",\"prefixLen\":32}]},\"routeConfig\":{\"virtualHosts\":[{\"domains\":[\"*\"],\"name\":\"prometheus_metrics_route\",\"routes\":[{\"match\":{\"prefix\":\"/metrics\"},\"name\":\"prometheus_metrics_route\",\"route\":{\"cluster\":\"/envoy-admin\",\"prefixRewrite\":\"/stats/prometheus\"}}]}]},\"statPrefix\":\"envoy-prometheus-metrics-listener\",\"streamIdleTimeout\":\"0s\"}}]}],\"name\":\"envoy-prometheus-metrics-listener\"},{\"address\":{\"socketAddress\":{\"address\":\"127.0.0.1\",\"portValue\":9878}},\"filterChains\":[{\"filters\":[{\"name\":\"envoy.filters.network.http_connection_manager\",\"typedConfig\":{\"@type\":\"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\",\"httpFilters\":[{\"name\":\"envoy.filters.http.router\",\"typedConfig\":{\"@type\":\"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\"}}],\"internalAddressConfig\":{\"cidrRanges\":[{\"addressPrefix\":\"10.0.0.0\",\"prefixLen\":8},{\"addressPrefix\":\"172.16.0.0\",\"prefixLen\":12},{\"addressPrefix\":\"192.168.0.0\",\"prefixLen\":16},{\"addressPrefix\":\"127.0.0.1\",\"prefixLen\":32}]},\"routeConfig\":{\"virtual_hosts\":[{\"domains\":[\"*\"],\"name\":\"health\",\"routes\":[{\"match\":{\"prefix\":\"/healthz\"},\"name\":\"health\",\"route\":{\"cluster\":\"/envoy-admin\",\"prefixRewrite\":\"/ready\"}}]}]},\"statPrefix\":\"envoy-health-listener\",\"streamIdleTimeout\":\"0s\"}}]}],\"name\":\"envoy-health-listener\"}]}}\n---\n# Source: cilium/charts/cilium/templates/hubble-relay/configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: hubble-relay-config\n namespace: kube-system\ndata:\n config.yaml: |\n cluster-name: default\n peer-service: \"hubble-peer.kube-system.svc.cluster.local.:443\"\n listen-address: :4245\n gops: true\n gops-port: \"9893\"\n retry-timeout: \n sort-buffer-len-max: \n sort-buffer-drain-timeout: \n tls-hubble-client-cert-file: 
/var/lib/hubble-relay/tls/client.crt\n tls-hubble-client-key-file: /var/lib/hubble-relay/tls/client.key\n tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt\n \n disable-server-tls: true\n---\n# Source: cilium/charts/cilium/templates/hubble-ui/configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: hubble-ui-nginx\n namespace: kube-system\ndata:\n nginx.conf: \"server {\\n listen 8081;\\n listen [::]:8081;\\n server_name localhost;\\n root /app;\\n index index.html;\\n client_max_body_size 1G;\\n\\n location / {\\n proxy_set_header Host $host;\\n proxy_set_header X-Real-IP $remote_addr;\\n\\n location /api {\\n proxy_http_version 1.1;\\n proxy_pass_request_headers on;\\n proxy_pass http://127.0.0.1:8090;\\n }\\n location / {\\n # double `/index.html` is required here \\n try_files $uri $uri/ /index.html /index.html;\\n }\\n\\n # Liveness probe\\n location /healthz {\\n access_log off;\\n add_header Content-Type text/plain;\\n return 200 'ok';\\n }\\n }\\n}\"\n---\n# Source: cilium/charts/cilium/templates/cilium-agent/clusterrole.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: cilium\n labels:\n app.kubernetes.io/part-of: cilium\nrules:\n- apiGroups:\n - networking.k8s.io\n resources:\n - networkpolicies\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - discovery.k8s.io\n resources:\n - endpointslices\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - namespaces\n - services\n - pods\n - endpoints\n - nodes\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - apiextensions.k8s.io\n resources:\n - customresourcedefinitions\n verbs:\n - list\n - watch\n # This is used when validating policies in preflight. This will need to stay\n # until we figure out how to avoid \"get\" inside the preflight, and then\n # should be removed ideally.\n - get\n- apiGroups:\n - cilium.io\n resources:\n - ciliumloadbalancerippools\n - ciliumbgppeeringpolicies\n - ciliumbgpnodeconfigs\n - ciliumbgpadvertisements\n - ciliumbgppeerconfigs\n - ciliumclusterwideenvoyconfigs\n - ciliumclusterwidenetworkpolicies\n - ciliumegressgatewaypolicies\n - ciliumendpoints\n - ciliumendpointslices\n - ciliumenvoyconfigs\n - ciliumidentities\n - ciliumlocalredirectpolicies\n - ciliumnetworkpolicies\n - ciliumnodes\n - ciliumnodeconfigs\n - ciliumcidrgroups\n - ciliuml2announcementpolicies\n - ciliumpodippools\n verbs:\n - list\n - watch\n- apiGroups:\n - cilium.io\n resources:\n - ciliumidentities\n - ciliumendpoints\n - ciliumnodes\n verbs:\n - create\n- apiGroups:\n - cilium.io\n # To synchronize garbage collection of such resources\n resources:\n - ciliumidentities\n verbs:\n - update\n- apiGroups:\n - cilium.io\n resources:\n - ciliumendpoints\n verbs:\n - delete\n - get\n- apiGroups:\n - cilium.io\n resources:\n - ciliumnodes\n - ciliumnodes/status\n verbs:\n - get\n - update\n- apiGroups:\n - cilium.io\n resources:\n - ciliumendpoints/status\n - ciliumendpoints\n - ciliuml2announcementpolicies/status\n - ciliumbgpnodeconfigs/status\n verbs:\n - patch\n---\n# Source: cilium/charts/cilium/templates/cilium-operator/clusterrole.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: cilium-operator\n labels:\n app.kubernetes.io/part-of: cilium\nrules:\n- apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n - list\n - watch\n # to automatically delete [core|kube]dns pods so that are starting to being\n # managed by Cilium\n - delete\n- apiGroups:\n - \"\"\n resources:\n - configmaps\n 
resourceNames:\n - cilium-config\n verbs:\n # allow patching of the configmap to set annotations\n - patch\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n # To remove node taints\n - nodes\n # To set NetworkUnavailable false on startup\n - nodes/status\n verbs:\n - patch\n- apiGroups:\n - discovery.k8s.io\n resources:\n - endpointslices\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n # to perform LB IP allocation for BGP\n - services/status\n verbs:\n - update\n - patch\n- apiGroups:\n - \"\"\n resources:\n # to check apiserver connectivity\n - namespaces\n - secrets\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n # to perform the translation of a CNP that contains `ToGroup` to its endpoints\n - services\n - endpoints\n verbs:\n - get\n - list\n - watch\n - create\n - update\n - delete\n - patch\n- apiGroups:\n - cilium.io\n resources:\n - ciliumnetworkpolicies\n - ciliumclusterwidenetworkpolicies\n verbs:\n # Create auto-generated CNPs and CCNPs from Policies that have 'toGroups'\n - create\n - update\n - deletecollection\n # To update the status of the CNPs and CCNPs\n - patch\n - get\n - list\n - watch\n- apiGroups:\n - cilium.io\n resources:\n - ciliumnetworkpolicies/status\n - ciliumclusterwidenetworkpolicies/status\n verbs:\n # Update the auto-generated CNPs and CCNPs status.\n - patch\n - update\n- apiGroups:\n - cilium.io\n resources:\n - ciliumendpoints\n - ciliumidentities\n verbs:\n # To perform garbage collection of such resources\n - delete\n - list\n - watch\n- apiGroups:\n - cilium.io\n resources:\n - ciliumidentities\n verbs:\n # To synchronize garbage collection of such resources\n - update\n- apiGroups:\n - cilium.io\n resources:\n - ciliumnodes\n verbs:\n - create\n - update\n - get\n - list\n - watch\n # To perform CiliumNode garbage collector\n - delete\n- apiGroups:\n - cilium.io\n resources:\n - ciliumnodes/status\n verbs:\n - update\n- apiGroups:\n - cilium.io\n resources:\n - ciliumendpointslices\n - ciliumenvoyconfigs\n - ciliumbgppeerconfigs\n - ciliumbgpadvertisements\n - ciliumbgpnodeconfigs\n verbs:\n - create\n - update\n - get\n - list\n - watch\n - delete\n - patch\n- apiGroups:\n - cilium.io\n resources:\n - ciliumbgpclusterconfigs/status\n - ciliumbgppeerconfigs/status\n verbs:\n - update\n- apiGroups:\n - apiextensions.k8s.io\n resources:\n - customresourcedefinitions\n verbs:\n - create\n - get\n - list\n - watch\n- apiGroups:\n - apiextensions.k8s.io\n resources:\n - customresourcedefinitions\n verbs:\n - update\n resourceNames:\n - ciliumloadbalancerippools.cilium.io\n - ciliumbgppeeringpolicies.cilium.io\n - ciliumbgpclusterconfigs.cilium.io\n - ciliumbgppeerconfigs.cilium.io\n - ciliumbgpadvertisements.cilium.io\n - ciliumbgpnodeconfigs.cilium.io\n - ciliumbgpnodeconfigoverrides.cilium.io\n - ciliumclusterwideenvoyconfigs.cilium.io\n - ciliumclusterwidenetworkpolicies.cilium.io\n - ciliumegressgatewaypolicies.cilium.io\n - ciliumendpoints.cilium.io\n - ciliumendpointslices.cilium.io\n - ciliumenvoyconfigs.cilium.io\n - ciliumexternalworkloads.cilium.io\n - ciliumidentities.cilium.io\n - ciliumlocalredirectpolicies.cilium.io\n - ciliumnetworkpolicies.cilium.io\n - ciliumnodes.cilium.io\n - ciliumnodeconfigs.cilium.io\n - ciliumcidrgroups.cilium.io\n - ciliuml2announcementpolicies.cilium.io\n - ciliumpodippools.cilium.io\n- apiGroups:\n - cilium.io\n resources:\n - ciliumloadbalancerippools\n - ciliumpodippools\n - 
ciliumbgppeeringpolicies\n - ciliumbgpclusterconfigs\n - ciliumbgpnodeconfigoverrides\n - ciliumbgppeerconfigs\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - cilium.io\n resources:\n - ciliumpodippools\n verbs:\n - create\n- apiGroups:\n - cilium.io\n resources:\n - ciliumloadbalancerippools/status\n verbs:\n - patch\n# For cilium-operator running in HA mode.\n#\n# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election\n# between multiple running instances.\n# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less\n# common and fewer objects in the cluster watch \"all Leases\".\n- apiGroups:\n - coordination.k8s.io\n resources:\n - leases\n verbs:\n - create\n - get\n - update\n- apiGroups:\n - gateway.networking.k8s.io\n resources:\n - gatewayclasses\n - gateways\n - tlsroutes\n - httproutes\n - grpcroutes\n - referencegrants\n - referencepolicies\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - gateway.networking.k8s.io\n resources:\n - gatewayclasses/status\n - gateways/status\n - httproutes/status\n - grpcroutes/status\n - tlsroutes/status\n verbs:\n - update\n - patch\n- apiGroups:\n - multicluster.x-k8s.io\n resources:\n - serviceimports\n verbs:\n - get\n - list\n - watch\n---\n# Source: cilium/charts/cilium/templates/hubble-ui/clusterrole.yaml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: hubble-ui\n labels:\n app.kubernetes.io/part-of: cilium\n\nrules:\n- apiGroups:\n - networking.k8s.io\n resources:\n - networkpolicies\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - componentstatuses\n - endpoints\n - namespaces\n - nodes\n - pods\n - services\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - apiextensions.k8s.io\n resources:\n - customresourcedefinitions\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - cilium.io\n resources:\n - \"*\"\n verbs:\n - get\n - list\n - watch\n---\n# Source: cilium/charts/cilium/templates/cilium-agent/clusterrolebinding.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: cilium\n labels:\n app.kubernetes.io/part-of: cilium\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: cilium\nsubjects:\n- kind: ServiceAccount\n name: \"cilium\"\n namespace: kube-system\n---\n# Source: cilium/charts/cilium/templates/cilium-operator/clusterrolebinding.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: cilium-operator\n labels:\n app.kubernetes.io/part-of: cilium\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: cilium-operator\nsubjects:\n- kind: ServiceAccount\n name: \"cilium-operator\"\n namespace: kube-system\n---\n# Source: cilium/charts/cilium/templates/hubble-ui/clusterrolebinding.yaml\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: hubble-ui\n labels:\n app.kubernetes.io/part-of: cilium\n\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: hubble-ui\nsubjects:\n- kind: ServiceAccount\n name: \"hubble-ui\"\n namespace: kube-system\n---\n# Source: cilium/charts/cilium/templates/cilium-agent/role.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: cilium-config-agent\n namespace: kube-system\n labels:\n app.kubernetes.io/part-of: cilium\nrules:\n- apiGroups:\n - \"\"\n resources:\n - configmaps\n verbs:\n - get\n - list\n - watch\n---\n# Source: 
cilium/charts/cilium/templates/cilium-agent/role.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: cilium-gateway-secrets\n namespace: \"cilium-secrets\"\n labels:\n app.kubernetes.io/part-of: cilium\nrules:\n- apiGroups:\n - \"\"\n resources:\n - secrets\n verbs:\n - get\n - list\n - watch\n---\n# Source: cilium/charts/cilium/templates/cilium-agent/role.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: cilium-bgp-control-plane-secrets\n namespace: \"kube-system\"\n labels:\n app.kubernetes.io/part-of: cilium\nrules:\n- apiGroups:\n - \"\"\n resources:\n - secrets\n verbs:\n - get\n - list\n - watch\n---\n# Source: cilium/charts/cilium/templates/cilium-agent/role.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: cilium-tlsinterception-secrets\n namespace: \"cilium-secrets\" \n labels:\n app.kubernetes.io/part-of: cilium\nrules:\n- apiGroups:\n - \"\"\n resources:\n - secrets\n verbs:\n - get\n - list\n - watch\n---\n# Source: cilium/charts/cilium/templates/cilium-operator/role.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: cilium-operator-gateway-secrets\n namespace: \"cilium-secrets\"\n labels:\n app.kubernetes.io/part-of: cilium\nrules:\n- apiGroups:\n - \"\"\n resources:\n - secrets\n verbs:\n - create\n - delete\n - update\n - patch\n---\n# Source: cilium/charts/cilium/templates/cilium-operator/role.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: cilium-operator-tlsinterception-secrets\n namespace: \"cilium-secrets\"\n labels:\n app.kubernetes.io/part-of: cilium\nrules:\n- apiGroups:\n - \"\"\n resources:\n - secrets\n verbs:\n - create\n - delete\n - update\n - patch\n---\n# Source: cilium/charts/cilium/templates/cilium-agent/rolebinding.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: cilium-config-agent\n namespace: kube-system\n labels:\n app.kubernetes.io/part-of: cilium\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: cilium-config-agent\nsubjects:\n - kind: ServiceAccount\n name: \"cilium\"\n namespace: kube-system\n---\n# Source: cilium/charts/cilium/templates/cilium-agent/rolebinding.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: cilium-gateway-secrets\n namespace: \"cilium-secrets\"\n labels:\n app.kubernetes.io/part-of: cilium\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: cilium-gateway-secrets\nsubjects:\n- kind: ServiceAccount\n name: \"cilium\"\n namespace: kube-system\n---\n# Source: cilium/charts/cilium/templates/cilium-agent/rolebinding.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: cilium-bgp-control-plane-secrets\n namespace: \"kube-system\"\n labels:\n app.kubernetes.io/part-of: cilium\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: cilium-bgp-control-plane-secrets\nsubjects:\n- kind: ServiceAccount\n name: \"cilium\"\n namespace: kube-system\n---\n# Source: cilium/charts/cilium/templates/cilium-agent/rolebinding.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: cilium-tlsinterception-secrets\n namespace: \"cilium-secrets\"\n labels:\n app.kubernetes.io/part-of: cilium\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: cilium-tlsinterception-secrets\nsubjects:\n- kind: ServiceAccount\n name: \"cilium\"\n namespace: kube-system\n---\n# Source: 
cilium/charts/cilium/templates/cilium-operator/rolebinding.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: cilium-operator-gateway-secrets\n namespace: \"cilium-secrets\"\n labels:\n app.kubernetes.io/part-of: cilium\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: cilium-operator-gateway-secrets\nsubjects:\n- kind: ServiceAccount\n name: \"cilium-operator\"\n namespace: kube-system\n---\n# Source: cilium/charts/cilium/templates/cilium-operator/rolebinding.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: cilium-operator-tlsinterception-secrets\n namespace: \"cilium-secrets\"\n labels:\n app.kubernetes.io/part-of: cilium\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: cilium-operator-tlsinterception-secrets\nsubjects:\n- kind: ServiceAccount\n name: \"cilium-operator\"\n namespace: kube-system\n---\n# Source: cilium/charts/cilium/templates/cilium-envoy/service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: cilium-envoy\n namespace: kube-system\n annotations:\n prometheus.io/scrape: \"true\"\n prometheus.io/port: \"9964\"\n labels:\n k8s-app: cilium-envoy\n app.kubernetes.io/name: cilium-envoy\n app.kubernetes.io/part-of: cilium\n io.cilium/app: proxy\nspec:\n clusterIP: None\n type: ClusterIP\n selector:\n k8s-app: cilium-envoy\n ports:\n - name: envoy-metrics\n port: 9964\n protocol: TCP\n targetPort: envoy-metrics\n---\n# Source: cilium/charts/cilium/templates/hubble-relay/service.yaml\nkind: Service\napiVersion: v1\nmetadata:\n name: hubble-relay\n namespace: kube-system\n annotations:\n labels:\n k8s-app: hubble-relay\n app.kubernetes.io/name: hubble-relay\n app.kubernetes.io/part-of: cilium\n\nspec:\n type: \"ClusterIP\"\n selector:\n k8s-app: hubble-relay\n ports:\n - protocol: TCP\n port: 80\n targetPort: grpc\n---\n# Source: cilium/charts/cilium/templates/hubble-ui/service.yaml\nkind: Service\napiVersion: v1\nmetadata:\n name: hubble-ui\n namespace: kube-system\n\n annotations:\n omni-kube-service-exposer.sidero.dev/label: Hubble\n omni-kube-service-exposer.sidero.dev/port: \"50080\"\n labels:\n k8s-app: hubble-ui\n app.kubernetes.io/name: hubble-ui\n app.kubernetes.io/part-of: cilium\n\nspec:\n type: \"ClusterIP\"\n selector:\n k8s-app: hubble-ui\n ports:\n - name: http\n port: 80\n targetPort: 8081\n---\n# Source: cilium/charts/cilium/templates/hubble/peer-service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: hubble-peer\n namespace: kube-system\n labels:\n k8s-app: cilium\n app.kubernetes.io/part-of: cilium\n app.kubernetes.io/name: hubble-peer\n\nspec:\n selector:\n k8s-app: cilium\n ports:\n - name: peer-service\n port: 443\n protocol: TCP\n targetPort: 4244\n internalTrafficPolicy: Local\n---\n# Source: cilium/charts/cilium/templates/cilium-agent/daemonset.yaml\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: cilium\n namespace: kube-system\n labels:\n k8s-app: cilium\n app.kubernetes.io/part-of: cilium\n app.kubernetes.io/name: cilium-agent\nspec:\n selector:\n matchLabels:\n k8s-app: cilium\n updateStrategy:\n rollingUpdate:\n maxUnavailable: 2\n type: RollingUpdate\n template:\n metadata:\n annotations:\n labels:\n k8s-app: cilium\n app.kubernetes.io/name: cilium-agent\n app.kubernetes.io/part-of: cilium\n spec:\n securityContext:\n appArmorProfile:\n type: Unconfined\n containers:\n - name: cilium-agent\n image: \"quay.io/cilium/cilium:v1.17.1@sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866\"\n imagePullPolicy: 
IfNotPresent\n command:\n - cilium-agent\n args:\n - --config-dir=/tmp/cilium/config-map\n startupProbe:\n httpGet:\n host: \"127.0.0.1\"\n path: /healthz\n port: 9879\n scheme: HTTP\n httpHeaders:\n - name: \"brief\"\n value: \"true\"\n failureThreshold: 105\n periodSeconds: 2\n successThreshold: 1\n initialDelaySeconds: 5\n livenessProbe:\n httpGet:\n host: \"127.0.0.1\"\n path: /healthz\n port: 9879\n scheme: HTTP\n httpHeaders:\n - name: \"brief\"\n value: \"true\"\n periodSeconds: 30\n successThreshold: 1\n failureThreshold: 10\n timeoutSeconds: 5\n readinessProbe:\n httpGet:\n host: \"127.0.0.1\"\n path: /healthz\n port: 9879\n scheme: HTTP\n httpHeaders:\n - name: \"brief\"\n value: \"true\"\n periodSeconds: 30\n successThreshold: 1\n failureThreshold: 3\n timeoutSeconds: 5\n env:\n - name: K8S_NODE_NAME\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: spec.nodeName\n - name: CILIUM_K8S_NAMESPACE\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.namespace\n - name: CILIUM_CLUSTERMESH_CONFIG\n value: /var/lib/cilium/clustermesh/\n - name: GOMEMLIMIT\n valueFrom:\n resourceFieldRef:\n resource: limits.memory\n divisor: '1'\n - name: KUBERNETES_SERVICE_HOST\n value: \"localhost\"\n - name: KUBERNETES_SERVICE_PORT\n value: \"7445\"\n lifecycle:\n postStart:\n exec:\n command:\n - \"bash\"\n - \"-c\"\n - |\n set -o errexit\n set -o pipefail\n set -o nounset\n \n # When running in AWS ENI mode, it's likely that 'aws-node' has\n # had a chance to install SNAT iptables rules. These can result\n # in dropped traffic, so we should attempt to remove them.\n # We do it using a 'postStart' hook since this may need to run\n # for nodes which might have already been init'ed but may still\n # have dangling rules. This is safe because there are no\n # dependencies on anything that is part of the startup script\n # itself, and can be safely run multiple times per node (e.g. 
in\n # case of a restart).\n if [[ \"$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')\" != \"0\" ]];\n then\n echo 'Deleting iptables rules created by the AWS CNI VPC plugin'\n iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore\n fi\n echo 'Done!'\n \n preStop:\n exec:\n command:\n - /cni-uninstall.sh\n securityContext:\n seLinuxOptions:\n level: s0\n type: spc_t\n capabilities:\n add:\n - CHOWN\n - KILL\n - NET_ADMIN\n - NET_RAW\n - IPC_LOCK\n - SYS_ADMIN\n - SYS_RESOURCE\n - DAC_OVERRIDE\n - FOWNER\n - SETUID\n - SETGID\n drop:\n - ALL\n terminationMessagePolicy: FallbackToLogsOnError\n volumeMounts:\n - name: envoy-sockets\n mountPath: /var/run/cilium/envoy/sockets\n readOnly: false\n # Unprivileged containers need to mount /proc/sys/net from the host\n # to have write access\n - mountPath: /host/proc/sys/net\n name: host-proc-sys-net\n # Unprivileged containers need to mount /proc/sys/kernel from the host\n # to have write access\n - mountPath: /host/proc/sys/kernel\n name: host-proc-sys-kernel\n - name: bpf-maps\n mountPath: /sys/fs/bpf\n # Unprivileged containers can't set mount propagation to bidirectional\n # in this case we will mount the bpf fs from an init container that\n # is privileged and set the mount propagation from host to container\n # in Cilium.\n mountPropagation: HostToContainer\n # Check for duplicate mounts before mounting\n - name: cilium-cgroup\n mountPath: /sys/fs/cgroup\n - name: cilium-run\n mountPath: /var/run/cilium\n - name: cilium-netns\n mountPath: /var/run/cilium/netns\n mountPropagation: HostToContainer\n - name: etc-cni-netd\n mountPath: /host/etc/cni/net.d\n - name: clustermesh-secrets\n mountPath: /var/lib/cilium/clustermesh\n readOnly: true\n # Needed to be able to load kernel modules\n - name: lib-modules\n mountPath: /lib/modules\n readOnly: true\n - name: xtables-lock\n mountPath: /run/xtables.lock\n - name: hubble-tls\n mountPath: /var/lib/cilium/tls/hubble\n readOnly: true\n - name: tmp\n mountPath: /tmp\n initContainers:\n - name: config\n image: \"quay.io/cilium/cilium:v1.17.1@sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866\"\n imagePullPolicy: IfNotPresent\n command:\n - cilium-dbg\n - build-config\n env:\n - name: K8S_NODE_NAME\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: spec.nodeName\n - name: CILIUM_K8S_NAMESPACE\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.namespace\n - name: KUBERNETES_SERVICE_HOST\n value: \"localhost\"\n - name: KUBERNETES_SERVICE_PORT\n value: \"7445\"\n volumeMounts:\n - name: tmp\n mountPath: /tmp\n terminationMessagePolicy: FallbackToLogsOnError\n - name: apply-sysctl-overwrites\n image: \"quay.io/cilium/cilium:v1.17.1@sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866\"\n imagePullPolicy: IfNotPresent\n env:\n - name: BIN_PATH\n value: /opt/cni/bin\n command:\n - sh\n - -ec\n # The statically linked Go program binary is invoked to avoid any\n # dependency on utilities like sh that can be missing on certain\n # distros installed on the underlying host. 
Copy the binary to the\n # same directory where we install cilium cni plugin so that exec permissions\n # are available.\n - |\n cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;\n nsenter --mount=/hostproc/1/ns/mnt \"${BIN_PATH}/cilium-sysctlfix\";\n rm /hostbin/cilium-sysctlfix\n volumeMounts:\n - name: hostproc\n mountPath: /hostproc\n - name: cni-path\n mountPath: /hostbin\n terminationMessagePolicy: FallbackToLogsOnError\n securityContext:\n seLinuxOptions:\n level: s0\n type: spc_t\n capabilities:\n add:\n - SYS_ADMIN\n - SYS_CHROOT\n - SYS_PTRACE\n drop:\n - ALL\n # Mount the bpf fs if it is not mounted. We will perform this task\n # from a privileged container because the mount propagation bidirectional\n # only works from privileged containers.\n - name: mount-bpf-fs\n image: \"quay.io/cilium/cilium:v1.17.1@sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866\"\n imagePullPolicy: IfNotPresent\n args:\n - 'mount | grep \"/sys/fs/bpf type bpf\" || mount -t bpf bpf /sys/fs/bpf'\n command:\n - /bin/bash\n - -c\n - --\n terminationMessagePolicy: FallbackToLogsOnError\n securityContext:\n privileged: true\n volumeMounts:\n - name: bpf-maps\n mountPath: /sys/fs/bpf\n mountPropagation: Bidirectional\n - name: clean-cilium-state\n image: \"quay.io/cilium/cilium:v1.17.1@sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866\"\n imagePullPolicy: IfNotPresent\n command:\n - /init-container.sh\n env:\n - name: CILIUM_ALL_STATE\n valueFrom:\n configMapKeyRef:\n name: cilium-config\n key: clean-cilium-state\n optional: true\n - name: CILIUM_BPF_STATE\n valueFrom:\n configMapKeyRef:\n name: cilium-config\n key: clean-cilium-bpf-state\n optional: true\n - name: WRITE_CNI_CONF_WHEN_READY\n valueFrom:\n configMapKeyRef:\n name: cilium-config\n key: write-cni-conf-when-ready\n optional: true\n - name: KUBERNETES_SERVICE_HOST\n value: \"localhost\"\n - name: KUBERNETES_SERVICE_PORT\n value: \"7445\"\n terminationMessagePolicy: FallbackToLogsOnError\n securityContext:\n seLinuxOptions:\n level: s0\n type: spc_t\n capabilities:\n add:\n - NET_ADMIN\n - SYS_ADMIN\n - SYS_RESOURCE\n drop:\n - ALL\n volumeMounts:\n - name: bpf-maps\n mountPath: /sys/fs/bpf\n # Required to mount cgroup filesystem from the host to cilium agent pod\n - name: cilium-cgroup\n mountPath: /sys/fs/cgroup\n mountPropagation: HostToContainer\n - name: cilium-run\n mountPath: /var/run/cilium # wait-for-kube-proxy\n # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent\n - name: install-cni-binaries\n image: \"quay.io/cilium/cilium:v1.17.1@sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866\"\n imagePullPolicy: IfNotPresent\n command:\n - \"/install-plugin.sh\"\n resources:\n requests:\n cpu: 100m\n memory: 10Mi\n securityContext:\n seLinuxOptions:\n level: s0\n type: spc_t\n capabilities:\n drop:\n - ALL\n terminationMessagePolicy: FallbackToLogsOnError\n volumeMounts:\n - name: cni-path\n mountPath: /host/opt/cni/bin # .Values.cni.install\n restartPolicy: Always\n priorityClassName: system-node-critical\n serviceAccountName: \"cilium\"\n automountServiceAccountToken: true\n terminationGracePeriodSeconds: 1\n hostNetwork: true\n affinity:\n podAntiAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - labelSelector:\n matchLabels:\n k8s-app: cilium\n topologyKey: kubernetes.io/hostname\n nodeSelector:\n kubernetes.io/os: linux\n tolerations:\n - operator: Exists\n volumes:\n # For sharing configuration between the 
\"config\" initContainer and the agent\n - name: tmp\n emptyDir: {}\n # To keep state between restarts / upgrades\n - name: cilium-run\n hostPath:\n path: /var/run/cilium\n type: DirectoryOrCreate\n # To exec into pod network namespaces\n - name: cilium-netns\n hostPath:\n path: /var/run/netns\n type: DirectoryOrCreate\n # To keep state between restarts / upgrades for bpf maps\n - name: bpf-maps\n hostPath:\n path: /sys/fs/bpf\n type: DirectoryOrCreate\n # To mount cgroup2 filesystem on the host or apply sysctlfix\n - name: hostproc\n hostPath:\n path: /proc\n type: Directory\n # To keep state between restarts / upgrades for cgroup2 filesystem\n - name: cilium-cgroup\n hostPath:\n path: /sys/fs/cgroup\n type: DirectoryOrCreate\n # To install cilium cni plugin in the host\n - name: cni-path\n hostPath:\n path: /opt/cni/bin\n type: DirectoryOrCreate\n # To install cilium cni configuration in the host\n - name: etc-cni-netd\n hostPath:\n path: /etc/cni/net.d\n type: DirectoryOrCreate\n # To be able to load kernel modules\n - name: lib-modules\n hostPath:\n path: /lib/modules\n # To access iptables concurrently with other processes (e.g. kube-proxy)\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Sharing socket with Cilium Envoy on the same node by using a host path\n - name: envoy-sockets\n hostPath:\n path: \"/var/run/cilium/envoy/sockets\"\n type: DirectoryOrCreate\n # To read the clustermesh configuration\n - name: clustermesh-secrets\n projected:\n # note: the leading zero means this number is in octal representation: do not remove it\n defaultMode: 0400\n sources:\n - secret:\n name: cilium-clustermesh\n optional: true\n # note: items are not explicitly listed here, since the entries of this secret\n # depend on the peers configured, and that would cause a restart of all agents\n # at every addition/removal. 
Leaving the field empty makes each secret entry\n # to be automatically projected into the volume as a file whose name is the key.\n - secret:\n name: clustermesh-apiserver-remote-cert\n optional: true\n items:\n - key: tls.key\n path: common-etcd-client.key\n - key: tls.crt\n path: common-etcd-client.crt\n - key: ca.crt\n path: common-etcd-client-ca.crt\n # note: we configure the volume for the kvstoremesh-specific certificate\n # regardless of whether KVStoreMesh is enabled or not, so that it can be\n # automatically mounted in case KVStoreMesh gets subsequently enabled,\n # without requiring an agent restart.\n - secret:\n name: clustermesh-apiserver-local-cert\n optional: true\n items:\n - key: tls.key\n path: local-etcd-client.key\n - key: tls.crt\n path: local-etcd-client.crt\n - key: ca.crt\n path: local-etcd-client-ca.crt\n - name: host-proc-sys-net\n hostPath:\n path: /proc/sys/net\n type: Directory\n - name: host-proc-sys-kernel\n hostPath:\n path: /proc/sys/kernel\n type: Directory\n - name: hubble-tls\n projected:\n # note: the leading zero means this number is in octal representation: do not remove it\n defaultMode: 0400\n sources:\n - secret:\n name: hubble-server-certs\n optional: true\n items:\n - key: tls.crt\n path: server.crt\n - key: tls.key\n path: server.key\n - key: ca.crt\n path: client-ca.crt\n---\n# Source: cilium/charts/cilium/templates/cilium-envoy/daemonset.yaml\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: cilium-envoy\n namespace: kube-system\n labels:\n k8s-app: cilium-envoy\n app.kubernetes.io/part-of: cilium\n app.kubernetes.io/name: cilium-envoy\n name: cilium-envoy\nspec:\n selector:\n matchLabels:\n k8s-app: cilium-envoy\n updateStrategy:\n rollingUpdate:\n maxUnavailable: 2\n type: RollingUpdate\n template:\n metadata:\n annotations:\n labels:\n k8s-app: cilium-envoy\n name: cilium-envoy\n app.kubernetes.io/name: cilium-envoy\n app.kubernetes.io/part-of: cilium\n spec:\n securityContext:\n appArmorProfile:\n type: Unconfined\n containers:\n - name: cilium-envoy\n image: \"quay.io/cilium/cilium-envoy:v1.31.5-1739264036-958bef243c6c66fcfd73ca319f2eb49fff1eb2ae@sha256:fc708bd36973d306412b2e50c924cd8333de67e0167802c9b48506f9d772f521\"\n imagePullPolicy: IfNotPresent\n command:\n - /usr/bin/cilium-envoy-starter\n args:\n - '--'\n - '-c /var/run/cilium/envoy/bootstrap-config.json'\n - '--base-id 0'\n - '--log-level info'\n startupProbe:\n httpGet:\n host: \"127.0.0.1\"\n path: /healthz\n port: 9878\n scheme: HTTP\n failureThreshold: 105\n periodSeconds: 2\n successThreshold: 1\n initialDelaySeconds: 5\n livenessProbe:\n httpGet:\n host: \"127.0.0.1\"\n path: /healthz\n port: 9878\n scheme: HTTP\n periodSeconds: 30\n successThreshold: 1\n failureThreshold: 10\n timeoutSeconds: 5\n readinessProbe:\n httpGet:\n host: \"127.0.0.1\"\n path: /healthz\n port: 9878\n scheme: HTTP\n periodSeconds: 30\n successThreshold: 1\n failureThreshold: 3\n timeoutSeconds: 5\n env:\n - name: K8S_NODE_NAME\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: spec.nodeName\n - name: CILIUM_K8S_NAMESPACE\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.namespace\n - name: KUBERNETES_SERVICE_HOST\n value: \"localhost\"\n - name: KUBERNETES_SERVICE_PORT\n value: \"7445\"\n ports:\n - name: envoy-metrics\n containerPort: 9964\n hostPort: 9964\n protocol: TCP\n securityContext:\n seLinuxOptions:\n level: s0\n type: spc_t\n capabilities:\n add:\n - NET_ADMIN\n - SYS_ADMIN\n drop:\n - ALL\n terminationMessagePolicy: FallbackToLogsOnError\n volumeMounts:\n - 
name: envoy-sockets\n mountPath: /var/run/cilium/envoy/sockets\n readOnly: false\n - name: envoy-artifacts\n mountPath: /var/run/cilium/envoy/artifacts\n readOnly: true\n - name: envoy-config\n mountPath: /var/run/cilium/envoy/\n readOnly: true\n - name: bpf-maps\n mountPath: /sys/fs/bpf\n mountPropagation: HostToContainer\n restartPolicy: Always\n priorityClassName: system-node-critical\n serviceAccountName: \"cilium-envoy\"\n automountServiceAccountToken: true\n terminationGracePeriodSeconds: 1\n hostNetwork: true\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: cilium.io/no-schedule\n operator: NotIn\n values:\n - \"true\"\n podAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - labelSelector:\n matchLabels:\n k8s-app: cilium\n topologyKey: kubernetes.io/hostname\n podAntiAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - labelSelector:\n matchLabels:\n k8s-app: cilium-envoy\n topologyKey: kubernetes.io/hostname\n nodeSelector:\n kubernetes.io/os: linux\n tolerations:\n - operator: Exists\n volumes:\n - name: envoy-sockets\n hostPath:\n path: \"/var/run/cilium/envoy/sockets\"\n type: DirectoryOrCreate\n - name: envoy-artifacts\n hostPath:\n path: \"/var/run/cilium/envoy/artifacts\"\n type: DirectoryOrCreate\n - name: envoy-config\n configMap:\n name: \"cilium-envoy-config\"\n # note: the leading zero means this number is in octal representation: do not remove it\n defaultMode: 0400\n items:\n - key: bootstrap-config.json\n path: bootstrap-config.json\n # To keep state between restarts / upgrades\n # To keep state between restarts / upgrades for bpf maps\n - name: bpf-maps\n hostPath:\n path: /sys/fs/bpf\n type: DirectoryOrCreate\n---\n# Source: cilium/charts/cilium/templates/cilium-operator/deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: cilium-operator\n namespace: kube-system\n labels:\n io.cilium/app: operator\n name: cilium-operator\n app.kubernetes.io/part-of: cilium\n app.kubernetes.io/name: cilium-operator\nspec:\n # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go\n # for more details.\n replicas: 2\n selector:\n matchLabels:\n io.cilium/app: operator\n name: cilium-operator\n # ensure operator update on single node k8s clusters, by using rolling update with maxUnavailable=100% in case\n # of one replica and no user configured Recreate strategy.\n # otherwise an update might get stuck due to the default maxUnavailable=50% in combination with the\n # podAntiAffinity which prevents deployments of multiple operator replicas on the same node.\n strategy:\n rollingUpdate:\n maxSurge: 25%\n maxUnavailable: 50%\n type: RollingUpdate\n template:\n metadata:\n annotations:\n prometheus.io/port: \"9963\"\n prometheus.io/scrape: \"true\"\n labels:\n io.cilium/app: operator\n name: cilium-operator\n app.kubernetes.io/part-of: cilium\n app.kubernetes.io/name: cilium-operator\n spec:\n containers:\n - name: cilium-operator\n image: \"quay.io/cilium/operator-generic:v1.17.1@sha256:628becaeb3e4742a1c36c4897721092375891b58bae2bfcae48bbf4420aaee97\"\n imagePullPolicy: IfNotPresent\n command:\n - cilium-operator-generic\n args:\n - --config-dir=/tmp/cilium/config-map\n - --debug=$(CILIUM_DEBUG)\n env:\n - name: K8S_NODE_NAME\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: spec.nodeName\n - name: CILIUM_K8S_NAMESPACE\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.namespace\n - name: CILIUM_DEBUG\n valueFrom:\n 
configMapKeyRef:\n key: debug\n name: cilium-config\n optional: true\n - name: KUBERNETES_SERVICE_HOST\n value: \"localhost\"\n - name: KUBERNETES_SERVICE_PORT\n value: \"7445\"\n ports:\n - name: prometheus\n containerPort: 9963\n hostPort: 9963\n protocol: TCP\n livenessProbe:\n httpGet:\n host: \"127.0.0.1\"\n path: /healthz\n port: 9234\n scheme: HTTP\n initialDelaySeconds: 60\n periodSeconds: 10\n timeoutSeconds: 3\n readinessProbe:\n httpGet:\n host: \"127.0.0.1\"\n path: /healthz\n port: 9234\n scheme: HTTP\n initialDelaySeconds: 0\n periodSeconds: 5\n timeoutSeconds: 3\n failureThreshold: 5\n volumeMounts:\n - name: cilium-config-path\n mountPath: /tmp/cilium/config-map\n readOnly: true\n terminationMessagePolicy: FallbackToLogsOnError\n hostNetwork: true\n restartPolicy: Always\n priorityClassName: system-cluster-critical\n serviceAccountName: \"cilium-operator\"\n automountServiceAccountToken: true\n # In HA mode, cilium-operator pods must not be scheduled on the same\n # node as they will clash with each other.\n affinity:\n podAntiAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - labelSelector:\n matchLabels:\n io.cilium/app: operator\n topologyKey: kubernetes.io/hostname\n nodeSelector:\n kubernetes.io/os: linux\n tolerations:\n - operator: Exists\n volumes:\n # To read the configuration from the config map\n - name: cilium-config-path\n configMap:\n name: cilium-config\n---\n# Source: cilium/charts/cilium/templates/hubble-relay/deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: hubble-relay\n namespace: kube-system\n labels:\n k8s-app: hubble-relay\n app.kubernetes.io/name: hubble-relay\n app.kubernetes.io/part-of: cilium\n\nspec:\n replicas: 1\n selector:\n matchLabels:\n k8s-app: hubble-relay\n strategy:\n rollingUpdate:\n maxUnavailable: 1\n type: RollingUpdate\n template:\n metadata:\n annotations:\n labels:\n k8s-app: hubble-relay\n app.kubernetes.io/name: hubble-relay\n app.kubernetes.io/part-of: cilium\n spec:\n securityContext:\n fsGroup: 65532\n containers:\n - name: hubble-relay\n securityContext:\n capabilities:\n drop:\n - ALL\n runAsGroup: 65532\n runAsNonRoot: true\n runAsUser: 65532\n image: \"quay.io/cilium/hubble-relay:v1.17.1@sha256:397e8fbb188157f744390a7b272a1dec31234e605bcbe22d8919a166d202a3dc\"\n imagePullPolicy: IfNotPresent\n command:\n - hubble-relay\n args:\n - serve\n ports:\n - name: grpc\n containerPort: 4245\n readinessProbe:\n grpc:\n port: 4222\n timeoutSeconds: 3\n # livenessProbe will kill the pod, we should be very conservative\n # here on failures since killing the pod should be a last resort, and\n # we should provide enough time for relay to retry before killing it.\n livenessProbe:\n grpc:\n port: 4222\n timeoutSeconds: 10\n # Give relay time to establish connections and make a few retries\n # before starting livenessProbes.\n initialDelaySeconds: 10\n # 10 second * 12 failures = 2 minutes of failure.\n # If relay cannot become healthy after 2 minutes, then killing it\n # might resolve whatever issue is occurring.\n #\n # 10 seconds is a reasonable retry period so we can see if it's\n # failing regularly or only sporadically.\n periodSeconds: 10\n failureThreshold: 12\n startupProbe:\n grpc:\n port: 4222\n # Give relay time to get it's certs and establish connections and\n # make a few retries before starting startupProbes.\n initialDelaySeconds: 10\n # 20 * 3 seconds = 1 minute of failure before we consider startup as failed.\n failureThreshold: 20\n # Retry more frequently at startup so that it can 
be considered started more quickly.\n periodSeconds: 3\n volumeMounts:\n - name: config\n mountPath: /etc/hubble-relay\n readOnly: true\n - name: tls\n mountPath: /var/lib/hubble-relay/tls\n readOnly: true\n terminationMessagePolicy: FallbackToLogsOnError\n \n restartPolicy: Always\n priorityClassName: \n serviceAccountName: \"hubble-relay\"\n automountServiceAccountToken: false\n terminationGracePeriodSeconds: 1\n affinity:\n podAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - labelSelector:\n matchLabels:\n k8s-app: cilium\n topologyKey: kubernetes.io/hostname\n nodeSelector:\n kubernetes.io/os: linux\n volumes:\n - name: config\n configMap:\n name: hubble-relay-config\n items:\n - key: config.yaml\n path: config.yaml\n - name: tls\n projected:\n # note: the leading zero means this number is in octal representation: do not remove it\n defaultMode: 0400\n sources:\n - secret:\n name: hubble-relay-client-certs\n items:\n - key: tls.crt\n path: client.crt\n - key: tls.key\n path: client.key\n - key: ca.crt\n path: hubble-server-ca.crt\n---\n# Source: cilium/charts/cilium/templates/hubble-ui/deployment.yaml\nkind: Deployment\napiVersion: apps/v1\nmetadata:\n name: hubble-ui\n namespace: kube-system\n labels:\n k8s-app: hubble-ui\n app.kubernetes.io/name: hubble-ui\n app.kubernetes.io/part-of: cilium\nspec:\n replicas: 1\n selector:\n matchLabels:\n k8s-app: hubble-ui\n strategy:\n rollingUpdate:\n maxUnavailable: 1\n type: RollingUpdate\n template:\n metadata:\n annotations:\n labels:\n k8s-app: hubble-ui\n app.kubernetes.io/name: hubble-ui\n app.kubernetes.io/part-of: cilium\n spec:\n securityContext:\n fsGroup: 1001\n runAsGroup: 1001\n runAsUser: 1001\n priorityClassName: \n serviceAccountName: \"hubble-ui\"\n automountServiceAccountToken: true\n containers:\n - name: frontend\n image: \"quay.io/cilium/hubble-ui:v0.13.1@sha256:e2e9313eb7caf64b0061d9da0efbdad59c6c461f6ca1752768942bfeda0796c6\"\n imagePullPolicy: IfNotPresent\n ports:\n - name: http\n containerPort: 8081\n livenessProbe:\n httpGet:\n path: /healthz\n port: 8081\n readinessProbe:\n httpGet:\n path: /\n port: 8081\n volumeMounts:\n - name: hubble-ui-nginx-conf\n mountPath: /etc/nginx/conf.d/default.conf\n subPath: nginx.conf\n - name: tmp-dir\n mountPath: /tmp\n terminationMessagePolicy: FallbackToLogsOnError\n - name: backend\n image: \"quay.io/cilium/hubble-ui-backend:v0.13.1@sha256:0e0eed917653441fded4e7cdb096b7be6a3bddded5a2dd10812a27b1fc6ed95b\"\n imagePullPolicy: IfNotPresent\n env:\n - name: EVENTS_SERVER_PORT\n value: \"8090\"\n - name: FLOWS_API_ADDR\n value: \"hubble-relay:80\"\n ports:\n - name: grpc\n containerPort: 8090\n volumeMounts:\n terminationMessagePolicy: FallbackToLogsOnError\n nodeSelector:\n kubernetes.io/os: linux\n volumes:\n - configMap:\n defaultMode: 420\n name: hubble-ui-nginx\n name: hubble-ui-nginx-conf\n - emptyDir: {}\n name: tmp-dir\n"
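# Note on the repeated KUBERNETES_SERVICE_HOST=localhost / KUBERNETES_SERVICE_PORT=7445
# overrides above: these point every Cilium component at Talos' node-local KubePrism
# API-server load balancer (localhost:7445 is its default endpoint), so the agents can
# reach the control plane before kube-proxy or any CNI networking is up.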
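# A minimal post-deploy sanity check, once Omni has applied this patch and the nodes
# have booted. This is a sketch, not part of the patch itself: it assumes you have
# kubectl access to the cluster and the cilium CLI installed locally.
#
#   kubectl -n kube-system rollout status daemonset/cilium   # agent DaemonSet from the inline manifest
#   kubectl -n kube-system get pods -l k8s-app=cilium        # per-node agent pods
#   cilium status --wait                                     # summarizes agent, operator, and Hubble health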