chore: updating cilium nightly charts #3676

Open · wants to merge 6 commits
4 changes: 2 additions & 2 deletions .pipelines/cni/cilium/nightly-release-test.yml
@@ -159,7 +159,7 @@ stages:
scriptType: "bash"
addSpnToEnvironment: true
inlineScript: |
make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=$(clusterName)-$(commitID)
make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=ciliumnightly-$(commitID)

set -e
echo "Run Cilium Connectivity Tests"
@@ -171,7 +171,7 @@ stages:

- template: ../../templates/log-check-template.yaml # Operator Check
parameters:
clusterName: $(clusterName)-$(commitID)
clusterName: ciliumnightly-$(commitID)
podLabel: "name=cilium-operator"
logGrep: "level=error"
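For reviewers unfamiliar with the log-check template, the operator check is roughly equivalent to the shell below (a sketch; the real logic lives in `../../templates/log-check-template.yaml` and may add retries and output formatting, and the `kube-system` namespace is an assumption):

```bash
# Approximate equivalent of the operator log check (sketch).
# Fail if any cilium-operator pod has logged at error level.
PODS=$(kubectl get pods -n kube-system -l name=cilium-operator -o name)
for POD in $PODS; do
  if kubectl logs -n kube-system "$POD" | grep -q "level=error"; then
    echo "found level=error in logs of $POD"
    exit 1
  fi
done
```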

@@ -124,9 +124,10 @@ steps:

- ${{ if eq( parameters['testHubble'], true) }}:
- script: |
echo "enable Hubble metrics server"
export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG}
export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2)
kubectl apply -f test/integration/manifests/cilium/hubble/hubble-peer-svc.yaml
kubectl apply -f test/integration/manifests/cilium/v1.14.4/cilium-config/cilium-config-hubble.yaml
kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml
kubectl rollout restart ds cilium -n kube-system
echo "wait <3 minutes for pods to be ready after restart"
kubectl rollout status ds cilium -n kube-system --timeout=3m
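Since every Hubble manifest path now depends on the `DIR` derivation above, here is how the expansion plays out (a minimal sketch; the tag value is hypothetical):

```bash
# Map a Cilium version tag to the manifest directory (sketch).
export CILIUM_VERSION_TAG="v1.16.4"                         # hypothetical value
export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2)  # strip "v", keep major.minor
echo "$DIR"   # -> 1.16
echo "test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml"
# -> test/integration/manifests/cilium/v1.16/cilium-config/cilium-config-hubble.yaml
```

Keying the directory off major.minor means a patch bump to `CILIUM_HUBBLE_VERSION_TAG` resolves to the same config directory without further edits to these templates.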
@@ -123,8 +123,10 @@ steps:
- ${{ if eq( parameters['testHubble'], true) }}:
- script: |
echo "enable Hubble metrics server"
export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG}
export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2)
kubectl apply -f test/integration/manifests/cilium/hubble/hubble-peer-svc.yaml
kubectl apply -f test/integration/manifests/cilium/v1.14.4/cilium-config/cilium-config-hubble.yaml
kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml
kubectl rollout restart ds cilium -n kube-system
echo "wait <3 minutes for pods to be ready after restart"
kubectl rollout status ds cilium -n kube-system --timeout=3m
@@ -158,8 +158,11 @@ steps:
- ${{ if eq( parameters['testHubble'], true) }}:
- script: |
echo "enable Hubble metrics server"
export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG}
export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2)
echo "installing files from ${DIR}"
kubectl apply -f test/integration/manifests/cilium/hubble/hubble-peer-svc.yaml
kubectl apply -f test/integration/manifests/cilium/cilium-config-hubble.yaml
kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml
kubectl rollout restart ds cilium -n kube-system
echo "wait <3 minutes for pods to be ready after restart"
kubectl rollout status ds cilium -n kube-system --timeout=3m
@@ -156,8 +156,10 @@ steps:
- ${{ if eq( parameters['testHubble'], true) }}:
- script: |
echo "enable Hubble metrics server"
export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG}
export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2)
kubectl apply -f test/integration/manifests/cilium/hubble/hubble-peer-svc.yaml
kubectl apply -f test/integration/manifests/cilium/cilium-config-hubble.yaml
kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml
kubectl rollout restart ds cilium -n kube-system
echo "wait <3 minutes for pods to be ready after restart"
kubectl rollout status ds cilium -n kube-system --timeout=3m
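Once the rollout finishes, the Hubble metrics server can be spot-checked like this (a sketch; it assumes Hubble's default metrics port 9965 and that port-forwarding the daemonset resolves to a ready pod):

```bash
# Spot-check the Hubble metrics endpoint after the restart (sketch).
kubectl -n kube-system port-forward ds/cilium 9965:9965 &
PF_PID=$!
sleep 2
curl -s http://127.0.0.1:9965/metrics | head   # expect hubble_* series
kill $PF_PID
```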
@@ -2,6 +2,8 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- networking.k8s.io
@@ -45,8 +47,6 @@ rules:
- apiGroups:
- cilium.io
resources:
#Naming changed from ciliumbgploadbalancerippools
- ciliumloadbalancerippools
- ciliumbgppeeringpolicies
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
@@ -59,8 +59,13 @@ rules:
- ciliumnetworkpolicies
- ciliumnodes
- ciliumnodeconfigs
#Added in 1.14.0 snapshot 2
- ciliumloadbalancerippools
- ciliumcidrgroups
- ciliuml2announcementpolicies
- ciliumpodippools
- ciliumbgpnodeconfigs
- ciliumbgpadvertisements
- ciliumbgppeerconfigs
verbs:
- list
- watch
@@ -74,6 +79,7 @@ rules:
- create
- apiGroups:
- cilium.io
# To synchronize garbage collection of such resources
resources:
- ciliumidentities
verbs:
@@ -100,5 +106,16 @@ rules:
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints/status
- ciliumendpoints
- ciliuml2announcementpolicies/status
- ciliumbgpnodeconfigs/status
verbs:
- patch
- apiGroups:
- ""
resourceNames:
- cilium-config
resources:
- configmaps
verbs:
- list
- watch
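Once this ClusterRole is applied, the new grants can be sanity-checked with `kubectl auth can-i` (a sketch; it assumes the agent runs as the `cilium` service account in `kube-system`):

```bash
# Verify the agent can watch the CRDs added in this change (sketch).
SA="system:serviceaccount:kube-system:cilium"
for r in ciliumpodippools ciliumbgpnodeconfigs ciliumbgpadvertisements ciliumbgppeerconfigs; do
  kubectl auth can-i watch "${r}.cilium.io" --as="$SA"
done
# The configmap rule is name-scoped, so include the resource name:
kubectl auth can-i list configmaps/cilium-config -n kube-system --as="$SA"
```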
@@ -2,6 +2,8 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
67 changes: 58 additions & 9 deletions test/integration/manifests/cilium/cilium-nightly-config.yaml
@@ -1,4 +1,4 @@
apiVersion: v1
apiVersion: v1 #Not verified, placeholder
data:
agent-not-ready-taint-key: node.cilium.io/agent-not-ready
arping-refresh-period: 30s
@@ -9,7 +9,6 @@ data:
bpf-map-dynamic-size-ratio: "0.0025"
bpf-policy-map-max: "16384"
bpf-root: /sys/fs/bpf
ces-slice-mode: fcfs
cgroup-root: /run/cilium/cgroupv2
cilium-endpoint-gc-interval: 5m0s
cluster-id: "0"
@@ -20,7 +19,6 @@ data:
enable-auto-protect-node-port-range: "true"
enable-bgp-control-plane: "false"
enable-bpf-clock-probe: "true"
enable-cilium-endpoint-slice: "true"
enable-endpoint-health-checking: "false"
enable-endpoint-routes: "true"
enable-health-check-nodeport: "true"
@@ -35,7 +33,7 @@ data:
enable-l2-neigh-discovery: "true"
enable-l7-proxy: "false"
enable-local-node-route: "false"
enable-local-redirect-policy: "true"
enable-local-redirect-policy: "true" # set to true for lrp test
enable-metrics: "true"
enable-policy: default
enable-session-affinity: "true"
@@ -48,7 +46,7 @@ data:
install-no-conntrack-iptables-rules: "false"
ipam: delegated-plugin
kube-proxy-replacement: "true"
kube-proxy-replacement-healthz-bind-address: ""
kube-proxy-replacement-healthz-bind-address: "0.0.0.0:10256"
local-router-ipv4: 169.254.23.0
metrics: +cilium_bpf_map_pressure
monitor-aggregation: medium
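The healthz bind address now matches kube-proxy's conventional 10256, so the kube-proxy-replacement health endpoint becomes probeable from inside the cluster (a sketch; the node IP lookup and curl image are illustrative choices):

```bash
# Probe the kube-proxy-replacement healthz endpoint (sketch).
# 10256 matches kube-proxy-replacement-healthz-bind-address above.
NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
kubectl run healthz-probe --rm -i --restart=Never --image=curlimages/curl \
  --command -- curl -s "http://${NODE_IP}:10256/healthz"
```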
@@ -63,21 +61,72 @@ data:
prometheus-serve-addr: :9962
remove-cilium-node-taints: "true"
set-cilium-is-up-condition: "true"
sidecar-istio-proxy-image: cilium/istio_proxy
synchronize-k8s-nodes: "true"
tofqdns-dns-reject-response-code: refused
tofqdns-enable-dns-compression: "true"
tofqdns-endpoint-max-ip-per-hostname: "50"
tofqdns-endpoint-max-ip-per-hostname: "1000"
tofqdns-idle-connection-grace-period: 0s
tofqdns-max-deferred-connection-deletes: "10000"
tofqdns-min-ttl: "3600"
tofqdns-min-ttl: "0"
tofqdns-proxy-response-max-delay: 100ms
#Replaces tunnel: disabled in v1.15
routing-mode: "native"
routing-mode: native
unmanaged-pod-watcher-interval: "15"
vtep-cidr: ""
vtep-endpoint: ""
vtep-mac: ""
vtep-mask: ""
enable-sctp: "false"
external-envoy-proxy: "false"
k8s-client-qps: "10"
k8s-client-burst: "20"
mesh-auth-enabled: "true"
mesh-auth-queue-size: "1024"
mesh-auth-rotated-identities-queue-size: "1024"
mesh-auth-gc-interval: "5m0s"
proxy-connect-timeout: "2"
proxy-max-requests-per-connection: "0"
proxy-max-connection-duration-seconds: "0"
set-cilium-node-taints: "true"
## new values added for 1.16 below
enable-ipv4-big-tcp: "false"
enable-ipv6-big-tcp: "false"
enable-masquerade-to-route-source: "false"
enable-health-check-loadbalancer-ip: "false"
bpf-lb-acceleration: "disabled"
enable-k8s-networkpolicy: "true"
cni-exclusive: "false" # Cilium takes ownership of /etc/cni/net.d, pods cannot be scheduled with any other cni if cilium is down
cni-log-file: "/var/run/cilium/cilium-cni.log"
ipam-cilium-node-update-rate: "15s"
egress-gateway-reconciliation-trigger-interval: "1s"
nat-map-stats-entries: "32"
nat-map-stats-interval: "30s"
bpf-events-drop-enabled: "true" # exposes drop events to cilium monitor/hubble
bpf-events-policy-verdict-enabled: "true" # exposes policy verdict events to cilium monitor/hubble
bpf-events-trace-enabled: "true" # exposes trace events to cilium monitor/hubble
enable-tcx: "false" # attach endpoint programs with tcx if supported by kernel
datapath-mode: "veth"
direct-routing-skip-unreachable: "false"
enable-runtime-device-detection: "false"
bpf-lb-sock: "false"
bpf-lb-sock-terminate-pod-connections: "false"
nodeport-addresses: ""
k8s-require-ipv4-pod-cidr: "false"
k8s-require-ipv6-pod-cidr: "false"
enable-node-selector-labels: "false"
## new values for 1.17
ces-slice-mode: "fcfs"
enable-cilium-endpoint-slice: "true"
bpf-lb-source-range-all-types: "false"
bpf-algorithm-annotation: "false"
bpf-lb-mode-annotation: "false"
enable-experimental-lb: "false"
enable-endpoint-lockdown-on-policy-overflow: "false"
health-check-icmp-failure-threshold: "3"
enable-internal-traffic-policy: "true"
enable-lb-ipam: "true"
enable-non-default-deny-policies: "true"
enable-source-ip-verification: "true"
kind: ConfigMap
metadata:
annotations:
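With this many new 1.16/1.17 keys, it is worth reading a few back after the manifest is applied (a sketch; it assumes this file creates the usual `cilium-config` ConfigMap in `kube-system`):

```bash
# Read back a sample of the newly added keys (sketch).
for key in routing-mode enable-cilium-endpoint-slice ces-slice-mode enable-tcx; do
  printf '%s=' "$key"
  kubectl get configmap cilium-config -n kube-system -o jsonpath="{.data['${key}']}"
  echo
done
```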
@@ -2,6 +2,8 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium-operator
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- ""
@@ -14,6 +16,15 @@ rules:
# to automatically delete [core|kube]dns pods so that are starting to being
# managed by Cilium
- delete
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- cilium-config
verbs:
# allow patching of the configmap to set annotations
- patch
- apiGroups:
- ""
resources:
@@ -51,6 +62,7 @@ rules:
resources:
# to check apiserver connectivity
- namespaces
- secrets
verbs:
- get
- list
@@ -87,6 +99,7 @@ rules:
- ciliumclusterwidenetworkpolicies/status
verbs:
# Update the auto-generated CNPs and CCNPs status.
- patch
- update
- apiGroups:
- cilium.io
@@ -103,6 +116,7 @@ rules:
resources:
- ciliumidentities
verbs:
# To synchronize garbage collection of such resources
- update
- apiGroups:
- cilium.io
@@ -127,6 +141,9 @@ rules:
resources:
- ciliumendpointslices
- ciliumenvoyconfigs
- ciliumbgppeerconfigs
- ciliumbgpadvertisements
- ciliumbgpnodeconfigs
verbs:
- create
- update
@@ -135,6 +152,13 @@ rules:
- watch
- delete
- patch
- apiGroups:
- cilium.io
resources:
- ciliumbgpclusterconfigs/status
- ciliumbgppeerconfigs/status
verbs:
- update
- apiGroups:
- apiextensions.k8s.io
resources:
@@ -153,10 +177,14 @@ rules:
resourceNames:
- ciliumloadbalancerippools.cilium.io
- ciliumbgppeeringpolicies.cilium.io
- ciliumbgpclusterconfigs.cilium.io
- ciliumbgppeerconfigs.cilium.io
- ciliumbgpadvertisements.cilium.io
- ciliumbgpnodeconfigs.cilium.io
- ciliumbgpnodeconfigoverrides.cilium.io
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumegressnatpolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
@@ -166,8 +194,34 @@ rules:
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
- ciliumnodeconfigs.cilium.io
#Added in 1.14.0 snapshot 2
- ciliumcidrgroups.cilium.io
- ciliuml2announcementpolicies.cilium.io
- ciliumpodippools.cilium.io
- apiGroups:
- cilium.io
resources:
- ciliumloadbalancerippools
- ciliumpodippools
- ciliumbgppeeringpolicies
- ciliumbgpclusterconfigs
- ciliumbgpnodeconfigoverrides
- ciliumbgppeerconfigs
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumpodippools
verbs:
- create
- apiGroups:
- cilium.io
resources:
- ciliumloadbalancerippools/status
verbs:
- patch
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
@@ -181,4 +235,4 @@
verbs:
- create
- get
- update
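The HA leader-election grant can be observed at runtime through the Lease object itself (a sketch; `cilium-operator-resource-lock` is the upstream default name and may differ in this deployment):

```bash
# Show which operator replica currently holds leadership (sketch).
kubectl get lease -n kube-system cilium-operator-resource-lock \
  -o jsonpath='{.spec.holderIdentity}'
```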