Add Typha support for Canal Networking
KashifSaadat committed Dec 6, 2019
1 parent ebbebc5 commit 9b7a798
Showing 1 changed file with 153 additions and 9 deletions.
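The new template logic keys off .Networking.Canal.TyphaReplicas (plus the optional Typha Prometheus metrics settings), so Typha is enabled through the Canal section of the kops cluster spec. A minimal sketch of what that opt-in could look like, assuming the cluster-spec field names (typhaReplicas, typhaPrometheusMetricsEnabled, typhaPrometheusMetricsPort) mirror the template references used in this commit:

networking:
  canal:
    typhaReplicas: 3
    # Optional; assumed counterparts of the TyphaPrometheusMetricsEnabled/Port template values below.
    typhaPrometheusMetricsEnabled: true
    typhaPrometheusMetricsPort: 9093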
@@ -4,6 +4,7 @@
# calico/cni:v3.10.1
# calico/node:v3.10.1
# calico/pod2daemon-flexvol:v3.10.1
# calico/typha:v3.10.1
# quay.io/coreos/flannel:v0.11.0

# Source: calico/templates/calico-config.yaml
@@ -14,8 +15,7 @@ metadata:
  name: canal-config
  namespace: kube-system
data:
  # Typha is disabled.
  typha_service_name: "none"
  typha_service_name: "{{ if .Networking.Canal.TyphaReplicas }}calico-typha{{ else }}none{{ end }}"
  # The interface used by canal for host <-> host communication.
  # If left blank, then the interface is chosen using the node's
  # default route.
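For illustration, here is how the templated typha_service_name line evaluates (a sketch of the rendered ConfigMap values, not an additional change in this commit):

# With .Networking.Canal.TyphaReplicas set to a non-zero value:
typha_service_name: "calico-typha"
# With TyphaReplicas unset or 0 (previous behaviour):
typha_service_name: "none"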
@@ -438,6 +438,149 @@ subjects:

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: canal
  namespace: kube-system

---
{{- if .Networking.Canal.TyphaReplicas }}

# This manifest creates a Service, which will be backed by Calico's Typha daemon.
# Typha sits in between Felix and the API server, reducing Calico's load on the API server.

apiVersion: v1
kind: Service
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  ports:
    - port: 5473
      protocol: TCP
      targetPort: calico-typha
      name: calico-typha
  selector:
    k8s-app: calico-typha

---

# This manifest creates a Deployment of Typha to back the above service.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
  # typha_service_name variable in the canal-config ConfigMap above.
  #
  # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
  # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
  # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade.
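  # As a worked example of this guidance: a 300-node cluster lands in the 2-3 replica range at
  # one replica per 100-200 nodes, and a production cluster should still run at least 3.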
  replicas: {{ or .Networking.Canal.TyphaReplicas 0 }}
  revisionHistoryLimit: 2
  template:
    metadata:
      labels:
        k8s-app: calico-typha
      annotations:
        # This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical
        # add-on, ensuring it gets priority scheduling and that its resources are reserved
        # if it ever gets evicted.
        scheduler.alpha.kubernetes.io/critical-pod: ''
        cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
    spec:
      # Since Calico can't network a pod until Typha is up, we need to run Typha itself
      # as a host-networked pod.
      hostNetwork: true
      nodeSelector:
        kubernetes.io/role: master
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - key: "node-role.kubernetes.io/master"
          effect: NoSchedule
      serviceAccountName: canal
      priorityClassName: system-cluster-critical
      # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573
      securityContext:
        fsGroup: 65534
      containers:
        - image: calico/typha:v3.10.1
          name: calico-typha
          ports:
            - containerPort: 5473
              name: calico-typha
              protocol: TCP
          env:
            # Enable "info" logging by default. Can be set to "debug" to increase verbosity.
            - name: TYPHA_LOGSEVERITYSCREEN
              value: "info"
            # Disable logging to file and syslog since those don't make sense in Kubernetes.
            - name: TYPHA_LOGFILEPATH
              value: "none"
            - name: TYPHA_LOGSEVERITYSYS
              value: "none"
            # Monitor the Kubernetes API to find the number of running instances and rebalance
            # connections.
            - name: TYPHA_CONNECTIONREBALANCINGMODE
              value: "kubernetes"
            - name: TYPHA_DATASTORETYPE
              value: "kubernetes"
            - name: TYPHA_HEALTHENABLED
              value: "true"
            - name: TYPHA_PROMETHEUSMETRICSENABLED
              value: "{{- or .Networking.Canal.TyphaPrometheusMetricsEnabled "false" }}"
            - name: TYPHA_PROMETHEUSMETRICSPORT
              value: "{{- or .Networking.Canal.TyphaPrometheusMetricsPort "9093" }}"
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9098
              host: localhost
            periodSeconds: 30
            initialDelaySeconds: 30
          securityContext:
            runAsNonRoot: true
            allowPrivilegeEscalation: false
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9098
              host: localhost
            periodSeconds: 10

---

# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict it when necessary.

apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: calico-typha

{{- end }}
---

# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
@@ -539,6 +682,14 @@ spec:
            # Configure route aggregation based on pod CIDR.
            - name: USE_POD_CIDR
              value: "true"
            {{- if .Networking.Canal.TyphaReplicas }}
            # Typha support: controlled by the ConfigMap.
            - name: FELIX_TYPHAK8SSERVICENAME
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: typha_service_name
            {{- end }}
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
@@ -699,10 +850,3 @@ spec:
          hostPath:
            type: DirectoryOrCreate
            path: "{{- or .Kubelet.VolumePluginDirectory "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" }}nodeagent~uds"
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: canal
  namespace: kube-system
