diff --git a/_includes/master/manifests/calico-config.yaml b/_includes/master/manifests/calico-config.yaml new file mode 100644 index 00000000000..337676aa91c --- /dev/null +++ b/_includes/master/manifests/calico-config.yaml @@ -0,0 +1,142 @@ +{% comment %} +calico-config.yaml accepts the following include flags: + +| Name | Accepted Values | +|--------------|--------------------------| +| datastore | kdd, etcd | +| typha | true, false | +| network | calico, flannel, | +| calico_ipam | true, false | +| variant_name | Calico, Canal | + +{% endcomment -%} +# This ConfigMap is used to configure a self-hosted {{include.variant_name}} installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{include.variant_name | downcase}}-config + namespace: kube-system +data: +{%- if include.datastore == "etcd" %} + # Configure this with the location of your etcd cluster. + etcd_endpoints: "http://127.0.0.1:2379" + + # If you're using TLS enabled etcd, uncomment the following. + # You must also populate the Secret below with these files. + etcd_ca: "" # "/calico-secrets/etcd-ca" + etcd_cert: "" # "/calico-secrets/etcd-cert" + etcd_key: "" # "/calico-secrets/etcd-key" +{%- elsif include.datastore == "kdd" and include.typha == "true" %} + # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas + # below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is + # essential. + typha_service_name: "none" +{%- endif %} +{%- if include.network == "calico" %} + # Configure the {{site.prodname}} backend to use. + calico_backend: "bird" + + # Configure the MTU to use + veth_mtu: "1440" +{%- elsif include.network == "flannel" %} + # The interface used by canal for host <-> host communication. + # If left blank, then the interface is chosen using the node's + # default route. + canal_iface: "" + + # Whether or not to masquerade traffic to destinations not within + # the pod network. + masquerade: "true" +{%- endif %} + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated.
+{%- if include.datastore == "etcd" and include.network == "flannel" %} + cni_network_config: |- + { + "name": "canal", + "cniVersion": "0.3.0", + "plugins": [ + { + "type": "flannel", + "delegate": { + "type": "calico", + "etcd_endpoints": "__ETCD_ENDPOINTS__", + "etcd_key_file": "__ETCD_KEY_FILE__", + "etcd_cert_file": "__ETCD_CERT_FILE__", + "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", + "log_level": "info", + "policy": { + "type": "k8s", + "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", + "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" + }, + "kubernetes": { + "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" + } + } + }, + { + "type": "portmap", + "capabilities": {"portMappings": true}, + "snat": true + } + ] + } +{%- else %} + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.0", + "plugins": [ + { + "type": "calico", + "log_level": "info", + {%- if include.datastore == "etcd" %} + "etcd_endpoints": "__ETCD_ENDPOINTS__", + "etcd_key_file": "__ETCD_KEY_FILE__", + "etcd_cert_file": "__ETCD_CERT_FILE__", + "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", + {%- elsif include.datastore == "kdd" %} + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + {%- endif %} + {%- if include.network == "calico" %} + "mtu": __CNI_MTU__, + {%- elsif include.network == null %} + "mtu": 1500, + {%- endif %} + {%- if include.calico_ipam == "true" %} + "ipam": { + "type": "calico-ipam" + }, + {%- else %} + "ipam": { + "type": "host-local", + "subnet": "usePodCidr" + }, + {%- endif %} + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + } + ] + } +{%- endif %} +{%- if include.network == "flannel" and include.datastore == "kdd" %} + # Flannel network configuration. Mounted into the flannel container. + net-conf.json: | + { + "Network": "10.244.0.0/16", + "Backend": { + "Type": "vxlan" + } + } +{%- endif %} \ No newline at end of file diff --git a/_includes/master/manifests/calico-etcd-secrets.yaml b/_includes/master/manifests/calico-etcd-secrets.yaml new file mode 100644 index 00000000000..61597b9d5bb --- /dev/null +++ b/_includes/master/manifests/calico-etcd-secrets.yaml @@ -0,0 +1,16 @@ +# The following contains k8s Secrets for use with a TLS enabled etcd cluster. +# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: calico-etcd-secrets + namespace: kube-system +data: + # Populate the following files with etcd TLS configuration if desired, but leave blank if + # not using TLS for etcd. + # This self-hosted install expects three files with the following names. The values + # should be base64 encoded strings of the entire contents of each file. + # etcd-key: null + # etcd-cert: null + # etcd-ca: null diff --git a/_includes/master/manifests/calico-kube-controllers.yaml b/_includes/master/manifests/calico-kube-controllers.yaml new file mode 100644 index 00000000000..8f376cc7f22 --- /dev/null +++ b/_includes/master/manifests/calico-kube-controllers.yaml @@ -0,0 +1,91 @@ +{% comment %} +calico-kube-controllers.yaml accepts the following include flags: + +| Name | Accepted Values | +|------------------|-----------------| +| variant_name | Calico, Canal | + +{% endcomment -%} +# This manifest deploys the {{site.prodname}} Kubernetes controllers.
+# See https://github.com/projectcalico/kube-controllers +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' +spec: + # The controllers can only have a single active instance. + replicas: 1 + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + spec: + # The controllers must run in the host network namespace so that + # it isn't governed by policy that would prevent it from working. + hostNetwork: true + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: calico-kube-controllers + containers: + - name: calico-kube-controllers + image: {{site.imageNames["kubeControllers"]}}:{{site.data.versions[page.version].first.components["calico/kube-controllers"].version}} + env: + # The location of the {{site.prodname}} etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: {{include.variant_name | downcase}}-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: {{include.variant_name | downcase}}-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: {{include.variant_name | downcase}}-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: {{include.variant_name | downcase}}-config + key: etcd_cert + # Choose which controllers to run. + - name: ENABLED_CONTROLLERS + value: policy,profile,workloadendpoint,node + volumeMounts: + # Mount in the etcd TLS secrets. + - mountPath: /calico-secrets + name: etcd-certs + volumes: + # Mount in the etcd TLS secrets with mode 400. + # See https://kubernetes.io/docs/concepts/configuration/secret/ + - name: etcd-certs + secret: + secretName: calico-etcd-secrets + defaultMode: 0400 + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system \ No newline at end of file diff --git a/master/getting-started/kubernetes/installation/hosted/canal/canal-etcd-app-layer-policy.yaml b/_includes/master/manifests/calico-node.yaml similarity index 55% rename from master/getting-started/kubernetes/installation/hosted/canal/canal-etcd-app-layer-policy.yaml rename to _includes/master/manifests/calico-node.yaml index 1623fd88c3a..7ff851de372 100644 --- a/master/getting-started/kubernetes/installation/hosted/canal/canal-etcd-app-layer-policy.yaml +++ b/_includes/master/manifests/calico-node.yaml @@ -1,120 +1,49 @@ ---- -layout: null ---- -# Canal Version {{site.data.versions[page.version].first.title}} -# {{site.url}}/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} -# This manifest includes the following component versions: -# calico/node:{{site.data.versions[page.version].first.title}} -# calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} -# calico/kube-controllers:{{site.data.versions[page.version].first.components["calico/kube-controllers"].version}} -# coreos/flannel:v0.9.1 - -# This ConfigMap can be used to configure a self-hosted Canal installation. 
-kind: ConfigMap -apiVersion: v1 -metadata: - name: canal-config - namespace: kube-system -data: - # Configure this with the location of your etcd cluster. - etcd_endpoints: "https://127.0.0.1:2379" - - # The interface used by canal for host <-> host communication. - # If left blank, then the interface is chosing using the node's - # default route. - canal_iface: "" - - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: "true" - - # The CNI network configuration to install on each node. The special - # values in this config will be automatically populated. - cni_network_config: |- - { - "name": "canal", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "flannel", - "delegate": { - "type": "calico", - "etcd_endpoints": "__ETCD_ENDPOINTS__", - "etcd_key_file": "__ETCD_KEY_FILE__", - "etcd_cert_file": "__ETCD_CERT_FILE__", - "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", - "log_level": "info", - "policy": { - "type": "k8s", - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" - } - } - }, - { - "type": "portmap", - "capabilities": {"portMappings": true}, - "snat": true - } - ] - } - - # If you're using TLS enabled etcd uncomment the following. - # You must also populate the Secret below with these files. - etcd_ca: "" # "/calico-secrets/etcd-ca" - etcd_cert: "" # "/calico-secrets/etcd-cert" - etcd_key: "" # "/calico-secrets/etcd-key" +{% comment %} +calico-node.yaml acccepts the following include flags: ---- -# The following contains k8s Secrets for use with a TLS enabled etcd cluster. -# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ -apiVersion: v1 -kind: Secret -type: Opaque -metadata: - name: calico-etcd-secrets - namespace: kube-system -data: - # Populate the following files with etcd TLS configuration if desired, but leave blank if - # not using TLS for etcd. - # This self-hosted install expects three files with the following names. The values - # should be base64 encoded strings of the entire contents of each file. - # etcd-key: "" - # etcd-cert: "" - # etcd-ca: "" - ---- +| Name | Accepted Values | +|------------------|--------------------------| +| datastore | kdd, etcd | +| typha | true, false | +| network | calico, flannel, | +| ipip | true, false | +| variant_name | Calico, Canal | +| app_layer_policy | true, false | -# This manifest installs the per-node agents, as well -# as the CNI plugins and network config on +{% endcomment -%} +{% capture objname %}{% if include.network == "flannel" and include.datastore == "etcd" %}canal-node{% elsif include.network == "flannel" %}canal{% else %}calico-node{% endif %}{% endcapture -%} +# This manifest installs the calico/node container, as well +# as the {{site.prodname}} CNI plugins and network config on # each master and worker node in a Kubernetes cluster. 
kind: DaemonSet apiVersion: extensions/v1beta1 metadata: - name: canal-node + name: {{objname}} namespace: kube-system labels: - k8s-app: canal-node + k8s-app: {{objname}} spec: selector: matchLabels: - k8s-app: canal-node + k8s-app: {{objname}} updateStrategy: type: RollingUpdate rollingUpdate: maxUnavailable: 1 template: metadata: + labels: + k8s-app: {{objname}} annotations: + # This, along with the CriticalAddonsOnly toleration below, + # marks the pod as a critical add-on, ensuring it gets + # priority scheduling and that its resources are reserved + # if it ever gets evicted. scheduler.alpha.kubernetes.io/critical-pod: '' - labels: - k8s-app: canal-node spec: + hostNetwork: true tolerations: - # Make sure canal node can be scheduled on all nodes. + # Make sure {{objname}} gets scheduled on all nodes. - effect: NoSchedule operator: Exists # Mark the pod as a critical add-on for rescheduling. @@ -122,11 +51,11 @@ spec: operator: Exists - effect: NoExecute operator: Exists - hostNetwork: true - serviceAccountName: canal + serviceAccountName: {{objname}} # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. terminationGracePeriodSeconds: 0 +{%- if include.app_layer_policy == "true" %} # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes # to communicate with Felix over the Policy Sync API. initContainers: @@ -136,127 +65,137 @@ spec: volumeMounts: - name: flexvol-driver-host mountPath: /host/driver +{%- endif %} containers: - # Runs the flannel daemon to enable vxlan networking between - # container hosts. - - name: flannel - image: {{site.imageNames["flannel"]}}:{{site.data.versions[page.version].first.components["flannel"].version}} + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: {{site.imageNames["node"]}}:{{site.data.versions[page.version].first.title}} env: - # The location of the etcd cluster. - - name: FLANNELD_ETCD_ENDPOINTS +{%- if include.datastore == "etcd" %} + # The location of the {{site.prodname}} etcd cluster. + - name: ETCD_ENDPOINTS valueFrom: configMapKeyRef: - name: canal-config + name: {{include.variant_name | downcase}}-config key: etcd_endpoints # Location of the CA certificate for etcd. - name: ETCD_CA_CERT_FILE valueFrom: configMapKeyRef: - name: canal-config + name: {{include.variant_name | downcase}}-config key: etcd_ca # Location of the client key for etcd. - name: ETCD_KEY_FILE valueFrom: configMapKeyRef: - name: canal-config + name: {{include.variant_name | downcase}}-config key: etcd_key # Location of the client certificate for etcd. - name: ETCD_CERT_FILE valueFrom: configMapKeyRef: - name: canal-config + name: {{include.variant_name | downcase}}-config key: etcd_cert - # Location of the CA certificate for etcd. - - name: FLANNELD_ETCD_CAFILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_ca - # Location of the client key for etcd. - - name: FLANNELD_ETCD_KEYFILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_key - # Location of the client certificate for etcd. - - name: FLANNELD_ETCD_CERTFILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_cert - # The interface flannel should run on. - - name: FLANNELD_IFACE - valueFrom: - configMapKeyRef: - name: canal-config - key: canal_iface - # Perform masquerade on traffic leaving the pod cidr. 
- - name: FLANNELD_IP_MASQ + # Set noderef for node controller. + - name: CALICO_K8S_NODE_REF valueFrom: - configMapKeyRef: - name: canal-config - key: masquerade - # Write the subnet.env file to the mounted directory. - - name: FLANNELD_SUBNET_FILE - value: "/run/flannel/subnet.env" - securityContext: - privileged: true - volumeMounts: - - mountPath: /etc/resolv.conf - name: resolv - - mountPath: /run/flannel - name: run-flannel - - mountPath: /calico-secrets - name: etcd-certs - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and local routes on each - # host. - - name: calico-node - image: {{site.imageNames["node"]}}:{{site.data.versions[page.version].first.title}} - env: - # The location of the etcd cluster. - - name: ETCD_ENDPOINTS + fieldRef: + fieldPath: spec.nodeName +{%- elsif include.datastore == "kdd" %} + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + {%- if include.typha == "true" %} + # Typha support: controlled by the ConfigMap. + - name: FELIX_TYPHAK8SSERVICENAME valueFrom: configMapKeyRef: - name: canal-config - key: etcd_endpoints - # Location of the CA certificate for etcd. - - name: ETCD_CA_CERT_FILE + name: {{include.variant_name | downcase}}-config + key: typha_service_name + {%- endif %} + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" + # Set based on the k8s node name. + - name: NODENAME valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_ca - # Location of the client key for etcd. - - name: ETCD_KEY_FILE + fieldRef: + fieldPath: spec.nodeName +{%- endif %} +{%- if include.network == "calico" %} + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND valueFrom: configMapKeyRef: - name: canal-config - key: etcd_key - # Location of the client certificate for etcd. - - name: ETCD_CERT_FILE + name: {{include.variant_name | downcase}}-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,bgp" + # Auto-detect the BGP IP address. + - name: IP + value: "autodetect" + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP + value: "Always" + {%- if include.ipip == "true" %} + # Enable IP-in-IP within Felix. + - name: FELIX_IPINIPENABLED + value: "true" + {%- endif %} + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU valueFrom: configMapKeyRef: - name: canal-config - key: etcd_cert - # Disable {{site.prodname}} BGP. {{site.prodname}} is simply enforcing policy. + name: {{include.variant_name | downcase}}-config + key: veth_mtu +{%- elsif include.network == "flannel" %} + # Don't enable BGP. - name: CALICO_NETWORKING_BACKEND value: "none" # Cluster type to identify the deployment type - name: CLUSTER_TYPE value: "k8s,canal" + # Period, in seconds, at which felix re-applies all iptables state + - name: FELIX_IPTABLESREFRESHINTERVAL + value: "60" + # No IP address needed. + - name: IP + value: "" +{%- else %} + # Don't enable BGP. + - name: CALICO_NETWORKING_BACKEND + value: "none" + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s" +{%- endif %} + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + - name: CALICO_IPV4POOL_CIDR + value: "192.168.0.0/16" # Disable file logging so `kubectl logs` works. 
- name: CALICO_DISABLE_FILE_LOGGING value: "true" - # Set noderef for node controller. - - name: CALICO_K8S_NODE_REF - valueFrom: - fieldRef: - fieldPath: spec.nodeName + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPv6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" - name: FELIX_HEALTHENABLED value: "true" +{%- if include.app_layer_policy == "true" %} # Enable the Policy Sync API between Felix and Dikastes - name: FELIX_POLICYSYNCPATHPREFIX value: "/var/run/nodeagent" +{%- endif %} securityContext: privileged: true resources: @@ -266,13 +205,27 @@ spec: httpGet: path: /liveness port: 9099 + host: localhost periodSeconds: 10 initialDelaySeconds: 10 failureThreshold: 6 readinessProbe: +{%- if include.network == "calico" %} + exec: + command: + - /bin/readiness + - -bird + - -felix +{%- elsif include.network == "flannel" %} httpGet: path: /readiness port: 9099 +{%- else %} + exec: + command: + - /bin/readiness + - -felix +{%- endif %} periodSeconds: 10 volumeMounts: - mountPath: /lib/modules @@ -281,26 +234,36 @@ spec: - mountPath: /var/run/calico name: var-run-calico readOnly: false +{%- if include.network != "flannel" %} + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false +{%- endif %} +{%- if include.datastore == "etcd" %} - mountPath: /calico-secrets name: etcd-certs +{%- endif %} +{%- if include.app_layer_policy == "true" %} - name: policysync mountPath: /var/run/nodeagent +{%- endif %} # This container installs the {{site.prodname}} CNI binaries # and CNI network config file on each node. - - name: install-calico-cni + - name: install-cni image: {{site.imageNames["cni"]}}:{{site.data.versions[page.version].first.components["calico/cni"].version}} - imagePullPolicy: Always command: ["/install-cni.sh"] env: - # The name of the CNI network config file to install. + # Name of the CNI config file to create. - name: CNI_CONF_NAME - value: "10-canal.conflist" - # The location of the etcd cluster. + value: "10-{{include.variant_name | downcase}}.conflist" +{%- if include.datastore == "etcd" %} + # The location of the {{site.prodname}} etcd cluster. - name: ETCD_ENDPOINTS valueFrom: configMapKeyRef: - name: canal-config + name: {{include.variant_name | downcase}}-config key: etcd_endpoints + {%- if include.network == "flannel" %} # Location of the CA certificate for etcd. - name: ETCD_CA_CERT_FILE valueFrom: @@ -319,153 +282,78 @@ spec: configMapKeyRef: name: canal-config key: etcd_cert + {%- endif %} +{%- elsif include.datastore == "kdd" %} + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName +{%- endif %} # The CNI network config to install on each node. - name: CNI_NETWORK_CONFIG valueFrom: configMapKeyRef: - name: canal-config + name: {{include.variant_name | downcase}}-config key: cni_network_config +{%- if include.network == "calico" %} + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: {{include.variant_name | downcase}}-config + key: veth_mtu +{%- endif %} volumeMounts: - mountPath: /host/opt/cni/bin name: cni-bin-dir - mountPath: /host/etc/cni/net.d name: cni-net-dir +{%- if include.datastore == "etcd" %} - mountPath: /calico-secrets name: etcd-certs - volumes: - # Used by calico/node. 
- - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Used by flannel daemon. - - name: run-flannel - hostPath: - path: /run/flannel - - name: resolv - hostPath: - path: /etc/resolv.conf - # Mount in the etcd TLS secrets. - - name: etcd-certs - secret: - secretName: calico-etcd-secrets - # Used to create per-pod Unix Domain Sockets - - name: policysync - hostPath: - type: DirectoryOrCreate - path: /var/run/nodeagent - # Used to install Flex Volume Driver - - name: flexvol-driver-host - hostPath: - type: DirectoryOrCreate - path: /opt/bin/volume-plugins/nodeagent~uds - ---- - -# This manifest deploys a Job which performs one time -# configuration of Canal. -apiVersion: batch/v1 -kind: Job -metadata: - name: configure-canal - namespace: kube-system - labels: - k8s-app: canal -spec: - template: - metadata: - name: configure-canal - spec: - hostNetwork: true - restartPolicy: OnFailure - containers: - # Writes basic flannel configuration to etcd. - - name: configure-flannel - image: quay.io/coreos/etcd:v3.1.5 - command: - - "etcdctl" - - "--cert-file=/calico-secrets/etcd-cert" - - "--key-file=/calico-secrets/etcd-key" - - "--ca-file=/calico-secrets/etcd-ca" - - "--no-sync" - - "set" - - "/coreos.com/network/config" - - '{ "Network": "10.244.0.0/16", "Backend": {"Type": "vxlan"} }' +{%- endif %} +{%- if include.network == "flannel" %} + {%- if include.datastore == "kdd" %} + # This container runs flannel using the kube-subnet-mgr backend + # for allocating subnets. + - name: kube-flannel + image: quay.io/coreos/flannel:v0.9.1 + command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] + securityContext: + privileged: true env: - # The location of the etcd cluster. - - name: ETCDCTL_PEERS + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: FLANNELD_IFACE valueFrom: configMapKeyRef: name: canal-config - key: etcd_endpoints - # The location of the {{site.prodname}} etcd cluster. - - name: ETCDCTL_CACERT + key: canal_iface + - name: FLANNELD_IP_MASQ valueFrom: configMapKeyRef: name: canal-config - key: etcd_ca + key: masquerade volumeMounts: - # Mount in the etcd TLS secrets. - - mountPath: /calico-secrets - name: etcd-certs - volumes: - # Mount in the etcd TLS secrets. - - name: etcd-certs - secret: - secretName: calico-etcd-secrets - ---- - -# This manifest deploys the {{site.prodname}} policy controller on Kubernetes. -# See https://github.com/projectcalico/k8s-policy -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers -spec: - # The policy controller can only have a single active instance. - replicas: 1 - strategy: - type: Recreate - template: - metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - # The policy controller must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. - hostNetwork: true - tolerations: - # Make sure canal node can be scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. 
- - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - containers: - - name: calico-kube-controllers - image: {{site.imageNames["kubeControllers"]}}:{{site.data.versions[page.version].first.components["calico/kube-controllers"].version}} + - name: run + mountPath: /run + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + {%- elsif include.datastore == "etcd" %} + # Runs the flannel daemon to enable vxlan networking between + # container hosts. + - name: flannel + image: quay.io/coreos/flannel:v0.9.1 env: - # The location of the {{site.prodname}} etcd cluster. - - name: ETCD_ENDPOINTS + # The location of the etcd cluster. + - name: FLANNELD_ETCD_ENDPOINTS valueFrom: configMapKeyRef: name: canal-config @@ -488,18 +376,115 @@ spec: configMapKeyRef: name: canal-config key: etcd_cert - # Choose which controllers to run. - - name: ENABLED_CONTROLLERS - value: policy,profile,workloadendpoint,node + # Location of the CA certificate for etcd. + - name: FLANNELD_ETCD_CAFILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_ca + # Location of the client key for etcd. + - name: FLANNELD_ETCD_KEYFILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_key + # Location of the client certificate for etcd. + - name: FLANNELD_ETCD_CERTFILE + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_cert + # The interface flannel should run on. + - name: FLANNELD_IFACE + valueFrom: + configMapKeyRef: + name: canal-config + key: canal_iface + # Perform masquerade on traffic leaving the pod cidr. + - name: FLANNELD_IP_MASQ + valueFrom: + configMapKeyRef: + name: canal-config + key: masquerade + # Write the subnet.env file to the mounted directory. + - name: FLANNELD_SUBNET_FILE + value: "/run/flannel/subnet.env" + securityContext: + privileged: true volumeMounts: - # Mount in the etcd TLS secrets. + - mountPath: /etc/resolv.conf + name: resolv + - mountPath: /run/flannel + name: run-flannel - mountPath: /calico-secrets name: etcd-certs + {%- endif %} +{%- endif %} volumes: - # Mount in the etcd TLS secrets. + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico +{%- if include.network == "flannel" and include.datastore == "kdd" %} + # Used by flannel. + - name: run + hostPath: + path: /run + - name: flannel-cfg + configMap: + name: canal-config +{%- elsif include.network == "flannel" and include.datastore == "etcd" %} + # Used by flannel daemon. + - name: run-flannel + hostPath: + path: /run/flannel + - name: resolv + hostPath: + path: /etc/resolv.conf +{%- else %} + - name: var-lib-calico + hostPath: + path: /var/lib/calico +{%- endif %} + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d +{%- if include.datastore == "etcd" %} + # Mount in the etcd TLS secrets with mode 400. 
+ # See https://kubernetes.io/docs/concepts/configuration/secret/ - name: etcd-certs secret: secretName: calico-etcd-secrets + defaultMode: 0400 +{%- endif %} +{%- if include.app_layer_policy %} + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host + hostPath: + type: DirectoryOrCreate + path: /opt/bin/volume-plugins/nodeagent~uds +{%- endif %} +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{objname}} + namespace: kube-system + +{%- if include.datastore == "etcd" and include.network == "flannel" %} --- apiVersion: rbac.authorization.k8s.io/v1beta1 @@ -528,11 +513,4 @@ rules: - nodes verbs: - get - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: canal - namespace: kube-system +{%- endif %} diff --git a/_includes/master/manifests/calico-typha.yaml b/_includes/master/manifests/calico-typha.yaml new file mode 100644 index 00000000000..524c59c2fa5 --- /dev/null +++ b/_includes/master/manifests/calico-typha.yaml @@ -0,0 +1,98 @@ +# This manifest creates a Service, which will be backed by Calico's Typha daemon. +# Typha sits in between Felix and the API server, reducing Calico's load on the API server. + +apiVersion: v1 +kind: Service +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + ports: + - port: 5473 + protocol: TCP + targetPort: calico-typha + name: calico-typha + selector: + k8s-app: calico-typha + +--- + +# This manifest creates a Deployment of Typha to back the above service. + +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the + # typha_service_name variable in the calico-config ConfigMap above. + # + # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential + # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In + # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. + replicas: 0 + revisionHistoryLimit: 2 + template: + metadata: + labels: + k8s-app: calico-typha + annotations: + # This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical + # add-on, ensuring it gets priority scheduling and that its resources are reserved + # if it ever gets evicted. + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + hostNetwork: true + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + # Since Calico can't network a pod until Typha is up, we need to run Typha itself + # as a host-networked pod. + serviceAccountName: calico-node + containers: + - image: quay.io/calico/typha:master + name: calico-typha + ports: + - containerPort: 5473 + name: calico-typha + protocol: TCP + env: + # Enable "info" logging by default. Can be set to "debug" to increase verbosity. + - name: TYPHA_LOGSEVERITYSCREEN + value: "info" + # Disable logging to file and syslog since those don't make sense in Kubernetes. + - name: TYPHA_LOGFILEPATH + value: "none" + - name: TYPHA_LOGSEVERITYSYS + value: "none" + # Monitor the Kubernetes API to find the number of running instances and rebalance + # connections. 
+ - name: TYPHA_CONNECTIONREBALANCINGMODE + value: "kubernetes" + - name: TYPHA_DATASTORETYPE + value: "kubernetes" + - name: TYPHA_HEALTHENABLED + value: "true" + # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked, + # this opens a port on the host, which may need to be secured. + #- name: TYPHA_PROMETHEUSMETRICSENABLED + # value: "true" + #- name: TYPHA_PROMETHEUSMETRICSPORT + # value: "9093" + livenessProbe: + httpGet: + path: /liveness + port: 9098 + periodSeconds: 30 + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /readiness + port: 9098 + periodSeconds: 10 diff --git a/_includes/master/manifests/calico.yaml b/_includes/master/manifests/calico.yaml new file mode 100644 index 00000000000..ce578503b5e --- /dev/null +++ b/_includes/master/manifests/calico.yaml @@ -0,0 +1,52 @@ +{% comment %} +calico.yaml accepts the following include flags: + +| Name | Accepted Values | +|------------------|--------------------------| +| datastore | kdd, etcd | +| typha | true, false | +| network | calico, flannel, | +| calico_ipam | true, false | +| ipip | true, false | +| variant_name | Calico, Canal | +| app_layer_policy | true, false | + +{% endcomment -%} +{% capture variant_name %}{% if include.network == "flannel" %}Canal{% else %}Calico{% endif %}{% endcapture -%} +# {{variant_name}} Version {{site.data.versions[page.version].first.title}} +# {{site.url}}/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} +# This manifest includes the following component versions: +# calico/node:{{site.data.versions[page.version].first.title}} +# calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} +{%- if include.datastore == "etcd" %} +# calico/kube-controllers:{{site.data.versions[page.version].first.components["calico/kube-controllers"].version}} +{%- endif %} +{%- if include.network == "flannel" %} +# coreos/flannel:{{site.data.versions[page.version].first.components["flannel"].version}} +{%- endif %} + +{% include {{page.version}}/manifests/calico-config.yaml datastore=include.datastore network=include.network calico_ipam=include.calico_ipam variant_name=variant_name typha=include.typha %} +--- + +{% if include.datastore == "etcd" %} +{% include {{page.version}}/manifests/calico-etcd-secrets.yaml %} +--- +{%- elsif include.datastore == "kdd" and include.typha == "true" %} +{% include {{page.version}}/manifests/calico-typha.yaml %} +--- +{%- endif %} + +{% include {{page.version}}/manifests/calico-node.yaml datastore=include.datastore network=include.network ipip=include.ipip variant_name=variant_name typha=include.typha app_layer_policy=include.app_layer_policy %} +--- +{%- if include.datastore == "etcd" and include.network == "flannel" %} + +{% include {{page.version}}/manifests/configure-canal.yaml %} +--- +{%- endif %} +{%- if include.datastore == "etcd" %} + +{% include {{page.version}}/manifests/calico-kube-controllers.yaml variant_name=variant_name %} +{%- elsif include.datastore == "kdd" %} + +{% include {{page.version}}/manifests/kdd-crds.yaml network=include.network %} +{%- endif -%} \ No newline at end of file diff --git a/_includes/master/manifests/configure-canal.yaml b/_includes/master/manifests/configure-canal.yaml new file mode 100644 index 00000000000..4d85e3b6c31 --- /dev/null +++ b/_includes/master/manifests/configure-canal.yaml @@ -0,0 +1,51 @@ +# This manifest deploys a Job which performs one-time +# configuration of Canal.
+apiVersion: batch/v1 +kind: Job +metadata: + name: configure-canal + namespace: kube-system + labels: + k8s-app: canal +spec: + template: + metadata: + name: configure-canal + spec: + hostNetwork: true + restartPolicy: OnFailure + containers: + # Writes basic flannel configuration to etcd. + - name: configure-flannel + image: quay.io/coreos/etcd:v3.1.5 + command: + - "etcdctl" + - "--cert-file=/calico-secrets/etcd-cert" + - "--key-file=/calico-secrets/etcd-key" + - "--ca-file=/calico-secrets/etcd-ca" + - "--no-sync" + - "set" + - "/coreos.com/network/config" + - '{ "Network": "10.244.0.0/16", "Backend": {"Type": "vxlan"} }' + env: + # The location of the etcd cluster. + - name: ETCDCTL_PEERS + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCDCTL_CACERT + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_ca + volumeMounts: + # Mount in the etcd TLS secrets. + - mountPath: /calico-secrets + name: etcd-certs + volumes: + # Mount in the etcd TLS secrets. + - name: etcd-certs + secret: + secretName: calico-etcd-secrets diff --git a/_includes/master/manifests/kdd-crds.yaml b/_includes/master/manifests/kdd-crds.yaml new file mode 100644 index 00000000000..59c2c69b182 --- /dev/null +++ b/_includes/master/manifests/kdd-crds.yaml @@ -0,0 +1,153 @@ +{% comment %} +kdd-crds.yaml accepts the following include flags: + +| Name | Accepted Values | +|------------------|--------------------------| +| network | calico, flannel, | + +{% endcomment -%} +# Create all the CustomResourceDefinitions needed for +# {{site.prodname}} {% if include.network %}policy and networking{% else %}policy-only{% endif %} mode. + +apiVersion: apiextensions.k8s.io/v1beta1 +description: Calico Felix Configuration +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: FelixConfiguration + plural: felixconfigurations + singular: felixconfiguration +--- +{%- if include.network == "calico" %} + +apiVersion: apiextensions.k8s.io/v1beta1 +description: Calico BGP Peers +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPPeer + plural: bgppeers + singular: bgppeer + +--- +{%- endif %} + +apiVersion: apiextensions.k8s.io/v1beta1 +description: Calico BGP Configuration +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPConfiguration + plural: bgpconfigurations + singular: bgpconfiguration + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +description: Calico IP Pools +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPPool + plural: ippools + singular: ippool + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +description: Calico Host Endpoints +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: HostEndpoint + plural: hostendpoints + singular: hostendpoint + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +description: Calico Cluster Information +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec:
+ scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: ClusterInformation + plural: clusterinformations + singular: clusterinformation + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +description: Calico Global Network Policies +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkPolicy + plural: globalnetworkpolicies + singular: globalnetworkpolicy + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +description: Calico Global Network Sets +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkSet + plural: globalnetworksets + singular: globalnetworkset + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +description: Calico Network Policies +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkPolicy + plural: networkpolicies + singular: networkpolicy \ No newline at end of file diff --git a/master/getting-started/kubernetes/installation/hosted/calico.yaml b/master/getting-started/kubernetes/installation/hosted/calico.yaml index f5eacccdb8a..2d232396b5e 100644 --- a/master/getting-started/kubernetes/installation/hosted/calico.yaml +++ b/master/getting-started/kubernetes/installation/hosted/calico.yaml @@ -1,383 +1,4 @@ --- layout: null --- -# {{site.prodname}} Version {{site.data.versions[page.version].first.title}} -# {{site.url}}/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} -# This manifest includes the following component versions: -# calico/node:{{site.data.versions[page.version].first.title}} -# calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} -# calico/kube-controllers:{{site.data.versions[page.version].first.components["calico/kube-controllers"].version}} - -# This ConfigMap is used to configure a self-hosted {{site.prodname}} installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # Configure this with the location of your etcd cluster. - etcd_endpoints: "http://127.0.0.1:2379" - - # Configure the {{site.prodname}} backend to use. - calico_backend: "bird" - - # Configure the MTU to use - veth_mtu: "1440" - - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "etcd_endpoints": "__ETCD_ENDPOINTS__", - "etcd_key_file": "__ETCD_KEY_FILE__", - "etcd_cert_file": "__ETCD_CERT_FILE__", - "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", - "log_level": "info", - "mtu": __CNI_MTU__, - "ipam": { - "type": "calico-ipam" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } - - # If you're using TLS enabled etcd uncomment the following. - # You must also populate the Secret below with these files. - etcd_ca: "" # "/calico-secrets/etcd-ca" - etcd_cert: "" # "/calico-secrets/etcd-cert" - etcd_key: "" # "/calico-secrets/etcd-key" - ---- - -# The following contains k8s Secrets for use with a TLS enabled etcd cluster. 
-# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ -apiVersion: v1 -kind: Secret -type: Opaque -metadata: - name: calico-etcd-secrets - namespace: kube-system -data: - # Populate the following files with etcd TLS configuration if desired, but leave blank if - # not using TLS for etcd. - # This self-hosted install expects three files with the following names. The values - # should be base64 encoded strings of the entire contents of each file. - # etcd-key: null - # etcd-cert: null - # etcd-ca: null - ---- - -# This manifest installs the calico/node container, as well -# as the {{site.prodname}} CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-node - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - # Make sure calico/node gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - serviceAccountName: calico-node - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: {{site.imageNames["node"]}}:{{site.data.versions[page.version].first.title}} - env: - # The location of the {{site.prodname}} etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # Choose the backend to use. - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,bgp" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set noderef for node controller. - - name: CALICO_K8S_NODE_REF - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within `--cluster-cidr`. - - name: CALICO_IPV4POOL_CIDR - value: "192.168.0.0/16" - - name: CALICO_IPV4POOL_IPIP - value: "Always" - # Disable IPv6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Set Felix logging to "info" - - name: FELIX_LOGSEVERITYSCREEN - value: "info" - # Set MTU for tunnel device used if ipip is enabled - - name: FELIX_IPINIPMTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Location of the CA certificate for etcd. - - name: ETCD_CA_CERT_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_ca - # Location of the client key for etcd. 
- - name: ETCD_KEY_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_key - # Location of the client certificate for etcd. - - name: ETCD_CERT_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_cert - # Auto-detect the BGP IP address. - - name: IP - value: "autodetect" - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - host: localhost - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/readiness - - -bird - - -felix - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - - mountPath: /calico-secrets - name: etcd-certs - # This container installs the {{site.prodname}} CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{site.imageNames["cni"]}}:{{site.data.versions[page.version].first.components["calico/cni"].version}} - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The location of the {{site.prodname}} etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - # CNI MTU Config variable - - name: CNI_MTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - - mountPath: /calico-secrets - name: etcd-certs - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Mount in the etcd TLS secrets with mode 400. - # See https://kubernetes.io/docs/concepts/configuration/secret/ - - name: etcd-certs - secret: - secretName: calico-etcd-secrets - defaultMode: 0400 - ---- - -# This manifest deploys the {{site.prodname}} Kubernetes controllers. -# See https://github.com/projectcalico/kube-controllers -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' -spec: - # The controllers can only have a single active instance. - replicas: 1 - strategy: - type: Recreate - template: - metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - spec: - # The controllers must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. - hostNetwork: true - tolerations: - # Mark the pod as a critical add-on for rescheduling. 
- - key: CriticalAddonsOnly - operator: Exists - - key: node-role.kubernetes.io/master - effect: NoSchedule - serviceAccountName: calico-kube-controllers - containers: - - name: calico-kube-controllers - image: {{site.imageNames["kubeControllers"]}}:{{site.data.versions[page.version].first.components["calico/kube-controllers"].version}} - env: - # The location of the {{site.prodname}} etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # Location of the CA certificate for etcd. - - name: ETCD_CA_CERT_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_ca - # Location of the client key for etcd. - - name: ETCD_KEY_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_key - # Location of the client certificate for etcd. - - name: ETCD_CERT_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_cert - # Choose which controllers to run. - - name: ENABLED_CONTROLLERS - value: policy,profile,workloadendpoint,node - volumeMounts: - # Mount in the etcd TLS secrets. - - mountPath: /calico-secrets - name: etcd-certs - volumes: - # Mount in the etcd TLS secrets with mode 400. - # See https://kubernetes.io/docs/concepts/configuration/secret/ - - name: etcd-certs - secret: - secretName: calico-etcd-secrets - defaultMode: 0400 - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-kube-controllers - namespace: kube-system - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-node - namespace: kube-system +{% include {{page.version}}/manifests/calico.yaml datastore="etcd" network="calico" calico_ipam="true" %} diff --git a/master/getting-started/kubernetes/installation/hosted/canal/canal-etcd.yaml b/master/getting-started/kubernetes/installation/hosted/canal/canal-etcd.yaml index 55231921f7b..5ad5fc0cf46 100644 --- a/master/getting-started/kubernetes/installation/hosted/canal/canal-etcd.yaml +++ b/master/getting-started/kubernetes/installation/hosted/canal/canal-etcd.yaml @@ -1,515 +1,4 @@ --- layout: null --- -# Canal Version {{site.data.versions[page.version].first.title}} -# {{site.url}}/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} -# This manifest includes the following component versions: -# calico/node:{{site.data.versions[page.version].first.title}} -# calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} -# calico/kube-controllers:{{site.data.versions[page.version].first.components["calico/kube-controllers"].version}} -# coreos/flannel:v0.9.1 - -# This ConfigMap can be used to configure a self-hosted Canal installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: canal-config - namespace: kube-system -data: - # Configure this with the location of your etcd cluster. - etcd_endpoints: "https://127.0.0.1:2379" - - # The interface used by canal for host <-> host communication. - # If left blank, then the interface is chosing using the node's - # default route. - canal_iface: "" - - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: "true" - - # The CNI network configuration to install on each node. The special - # values in this config will be automatically populated. 
- cni_network_config: |- - { - "name": "canal", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "flannel", - "delegate": { - "type": "calico", - "etcd_endpoints": "__ETCD_ENDPOINTS__", - "etcd_key_file": "__ETCD_KEY_FILE__", - "etcd_cert_file": "__ETCD_CERT_FILE__", - "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", - "log_level": "info", - "policy": { - "type": "k8s", - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" - } - } - }, - { - "type": "portmap", - "capabilities": {"portMappings": true}, - "snat": true - } - ] - } - - # If you're using TLS enabled etcd uncomment the following. - # You must also populate the Secret below with these files. - etcd_ca: "" # "/calico-secrets/etcd-ca" - etcd_cert: "" # "/calico-secrets/etcd-cert" - etcd_key: "" # "/calico-secrets/etcd-key" - ---- -# The following contains k8s Secrets for use with a TLS enabled etcd cluster. -# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ -apiVersion: v1 -kind: Secret -type: Opaque -metadata: - name: calico-etcd-secrets - namespace: kube-system -data: - # Populate the following files with etcd TLS configuration if desired, but leave blank if - # not using TLS for etcd. - # This self-hosted install expects three files with the following names. The values - # should be base64 encoded strings of the entire contents of each file. - # etcd-key: "" - # etcd-cert: "" - # etcd-ca: "" - ---- - -# This manifest installs the per-node agents, as well -# as the CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: canal-node - namespace: kube-system - labels: - k8s-app: canal-node -spec: - selector: - matchLabels: - k8s-app: canal-node - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - labels: - k8s-app: canal-node - spec: - tolerations: - # Make sure canal node can be scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - hostNetwork: true - serviceAccountName: canal - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - containers: - # Runs the flannel daemon to enable vxlan networking between - # container hosts. - - name: flannel - image: {{site.imageNames["flannel"]}}:{{site.data.versions[page.version].first.components["flannel"].version}} - env: - # The location of the etcd cluster. - - name: FLANNELD_ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_endpoints - # Location of the CA certificate for etcd. - - name: ETCD_CA_CERT_FILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_ca - # Location of the client key for etcd. - - name: ETCD_KEY_FILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_key - # Location of the client certificate for etcd. - - name: ETCD_CERT_FILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_cert - # Location of the CA certificate for etcd. 
- - name: FLANNELD_ETCD_CAFILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_ca - # Location of the client key for etcd. - - name: FLANNELD_ETCD_KEYFILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_key - # Location of the client certificate for etcd. - - name: FLANNELD_ETCD_CERTFILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_cert - # The interface flannel should run on. - - name: FLANNELD_IFACE - valueFrom: - configMapKeyRef: - name: canal-config - key: canal_iface - # Perform masquerade on traffic leaving the pod cidr. - - name: FLANNELD_IP_MASQ - valueFrom: - configMapKeyRef: - name: canal-config - key: masquerade - # Write the subnet.env file to the mounted directory. - - name: FLANNELD_SUBNET_FILE - value: "/run/flannel/subnet.env" - securityContext: - privileged: true - volumeMounts: - - mountPath: /etc/resolv.conf - name: resolv - - mountPath: /run/flannel - name: run-flannel - - mountPath: /calico-secrets - name: etcd-certs - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and local routes on each - # host. - - name: calico-node - image: {{site.imageNames["node"]}}:{{site.data.versions[page.version].first.title}} - env: - # The location of the etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_endpoints - # Location of the CA certificate for etcd. - - name: ETCD_CA_CERT_FILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_ca - # Location of the client key for etcd. - - name: ETCD_KEY_FILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_key - # Location of the client certificate for etcd. - - name: ETCD_CERT_FILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_cert - # Disable {{site.prodname}} BGP. {{site.prodname}} is simply enforcing policy. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,canal" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set noderef for node controller. - - name: CALICO_K8S_NODE_REF - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - host: localhost - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - httpGet: - path: /readiness - port: 9099 - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /calico-secrets - name: etcd-certs - # This container installs the {{site.prodname}} CNI binaries - # and CNI network config file on each node. - - name: install-calico-cni - image: {{site.imageNames["cni"]}}:{{site.data.versions[page.version].first.components["calico/cni"].version}} - imagePullPolicy: Always - command: ["/install-cni.sh"] - env: - # The name of the CNI network config file to install. - - name: CNI_CONF_NAME - value: "10-canal.conflist" - # The location of the etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_endpoints - # Location of the CA certificate for etcd. - - name: ETCD_CA_CERT_FILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_ca - # Location of the client key for etcd. 
- - name: ETCD_KEY_FILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_key - # Location of the client certificate for etcd. - - name: ETCD_CERT_FILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_cert - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: canal-config - key: cni_network_config - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - - mountPath: /calico-secrets - name: etcd-certs - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Used by flannel daemon. - - name: run-flannel - hostPath: - path: /run/flannel - - name: resolv - hostPath: - path: /etc/resolv.conf - # Mount in the etcd TLS secrets. - - name: etcd-certs - secret: - secretName: calico-etcd-secrets - ---- - -# This manifest deploys a Job which performs one time -# configuration of Canal. -apiVersion: batch/v1 -kind: Job -metadata: - name: configure-canal - namespace: kube-system - labels: - k8s-app: canal -spec: - template: - metadata: - name: configure-canal - spec: - hostNetwork: true - restartPolicy: OnFailure - containers: - # Writes basic flannel configuration to etcd. - - name: configure-flannel - image: quay.io/coreos/etcd:v3.1.5 - command: - - "etcdctl" - - "--cert-file=/calico-secrets/etcd-cert" - - "--key-file=/calico-secrets/etcd-key" - - "--ca-file=/calico-secrets/etcd-ca" - - "--no-sync" - - "set" - - "/coreos.com/network/config" - - '{ "Network": "10.244.0.0/16", "Backend": {"Type": "vxlan"} }' - env: - # The location of the etcd cluster. - - name: ETCDCTL_PEERS - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_endpoints - # The location of the {{site.prodname}} etcd cluster. - - name: ETCDCTL_CACERT - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_ca - volumeMounts: - # Mount in the etcd TLS secrets. - - mountPath: /calico-secrets - name: etcd-certs - volumes: - # Mount in the etcd TLS secrets. - - name: etcd-certs - secret: - secretName: calico-etcd-secrets - ---- - -# This manifest deploys the {{site.prodname}} policy controller on Kubernetes. -# See https://github.com/projectcalico/k8s-policy -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers -spec: - # The policy controller can only have a single active instance. - replicas: 1 - strategy: - type: Recreate - template: - metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - # The policy controller must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. - hostNetwork: true - tolerations: - # Make sure canal node can be scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. 
- - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - containers: - - name: calico-kube-controllers - image: {{site.imageNames["kubeControllers"]}}:{{site.data.versions[page.version].first.components["calico/kube-controllers"].version}} - env: - # The location of the {{site.prodname}} etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_endpoints - # Location of the CA certificate for etcd. - - name: ETCD_CA_CERT_FILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_ca - # Location of the client key for etcd. - - name: ETCD_KEY_FILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_key - # Location of the client certificate for etcd. - - name: ETCD_CERT_FILE - valueFrom: - configMapKeyRef: - name: canal-config - key: etcd_cert - # Choose which controllers to run. - - name: ENABLED_CONTROLLERS - value: policy,profile,workloadendpoint,node - volumeMounts: - # Mount in the etcd TLS secrets. - - mountPath: /calico-secrets - name: etcd-certs - volumes: - # Mount in the etcd TLS secrets. - - name: etcd-certs - secret: - secretName: calico-etcd-secrets ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: canal -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: canal -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: canal -rules: - - apiGroups: [""] - resources: - - pods - - nodes - verbs: - - get - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: canal - namespace: kube-system +{% include {{page.version}}/manifests/calico.yaml datastore="etcd" network="flannel" %} diff --git a/master/getting-started/kubernetes/installation/hosted/canal/canal.yaml b/master/getting-started/kubernetes/installation/hosted/canal/canal.yaml index 805e0ce8f43..3115a71832b 100644 --- a/master/getting-started/kubernetes/installation/hosted/canal/canal.yaml +++ b/master/getting-started/kubernetes/installation/hosted/canal/canal.yaml @@ -1,356 +1,4 @@ --- layout: null --- -# Canal Version {{site.data.versions[page.version].first.title}} -# {{site.url}}/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} -# This manifest includes the following component versions: -# calico/node:{{site.data.versions[page.version].first.title}} -# calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} -# coreos/flannel:{{site.data.versions[page.version].first.components["flannel"].version}} - -# This ConfigMap can be used to configure a self-hosted Canal installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: canal-config - namespace: kube-system -data: - # The interface used by canal for host <-> host communication. - # If left blank, then the interface is chosen using the node's - # default route. - canal_iface: "" - - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: "true" - - # The CNI network configuration to install on each node. 
- cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "log_level": "info", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } - - # Flannel network configuration. Mounted into the flannel container. - net-conf.json: | - { - "Network": "10.244.0.0/16", - "Backend": { - "Type": "vxlan" - } - } - ---- - -# This manifest installs the {{site.nodecontainer}} container, as well -# as the {{site.prodname}} CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: canal - namespace: kube-system - labels: - k8s-app: canal -spec: - selector: - matchLabels: - k8s-app: canal - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: canal - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - serviceAccountName: canal - tolerations: - # Tolerate this effect so the pods will be schedulable at all times - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: {{site.imageNames["node"]}}:{{site.data.versions[page.version].first.title}} - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Enable felix logging. - - name: FELIX_LOGSEVERITYSYS - value: "info" - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,canal" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Period, in seconds, at which felix re-applies all iptables state - - name: FELIX_IPTABLESREFRESHINTERVAL - value: "60" - # Disable IPV6 support in Felix. - - name: FELIX_IPV6SUPPORT - value: "false" - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # No IP address needed. - - name: IP - value: "" - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Set Felix endpoint to host default action to ACCEPT. 
- - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - httpGet: - path: /readiness - port: 9099 - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - # This container installs the {{site.prodname}} CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{site.imageNames["cni"]}}:{{site.data.versions[page.version].first.components["calico/cni"].version}} - command: ["/install-cni.sh"] - env: - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: canal-config - key: cni_network_config - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - # This container runs flannel using the kube-subnet-mgr backend - # for allocating subnets. - - name: kube-flannel - image: {{site.imageNames["flannel"]}}:{{site.data.versions[page.version].first.components["flannel"].version}} - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: FLANNELD_IFACE - valueFrom: - configMapKeyRef: - name: canal-config - key: canal_iface - - name: FLANNELD_IP_MASQ - valueFrom: - configMapKeyRef: - name: canal-config - key: masquerade - volumeMounts: - - name: run - mountPath: /run - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Used by flannel. - - name: run - hostPath: - path: /run - - name: flannel-cfg - configMap: - name: canal-config - - -# Create all the CustomResourceDefinitions needed for -# {{site.prodname}} policy-only mode. 
---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico Felix Configuration -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico BGP Configuration -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico IP Pools -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico Cluster Information -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico Global Network Policies -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico Network Policies -kind: CustomResourceDefinition -metadata: - name: networkpolicies.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: canal - namespace: kube-system +{% include {{page.version}}/manifests/calico.yaml datastore="kdd" network="flannel" %} \ No newline at end of file diff --git a/master/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml b/master/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml index 76804b6dc90..08109d1bb06 100644 --- a/master/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml +++ b/master/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml @@ -1,495 +1,4 @@ --- layout: null --- -# {{site.prodname}} Version {{site.data.versions[page.version].first.title}} -# {{site.url}}/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} -# This manifest includes the following component versions: -# calico/node:{{site.data.versions[page.version].first.title}} -# calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} - -# This ConfigMap is used to configure a self-hosted {{site.prodname}} installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas - # below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is - # essential. 
- typha_service_name: "none" - - # Configure the MTU to use - veth_mtu: "1440" - - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "log_level": "info", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "mtu": __CNI_MTU__, - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } - ---- - -# This manifest creates a Service, which will be backed by {{site.prodname}}'s Typha daemon. -# Typha sits in between Felix and the API server, reducing {{site.prodname}}'s load on the API server. - -apiVersion: v1 -kind: Service -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha -spec: - ports: - - port: 5473 - protocol: TCP - targetPort: calico-typha - name: calico-typha - selector: - k8s-app: calico-typha - ---- - -# This manifest creates a Deployment of Typha to back the above service. - -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha -spec: - # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the - # typha_service_name variable in the calico-config ConfigMap above. - # - # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential - # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In - # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. - replicas: 0 - revisionHistoryLimit: 2 - template: - metadata: - labels: - k8s-app: calico-typha - annotations: - # This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical - # add-on, ensuring it gets priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - # Since {{site.prodname}} can't network a pod until Typha is up, we need to run Typha itself - # as a host-networked pod. - serviceAccountName: calico-node - containers: - - image: {{site.imageNames["typha"]}}:{{site.data.versions[page.version].first.components["typha"].version}} - name: calico-typha - ports: - - containerPort: 5473 - name: calico-typha - protocol: TCP - env: - # Enable "info" logging by default. Can be set to "debug" to increase verbosity. - - name: TYPHA_LOGSEVERITYSCREEN - value: "info" - # Disable logging to file and syslog since those don't make sense in Kubernetes. - - name: TYPHA_LOGFILEPATH - value: "none" - - name: TYPHA_LOGSEVERITYSYS - value: "none" - # Monitor the Kubernetes API to find the number of running instances and rebalance - # connections. - - name: TYPHA_CONNECTIONREBALANCINGMODE - value: "kubernetes" - - name: TYPHA_DATASTORETYPE - value: "kubernetes" - - name: TYPHA_HEALTHENABLED - value: "true" - # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked, - # this opens a port on the host, which may need to be secured. 
- #- name: TYPHA_PROMETHEUSMETRICSENABLED - # value: "true" - #- name: TYPHA_PROMETHEUSMETRICSPORT - # value: "9093" - livenessProbe: - httpGet: - path: /liveness - port: 9098 - periodSeconds: 30 - initialDelaySeconds: 30 - readinessProbe: - httpGet: - path: /readiness - port: 9098 - periodSeconds: 10 - ---- - -# This manifest installs the calico/node container, as well -# as the {{site.prodname}} CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-node - annotations: - # This, along with the CriticalAddonsOnly toleration below, - # marks the pod as a critical add-on, ensuring it gets - # priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - # Make sure calico/node gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - serviceAccountName: calico-node - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: {{site.imageNames["node"]}}:{{site.data.versions[page.version].first.title}} - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Enable felix info logging. - - name: FELIX_LOGSEVERITYSCREEN - value: "info" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,bgp" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Disable IPV6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Set MTU for tunnel device used if ipip is enabled - - name: FELIX_IPINIPMTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within `--cluster-cidr`. - - name: CALICO_IPV4POOL_CIDR - value: "192.168.0.0/16" - # Enable IPIP - - name: CALICO_IPV4POOL_IPIP - value: "Always" - # Enable IP-in-IP within Felix. - - name: FELIX_IPINIPENABLED - value: "true" - # Typha support: controlled by the ConfigMap. - - name: FELIX_TYPHAK8SSERVICENAME - valueFrom: - configMapKeyRef: - name: calico-config - key: typha_service_name - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Auto-detect the BGP IP address. 
- - name: IP - value: "autodetect" - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - host: localhost - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/readiness - - -bird - - -felix - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - # This container installs the {{site.prodname}} CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{site.imageNames["cni"]}}:{{site.data.versions[page.version].first.components["calico/cni"].version}} - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - # CNI MTU Config variable - - name: CNI_MTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - -# Create all the CustomResourceDefinitions needed for -# {{site.prodname}} policy and networking mode. 
---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico Felix Configuration -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico BGP Peers -kind: CustomResourceDefinition -metadata: - name: bgppeers.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPPeer - plural: bgppeers - singular: bgppeer - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico BGP Configuration -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico IP Pools -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico HostEndpoints -kind: CustomResourceDefinition -metadata: - name: hostendpoints.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: HostEndpoint - plural: hostendpoints - singular: hostendpoint - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico Cluster Information -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico Global Network Policies -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico Global Network Sets -kind: CustomResourceDefinition -metadata: - name: globalnetworksets.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico Network Policies -kind: CustomResourceDefinition -metadata: - name: networkpolicies.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-node - namespace: kube-system +{% include {{page.version}}/manifests/calico.yaml datastore="kdd" network="calico" ipip="true" typha="true" %} diff --git a/master/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.7/calico.yaml b/master/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.7/calico.yaml index 4e766fc7c48..9d7a5bba2c7 100644 --- 
a/master/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.7/calico.yaml +++ b/master/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.7/calico.yaml @@ -1,459 +1,4 @@ --- layout: null --- -# {{site.prodname}} Version {{site.data.versions[page.version].first.title}} -# {{site.url}}/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} -# This manifest includes the following component versions: -# calico/node:{{site.data.versions[page.version].first.title}} -# calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} - -# This ConfigMap is used to configure a self-hosted {{site.prodname}} installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas - # below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is - # essential. - typha_service_name: "none" - - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "log_level": "info", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "mtu": 1500, - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } - ---- - -# This manifest creates a Service, which will be backed by {{site.prodname}}'s Typha daemon. -# Typha sits in between Felix and the API server, reducing {{site.prodname}}'s load on the API server. - -apiVersion: v1 -kind: Service -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha -spec: - ports: - - port: 5473 - protocol: TCP - targetPort: calico-typha - name: calico-typha - selector: - k8s-app: calico-typha - ---- - -# This manifest creates a Deployment of Typha to back the above service. - -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha -spec: - # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the - # typha_service_name variable in the calico-config ConfigMap above. - # - # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential - # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In - # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. - replicas: 0 - revisionHistoryLimit: 2 - template: - metadata: - labels: - k8s-app: calico-typha - annotations: - # This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical - # add-on, ensuring it gets priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - # Since {{site.prodname}} can't network a pod until Typha is up, we need to run Typha itself - # as a host-networked pod. 
- serviceAccountName: calico-node - containers: - - image: {{site.imageNames["typha"]}}:{{site.data.versions[page.version].first.components["typha"].version}} - name: calico-typha - ports: - - containerPort: 5473 - name: calico-typha - protocol: TCP - env: - # Enable "info" logging by default. Can be set to "debug" to increase verbosity. - - name: TYPHA_LOGSEVERITYSCREEN - value: "info" - # Disable logging to file and syslog since those don't make sense in Kubernetes. - - name: TYPHA_LOGFILEPATH - value: "none" - - name: TYPHA_LOGSEVERITYSYS - value: "none" - # Monitor the Kubernetes API to find the number of running instances and rebalance - # connections. - - name: TYPHA_CONNECTIONREBALANCINGMODE - value: "kubernetes" - - name: TYPHA_DATASTORETYPE - value: "kubernetes" - - name: TYPHA_HEALTHENABLED - value: "true" - # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked, - # this opens a port on the host, which may need to be secured. - #- name: TYPHA_PROMETHEUSMETRICSENABLED - # value: "true" - #- name: TYPHA_PROMETHEUSMETRICSPORT - # value: "9093" - livenessProbe: - httpGet: - path: /liveness - port: 9098 - periodSeconds: 30 - initialDelaySeconds: 30 - readinessProbe: - httpGet: - path: /readiness - port: 9098 - periodSeconds: 10 - ---- - -# This manifest installs the calico/node container, as well -# as the {{site.prodname}} CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-node - annotations: - # This, along with the CriticalAddonsOnly toleration below, - # marks the pod as a critical add-on, ensuring it gets - # priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - # Make sure calico/node gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - serviceAccountName: calico-node - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: {{site.imageNames["node"]}}:{{site.data.versions[page.version].first.title}} - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Enable felix info logging. - - name: FELIX_LOGSEVERITYSCREEN - value: "info" - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Disable IPV6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Wait for the datastore. 
- - name: WAIT_FOR_DATASTORE - value: "true" - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within `--cluster-cidr`. - - name: CALICO_IPV4POOL_CIDR - value: "192.168.0.0/16" - # Enable IPIP - - name: CALICO_IPV4POOL_IPIP - value: "Always" - # Typha support: controlled by the ConfigMap. - - name: FELIX_TYPHAK8SSERVICENAME - valueFrom: - configMapKeyRef: - name: calico-config - key: typha_service_name - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/readiness - - -felix - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - # This container installs the {{site.prodname}} CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{site.imageNames["cni"]}}:{{site.data.versions[page.version].first.components["calico/cni"].version}} - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - -# Create all the CustomResourceDefinitions needed for -# {{site.prodname}} policy-only mode. 
---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico Felix Configuration -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico BGP Configuration -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico IP Pools -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico Host Endpoints -kind: CustomResourceDefinition -metadata: - name: hostendpoints.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: HostEndpoint - plural: hostendpoints - singular: hostendpoint - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico Cluster Information -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico Global Network Policies -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico Global Network Sets -kind: CustomResourceDefinition -metadata: - name: globalnetworksets.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -description: Calico Network Policies -kind: CustomResourceDefinition -metadata: - name: networkpolicies.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-node - namespace: kube-system +{% include {{page.version}}/manifests/calico.yaml datastore="kdd" typha="true" %} \ No newline at end of file diff --git a/master/getting-started/kubernetes/installation/manifests/app-layer-policy/etcd/calico-networking/calico-node.yaml b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/etcd/calico-networking/calico-node.yaml index 500de854950..344010436c1 100644 --- a/master/getting-started/kubernetes/installation/manifests/app-layer-policy/etcd/calico-networking/calico-node.yaml +++ b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/etcd/calico-networking/calico-node.yaml @@ -1,230 +1,4 @@ --- layout: null --- -# This manifest installs the calico/node container, as well -# as the {{site.prodname}} 
CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-node - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - # Make sure calico/node gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - serviceAccountName: calico-node - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes - # to communicate with Felix over the Policy Sync API. - initContainers: - - name: flexvol-driver - image: {{site.imageNames["flexvol"]}}:{{site.data.versions[page.version].first.components["flexvol"].version}} - imagePullPolicy: Always - volumeMounts: - - name: flexvol-driver-host - mountPath: /host/driver - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: {{site.imageNames["node"]}}:{{site.data.versions[page.version].first.title}} - env: - # The location of the {{site.prodname}} etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # Choose the backend to use. - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,bgp" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set noderef for node controller. - - name: CALICO_K8S_NODE_REF - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within `--cluster-cidr`. - - name: CALICO_IPV4POOL_CIDR - value: "192.168.0.0/16" - - name: CALICO_IPV4POOL_IPIP - value: "Always" - # Disable IPv6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Set Felix logging to "info" - - name: FELIX_LOGSEVERITYSCREEN - value: "info" - # Set MTU for tunnel device used if ipip is enabled - - name: FELIX_IPINIPMTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Location of the CA certificate for etcd. - - name: ETCD_CA_CERT_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_ca - # Location of the client key for etcd. - - name: ETCD_KEY_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_key - # Location of the client certificate for etcd. - - name: ETCD_CERT_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_cert - # Auto-detect the BGP IP address. 
- - name: IP - value: "autodetect" - - name: FELIX_HEALTHENABLED - value: "true" - # Enable the Policy Sync API between Felix and Dikastes - - name: FELIX_POLICYSYNCPATHPREFIX - value: "/var/run/nodeagent" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/readiness - - -bird - - -felix - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - - mountPath: /calico-secrets - name: etcd-certs - - name: policysync - mountPath: /var/run/nodeagent - # This container installs the {{site.prodname}} CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{site.imageNames["cni"]}}:{{site.data.versions[page.version].first.components["calico/cni"].version}} - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The location of the {{site.prodname}} etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - # CNI MTU Config variable - - name: CNI_MTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - - mountPath: /calico-secrets - name: etcd-certs - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Mount in the etcd TLS secrets with mode 400. 
- # See https://kubernetes.io/docs/concepts/configuration/secret/ - - name: etcd-certs - secret: - secretName: calico-etcd-secrets - defaultMode: 0400 - # Used to create per-pod Unix Domain Sockets - - name: policysync - hostPath: - type: DirectoryOrCreate - path: /var/run/nodeagent - # Used to install Flex Volume Driver - - name: flexvol-driver-host - hostPath: - type: DirectoryOrCreate - path: /opt/bin/volume-plugins/nodeagent~uds +{% include {{page.version}}/manifests/calico-node.yaml datastore="etcd" network="calico" calico_ipam="true" app_layer_policy="true" %} diff --git a/master/getting-started/kubernetes/installation/manifests/app-layer-policy/etcd/calico-networking/calico.yaml b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/etcd/calico-networking/calico.yaml new file mode 100644 index 00000000000..2df4cf0d79e --- /dev/null +++ b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/etcd/calico-networking/calico.yaml @@ -0,0 +1,4 @@ +--- +layout: null +--- +{% include {{page.version}}/manifests/calico.yaml datastore="etcd" network="calico" calico_ipam="true" app_layer_policy="true" %} diff --git a/master/getting-started/kubernetes/installation/manifests/app-layer-policy/istio-inject-configmap.yaml b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/istio-inject-configmap.yaml index c867e289e10..ff8d82c88d4 100644 --- a/master/getting-started/kubernetes/installation/manifests/app-layer-policy/istio-inject-configmap.yaml +++ b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/istio-inject-configmap.yaml @@ -1,7 +1,6 @@ --- layout: null --- - kind: ConfigMap metadata: name: istio-inject @@ -10,13 +9,13 @@ apiVersion: v1 data: config: | policy: enabled - template: |-{% raw %} + template: |- initContainers: - name: istio-init image: docker.io/istio/proxy_init:0.6.0 args: - "-p" - - {{ .MeshConfig.ProxyListenPort }} + - {% raw %}{{ .MeshConfig.ProxyListenPort }}{% endraw %} - "-u" - 1337 imagePullPolicy: IfNotPresent @@ -46,7 +45,7 @@ data: - proxy - sidecar - --configPath - - {{ .ProxyConfig.ConfigPath }} + - {% raw %}{{ .ProxyConfig.ConfigPath }} - --binaryPath - {{ .ProxyConfig.BinaryPath }} - --serviceCluster @@ -72,7 +71,7 @@ data: - --proxyAdminPort - {{ .ProxyConfig.ProxyAdminPort }} - --controlPlaneAuthPolicy - - {{ .ProxyConfig.ControlPlaneAuthPolicy }} + - {{ .ProxyConfig.ControlPlaneAuthPolicy }}{% endraw %} env: - name: POD_NAME valueFrom: @@ -103,7 +102,7 @@ data: - mountPath: /var/run/dikastes name: dikastes-sock - name: dikastes - image: {% endraw %}{{site.imageNames["dikastes"]}}:{{site.data.versions[page.version].first.components["calico/dikastes"].version}}{% raw %} + image: {{site.imageNames["dikastes"]}}:{{site.data.versions[page.version].first.components["calico/dikastes"].version}} args: ["/dikastes", "server", "-l", "/var/run/dikastes/dikastes.sock", "-d", "/var/run/felix/nodeagent/socket", "--debug"] volumeMounts: - mountPath: /var/run/dikastes @@ -117,7 +116,7 @@ data: - name: istio-certs secret: optional: true - {{ if eq .Spec.ServiceAccountName "" }}secretName: istio.default{{ else }}secretName: {{ printf "istio.%s" .Spec.ServiceAccountName }}{{ end }}{% endraw %} + {% raw %}{{ if eq .Spec.ServiceAccountName "" }}secretName: istio.default{{ else }}secretName: {{ printf "istio.%s" .Spec.ServiceAccountName }}{{ end }}{% endraw %} - name: dikastes-sock emptyDir: medium: Memory diff --git 
a/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/calico-networking/calico-node.yaml b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/calico-networking/calico-node.yaml index 8e2b1938eda..a38a5170aa8 100644 --- a/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/calico-networking/calico-node.yaml +++ b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/calico-networking/calico-node.yaml @@ -1,209 +1,4 @@ --- layout: null --- -# This manifest installs the calico/node container, as well -# as the {{site.prodname}} CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-node - annotations: - # This, along with the CriticalAddonsOnly toleration below, - # marks the pod as a critical add-on, ensuring it gets - # priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - # Make sure calico/node gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - serviceAccountName: calico-node - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes - # to communicate with Felix over the Policy Sync API. - initContainers: - - name: flexvol-driver - image: {{site.imageNames["flexvol"]}}:{{site.data.versions[page.version].first.components["flexvol"].version}} - imagePullPolicy: Always - volumeMounts: - - name: flexvol-driver-host - mountPath: /host/driver - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: {{site.imageNames["node"]}}:{{site.data.versions[page.version].first.title}} - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Enable felix info logging. - - name: FELIX_LOGSEVERITYSCREEN - value: "info" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,bgp" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Disable IPV6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Set MTU for tunnel device used if ipip is enabled - - name: FELIX_IPINIPMTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. 
This should fall within `--cluster-cidr`. - - name: CALICO_IPV4POOL_CIDR - value: "192.168.0.0/16" - # Enable IPIP - - name: CALICO_IPV4POOL_IPIP - value: "Always" - # Enable IP-in-IP within Felix. - - name: FELIX_IPINIPENABLED - value: "true" - # Typha support: controlled by the ConfigMap. - - name: FELIX_TYPHAK8SSERVICENAME - valueFrom: - configMapKeyRef: - name: calico-config - key: typha_service_name - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Auto-detect the BGP IP address. - - name: IP - value: "autodetect" - - name: FELIX_HEALTHENABLED - value: "true" - # Enable the Policy Sync API between Felix and Dikastes - - name: FELIX_POLICYSYNCPATHPREFIX - value: "/var/run/nodeagent" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/readiness - - -bird - - -felix - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - - name: policysync - mountPath: /var/run/nodeagent - # This container installs the {{site.prodname}} CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{site.imageNames["cni"]}}:{{site.data.versions[page.version].first.components["calico/cni"].version}} - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - # CNI MTU Config variable - - name: CNI_MTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - # Used to install CNI. 
- - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Used to create per-pod Unix Domain Sockets - - name: policysync - hostPath: - type: DirectoryOrCreate - path: /var/run/nodeagent - # Used to install Flex Volume Driver - - name: flexvol-driver-host - hostPath: - type: DirectoryOrCreate - path: /opt/bin/volume-plugins/nodeagent~uds +{% include {{page.version}}/manifests/calico-node.yaml datastore="kdd" network="calico" ipip="true" typha="true" app_layer_policy="true" %} diff --git a/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/calico-networking/calico.yaml b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/calico-networking/calico.yaml new file mode 100644 index 00000000000..b744a7b139e --- /dev/null +++ b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/calico-networking/calico.yaml @@ -0,0 +1,4 @@ +--- +layout: null +--- +{% include {{page.version}}/manifests/calico.yaml datastore="kdd" network="calico" ipip="true" typha="true" app_layer_policy="true" %} diff --git a/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/flannel/calico-node.yaml b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/flannel/calico-node.yaml index ce8b614e308..a60dc324aa1 100644 --- a/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/flannel/calico-node.yaml +++ b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/flannel/calico-node.yaml @@ -1,209 +1,4 @@ --- layout: null --- -# This manifest installs the {{site.nodecontainer}} container, as well -# as the {{site.prodname}} CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: canal - namespace: kube-system - labels: - k8s-app: canal -spec: - selector: - matchLabels: - k8s-app: canal - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: canal - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - serviceAccountName: canal - tolerations: - # Tolerate this effect so the pods will be schedulable at all times - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes - # to communicate with Felix over the Policy Sync API. - initContainers: - - name: flexvol-driver - image: {{site.imageNames["flexvol"]}}:{{site.data.versions[page.version].first.components["flexvol"].version}} - imagePullPolicy: Always - volumeMounts: - - name: flexvol-driver-host - mountPath: /host/driver - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. 
- - name: calico-node - image: {{site.imageNames["node"]}}:{{site.data.versions[page.version].first.title}} - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Enable felix logging. - - name: FELIX_LOGSEVERITYSYS - value: "info" - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,canal" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Period, in seconds, at which felix re-applies all iptables state - - name: FELIX_IPTABLESREFRESHINTERVAL - value: "60" - # Disable IPV6 support in Felix. - - name: FELIX_IPV6SUPPORT - value: "false" - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # No IP address needed. - - name: IP - value: "" - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - - name: FELIX_HEALTHENABLED - value: "true" - # Enable the Policy Sync API between Felix and Dikastes - - name: FELIX_POLICYSYNCPATHPREFIX - value: "/var/run/nodeagent" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - httpGet: - path: /readiness - port: 9099 - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - name: policysync - mountPath: /var/run/nodeagent - # This container installs the {{site.prodname}} CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{site.imageNames["cni"]}}:{{site.data.versions[page.version].first.components["calico/cni"].version}} - command: ["/install-cni.sh"] - env: - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: canal-config - key: cni_network_config - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - # This container runs flannel using the kube-subnet-mgr backend - # for allocating subnets. - - name: kube-flannel - image: {{site.imageNames["flannel"]}}:{{site.data.versions[page.version].first.components["flannel"].version}} - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: FLANNELD_IFACE - valueFrom: - configMapKeyRef: - name: canal-config - key: canal_iface - - name: FLANNELD_IP_MASQ - valueFrom: - configMapKeyRef: - name: canal-config - key: masquerade - volumeMounts: - - name: run - mountPath: /run - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Used by flannel. 
- - name: run - hostPath: - path: /run - - name: flannel-cfg - configMap: - name: canal-config - # Used to create per-pod Unix Domain Sockets - - name: policysync - hostPath: - type: DirectoryOrCreate - path: /var/run/nodeagent - # Used to install Flex Volume Driver - - name: flexvol-driver-host - hostPath: - type: DirectoryOrCreate - path: /opt/bin/volume-plugins/nodeagent~uds +{% include {{page.version}}/manifests/calico-node.yaml datastore="kdd" network="flannel" app_layer_policy="true" %} diff --git a/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/flannel/canal.yaml b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/flannel/canal.yaml new file mode 100644 index 00000000000..d79db8d259b --- /dev/null +++ b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/flannel/canal.yaml @@ -0,0 +1,4 @@ +--- +layout: null +--- +{% include {{page.version}}/manifests/calico.yaml datastore="kdd" network="flannel" app_layer_policy="true" %} \ No newline at end of file diff --git a/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/policy-only/calico-node.yaml b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/policy-only/calico-node.yaml index 29292412a52..5ee7a1e1a08 100644 --- a/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/policy-only/calico-node.yaml +++ b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/policy-only/calico-node.yaml @@ -1,193 +1,4 @@ --- layout: null --- -# This manifest installs the calico/node container, as well -# as the {{site.prodname}} CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-node - annotations: - # This, along with the CriticalAddonsOnly toleration below, - # marks the pod as a critical add-on, ensuring it gets - # priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - # Make sure calico/node gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - serviceAccountName: calico-node - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes - # to communicate with Felix over the Policy Sync API. - initContainers: - - name: flexvol-driver - image: {{site.imageNames["flexvol"]}}:{{site.data.versions[page.version].first.components["flexvol"].version}} - imagePullPolicy: Always - volumeMounts: - - name: flexvol-driver-host - mountPath: /host/driver - containers: - # Runs calico/node container on each Kubernetes node. 
This - # container programs network policy and routes on each - # host. - - name: calico-node - image: {{site.imageNames["node"]}}:{{site.data.versions[page.version].first.title}} - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Enable felix info logging. - - name: FELIX_LOGSEVERITYSCREEN - value: "info" - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Disable IPV6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within `--cluster-cidr`. - - name: CALICO_IPV4POOL_CIDR - value: "192.168.0.0/16" - # Enable IPIP - - name: CALICO_IPV4POOL_IPIP - value: "Always" - # Typha support: controlled by the ConfigMap. - - name: FELIX_TYPHAK8SSERVICENAME - valueFrom: - configMapKeyRef: - name: calico-config - key: typha_service_name - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: FELIX_HEALTHENABLED - value: "true" - # Enable the Policy Sync API between Felix and Dikastes - - name: FELIX_POLICYSYNCPATHPREFIX - value: "/var/run/nodeagent" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/readiness - - -felix - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - - name: policysync - mountPath: /var/run/nodeagent - # This container installs the {{site.prodname}} CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{site.imageNames["cni"]}}:{{site.data.versions[page.version].first.components["calico/cni"].version}} - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - # Used to install CNI. 
- - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Used to create per-pod Unix Domain Sockets - - name: policysync - hostPath: - type: DirectoryOrCreate - path: /var/run/nodeagent - # Used to install Flex Volume Driver - - name: flexvol-driver-host - hostPath: - type: DirectoryOrCreate - path: /opt/bin/volume-plugins/nodeagent~uds +{% include {{page.version}}/manifests/calico-node.yaml datastore="kdd" typha="true" app_layer_policy="true" %} diff --git a/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/policy-only/calico.yaml b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/policy-only/calico.yaml new file mode 100644 index 00000000000..7ec8a07a58c --- /dev/null +++ b/master/getting-started/kubernetes/installation/manifests/app-layer-policy/kubernetes-datastore/policy-only/calico.yaml @@ -0,0 +1,4 @@ +--- +layout: null +--- +{% include {{page.version}}/manifests/calico.yaml datastore="kdd" typha="true" app_layer_policy="true" %} \ No newline at end of file
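Note on the pattern introduced above: each per-variant manifest page is reduced to a thin wrapper — a null Jekyll layout plus a single parameterized include of the shared template. A minimal sketch of that wrapper pattern follows; the flag values shown simply mirror the policy-only combination used in this change (Kubernetes datastore, Typha, application layer policy), and other variants swap in whichever flags the shared include accepts rather than carrying their own copy of the manifest.

    ---
    layout: null
    ---
    {% include {{page.version}}/manifests/calico.yaml datastore="kdd" typha="true" app_layer_policy="true" %}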