# k3s.yml — k3s control-plane (server) configuration
# generated from onedr0p/cluster-template
---
# https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/
# https://github.com/PyratLabs/ansible-role-k3s
# (bool) Specify if a host (or host group) are part of the control plane
k3s_control_node: true
# Network interface whose IPv4 address is used for intra-cluster traffic
interface_name: ens10
# Ansible fact name for that interface (e.g. ansible_ens10)
ansible_interface: "ansible_{{ interface_name }}"
# (dict) k3s settings for all control-plane nodes
k3s_server:
  advertise-address: "{{ hostvars[inventory_hostname][ansible_interface].ipv4.address }}"
  node-ip: "{{ hostvars[inventory_hostname][ansible_interface].ipv4.address }}"
  node-external-ip: "{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address }}"
  node-taint:
    # Node taints are only added at registration time, when the node first joins the cluster.
    # Make master node not schedulable, only to certain system pods.
    # As this was added after the cluster initial build, I added it manually with:
    #   kubectl taint nodes k8s-0 node-role.kubernetes.io/master=true:NoSchedule
    - "node-role.kubernetes.io/master=true:NoSchedule"
    # - "k3s-controlplane=true:NoExecute" # core-dns only tolerates node-role.kubernetes.io/master
    # - "node-role.kubernetes.io/control-plane" # core-dns only tolerates node-role.kubernetes.io/master
  bind-address: "0.0.0.0"
  tls-san:
    # kube-vip virtual IP must be a SAN on the API server certificate
    - "{{ kubevip_address }}"
  # Disable Docker - this will use the default containerd CRI
  docker: false
  flannel-backend: "none"  # This needs to be in quotes
  disable:
    # Disable flannel - replaced with Calico
    - flannel
    # Disable traefik - installed with Flux
    - traefik
    # Disable servicelb - replaced with metallb and installed with Flux
    - servicelb
    # Disable metrics-server - installed with Flux
    - metrics-server
  disable-network-policy: true
  disable-cloud-controller: true
  write-kubeconfig-mode: "644"
  # Network CIDR to use for pod IPs
  cluster-cidr: "10.42.0.0/16"
  # Network CIDR to use for service IPs
  service-cidr: "10.43.0.0/16"
  # kubelet-arg:
  #   # Allow k8s services to contain TCP and UDP on the same port
  #   - "feature-gates=MixedProtocolLBService=true"
  kube-controller-manager-arg:
    # Allow k8s services to contain TCP and UDP on the same port
    # - "feature-gates=MixedProtocolLBService=true"
    # Required to monitor kube-controller-manager with kube-prometheus-stack
    - "bind-address=0.0.0.0"
  kube-proxy-arg:
    # Allow k8s services to contain TCP and UDP on the same port
    # - "feature-gates=MixedProtocolLBService=true"
    # Required to monitor kube-proxy with kube-prometheus-stack
    - "metrics-bind-address=0.0.0.0"
  kube-scheduler-arg:
    # Allow k8s services to contain TCP and UDP on the same port
    # - "feature-gates=MixedProtocolLBService=true"
    # Required to monitor kube-scheduler with kube-prometheus-stack
    - "bind-address=0.0.0.0"
  # Required to monitor etcd with kube-prometheus-stack
  etcd-expose-metrics: true
  kube-apiserver-arg:
    # Allow k8s services to contain TCP and UDP on the same port
    # - "feature-gates=MixedProtocolLBService=true"
    # Required for HAProxy health-checks
    - "anonymous-auth=true"