Replace kube-master with kube_control_plane (kubernetes-sigs#7256)
This replaces kube-master with kube_control_plane because of [1]:

  The Kubernetes project is moving away from wording that is
  considered offensive. A new working group WG Naming was created
  to track this work, and the word "master" was declared as offensive.
  A proposal was formalized for replacing the word "master" with
  "control plane". This means it should be removed from source code,
  documentation, and user-facing configuration from Kubernetes and
  its sub-projects.

NOTE: The new name is kube_control_plane rather than kube-control-plane
      because hyphens are not valid in Ansible group names.

[1]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md#motivation
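For background, recent Ansible releases warn when group names contain characters other than letters, digits, and underscores. A minimal, hypothetical inventory fragment (not part of this commit) illustrating the naming the commit standardizes on:

```yaml
# Hypothetical sketch: "kube_control_plane" is a valid Ansible group name,
# while a hyphenated "kube-control-plane" would trigger Ansible's
# invalid-character warning on recent versions.
all:
  children:
    kube_control_plane:
      hosts:
        node1:
```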
oomichi authored Mar 24, 2021
1 parent d53fd29 commit 486b223
Showing 159 changed files with 564 additions and 485 deletions.
2 changes: 1 addition & 1 deletion .gitlab-ci.yml
@@ -30,7 +30,7 @@ variables:
MITOGEN_ENABLE: "false"
ANSIBLE_LOG_LEVEL: "-vv"
RECOVER_CONTROL_PLANE_TEST: "false"
-RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
+RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"

before_script:
- ./tests/scripts/rebase.sh
4 changes: 2 additions & 2 deletions .gitlab-ci/packet.yml
@@ -223,12 +223,12 @@ packet_ubuntu18-calico-ha-recover:
when: on_success
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
-RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
+RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"

packet_ubuntu18-calico-ha-recover-noquorum:
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
-RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube-master[1:]"
+RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"
4 changes: 2 additions & 2 deletions Vagrantfile
@@ -253,9 +253,9 @@ Vagrant.configure("2") do |config|
#ansible.tags = ['download']
ansible.groups = {
"etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
"kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
"kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
"kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
"k8s-cluster:children" => ["kube-master", "kube-node"],
"k8s-cluster:children" => ["kube_control_plane", "kube-node"],
}
end
end
17 changes: 13 additions & 4 deletions cluster.yml
@@ -2,6 +2,15 @@
- name: Check ansible version
import_playbook: ansible_version.yml

+- name: Add kube-master nodes to kube_control_plane
+  # This is for old inventory which contains kube-master instead of kube_control_plane
+  hosts: kube-master
+  gather_facts: false
+  tasks:
+    - name: add nodes to kube_control_plane group
+      group_by:
+        key: 'kube_control_plane'

- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
@@ -66,7 +75,7 @@
- { role: kubespray-defaults }
- { role: kubernetes/node, tags: node }

-- hosts: kube-master
+- hosts: kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
Expand Down Expand Up @@ -94,15 +103,15 @@
- { role: kubespray-defaults }
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }

-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }

-- hosts: kube-master
+- hosts: kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
@@ -114,7 +123,7 @@
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }

-- hosts: kube-master
+- hosts: kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
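The compatibility play added at the top of cluster.yml (above) is what keeps old inventories deployable: hosts that appear only in a legacy kube-master group are added to kube_control_plane at runtime via group_by. A hypothetical pre-rename inventory that still works unchanged:

```yaml
# Hypothetical legacy inventory: node1 appears only under the old
# kube-master group. The group_by shim in cluster.yml adds it to the
# kube_control_plane group at runtime, so later plays targeting
# kube_control_plane still match it.
all:
  hosts:
    node1:
      ansible_host: 10.0.0.1
  children:
    kube-master:
      hosts:
        node1:
    kube-node:
      hosts:
        node1:
    etcd:
      hosts:
        node1:
```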
4 changes: 2 additions & 2 deletions contrib/aws_inventory/kubespray-aws-inventory.py
@@ -35,7 +35,7 @@ def search_tags(self):
hosts['_meta'] = { 'hostvars': {} }

##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
for group in ["kube-master", "kube-node", "etcd"]:
for group in ["kube_control_plane", "kube-node", "etcd"]:
hosts[group] = []
tag_key = "kubespray-role"
tag_value = ["*"+group+"*"]
@@ -70,7 +70,7 @@ def search_tags(self):
hosts[group].append(dns_name)
hosts['_meta']['hostvars'][dns_name] = ansible_host

-hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
+hosts['k8s-cluster'] = {'children':['kube_control_plane', 'kube-node']}
print(json.dumps(hosts, sort_keys=True, indent=2))

SearchEC2Tags()
@@ -7,9 +7,9 @@
{% endif %}
{% endfor %}

-[kube-master]
+[kube_control_plane]
{% for vm in vm_list %}
-{% if 'kube-master' in vm.tags.roles %}
+{% if 'kube_control_plane' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
@@ -30,4 +30,4 @@

[k8s-cluster:children]
kube-node
-kube-master
+kube_control_plane
@@ -7,9 +7,9 @@
{% endif %}
{% endfor %}

-[kube-master]
+[kube_control_plane]
{% for vm in vm_roles_list %}
-{% if 'kube-master' in vm.tags.roles %}
+{% if 'kube_control_plane' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
@@ -30,5 +30,5 @@

[k8s-cluster:children]
kube-node
-kube-master
+kube_control_plane

@@ -144,7 +144,7 @@
"[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
],
"tags": {
"roles": "kube-master,etcd"
"roles": "kube_control_plane,etcd"
},
"apiVersion": "{{apiVersion}}",
"properties": {
18 changes: 10 additions & 8 deletions contrib/inventory_builder/inventory.py
@@ -44,7 +44,7 @@
import subprocess
import sys

-ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
+ROLES = ['all', 'kube_control_plane', 'kube-node', 'etcd', 'k8s-cluster',
'calico-rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
@@ -299,21 +299,23 @@ def add_host_to_group(self, group, host, opts=""):

def set_kube_control_plane(self, hosts):
for host in hosts:
-self.add_host_to_group('kube-master', host)
+self.add_host_to_group('kube_control_plane', host)

def set_all(self, hosts):
for host, opts in hosts.items():
self.add_host_to_group('all', host, opts)

def set_k8s_cluster(self):
-k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
+k8s_cluster = {'children': {'kube_control_plane': None,
+                            'kube-node': None}}
self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster

def set_calico_rr(self, hosts):
for host in hosts:
-if host in self.yaml_config['all']['children']['kube-master']:
+if host in self.yaml_config['all']['children']['kube_control_plane']: # noqa
self.debug("Not adding {0} to calico-rr group because it "
"conflicts with kube-master group".format(host))
"conflicts with kube_control_plane "
"group".format(host))
continue
if host in self.yaml_config['all']['children']['kube-node']:
self.debug("Not adding {0} to calico-rr group because it "
@@ -330,10 +332,10 @@ def set_kube_node(self, hosts):
"group.".format(host))
continue
if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD: # noqa
-if host in self.yaml_config['all']['children']['kube-master']['hosts']: # noqa
+if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']: # noqa
self.debug("Not adding {0} to kube-node group because of "
"scale deployment and host is in kube-master "
"group.".format(host))
"scale deployment and host is in "
"kube_control_plane group.".format(host))
continue
self.add_host_to_group('kube-node', host)

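With the renamed ROLES list, inventory.py emits the new group name in generated inventories. A sketch of the rough shape of a generated hosts.yaml after this change (hypothetical nodes and IPs; the exact layout may differ):

```yaml
# Approximate output sketch, assuming the builder's usual layout.
all:
  hosts:
    node1:
      ansible_host: 10.10.1.3
      ip: 10.10.1.3
    node2:
      ansible_host: 10.10.1.4
      ip: 10.10.1.4
  children:
    kube_control_plane:
      hosts:
        node1:
    kube-node:
      hosts:
        node1:
        node2:
    etcd:
      hosts:
        node1:
    k8s-cluster:
      children:
        kube_control_plane:
        kube-node:
    calico-rr:
      hosts: {}
```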
4 changes: 2 additions & 2 deletions contrib/inventory_builder/tests/test_inventory.py
@@ -223,7 +223,7 @@ def test_add_host_to_group(self):
None)

def test_set_kube_control_plane(self):
-group = 'kube-master'
+group = 'kube_control_plane'
host = 'node1'

self.inv.set_kube_control_plane([host])
@@ -242,7 +242,7 @@ def test_set_all(self):

def test_set_k8s_cluster(self):
group = 'k8s-cluster'
-expected_hosts = ['kube-node', 'kube-master']
+expected_hosts = ['kube-node', 'kube_control_plane']

self.inv.set_k8s_cluster()
for host in expected_hosts:
2 changes: 1 addition & 1 deletion contrib/network-storage/glusterfs/glusterfs.yml
@@ -19,6 +19,6 @@
roles:
- { role: glusterfs/client }

-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
roles:
- { role: kubernetes-pv }
4 changes: 2 additions & 2 deletions contrib/network-storage/glusterfs/inventory.example
@@ -14,7 +14,7 @@
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9

-# [kube-master]
+# [kube_control_plane]
# node1
# node2

@@ -32,7 +32,7 @@

# [k8s-cluster:children]
# kube-node
-# kube-master
+# kube_control_plane

# [gfs-cluster]
# gfs_node1
@@ -8,7 +8,7 @@
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
- { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
register: gluster_pv
-when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
+when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined

- name: Kubernetes Apps | Set GlusterFS endpoint and PV
kube:
Expand All @@ -19,4 +19,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
state: "{{ item.changed | ternary('latest','present') }}"
with_items: "{{ gluster_pv.results }}"
-when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
+when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
2 changes: 1 addition & 1 deletion contrib/network-storage/heketi/heketi-tear-down.yml
@@ -1,5 +1,5 @@
---
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
roles:
- { role: tear-down }

2 changes: 1 addition & 1 deletion contrib/network-storage/heketi/heketi.yml
@@ -3,7 +3,7 @@
roles:
- { role: prepare }

-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
tags:
- "provision"
roles:
2 changes: 1 addition & 1 deletion contrib/network-storage/heketi/inventory.yml.sample
@@ -7,7 +7,7 @@ all:
vars:
kubelet_fail_swap_on: false
children:
-kube-master:
+kube_control_plane:
hosts:
node1:
etcd:
2 changes: 1 addition & 1 deletion contrib/terraform/aws/README.md
@@ -122,7 +122,7 @@ You can use the following set of commands to get the kubeconfig file from your n

```commandline
# Get the controller's IP address.
-CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube-master\]" -A 1 | tail -n 1)
+CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube_control_plane\]" -A 1 | tail -n 1)
CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2)
# Get the hostname of the load balancer.
2 changes: 1 addition & 1 deletion contrib/terraform/aws/create-infrastructure.tf
@@ -84,7 +84,7 @@ resource "aws_instance" "k8s-master" {

vpc_security_group_ids = module.aws-vpc.aws_security_group

-iam_instance_profile = module.aws-iam.kube-master-profile
+iam_instance_profile = module.aws-iam.kube_control_plane-profile
key_name = var.AWS_SSH_KEY_NAME

tags = merge(var.default_tags, map(
10 changes: 5 additions & 5 deletions contrib/terraform/aws/modules/iam/main.tf
@@ -1,6 +1,6 @@
#Add AWS Roles for Kubernetes

resource "aws_iam_role" "kube-master" {
resource "aws_iam_role" "kube_control_plane" {
name = "kubernetes-${var.aws_cluster_name}-master"

assume_role_policy = <<EOF
@@ -40,9 +40,9 @@ EOF

#Add AWS Policies for Kubernetes

resource "aws_iam_role_policy" "kube-master" {
resource "aws_iam_role_policy" "kube_control_plane" {
name = "kubernetes-${var.aws_cluster_name}-master"
-role = aws_iam_role.kube-master.id
+role = aws_iam_role.kube_control_plane.id

policy = <<EOF
{
@@ -130,9 +130,9 @@ EOF

#Create AWS Instance Profiles

resource "aws_iam_instance_profile" "kube-master" {
resource "aws_iam_instance_profile" "kube_control_plane" {
name = "kube_${var.aws_cluster_name}_master_profile"
-role = aws_iam_role.kube-master.name
+role = aws_iam_role.kube_control_plane.name
}

resource "aws_iam_instance_profile" "kube-worker" {
4 changes: 2 additions & 2 deletions contrib/terraform/aws/modules/iam/outputs.tf
@@ -1,5 +1,5 @@
output "kube-master-profile" {
value = aws_iam_instance_profile.kube-master.name
output "kube_control_plane-profile" {
value = aws_iam_instance_profile.kube_control_plane.name
}

output "kube-worker-profile" {
4 changes: 2 additions & 2 deletions contrib/terraform/aws/templates/inventory.tpl
@@ -7,7 +7,7 @@ ${public_ip_address_bastion}
[bastion]
${public_ip_address_bastion}

-[kube-master]
+[kube_control_plane]
${list_master}


@@ -21,7 +21,7 @@ ${list_etcd}

[k8s-cluster:children]
kube-node
-kube-master
+kube_control_plane


[k8s-cluster:vars]
6 changes: 3 additions & 3 deletions contrib/terraform/exoscale/templates/inventory.tpl
@@ -2,10 +2,10 @@
${connection_strings_master}
${connection_strings_worker}

-[kube-master]
+[kube_control_plane]
${list_master}

-[kube-master:vars]
+[kube_control_plane:vars]
supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ]

[etcd]
@@ -15,5 +15,5 @@ ${list_master}
${list_worker}

[k8s-cluster:children]
-kube-master
+kube_control_plane
kube-node