diff --git a/contrib/inventory_builder/inventory.py b/contrib/inventory_builder/inventory.py index a2005847581..d030d3a2265 100644 --- a/contrib/inventory_builder/inventory.py +++ b/contrib/inventory_builder/inventory.py @@ -44,7 +44,7 @@ ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster', 'calico-rr'] PROTECTED_NAMES = ROLES -AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load'] +AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames', 'load'] _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, '0': False, 'no': False, 'false': False, 'off': False} yaml = YAML() @@ -348,6 +348,8 @@ def parse_command(self, command, args=None): self.print_config() elif command == 'print_ips': self.print_ips() + elif command == 'print_hostnames': + self.print_hostnames() elif command == 'load': self.load_file(args) else: @@ -361,6 +363,7 @@ def show_help(self): help - Display this message print_cfg - Write inventory file to stdout print_ips - Write a space-delimited list of IPs from "all" group +print_hostnames - Write a space-delimited list of Hostnames from "all" group Advanced usage: Add another host after initial creation: inventory.py 10.10.1.5 @@ -381,6 +384,9 @@ def show_help(self): def print_config(self): yaml.dump(self.yaml_config, sys.stdout) + def print_hostnames(self): + print(' '.join(self.yaml_config['all']['hosts'].keys())) + def print_ips(self): ips = [] for host, opts in self.yaml_config['all']['hosts'].items(): diff --git a/contrib/metallb/roles/provision/templates/metallb.yml.j2 b/contrib/metallb/roles/provision/templates/metallb.yml.j2 index ea8da29e9a0..b830c5f5247 100644 --- a/contrib/metallb/roles/provision/templates/metallb.yml.j2 +++ b/contrib/metallb/roles/provision/templates/metallb.yml.j2 @@ -115,7 +115,7 @@ roleRef: kind: Role name: config-watcher --- -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: DaemonSet metadata: namespace: metallb-system @@ -169,7 +169,7 @@ spec: - net_raw --- -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: namespace: metallb-system diff --git a/contrib/network-storage/heketi/roles/provision/templates/glusterfs-daemonset.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/glusterfs-daemonset.json.j2 index 74c031ffedd..15efb3d6af8 100644 --- a/contrib/network-storage/heketi/roles/provision/templates/glusterfs-daemonset.json.j2 +++ b/contrib/network-storage/heketi/roles/provision/templates/glusterfs-daemonset.json.j2 @@ -1,6 +1,6 @@ { "kind": "DaemonSet", - "apiVersion": "extensions/v1beta1", + "apiVersion": "apps/v1", "metadata": { "name": "glusterfs", "labels": { diff --git a/contrib/network-storage/heketi/roles/provision/templates/heketi-bootstrap.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/heketi-bootstrap.json.j2 index 764de5efaaf..6f82bb40947 100644 --- a/contrib/network-storage/heketi/roles/provision/templates/heketi-bootstrap.json.j2 +++ b/contrib/network-storage/heketi/roles/provision/templates/heketi-bootstrap.json.j2 @@ -30,7 +30,7 @@ }, { "kind": "Deployment", - "apiVersion": "extensions/v1beta1", + "apiVersion": "apps/v1", "metadata": { "name": "deploy-heketi", "labels": { diff --git a/contrib/network-storage/heketi/roles/provision/templates/heketi-deployment.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/heketi-deployment.json.j2 index cd7466ae5aa..0cd24e167a9 100644 --- a/contrib/network-storage/heketi/roles/provision/templates/heketi-deployment.json.j2 +++ 
b/contrib/network-storage/heketi/roles/provision/templates/heketi-deployment.json.j2 @@ -44,7 +44,7 @@ }, { "kind": "Deployment", - "apiVersion": "extensions/v1beta1", + "apiVersion": "apps/v1", "metadata": { "name": "heketi", "labels": { diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md index 1e997558836..57e9f370f9d 100644 --- a/contrib/terraform/openstack/README.md +++ b/contrib/terraform/openstack/README.md @@ -224,6 +224,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`. |Variable | Description | |---------|-------------| |`cluster_name` | All OpenStack resources will use the Terraform variable`cluster_name` (default`example`) in their name to make it easier to track. For example the first compute resource will be named`example-kubernetes-1`. | +|`az_list` | List of Availability Zones available in your OpenStack cluster. | |`network_name` | The name to be given to the internal network that will be generated | |`network_dns_domain` | (Optional) The dns_domain for the internal network that will be generated | |`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. | diff --git a/contrib/terraform/openstack/sample-inventory/cluster.tfvars b/contrib/terraform/openstack/sample-inventory/cluster.tfvars index 1854e1fba2c..c27d6972361 100644 --- a/contrib/terraform/openstack/sample-inventory/cluster.tfvars +++ b/contrib/terraform/openstack/sample-inventory/cluster.tfvars @@ -1,6 +1,9 @@ # your Kubernetes cluster name here cluster_name = "i-didnt-read-the-docs" +# list of availability zones available in your OpenStack cluster +#az_list = ["nova"] + # SSH key to use for access to nodes public_key_path = "~/.ssh/id_rsa.pub" diff --git a/docs/vagrant.md b/docs/vagrant.md index 61d20262726..555d91e78a7 100644 --- a/docs/vagrant.md +++ b/docs/vagrant.md @@ -3,9 +3,9 @@ Introduction Assuming you have Vagrant 2.0+ installed with virtualbox, libvirt/qemu or vmware, but is untested) you should be able to launch a 3 node Kubernetes cluster by simply running `vagrant up`. This will spin up 3 VMs and install kubernetes on them. Once they are completed you can connect to any of them by running `vagrant ssh k8s-[1..3]`. -To give an estimate of the expected duration of a provisioning run: On a dual core i5-6300u laptop with an SSD, provisioning takes around 13 to 15 minutes, once the container images and other files are cached. Note that libvirt/qemu is recommended over virtualbox as it is quite a bit faster, especcially during boot-up time. +To give an estimate of the expected duration of a provisioning run: On a dual core i5-6300u laptop with an SSD, provisioning takes around 13 to 15 minutes, once the container images and other files are cached. Note that libvirt/qemu is recommended over virtualbox as it is quite a bit faster, especially during boot-up time. -For proper performance a mimimum of 12GB RAM is recommended. It is possible to run a 3 node cluster on a laptop with 8GB of RAM using the default Vagrantfile, provided you have 8GB zram swap configured and not much more than a browser and a mail client running. If you decide to run on such a machine, then also make sure that any tnpfs devices, that are mounted, are mostly empty and disable any swapfiles mounted on HDD/SSD or you will be in for some serious swap-madness. Things can get a bit sluggish during provisioning, but when that's done, the system will actually be able to perform quite well. 
+For proper performance a minimum of 12GB RAM is recommended. It is possible to run a 3 node cluster on a laptop with 8GB of RAM using the default Vagrantfile, provided you have 8GB zram swap configured and not much more than a browser and a mail client running. If you decide to run on such a machine, then also make sure that any tmpfs devices, that are mounted, are mostly empty and disable any swapfiles mounted on HDD/SSD or you will be in for some serious swap-madness. Things can get a bit sluggish during provisioning, but when that's done, the system will actually be able to perform quite well. Customize Vagrant ================= diff --git a/remove-node.yml b/remove-node.yml index 1f4a6616988..c2f33b3b116 100644 --- a/remove-node.yml +++ b/remove-node.yml @@ -34,14 +34,14 @@ - { role: remove-node/pre-remove, tags: pre-remove } - hosts: "{{ node | default('kube-node') }}" - gather_facts: no + gather_facts: yes roles: - { role: kubespray-defaults } - { role: reset, tags: reset, when: reset_nodes|default(True) } # Currently cannot remove first master or etcd - hosts: "{{ node | default('kube-master[1:]:etcd[:1]') }}" - gather_facts: no + gather_facts: yes roles: - { role: kubespray-defaults } - { role: remove-node/post-remove, tags: post-remove } diff --git a/roles/bootstrap-os/tasks/bootstrap-centos.yml b/roles/bootstrap-os/tasks/bootstrap-centos.yml index 52e6df7912b..057231a0726 100644 --- a/roles/bootstrap-os/tasks/bootstrap-centos.yml +++ b/roles/bootstrap-os/tasks/bootstrap-centos.yml @@ -39,9 +39,9 @@ # libselinux-python is required on SELinux enabled hosts # See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements -- name: Install libselinux-python +- name: Install libselinux python package package: - name: libselinux-python + name: "{{ ( (ansible_facts.distribution_major_version | default(0) | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" state: present become: true when: diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 2e3912d53da..94d9911bbfe 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -53,8 +53,15 @@ kube_version: v1.15.3 kubeadm_version: "{{ kube_version }}" etcd_version: v3.3.10 -# kubernetes image repo define -kube_image_repo: "gcr.io/google-containers" +# gcr and kubernetes image repo define +gcr_image_repo: "gcr.io" +kube_image_repo: "{{ gcr_image_repo }}/google-containers" + +# docker image repo define +docker_image_repo: "docker.io" + +# quay image repo define +quay_image_repo: "quay.io" # TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults # after migration to container download @@ -105,10 +112,12 @@ crictl_checksums: # Checksums hyperkube_checksums: arm: + v1.16.0-beta.2: 2b64ef9e8e2f92b72352cc7ab95b416407f4fca9ed1a5020aeeb6a3777bd16ed v1.15.3: 100d8bddb29e77397b90e6dfbcf0af2d901a90ea4bde90b83b5a39f394c3900b v1.15.2: eeaa8e071541c7bcaa186ff1d2919d076b27ef70c9e9df70f910756eba55dc99 v1.15.1: fc5af96fd9341776d84c38675be7b8045dee20af327af9331972c422a4109918 v1.15.0: d923c781031bfd97d0fbe50311e4d7c3616aa5b6d466b99049931f09d73d07b9 + v1.14.6: 93d35b16785e71f6a38e9a54ddf1aca08c924b0f49e5f99ea8ccaff59bd9721b v1.14.5: 860b84dd32611a6008fe20fb998a2fc0a25ff44067eae556224827d05429c91e v1.14.4: 429a10369b2ef35a9c2d662347277339d53fa66ef55ffeabcc7d9b850e31056d v1.14.3: 3fac785261bcf79f7a80b12c4a1dda893ce8c0879caf57b36d4701730671b574 @@ -116,10 +125,12 @@ hyperkube_checksums: v1.14.1: 
839a4abfeafbd5f5ab057ad0e8a0b0b488b3cde14a646eba040a7f579875f565 v1.14.0: d090b1da23564a7e9bb8f1f4264f2116536c52611ae203fe2ca13eaad0a8003e arm64: + v1.16.0-beta.2: 0f1e694db5c75cff526c6c066e618c43e83384c36d4e38cf1ac6e9baf71b38d4 v1.15.3: 1e3e70b8d1e8ebc642f2801d9c7938a27764dfb2f5aea432ab4326d43c04a1f5 v1.15.2: c4cf69f52c7013faee9d54e0f376e0732a4a7b0f7ffc7241e9b7e28bad0ac77f v1.15.1: 80ed372c5f6c5178df88616175310057c06bdc9d0905953814a1927eb3aaa657 v1.15.0: 824af7d925b87a5ade63575b98b59ee81005fc76eac1dc399602308d7a60bc3c + v1.14.6: 97646bffe61e54a0c6f61d68b5625ec2e98d8b9d04cec2c8382266e437835e93 v1.14.5: 90c77847d64eb857c8e686e8593fe7a9e505bcbf960b0407217255827a9da59a v1.14.4: 9e0b4fde88a07c705e0937cd3161392684e3ca08535d14a99ae3b86bbf4c56b3 v1.14.3: f29211d668cbcf1aa415dfa64aad95ffc53b5410482a23cddb680caec4e907a3 @@ -127,10 +138,12 @@ hyperkube_checksums: v1.14.1: d5236efc2547fd07c7cc2ed9345dfbcd1204385847ca686cf1c62d15056de399 v1.14.0: 708e00a41f6516d525dee00c91ebe3c3bf2feaf9b7f0af7689487e3e17e356c2 amd64: + v1.16.0-beta.2: 2f05aba15c163883a610681a563d89fd7adf10cb70b90cdb6760f00f8d023a4b v1.15.3: 3685c65b4fb85d552f77346900affc2e9a1bc997b4cd3dde0e705fd8c1d9be7a v1.15.2: ab885606438748eb89a7738e219f5353d94c40c63a4935a539ce89760280f065 v1.15.1: 22b7b1e7f5f2a452d62e0ca4c2cba67119c51e04219aaeaf8452825f9177069e v1.15.0: 3cc72cc58517b97c608c7a59a20255675bc70f07217c9e11e58cac7746139283 + v1.14.6: 4f9a8984985786797fa3353961ba2b58f50235581c9b5978130fbb4199005538 v1.14.5: 2c3410518980b8705ba9b7b708076a206f2bde37cb8bf5ba8f15c32c697f4d97 v1.14.4: 5f31434f3a884257a7b0e3178fc869720a7526c8637af5713d23433ddf2592dd v1.14.3: 6c6cb5c118b2129ba4e56697f42567be3587eb636a477cd342b69f87b3b049d1 @@ -139,10 +152,12 @@ hyperkube_checksums: v1.14.0: af8b04504365dbe4ce6a1772f42eb390d4221a21149b522fc8a0c4b1cd3d97aa kubeadm_checksums: arm: + v1.16.0-beta.2: 6cf8b364b40aba09e1aaa4ed873d90df2b17725dafa78252470777df9404a736 v1.15.3: 6c6fa56810908b5be83882094ea199844edc94b7e969160623c86512d9251c06 v1.15.2: 4b35ad0031c08a83de7c8d9f9bbed6a30d93a5c74e16ea9e6211ad2e0e12bdd1 v1.15.1: 855abd520291dcef0577a1a2ef87a70f522fd2b22603a12abcd86c2f7ec9c022 v1.15.0: 9464030a1d4e101de5f47348f3514d5a9eb95cbce2e5e31f53ada1ca485cf75e + v1.14.6: 6283ac962d02714e962e4f206c6bc8d6be58f5c9a12d2918aaa2fac7f73add09 v1.14.5: 0bb551f7468de2fa6f98ce60653495327be052364ac9f9e8917a4d1ad864412b v1.14.4: 36835488d7187406690ee6aa4b3c9c54855cb5c55d786d0574a508b955fe3a46 v1.14.3: 270b8c346aeaa309d11d65695c4a90f6bff5b1ea14bdec3c417ca2dfb3de0db3 @@ -150,10 +165,12 @@ kubeadm_checksums: v1.14.1: 4bd111411208f1270ed3af8780b87d24a3c17c9fdbe4b0f8c7a9a21cd765543e v1.14.0: 11f2cfa8bf7ee177dbac8073ab0f039dc265536baaa8dc0c4dea699f981f6fd1 arm64: + v1.16.0-beta.2: 0e3ae66f2f57a18eb363af1d49a22b35a24e32bf36af5ef630aa5ceeedc9feed v1.15.3: 6f472bc8ab1ba3d76448bd45b200edef96741e5affde8dc1429300af3a4904d8 v1.15.2: d3b6ee2048b366726ca366d2db4c46b2cacc38e8ec09cc35781d16593753d930 v1.15.1: 44fbfad0f1026d249fc4f365f1e9562cd52d75360d4d1032731122ba5a4d57dc v1.15.0: fe3c79070814fe847a23209b1027672fe5c5e7e5c9611e329225058926836f96 + v1.14.6: d935de033e7442ce5f8a35294fa890b884454d0482a9cf136c4abacd8c6ec165 v1.14.5: 7dd1195d16980c4c888d13e49d97c3513f668e192bf2778bc0f0516e0f7fe2ac v1.14.4: 60745b3ac761d3aa55ab9a24677ecf4e7f48b5abed34c725047a174456e5a79b v1.14.3: 8edcc07c65f81eea3fc47cd237dd6560c6907c5e0ca52d71eab53ca1164e7d01 @@ -161,10 +178,12 @@ kubeadm_checksums: v1.14.1: 5cf05464168e45ee4719264a267c65f9319fae1ceb9923fedab97a9d6a629e0b v1.14.0: 
7ed9d706e50cd6d3fc618a7af3d19b691b8a5343ddedaeccb4ea09af3ecfae2c amd64: + v1.16.0-beta.2: bba224360cfb4e6471f84523fcc954951c05c0fef0a4311a07e76f306cadebf1 v1.15.3: ec56a00bc8d9ec4ac2b081a3b2127d8593daf3b2c86560cf9e6cba5ada2d5a80 v1.15.2: fe2a13a1dea73249560ea44ab54c0359a9722e9c66832f6bcad86798438cba2f v1.15.1: 3d42441ae177826f1181e559cd2a729464ca8efadef196cfa0e8053a615333b5 v1.15.0: fc4aa44b96dc143d7c3062124e25fed671cab884ebb8b2446edd10abb45e88c2 + v1.14.6: 4ef6030ab059ed434702c003975273dc855c370c4fcdae1109a3bb137c16ecb9 v1.14.5: b3e840f7816f64e071d25f8a90b984eecd6251b68e568b420d85ef0a4dd514bb v1.14.4: 291790a1cef82c4de28cc3338a199ca8356838ca26f775f2c2acba165b633d9f v1.14.3: 026700dfff3c78be1295417e96d882136e5e1f095eb843e6575e57ef9930b5d3 @@ -214,115 +233,115 @@ crictl_binary_checksum: "{{ crictl_checksums[image_arch][crictl_version] }}" # You need to deploy kubernetes cluster on local private development. # Also provide the address of your own private registry. # And use --insecure-registry options for docker -etcd_image_repo: "quay.io/coreos/etcd" +etcd_image_repo: "{{ quay_image_repo }}/coreos/etcd" etcd_image_tag: "{{ etcd_version }}{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}" -flannel_image_repo: "quay.io/coreos/flannel" +flannel_image_repo: "{{ quay_image_repo }}/coreos/flannel" flannel_image_tag: "{{ flannel_version }}" -flannel_cni_image_repo: "quay.io/coreos/flannel-cni" +flannel_cni_image_repo: "{{ quay_image_repo }}/coreos/flannel-cni" flannel_cni_image_tag: "{{ flannel_cni_version }}" -calico_node_image_repo: "docker.io/calico/node" +calico_node_image_repo: "{{ docker_image_repo }}/calico/node" calico_node_image_tag: "{{ calico_version }}" -calico_cni_image_repo: "docker.io/calico/cni" +calico_cni_image_repo: "{{ docker_image_repo }}/calico/cni" calico_cni_image_tag: "{{ calico_cni_version }}" -calico_policy_image_repo: "docker.io/calico/kube-controllers" +calico_policy_image_repo: "{{ docker_image_repo }}/calico/kube-controllers" calico_policy_image_tag: "{{ calico_policy_version }}" -calico_rr_image_repo: "docker.io/calico/routereflector" +calico_rr_image_repo: "{{ docker_image_repo }}/calico/routereflector" calico_rr_image_tag: "{{ calico_rr_version }}" -calico_typha_image_repo: "docker.io/calico/typha" +calico_typha_image_repo: "{{ docker_image_repo }}/calico/typha" calico_typha_image_tag: "{{ calico_typha_version }}" -pod_infra_image_repo: "gcr.io/google_containers/pause-{{ image_arch }}" +pod_infra_image_repo: "{{ gcr_image_repo }}/google_containers/pause-{{ image_arch }}" pod_infra_image_tag: "{{ pod_infra_version }}" -install_socat_image_repo: "docker.io/xueshanf/install-socat" +install_socat_image_repo: "{{ docker_image_repo }}/xueshanf/install-socat" install_socat_image_tag: "latest" netcheck_version: "v1.0" -netcheck_agent_image_repo: "quay.io/l23network/k8s-netchecker-agent" +netcheck_agent_image_repo: "{{ quay_image_repo }}/l23network/k8s-netchecker-agent" netcheck_agent_image_tag: "{{ netcheck_version }}" -netcheck_server_image_repo: "quay.io/l23network/k8s-netchecker-server" +netcheck_server_image_repo: "{{ quay_image_repo }}/l23network/k8s-netchecker-server" netcheck_server_image_tag: "{{ netcheck_version }}" -weave_kube_image_repo: "docker.io/weaveworks/weave-kube" +weave_kube_image_repo: "{{ docker_image_repo }}/weaveworks/weave-kube" weave_kube_image_tag: "{{ weave_version }}" -weave_npc_image_repo: "docker.io/weaveworks/weave-npc" +weave_npc_image_repo: "{{ docker_image_repo }}/weaveworks/weave-npc" weave_npc_image_tag: "{{ weave_version 
}}" -contiv_image_repo: "docker.io/contiv/netplugin" +contiv_image_repo: "{{ docker_image_repo }}/contiv/netplugin" contiv_image_tag: "{{ contiv_version }}" -contiv_init_image_repo: "docker.io/contiv/netplugin-init" +contiv_init_image_repo: "{{ docker_image_repo }}/contiv/netplugin-init" contiv_init_image_tag: "latest" -contiv_auth_proxy_image_repo: "docker.io/contiv/auth_proxy" +contiv_auth_proxy_image_repo: "{{ docker_image_repo }}/contiv/auth_proxy" contiv_auth_proxy_image_tag: "{{ contiv_version }}" -contiv_etcd_init_image_repo: "docker.io/ferest/etcd-initer" +contiv_etcd_init_image_repo: "{{ docker_image_repo }}/ferest/etcd-initer" contiv_etcd_init_image_tag: latest -contiv_ovs_image_repo: "docker.io/contiv/ovs" +contiv_ovs_image_repo: "{{ docker_image_repo }}/contiv/ovs" contiv_ovs_image_tag: "latest" -cilium_image_repo: "docker.io/cilium/cilium" +cilium_image_repo: "{{ docker_image_repo }}/cilium/cilium" cilium_image_tag: "{{ cilium_version }}" -cilium_init_image_repo: "docker.io/cilium/cilium-init" +cilium_init_image_repo: "{{ docker_image_repo }}/cilium/cilium-init" cilium_init_image_tag: "2019-04-05" -cilium_operator_image_repo: "docker.io/cilium/operator" +cilium_operator_image_repo: "{{ docker_image_repo }}/cilium/operator" cilium_operator_image_tag: "{{ cilium_version }}" -kube_ovn_db_image_repo: "docker.io/kubeovn/kube-ovn-db" -kube_ovn_node_image_repo: "docker.io/kubeovn/kube-ovn-node" -kube_ovn_cni_image_repo: "docker.io/kubeovn/kube-ovn-cni" -kube_ovn_controller_image_repo: "kubeovn/kube-ovn-controller" +kube_ovn_db_image_repo: "{{ docker_image_repo }}/kubeovn/kube-ovn-db" +kube_ovn_node_image_repo: "{{ docker_image_repo }}/kubeovn/kube-ovn-node" +kube_ovn_cni_image_repo: "{{ docker_image_repo }}/kubeovn/kube-ovn-cni" +kube_ovn_controller_image_repo: "{{ docker_image_repo }}/kubeovn/kube-ovn-controller" kube_ovn_db_image_tag: "{{ kube_ovn_version }}" kube_ovn_node_image_tag: "{{ kube_ovn_version }}" kube_ovn_controller_image_tag: "{{ kube_ovn_version }}" kube_ovn_cni_image_tag: "{{ kube_ovn_version }}" -kube_router_image_repo: "docker.io/cloudnativelabs/kube-router" +kube_router_image_repo: "{{ docker_image_repo }}/cloudnativelabs/kube-router" kube_router_image_tag: "{{ kube_router_version }}" -multus_image_repo: "docker.io/nfvpe/multus" +multus_image_repo: "{{ docker_image_repo }}/nfvpe/multus" multus_image_tag: "{{ multus_version }}" -nginx_image_repo: docker.io/nginx +nginx_image_repo: "{{ docker_image_repo }}/library/nginx" nginx_image_tag: 1.15 -haproxy_image_repo: docker.io/haproxy +haproxy_image_repo: "{{ docker_image_repo }}/library/haproxy" haproxy_image_tag: 1.9 coredns_version: "1.6.0" -coredns_image_repo: "docker.io/coredns/coredns" +coredns_image_repo: "{{ docker_image_repo }}/coredns/coredns" coredns_image_tag: "{{ coredns_version }}" nodelocaldns_version: "1.15.4" -nodelocaldns_image_repo: "k8s.gcr.io/k8s-dns-node-cache" +nodelocaldns_image_repo: "{{ kube_image_repo }}/k8s-dns-node-cache" nodelocaldns_image_tag: "{{ nodelocaldns_version }}" dnsautoscaler_version: 1.6.0 -dnsautoscaler_image_repo: "k8s.gcr.io/cluster-proportional-autoscaler-{{ image_arch }}" +dnsautoscaler_image_repo: "{{ kube_image_repo }}/cluster-proportional-autoscaler-{{ image_arch }}" dnsautoscaler_image_tag: "{{ dnsautoscaler_version }}" -test_image_repo: docker.io/busybox +test_image_repo: "{{ docker_image_repo }}/library/busybox" test_image_tag: latest -busybox_image_repo: docker.io/busybox +busybox_image_repo: "{{ docker_image_repo }}/library/busybox" busybox_image_tag: 1.29.2 
helm_version: "v2.14.3" -helm_image_repo: "docker.io/lachlanevenson/k8s-helm" +helm_image_repo: "{{ docker_image_repo }}/lachlanevenson/k8s-helm" helm_image_tag: "{{ helm_version }}" -tiller_image_repo: "gcr.io/kubernetes-helm/tiller" +tiller_image_repo: "{{ gcr_image_repo }}/kubernetes-helm/tiller" tiller_image_tag: "{{ helm_version }}" -registry_image_repo: "docker.io/registry" +registry_image_repo: "{{ docker_image_repo }}/library/registry" registry_image_tag: "2.6" -registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy" +registry_proxy_image_repo: "{{ gcr_image_repo }}/google_containers/kube-registry-proxy" registry_proxy_image_tag: "0.4" metrics_server_version: "v0.3.3" -metrics_server_image_repo: "gcr.io/google_containers/metrics-server-amd64" +metrics_server_image_repo: "{{ gcr_image_repo }}/google_containers/metrics-server-amd64" metrics_server_image_tag: "{{ metrics_server_version }}" -local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner" +local_volume_provisioner_image_repo: "{{ quay_image_repo }}/external_storage/local-volume-provisioner" local_volume_provisioner_image_tag: "v2.3.2" -cephfs_provisioner_image_repo: "quay.io/external_storage/cephfs-provisioner" +cephfs_provisioner_image_repo: "{{ quay_image_repo }}/external_storage/cephfs-provisioner" cephfs_provisioner_image_tag: "v2.1.0-k8s1.11" -rbd_provisioner_image_repo: "quay.io/external_storage/rbd-provisioner" +rbd_provisioner_image_repo: "{{ quay_image_repo }}/external_storage/rbd-provisioner" rbd_provisioner_image_tag: "v2.1.1-k8s1.11" -local_path_provisioner_image_repo: "docker.io/rancher/local-path-provisioner" +local_path_provisioner_image_repo: "{{ docker_image_repo }}/rancher/local-path-provisioner" local_path_provisioner_image_tag: "v0.0.2" -ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller" +ingress_nginx_controller_image_repo: "{{ quay_image_repo }}/kubernetes-ingress-controller/nginx-ingress-controller" ingress_nginx_controller_image_tag: "0.25.1" cert_manager_version: "v0.5.2" -cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller" +cert_manager_controller_image_repo: "{{ quay_image_repo }}/jetstack/cert-manager-controller" cert_manager_controller_image_tag: "{{ cert_manager_version }}" addon_resizer_version: "1.8.3" -addon_resizer_image_repo: "k8s.gcr.io/addon-resizer" +addon_resizer_image_repo: "{{ kube_image_repo }}/addon-resizer" addon_resizer_image_tag: "{{ addon_resizer_version }}" -dashboard_image_repo: "gcr.io/google_containers/kubernetes-dashboard-{{ image_arch }}" +dashboard_image_repo: "{{ gcr_image_repo }}/google_containers/kubernetes-dashboard-{{ image_arch }}" dashboard_image_tag: "v1.10.1" image_pull_command: "{{ docker_bin_dir }}/docker pull" diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 0d81eef181a..c780ad89b03 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -7,21 +7,6 @@ - download - upload -- name: Use cri-o for cri connection - set_fact: - cri_socket: /var/run/crio/crio.sock - when: container_manager == 'crio' - -- name: Use containerd for cri connetion - set_fact: - cri_socket: /var/run/containerd/containerd.sock - when: container_manager == 'containerd' - -- name: Use docker for cri connetion - set_fact: - cri_socket: /var/run/dockershim.sock - when: container_manager == 'docker' - - include_tasks: ../../container-engine/containerd/tasks/crictl.yml when: - not skip_downloads|default(false) diff 
--git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml index d99700dbbe1..81121c53bc1 100644 --- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml +++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml @@ -1,25 +1,4 @@ --- - -- name: Kubernetes Apps | Check if netchecker-server manifest already exists - stat: - path: "{{ kube_config_dir }}/netchecker-server-deployment.yml" - register: netchecker_server_manifest - tags: - - facts - - upgrade - -- name: Kubernetes Apps | Apply netchecker-server manifest to update annotations - kube: - name: "netchecker-server" - namespace: "{{ netcheck_namespace }}" - filename: "{{ netchecker_server_manifest.stat.path }}" - kubectl: "{{ bin_dir }}/kubectl" - resource: "deploy" - state: latest - when: inventory_hostname == groups['kube-master'][0] and netchecker_server_manifest.stat.exists - tags: - - upgrade - - name: Kubernetes Apps | Netchecker Templates list set_fact: netchecker_templates: diff --git a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 index e6406efb5fc..dcb976de743 100644 --- a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 @@ -1,5 +1,5 @@ --- -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: "coredns{{ coredns_ordinal_suffix }}" diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 index c91733cb340..eafb1029fe3 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 @@ -1,4 +1,4 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: labels: @@ -6,6 +6,9 @@ metadata: name: netchecker-agent namespace: {{ netcheck_namespace }} spec: + selector: + matchLabels: + app: netchecker-agent template: metadata: name: netchecker-agent diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 index 22eb2db6895..af57ceb6f88 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 @@ -1,4 +1,4 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: labels: @@ -6,6 +6,9 @@ metadata: name: netchecker-agent-hostnet namespace: {{ netcheck_namespace }} spec: + selector: + matchLabels: + app: netchecker-agent-hostnet template: metadata: name: netchecker-agent-hostnet diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 index 76d50e4a4ea..53f6c480782 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 @@ -1,15 +1,20 @@ -apiVersion: apps/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: netchecker-server + namespace: {{ netcheck_namespace }} + labels: + app: netchecker-server spec: replicas: 1 + selector: + matchLabels: + app: netchecker-server template: metadata: name: netchecker-server labels: app: netchecker-server - namespace: {{ netcheck_namespace }} spec: priorityClassName: {% if 
netcheck_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}} containers: diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2 b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2 index 97aff97ac7a..ea097ede41f 100644 --- a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2 +++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2 @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: name: nvidia-driver-installer namespace: kube-system spec: + selector: + matchLabels: + name: nvidia-driver-installer template: metadata: labels: diff --git a/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2 index 384d13b586b..231d4c4ac81 100644 --- a/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2 +++ b/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2 @@ -1,5 +1,5 @@ --- -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: local-path-provisioner diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 index 4f67c0e6ae5..3aa90fccd4a 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 @@ -1,6 +1,6 @@ # Macro to convert camelCase dictionary keys to snake_case keys -{%- macro convert_keys(mydict) %} - {% for key in mydict.keys() -%} +{% macro convert_keys(mydict) -%} + {% for key in mydict.keys()|list -%} {% set key_split = key.split('_') -%} {% set new_key = key_split[0] + key_split[1:]|map('capitalize')|join -%} {% set value = mydict.pop(key) -%} @@ -21,5 +21,4 @@ data: {{ class_name }}: {{- convert_keys(storage_class) }} {{ storage_class | to_nice_yaml(indent=2) | indent(6) }} -{% endfor %} - +{%- endfor %} diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2 index 4a4afae3a19..81e0260c7d0 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2 +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2 @@ -1,4 +1,4 @@ -{% for class_name in local_volume_provisioner_storage_classes.keys() %} +{% for class_name, class_config in local_volume_provisioner_storage_classes.items() %} --- apiVersion: storage.k8s.io/v1 kind: StorageClass @@ -6,4 +6,7 @@ metadata: name: {{ class_name }} provisioner: kubernetes.io/no-provisioner volumeBindingMode: WaitForFirstConsumer +{% if 
class_config.reclaim_policy is defined %} +reclaimPolicy: {{ class_config.reclaim_policy }} +{% endif %} {% endfor %} diff --git a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/defaults/main.yml index 11bec1f7a8a..f09e25ac0f9 100644 --- a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/defaults/main.yml +++ b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/defaults/main.yml @@ -5,7 +5,7 @@ rbd_provisioner_monitors: ~ rbd_provisioner_pool: kube rbd_provisioner_admin_id: admin rbd_provisioner_secret_name: ceph-secret-admin -rbd_provisioner_secret_token: ceph-key-admin +rbd_provisioner_secret: ceph-key-admin rbd_provisioner_user_id: kube rbd_provisioner_user_secret_name: ceph-secret-user rbd_provisioner_user_secret: ceph-key-user diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index ecb3688a746..721cd59dd34 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -36,8 +36,9 @@ include_tasks: "gen_helm_tiller_certs.yml" when: tiller_enable_tls +# FIXME: https://github.com/helm/helm/issues/6374 - name: Helm | Install/upgrade helm - command: > + shell: > {{ bin_dir }}/helm init --tiller-namespace={{ tiller_namespace }} {% if helm_skip_refresh %} --skip-refresh{% endif %} {% if helm_stable_repo_url is defined %} --stable-repo-url {{ helm_stable_repo_url }}{% endif %} @@ -51,6 +52,10 @@ {% if tiller_enable_tls %} --tiller-tls --tiller-tls-verify --tiller-tls-cert={{ tiller_tls_cert }} --tiller-tls-key={{ tiller_tls_key }} --tls-ca-cert={{ tiller_tls_ca_cert }} {% endif %} {% if tiller_secure_release_info %} --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}' {% endif %} {% if tiller_wait %} --wait{% endif %} + --output yaml + | sed 's@apiVersion: extensions/v1beta1@apiVersion: apps/v1@' + | {{ bin_dir }}/kubectl patch --local -oyaml -f - -p '{"spec":{"selector": {"app":"helm","name":"tiller"} } }' + | {{ bin_dir }}/kubectl apply -f - {% else %} --client-only {% endif %} @@ -73,6 +78,8 @@ {% if tiller_secure_release_info %} --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}' {% endif %} {% if tiller_wait %} --wait{% endif %} --output yaml + | sed 's@apiVersion: extensions/v1beta1@apiVersion: apps/v1@' + | {{ bin_dir }}/kubectl patch --local -oyaml -f - -p '{"spec":{"selector": {"app":"helm","name":"tiller"} } }' | {{ bin_dir }}/kubectl apply -f - changed_when: false when: diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2 index daf82ebe058..383dab5d3b7 100644 --- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2 @@ -1,5 +1,5 @@ --- -apiVersion: apps/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: cert-manager diff --git a/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 b/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 index a536cfd3527..9e27626e7b7 100644 --- a/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 +++ b/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 @@ -1,5 +1,5 @@ --- -apiVersion: 
extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: metrics-server diff --git a/roles/kubernetes-apps/registry/README.md b/roles/kubernetes-apps/registry/README.md index 06bc99a2b46..eccf384a9f4 100644 --- a/roles/kubernetes-apps/registry/README.md +++ b/roles/kubernetes-apps/registry/README.md @@ -186,7 +186,7 @@ node by creating following daemonset. ``` yaml -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: name: kube-registry-proxy diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml index af5a0855d9b..d749e7956af 100644 --- a/roles/kubernetes/kubeadm/tasks/main.yml +++ b/roles/kubernetes/kubeadm/tasks/main.yml @@ -104,10 +104,12 @@ - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "") notify: restart kubelet +# FIXME(mattymo): Need to point to localhost, otherwise masters will all point +# incorrectly to first master, creating SPoF. - name: Update server field in kube-proxy kubeconfig shell: >- {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get configmap kube-proxy -n kube-system -o yaml - | sed 's#server:.*#server:\ {{ kube_apiserver_endpoint }}#g' + | sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g' | {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf replace -f - run_once: true when: diff --git a/roles/kubernetes/master/defaults/main/main.yml b/roles/kubernetes/master/defaults/main/main.yml index 4cdd20979af..ae34bf1673f 100644 --- a/roles/kubernetes/master/defaults/main/main.yml +++ b/roles/kubernetes/master/defaults/main/main.yml @@ -172,3 +172,6 @@ kube_override_hostname: >- {%- endif -%} secrets_encryption_query: "resources[*].providers[0].{{kube_encryption_algorithm}}.keys[0].secret" + +# use HyperKube image to control plane containers +kubeadm_use_hyperkube_image: False diff --git a/roles/kubernetes/master/tasks/kubeadm-fix-apiserver.yml b/roles/kubernetes/master/tasks/kubeadm-fix-apiserver.yml new file mode 100644 index 00000000000..4200e6d7147 --- /dev/null +++ b/roles/kubernetes/master/tasks/kubeadm-fix-apiserver.yml @@ -0,0 +1,45 @@ +--- +- name: Test if correct apiserver is set in all kubeconfigs + shell: >- + grep -Fq "{{ kube_apiserver_endpoint }}" {{ kube_config_dir }}/admin.conf && + grep -Fq "{{ kube_apiserver_endpoint }}" {{ kube_config_dir }}/controller-manager.conf && + grep -Fq "{{ kube_apiserver_endpoint }}" {{ kube_config_dir }}/kubelet.conf && + grep -Fq "{{ kube_apiserver_endpoint }}" {{ kube_config_dir }}/scheduler.conf + register: kubeconfig_correct_apiserver + changed_when: False + failed_when: False + +- name: Create temporary directory + tempfile: + state: directory + register: kubeconfig_temp_dir + when: kubeconfig_correct_apiserver.rc != 0 + +- name: Generate new kubeconfigs with correct apiserver + command: >- + {{ bin_dir }}/kubeadm init phase kubeconfig all + --config {{ kube_config_dir }}/kubeadm-config.yaml + --kubeconfig-dir {{ kubeconfig_temp_dir.path }} + when: kubeconfig_correct_apiserver.rc != 0 + +- name: Copy new kubeconfigs to kube config dir + copy: + src: "{{ kubeconfig_temp_dir.path }}/{{ item }}" + dest: "{{ kube_config_dir }}/{{ item }}" + remote_src: yes + when: kubeconfig_correct_apiserver.rc != 0 + with_items: + - admin.conf + - controller-manager.conf + - kubelet.conf + - scheduler.conf + notify: + - "Master | Restart kube-controller-manager" + - "Master | Restart kube-scheduler" + - "Master | reload kubelet" + +- name: Cleanup temporary directory + 
file: + path: "{{ kubeconfig_temp_dir.path }}" + state: absent + when: kubeconfig_correct_apiserver.rc != 0 diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml index 6d881caf917..4d646d22f46 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -73,3 +73,6 @@ - name: Include kubeadm etcd extra tasks include_tasks: kubeadm-etcd.yml when: etcd_kubeadm_enabled + +- name: Include kubeadm secondary server apiserver fixes + include_tasks: kubeadm-fix-apiserver.yml diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2 index eeb557d3f82..961ab5b8800 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2 @@ -91,7 +91,7 @@ controlPlaneEndpoint: {{ ip | default(fallback_ips[inventory_hostname]) }}:{{ ku {% endif %} certificatesDir: {{ kube_cert_dir }} imageRepository: {{ kube_image_repo }} -useHyperKubeImage: false +useHyperKubeImage: {{ kubeadm_use_hyperkube_image }} apiServer: extraArgs: {% if kube_api_anonymous_auth is defined %} diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1beta2.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1beta2.yaml.j2 index 0bd64cd00ec..1f3031430dc 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.v1beta2.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.v1beta2.yaml.j2 @@ -94,7 +94,7 @@ controlPlaneEndpoint: {{ ip | default(fallback_ips[inventory_hostname]) }}:{{ ku {% endif %} certificatesDir: {{ kube_cert_dir }} imageRepository: {{ kube_image_repo }} -useHyperKubeImage: false +useHyperKubeImage: {{ kubeadm_use_hyperkube_image }} apiServer: extraArgs: {% if kube_api_anonymous_auth is defined %} diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index 3b549da86ff..c5180028cec 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -46,9 +46,17 @@ kubelet_status_update_frequency: 10s loadbalancer_apiserver_memory_requests: 32M loadbalancer_apiserver_cpu_requests: 25m +loadbalancer_apiserver_keepalive_timeout: 5m + +# Uncomment if you need to enable deprecated runtimes # kube_api_runtime_config: +# - apps/v1beta1=true +# - apps/v1beta2=true # - extensions/v1beta1/daemonsets=true # - extensions/v1beta1/deployments=true +# - extensions/v1beta1/replicasets=true +# - extensions/v1beta1/networkpolicies=true +# - extensions/v1beta1/podsecuritypolicies=true # A port range to reserve for services with NodePort visibility. # Inclusive at both ends of the range. 
diff --git a/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml b/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml index 529bc3f8fbe..41373eb30c9 100644 --- a/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml +++ b/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml @@ -4,7 +4,7 @@ msg: "azure_tenant_id is missing" when: azure_tenant_id is not defined or not azure_tenant_id -- name: check openstack_username value +- name: check azure_subscription_id value fail: msg: "azure_subscription_id is missing" when: azure_subscription_id is not defined or not azure_subscription_id diff --git a/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 b/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 index 6c467bda28d..ef3269fc85c 100644 --- a/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 +++ b/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 @@ -13,7 +13,7 @@ defaults timeout http-request 5m timeout queue 5m timeout connect 30s - timeout client 15m + timeout client {{ loadbalancer_apiserver_keepalive_timeout }} timeout server 15m timeout http-keep-alive 30s timeout check 30s diff --git a/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 b/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 index 862a7c6db9e..6361a6f391b 100644 --- a/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 +++ b/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 @@ -32,7 +32,7 @@ http { tcp_nopush on; tcp_nodelay on; - keepalive_timeout 75s; + keepalive_timeout {{ loadbalancer_apiserver_keepalive_timeout }}; keepalive_requests 100; reset_timedout_connection on; server_tokens off; diff --git a/roles/kubernetes/preinstall/vars/centos.yml b/roles/kubernetes/preinstall/vars/centos.yml index 67fbfe085da..68406d61d5b 100644 --- a/roles/kubernetes/preinstall/vars/centos.yml +++ b/roles/kubernetes/preinstall/vars/centos.yml @@ -1,6 +1,6 @@ --- required_pkgs: - - libselinux-python + - "{{ ( (ansible_facts.distribution_major_version | default(0) | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" - device-mapper-libs - ebtables - nss diff --git a/roles/kubernetes/preinstall/vars/redhat.yml b/roles/kubernetes/preinstall/vars/redhat.yml index 67fbfe085da..68406d61d5b 100644 --- a/roles/kubernetes/preinstall/vars/redhat.yml +++ b/roles/kubernetes/preinstall/vars/redhat.yml @@ -1,6 +1,6 @@ --- required_pkgs: - - libselinux-python + - "{{ ( (ansible_facts.distribution_major_version | default(0) | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" - device-mapper-libs - ebtables - nss diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index b6bd45dd29b..4945fdd2e86 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -198,6 +198,16 @@ kube_profiling: false # Container for runtime container_manager: docker +# CRI socket path +cri_socket: >- + {%- if container_manager == 'crio' -%} + /var/run/crio/crio.sock + {%- elif container_manager == 'containerd' -%} + /var/run/containerd/containerd.sock + {%- else -%} + /var/run/dockershim.sock + {%- endif -%} + ## Uncomment this if you want to force overlay/overlay2 as docker storage driver ## Please note that overlay2 is only supported on newer kernels # docker_storage_options: -s overlay2 diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 
b/roles/network_plugin/calico/templates/calico-node.yml.j2 index 75c5bbe73a4..ab64450c2a7 100644 --- a/roles/network_plugin/calico/templates/calico-node.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-node.yml.j2 @@ -3,7 +3,7 @@ # as the Calico CNI plugins and network config on # each master and worker node in a Kubernetes cluster. kind: DaemonSet -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 metadata: name: calico-node namespace: kube-system diff --git a/roles/network_plugin/calico/templates/calico-typha.yml.j2 b/roles/network_plugin/calico/templates/calico-typha.yml.j2 index c7402dacec7..87bdd81626c 100644 --- a/roles/network_plugin/calico/templates/calico-typha.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-typha.yml.j2 @@ -21,7 +21,7 @@ spec: # This manifest creates a Deployment of Typha to back the above service. -apiVersion: apps/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: calico-typha diff --git a/roles/network_plugin/canal/templates/canal-node.yaml.j2 b/roles/network_plugin/canal/templates/canal-node.yaml.j2 index 354cd0b94c4..127d52330ce 100644 --- a/roles/network_plugin/canal/templates/canal-node.yaml.j2 +++ b/roles/network_plugin/canal/templates/canal-node.yaml.j2 @@ -1,6 +1,6 @@ --- kind: DaemonSet -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 metadata: name: canal-node namespace: kube-system diff --git a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 index 156c3d9a063..74c60896fdc 100644 --- a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 @@ -1,5 +1,5 @@ # This manifest deploys the Contiv API Proxy Server on Kubernetes. -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: name: contiv-api-proxy @@ -9,6 +9,9 @@ metadata: spec: updateStrategy: type: RollingUpdate + selector: + matchLabels: + k8s-app: contiv-api-proxy template: metadata: name: contiv-api-proxy diff --git a/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2 b/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2 index a1354f4259b..6d6938823e5 100644 --- a/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2 @@ -1,6 +1,6 @@ --- kind: DaemonSet -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 metadata: name: contiv-cleanup namespace: kube-system diff --git a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 index 6651ad9a5c9..9725a0f2a22 100644 --- a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 @@ -1,6 +1,6 @@ --- kind: DaemonSet -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 metadata: name: contiv-etcd-proxy namespace: kube-system diff --git a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 index 0b49bbfa468..6341be719b7 100644 --- a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 @@ -1,6 +1,6 @@ --- kind: DaemonSet -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 metadata: name: contiv-etcd namespace: kube-system diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 index 
e3d3907893e..2ffa8962b22 100644 --- a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 @@ -1,6 +1,6 @@ --- kind: DaemonSet -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 metadata: name: contiv-netmaster namespace: kube-system @@ -9,6 +9,9 @@ metadata: spec: updateStrategy: type: RollingUpdate + selector: + matchLabels: + k8s-app: contiv-netmaster template: metadata: name: contiv-netmaster diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 index a3cf6bc9e98..80c4e32fc79 100644 --- a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 @@ -3,7 +3,7 @@ # as the Contiv CNI plugins and network config on # each master and worker node in a Kubernetes cluster. kind: DaemonSet -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 metadata: name: contiv-netplugin namespace: kube-system diff --git a/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2 b/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2 index 4bb4be09600..56680b0bd0c 100644 --- a/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2 @@ -2,7 +2,7 @@ apiVersion: apps/v1 # This manifest deploys the contiv-ovs pod. kind: DaemonSet -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 metadata: name: contiv-ovs namespace: kube-system diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 index c549e080413..9afabd15177 100644 --- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 +++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 @@ -37,7 +37,7 @@ data: } } --- -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: name: kube-flannel @@ -46,6 +46,10 @@ metadata: tier: node k8s-app: flannel spec: + selector: + matchLabels: + tier: node + k8s-app: flannel template: metadata: labels: diff --git a/roles/network_plugin/kube-router/templates/kube-router.yml.j2 b/roles/network_plugin/kube-router/templates/kube-router.yml.j2 index cc820882473..5bf07ea58e5 100644 --- a/roles/network_plugin/kube-router/templates/kube-router.yml.j2 +++ b/roles/network_plugin/kube-router/templates/kube-router.yml.j2 @@ -41,7 +41,7 @@ data: current-context: kube-router-context --- -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: labels: @@ -55,6 +55,10 @@ spec: rollingUpdate: maxUnavailable: 1 type: RollingUpdate + selector: + matchLabels: + k8s-app: kube-router + tier: node template: metadata: labels: diff --git a/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 b/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 index 96847aa14e7..3e2fbd9cdf8 100644 --- a/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 +++ b/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 @@ -1,6 +1,6 @@ --- kind: DaemonSet -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 metadata: name: kube-multus-ds-amd64 namespace: kube-system @@ -8,6 +8,10 @@ metadata: tier: node app: multus spec: + selector: + matchLabels: + tier: node + app: multus template: metadata: labels: diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2 index 33931fd4878..222b2f97a20 100644 --- 
a/roles/network_plugin/weave/templates/weave-net.yml.j2 +++ b/roles/network_plugin/weave/templates/weave-net.yml.j2 @@ -101,7 +101,7 @@ items: - kind: ServiceAccount name: weave-net namespace: kube-system - - apiVersion: extensions/v1beta1 + - apiVersion: apps/v1 kind: DaemonSet metadata: name: weave-net @@ -110,6 +110,9 @@ items: namespace: kube-system spec: minReadySeconds: 5 + selector: + matchLabels: + name: weave-net template: metadata: labels: