Merge branch 'master' into feature/virtualbox
* master: (23 commits)
  retry to fetch binary if it fails first time (kubernetes-sigs#7839)
  Update vSphere CPI (kubernetes-sigs#7838)
  doc: Update 'Kubespray vs Kubeadm' (kubernetes-sigs#7834)
  Update MetalLB documentation (kubernetes-sigs#7833)
  Disable OVH CI until voucher situation is cleared up (kubernetes-sigs#7824)
  Fix how to get image ID on offline deployment (kubernetes-sigs#7808)
  CRI-O: Install libseccomp2 from backports on Debian 10 (kubernetes-sigs#7816)
  fix(misc): contrib/terraform/aws (kubernetes-sigs#7818)
  Separate gvisor_download_url for runsc and shim (kubernetes-sigs#7760)
  Allow failure on tf-elastx_ubuntu18-calico (kubernetes-sigs#7814)
  Add containerd on Flatcar Container Linux (kubernetes-sigs#7681)
  Fixup label for oracle linux bootstrap
  Update multus to 3.7.2 (and move to ghcr.io)
  Set default k8s version to 1.21.3
  Add hashes for k8s 1.20.8/.9 and 1.19.12/.13 and 1.21.3
  Fix erroneous ansible args
  Update kube-router to 1.3.0
  Update flannel to 0.14.0 (moved from coreos repo to flannel-io)
  Use dashboard 2.3.1 image
  Set Helm default version to 3.6.3
  ...
Quehenr committed Aug 2, 2021
2 parents 0301861 + 31a5a4e commit 474597b
Showing 31 changed files with 423 additions and 143 deletions.
80 changes: 41 additions & 39 deletions .gitlab-ci/terraform.yml
@@ -207,6 +207,7 @@ tf-elastx_ubuntu18-calico:
extends: .terraform_apply
stage: deploy-part3
when: on_success
allow_failure: true
variables:
<<: *elastx_variables
TF_VERSION: $TERRAFORM_14_VERSION
@@ -235,44 +236,45 @@ tf-elastx_ubuntu18-calico:
TF_VAR_image: ubuntu-18.04-server-latest
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

# OVH voucher expired, commenting job until things are sorted out

tf-ovh_cleanup:
stage: unit-tests
tags: [light]
image: python
environment: ovh
variables:
<<: *ovh_variables
before_script:
- pip install -r scripts/openstack-cleanup/requirements.txt
script:
- ./scripts/openstack-cleanup/main.py
# tf-ovh_cleanup:
# stage: unit-tests
# tags: [light]
# image: python
# environment: ovh
# variables:
# <<: *ovh_variables
# before_script:
# - pip install -r scripts/openstack-cleanup/requirements.txt
# script:
# - ./scripts/openstack-cleanup/main.py

tf-ovh_ubuntu18-calico:
extends: .terraform_apply
when: on_success
environment: ovh
variables:
<<: *ovh_variables
TF_VERSION: $TERRAFORM_14_VERSION
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
SSH_USER: ubuntu
TF_VAR_number_of_k8s_masters: "0"
TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
TF_VAR_number_of_etcd: "0"
TF_VAR_number_of_k8s_nodes: "0"
TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
TF_VAR_number_of_bastions: "0"
TF_VAR_number_of_k8s_masters_no_etcd: "0"
TF_VAR_use_neutron: "0"
TF_VAR_floatingip_pool: "Ext-Net"
TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
TF_VAR_network_name: "Ext-Net"
TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
TF_VAR_image: "Ubuntu 18.04"
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
# tf-ovh_ubuntu18-calico:
# extends: .terraform_apply
# when: on_success
# environment: ovh
# variables:
# <<: *ovh_variables
# TF_VERSION: $TERRAFORM_14_VERSION
# PROVIDER: openstack
# CLUSTER: $CI_COMMIT_REF_NAME
# ANSIBLE_TIMEOUT: "60"
# SSH_USER: ubuntu
# TF_VAR_number_of_k8s_masters: "0"
# TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
# TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
# TF_VAR_number_of_etcd: "0"
# TF_VAR_number_of_k8s_nodes: "0"
# TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
# TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
# TF_VAR_number_of_bastions: "0"
# TF_VAR_number_of_k8s_masters_no_etcd: "0"
# TF_VAR_use_neutron: "0"
# TF_VAR_floatingip_pool: "Ext-Net"
# TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
# TF_VAR_network_name: "Ext-Net"
# TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_image: "Ubuntu 18.04"
# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
10 changes: 5 additions & 5 deletions README.md
@@ -130,7 +130,7 @@ Note: Upstart/SysV init based OS types are not supported.
## Supported Components

- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.21.2
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.21.3
- [etcd](https://github.com/coreos/etcd) v3.4.13
- [docker](https://www.docker.com/) v20.10 (see note)
- [containerd](https://containerd.io/) v1.4.6
@@ -140,10 +140,10 @@ Note: Upstart/SysV init based OS types are not supported.
- [calico](https://github.com/projectcalico/calico) v3.17.4
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.8.9
- [flanneld](https://github.com/coreos/flannel) v0.13.0
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.7.0
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.2.2
- [multus](https://github.com/intel/multus-cni) v3.7.0
- [flanneld](https://github.com/flannel-io/flannel) v0.14.0
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.7.1
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.3.0
- [multus](https://github.com/intel/multus-cni) v3.7.2
- [ovn4nfv](https://github.com/opnfv/ovn4nfv-k8s-plugin) v1.1.0
- [weave](https://github.com/weaveworks/weave) v2.8.1
- Application
30 changes: 25 additions & 5 deletions contrib/offline/manage-offline-container-images.sh
@@ -100,15 +100,35 @@ function register_container_images() {

tar -zxvf ${IMAGE_TAR_FILE}
sudo docker load -i ${IMAGE_DIR}/registry-latest.tar
sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
set +e

sudo docker container inspect registry >/dev/null 2>&1
if [ $? -ne 0 ]; then
sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
fi
set -e

while read -r line; do
file_name=$(echo ${line} | awk '{print $1}')
org_image=$(echo ${line} | awk '{print $2}')
new_image="${LOCALHOST_NAME}:5000/${org_image}"
image_id=$(tar -tf ${IMAGE_DIR}/${file_name} | grep "\.json" | grep -v manifest.json | sed s/"\.json"//)
raw_image=$(echo ${line} | awk '{print $2}')
new_image="${LOCALHOST_NAME}:5000/${raw_image}"
org_image=$(sudo docker load -i ${IMAGE_DIR}/${file_name} | head -n1 | awk '{print $3}')
image_id=$(sudo docker image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//)
if [ -z "${file_name}" ]; then
echo "Failed to get file_name for line ${line}"
exit 1
fi
if [ -z "${raw_image}" ]; then
echo "Failed to get raw_image for line ${line}"
exit 1
fi
if [ -z "${org_image}" ]; then
echo "Failed to get org_image for line ${line}"
exit 1
fi
if [ -z "${image_id}" ]; then
echo "Failed to get image_id for file ${file_name}"
exit 1
fi
sudo docker load -i ${IMAGE_DIR}/${file_name}
sudo docker tag ${image_id} ${new_image}
sudo docker push ${new_image}
1 change: 1 addition & 0 deletions contrib/terraform/aws/.gitignore
@@ -1,2 +1,3 @@
*.tfstate*
.terraform.lock.hcl
.terraform
30 changes: 21 additions & 9 deletions contrib/terraform/aws/create-infrastructure.tf
@@ -20,7 +20,7 @@ module "aws-vpc" {

aws_cluster_name = var.aws_cluster_name
aws_vpc_cidr_block = var.aws_vpc_cidr_block
aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, 2)
aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names))
aws_cidr_subnets_private = var.aws_cidr_subnets_private
aws_cidr_subnets_public = var.aws_cidr_subnets_public
default_tags = var.default_tags
@@ -31,7 +31,7 @@ module "aws-elb" {

aws_cluster_name = var.aws_cluster_name
aws_vpc_id = module.aws-vpc.aws_vpc_id
aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, 2)
aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names))
aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public
aws_elb_api_port = var.aws_elb_api_port
k8s_secure_api_port = var.k8s_secure_api_port
@@ -52,9 +52,9 @@ module "aws-iam" {
resource "aws_instance" "bastion-server" {
ami = data.aws_ami.distro.id
instance_type = var.aws_bastion_size
count = length(var.aws_cidr_subnets_public)
count = var.aws_bastion_num
associate_public_ip_address = true
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_public, count.index)

vpc_security_group_ids = module.aws-vpc.aws_security_group
@@ -79,11 +79,15 @@ resource "aws_instance" "k8s-master" {

count = var.aws_kube_master_num

availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)

vpc_security_group_ids = module.aws-vpc.aws_security_group

root_block_device {
volume_size = var.aws_kube_master_disk_size
}

iam_instance_profile = module.aws-iam.kube_control_plane-profile
key_name = var.AWS_SSH_KEY_NAME

@@ -106,11 +110,15 @@ resource "aws_instance" "k8s-etcd" {

count = var.aws_etcd_num

availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)

vpc_security_group_ids = module.aws-vpc.aws_security_group

root_block_device {
volume_size = var.aws_etcd_disk_size
}

key_name = var.AWS_SSH_KEY_NAME

tags = merge(var.default_tags, tomap({
@@ -126,11 +134,15 @@ resource "aws_instance" "k8s-worker" {

count = var.aws_kube_worker_num

availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)

vpc_security_group_ids = module.aws-vpc.aws_security_group

root_block_device {
volume_size = var.aws_kube_worker_disk_size
}

iam_instance_profile = module.aws-iam.kube-worker-profile
key_name = var.AWS_SSH_KEY_NAME

@@ -152,10 +164,10 @@ data "template_file" "inventory" {
public_ip_address_bastion = join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))
connection_strings_master = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip))
connection_strings_node = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip))
connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))
list_master = join("\n", aws_instance.k8s-master.*.private_dns)
list_node = join("\n", aws_instance.k8s-worker.*.private_dns)
list_etcd = join("\n", aws_instance.k8s-etcd.*.private_dns)
connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_dns) : (aws_instance.k8s-master.*.private_dns)), ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_ip) : (aws_instance.k8s-master.*.private_ip))))
list_etcd = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_dns) : (aws_instance.k8s-master.*.private_dns)))
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
}
}
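Note: the availability-zone expression above, repeated for the aws-vpc and aws-elb modules and for each instance resource, simply caps the number of zones at the number of public subnets. A minimal sketch of an equivalent formulation using Terraform's built-in min() function follows; the local names are hypothetical and not part of this commit.

# Sketch only: equivalent of the repeated slice(...) ternary, with invented local names.
locals {
  aws_avail_zone_count = min(
    length(var.aws_cidr_subnets_public),
    length(data.aws_availability_zones.available.names)
  )
  aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, local.aws_avail_zone_count)
}

Each module and resource could then pass local.aws_avail_zones (or element(local.aws_avail_zones, count.index)) instead of repeating the full conditional.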
2 changes: 1 addition & 1 deletion contrib/terraform/aws/output.tf
@@ -11,7 +11,7 @@ output "workers" {
}

output "etcd" {
value = join("\n", aws_instance.k8s-etcd.*.private_ip)
value = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_ip) : (aws_instance.k8s-master.*.private_ip)))
}

output "aws_elb_api_fqdn" {
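Note: with aws_etcd_num set to 0 (as in the updated terraform.tfvars), both this output and the inventory template fall back to the control-plane instances, i.e. etcd runs stacked on the masters. A minimal sketch of that fallback expressed once as a local follows; the names are invented here, not part of this change.

# Sketch only: the aws_etcd_num fallback captured in one place; local names are hypothetical.
locals {
  etcd_private_ips = var.aws_etcd_num > 0 ? aws_instance.k8s-etcd.*.private_ip : aws_instance.k8s-master.*.private_ip
  etcd_private_dns = var.aws_etcd_num > 0 ? aws_instance.k8s-etcd.*.private_dns : aws_instance.k8s-master.*.private_dns
}

output "etcd" {
  value = join("\n", local.etcd_private_ips)
}

The same locals could feed list_etcd and connection_strings_etcd in the inventory template data source.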
10 changes: 8 additions & 2 deletions contrib/terraform/aws/sample-inventory/cluster.tfvars
@@ -9,6 +9,8 @@ aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]

#Bastion Host
aws_bastion_num = 1

aws_bastion_size = "t2.medium"

#Kubernetes Cluster
@@ -17,22 +19,26 @@ aws_kube_master_num = 3

aws_kube_master_size = "t2.medium"

aws_kube_master_disk_size = 50

aws_etcd_num = 3

aws_etcd_size = "t2.medium"

aws_etcd_disk_size = 50

aws_kube_worker_num = 4

aws_kube_worker_size = "t2.medium"

aws_kube_worker_disk_size = 50

#Settings AWS ELB

aws_elb_api_port = 6443

k8s_secure_api_port = 6443

kube_insecure_apiserver_address = "0.0.0.0"

default_tags = {
# Env = "devtest" # Product = "kubernetes"
}
5 changes: 2 additions & 3 deletions contrib/terraform/aws/templates/inventory.tpl
@@ -10,19 +10,18 @@ ${public_ip_address_bastion}
[kube_control_plane]
${list_master}


[kube_node]
${list_node}


[etcd]
${list_etcd}

[calico_rr]

[k8s_cluster:children]
kube_node
kube_control_plane

calico_rr

[k8s_cluster:vars]
${elb_api_fqdn}
34 changes: 21 additions & 13 deletions contrib/terraform/aws/terraform.tfvars
@@ -6,26 +6,34 @@ aws_vpc_cidr_block = "10.250.192.0/18"
aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]

#Bastion Host
aws_bastion_size = "t2.medium"
# single AZ deployment
#aws_cidr_subnets_private = ["10.250.192.0/20"]
#aws_cidr_subnets_public = ["10.250.224.0/20"]

# 3+ AZ deployment
#aws_cidr_subnets_private = ["10.250.192.0/24","10.250.193.0/24","10.250.194.0/24","10.250.195.0/24"]
#aws_cidr_subnets_public = ["10.250.224.0/24","10.250.225.0/24","10.250.226.0/24","10.250.227.0/24"]

#Kubernetes Cluster
#Bastion Host
aws_bastion_num = 1
aws_bastion_size = "t3.small"

aws_kube_master_num = 3
aws_kube_master_size = "t2.medium"
#Kubernetes Cluster
aws_kube_master_num = 3
aws_kube_master_size = "t3.medium"
aws_kube_master_disk_size = 50

aws_etcd_num = 3
aws_etcd_size = "t2.medium"
aws_etcd_num = 0
aws_etcd_size = "t3.medium"
aws_etcd_disk_size = 50

aws_kube_worker_num = 4
aws_kube_worker_size = "t2.medium"
aws_kube_worker_num = 4
aws_kube_worker_size = "t3.medium"
aws_kube_worker_disk_size = 50

#Settings AWS ELB

aws_elb_api_port = 6443
k8s_secure_api_port = 6443
kube_insecure_apiserver_address = "0.0.0.0"
aws_elb_api_port = 6443
k8s_secure_api_port = 6443

default_tags = {
# Env = "devtest"