diff --git a/aws/test/k8s/backend/pong.yaml b/aws/test/k8s/backend/pong.yaml
index 94d5e43..c1da658 100644
--- a/aws/test/k8s/backend/pong.yaml
+++ b/aws/test/k8s/backend/pong.yaml
@@ -42,7 +42,7 @@ metadata:
   labels:
     app: pong
 spec:
-  replicas: 1
+  replicas: 2
   revisionHistoryLimit: 1
   selector:
     matchLabels:
@@ -52,6 +52,7 @@ spec:
       labels:
         app: pong
     spec:
+      terminationGracePeriodSeconds: 10
       nodeSelector:
         duty: webserver
       containers:
@@ -80,3 +81,18 @@ spec:
         - name: http
           containerPort: 8000
           protocol: TCP
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: app
+                operator: In
+                values: ["pong"]
+            topologyKey: kubernetes.io/hostname
+          - labelSelector:
+              matchExpressions:
+              - key: app
+                operator: In
+                values: ["pong"]
+            topologyKey: failure-domain.beta.kubernetes.io/zone
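Reviewer note (not part of the patch): both anti-affinity terms above are hard requirements. With requiredDuringSchedulingIgnoredDuringExecution on the zone topology key, the second pong replica will sit Pending whenever only one zone has schedulable capacity. If the per-host spread is the real invariant and the zone spread is merely desirable, the zone term could be softened to a preferred rule. A minimal sketch of that variant, assuming the same app=pong labels:

      affinity:
        podAntiAffinity:
          # Hard rule: never schedule two pong pods on the same node
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values: ["pong"]
            topologyKey: kubernetes.io/hostname
          # Soft rule: spread across zones when capacity allows
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values: ["pong"]
              topologyKey: failure-domain.beta.kubernetes.io/zone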
diff --git a/aws/test/k8s/ingress/nginx-ingress-controller.yaml b/aws/test/k8s/ingress/nginx-ingress-controller.yaml
index a363afb..c66ce1b 100644
--- a/aws/test/k8s/ingress/nginx-ingress-controller.yaml
+++ b/aws/test/k8s/ingress/nginx-ingress-controller.yaml
@@ -190,6 +190,30 @@ spec:
   maxReplicas: 10
   targetCPUUtilizationPercentage: 70
 ---
+kind: Service
+apiVersion: v1
+metadata:
+  name: ingress-nginx
+  namespace: ingress
+  labels:
+    k8s-app: nginx-ingress-controller
+  annotations:
+    # Enable PROXY protocol
+    service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: '*'
+    # Increase the ELB idle timeout to avoid issues with WebSockets or Server-Sent Events.
+    service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
+spec:
+  type: LoadBalancer
+  selector:
+    k8s-app: nginx-ingress-controller
+  ports:
+  - name: http
+    port: 80
+    targetPort: http
+  - name: https
+    port: 443
+    targetPort: https
+---
 # Ingress controller deployment
 kind: Deployment
 apiVersion: extensions/v1beta1
@@ -229,7 +253,7 @@ spec:
             topologyKey: kubernetes.io/hostname
       terminationGracePeriodSeconds: 60
       serviceAccountName: nginx-ingress-controller
-      hostNetwork: true
+      # hostNetwork: true
       initContainers:
       - image: busybox:latest
         imagePullPolicy: Always
@@ -275,17 +299,16 @@ spec:
       - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15
         name: nginx-ingress-controller
         imagePullPolicy: Always
-        # Doesn't work with Calico before kops 1.8.0... we have to use host
-        # networking instead
-        # ports:
-        # - name: http
-        #   containerPort: 80
-        #   hostPort: 80
-        #   protocol: TCP
-        # - name: https
-        #   containerPort: 443
-        #   hostPort: 443
-        #   protocol: TCP
+        ports:
+        - name: http
+          containerPort: 80
+          protocol: TCP
+        - name: https
+          containerPort: 443
+          protocol: TCP
+        - name: health
+          containerPort: 10254
+          protocol: TCP
         readinessProbe:
           httpGet:
             path: /healthz
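Reviewer note (not part of the patch): the aws-load-balancer-proxy-protocol annotation on the new Service makes the ELB wrap every connection in PROXY protocol framing, so the controller itself must be told to expect it, or every request will fail to parse. For this controller image that is the use-proxy-protocol key in the ConfigMap passed via the controller's --configmap flag. A minimal sketch, assuming a ConfigMap named nginx-configuration in the ingress namespace (both names are assumptions; match whatever --configmap points at):

kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-configuration   # assumed name, must match the --configmap flag
  namespace: ingress
data:
  # Parse the PROXY protocol header the ELB now prepends to each connection
  use-proxy-protocol: "true"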
module "ingress-ig" { source = "../../ig" @@ -80,9 +21,3 @@ module "ingress-ig" { max-size = "${var.ingress-max-nodes}" node-labels = "${map("duty", "intake")}" } - -# Let's attach our instance group to the ingress ELB once it is created -resource "aws_autoscaling_attachment" "ingress_lb_attachment" { - autoscaling_group_name = "${module.ingress-ig.asg-name}" - elb = "${aws_elb.ingress.id}" -} diff --git a/aws/test/test-cluster/kops-cluster.tf b/aws/test/test-cluster/kops-cluster.tf index 0123c41..ee37de6 100644 --- a/aws/test/test-cluster/kops-cluster.tf +++ b/aws/test/test-cluster/kops-cluster.tf @@ -21,7 +21,7 @@ module "kops-cluster" { # Kops & Kuberntetes kops-state-bucket = "${var.kops-state-bucket}" - disable-sg-ingress = "true" + disable-sg-ingress = "false" channel = "${var.kops-channel}" kubernetes-version = "${var.kubernetes-version}" cloud-labels = "${var.cloud-labels}"