diff --git a/examples/gke-private-tiller/README.md b/examples/gke-private-tiller/README.md index d9b2fcd..58f2bc5 100644 --- a/examples/gke-private-tiller/README.md +++ b/examples/gke-private-tiller/README.md @@ -1,12 +1,10 @@ # GKE Private Helm Example -This example shows how to use Terraform to launch a GKE private cluster with Helm configured and installed. We achieve -this by utilizing the [k8s-tiller module in the terraform-kubernetes-helm -repository](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/modules/k8s-tiller). Note that we +The root folder of this repo shows an example of Terraform code that launches a GKE private cluster with Helm configured +and Tiller installed. We achieve this by utilizing the [k8s-tiller module in the terraform-kubernetes-helm repository](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/modules/k8s-tiller). Note that we utilize our `kubergrunt` utility to securely manage TLS certificate key pairs used by Tiller - the server component of Helm. - ## Background We strongly recommend reading [our guide on Helm](https://github.com/gruntwork-io/kubergrunt/blob/master/HELM_GUIDE.md) @@ -50,8 +48,7 @@ Now that all the prerequisite tools are installed, we are ready to deploy the GK 1. If you haven't already, clone this repo: - `git clone https://github.com/gruntwork-io/terraform-google-gke.git` -1. Make sure you are in the `gke-basic-tiller` example folder: - - `cd examples/gke-basic-tiller` +1. Make sure you are in the root folder of the repo 1. Initialize terraform: - `terraform init` 1. Check the terraform plan: diff --git a/examples/gke-private-tiller/example-app/nginx.yml b/examples/gke-private-tiller/example-app/nginx.yml deleted file mode 100644 index e4b2476..0000000 --- a/examples/gke-private-tiller/example-app/nginx.yml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment - -metadata: - name: nginx - labels: - app: nginx - tier: backend -spec: - replicas: 2 - selector: - matchLabels: - app: nginx - template: - metadata: - labels: - app: nginx - tier: backend - spec: - containers: - - name: nginx - image: nginx - ports: - - containerPort: 80 diff --git a/examples/gke-private-tiller/main.tf b/examples/gke-private-tiller/main.tf deleted file mode 100644 index 6de37b2..0000000 --- a/examples/gke-private-tiller/main.tf +++ /dev/null @@ -1,399 +0,0 @@ -# --------------------------------------------------------------------------------------------------------------------- -# DEPLOY A GKE PRIVATE CLUSTER W/ TILLER IN GOOGLE CLOUD PLATFORM -# This is an example of how to use the gke-cluster module to deploy a private Kubernetes cluster in GCP -# --------------------------------------------------------------------------------------------------------------------- - -# Use Terraform 0.10.x so that we can take advantage of Terraform GCP functionality as a separate provider via -# https://github.com/terraform-providers/terraform-provider-google -terraform { - required_version = ">= 0.10.3" -} - -# --------------------------------------------------------------------------------------------------------------------- -# PREPARE PROVIDERS -# --------------------------------------------------------------------------------------------------------------------- - -provider "google" { - version = "~> 2.3.0" - project = "${var.project}" - region = "${var.region}" - - scopes = [ - # Default scopes - "https://www.googleapis.com/auth/compute", - - 
"https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite", - "https://www.googleapis.com/auth/devstorage.full_control", - - # Required for google_client_openid_userinfo - "https://www.googleapis.com/auth/userinfo.email", - ] -} - -provider "google-beta" { - version = "~> 2.3.0" - project = "${var.project}" - region = "${var.region}" - - scopes = [ - # Default scopes - "https://www.googleapis.com/auth/compute", - - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite", - "https://www.googleapis.com/auth/devstorage.full_control", - - # Required for google_client_openid_userinfo - "https://www.googleapis.com/auth/userinfo.email", - ] -} - -# We use this data provider to expose an access token for communicating with the GKE cluster. -data "google_client_config" "client" {} - -# Use this datasource to access the Terraform account's email for Kubernetes permissions. -data "google_client_openid_userinfo" "terraform_user" {} - -provider "kubernetes" { - load_config_file = false - - host = "${data.template_file.gke_host_endpoint.rendered}" - token = "${data.template_file.access_token.rendered}" - cluster_ca_certificate = "${data.template_file.cluster_ca_certificate.rendered}" -} - -provider "helm" { - # We don't install Tiller automatically, but instead use Kubergrunt as it sets up the TLS certificates much easier. - install_tiller = false - - # Enable TLS so Helm can communicate with Tiller securely. - enable_tls = true - - kubernetes { - host = "${data.template_file.gke_host_endpoint.rendered}" - token = "${data.template_file.access_token.rendered}" - cluster_ca_certificate = "${data.template_file.cluster_ca_certificate.rendered}" - } -} - -# --------------------------------------------------------------------------------------------------------------------- -# DEPLOY A PRIVATE CLUSTER IN GOOGLE CLOUD PLATFORM -# --------------------------------------------------------------------------------------------------------------------- - -module "gke_cluster" { - # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you - # to a specific version of the modules, such as the following example: - # source = "git::git@github.com:gruntwork-io/terraform-google-gke.git//modules/gke-cluster?ref=v0.0.5" - source = "../../modules/gke-cluster" - - name = "${var.cluster_name}" - - project = "${var.project}" - location = "${var.location}" - network = "${module.vpc_network.network}" - - # We're deploying the cluster in the 'public' subnetwork to allow outbound internet access - # See the network access tier table for full details: - # https://github.com/gruntwork-io/terraform-google-network/tree/master/modules/vpc-network#access-tier - subnetwork = "${module.vpc_network.public_subnetwork}" - - # When creating a private cluster, the 'master_ipv4_cidr_block' has to be defined and the size must be /28 - master_ipv4_cidr_block = "${var.master_ipv4_cidr_block}" - - # This setting will make the cluster private - enable_private_nodes = "true" - - # To make testing easier, we keep the public endpoint available. In production, we highly recommend restricting access to only within the network boundary, requiring your users to use a bastion host or VPN. 
- disable_public_endpoint = "false" - - # With a private cluster, it is highly recommended to restrict access to the cluster master - # However, for testing purposes we will allow all inbound traffic. - master_authorized_networks_config = [{ - cidr_blocks = [{ - cidr_block = "0.0.0.0/0" - display_name = "all-for-testing" - }] - }] - - cluster_secondary_range_name = "${module.vpc_network.public_subnetwork_secondary_range_name}" -} - -# --------------------------------------------------------------------------------------------------------------------- -# CREATE A NODE POOL -# --------------------------------------------------------------------------------------------------------------------- - -resource "google_container_node_pool" "node_pool" { - provider = "google-beta" - - name = "private-pool" - project = "${var.project}" - location = "${var.location}" - cluster = "${module.gke_cluster.name}" - - initial_node_count = "1" - - autoscaling { - min_node_count = "1" - max_node_count = "5" - } - - management { - auto_repair = "true" - auto_upgrade = "true" - } - - node_config { - image_type = "COS" - machine_type = "n1-standard-1" - - labels = { - private-pools-example = "true" - } - - # Add a private tag to the instances. See the network access tier table for full details: - # https://github.com/gruntwork-io/terraform-google-network/tree/master/modules/vpc-network#access-tier - tags = [ - "${module.vpc_network.private}", - "private-pool-example", - ] - - disk_size_gb = "30" - disk_type = "pd-standard" - preemptible = false - - service_account = "${module.gke_service_account.email}" - - oauth_scopes = [ - "https://www.googleapis.com/auth/cloud-platform", - ] - } - - lifecycle { - ignore_changes = ["initial_node_count"] - } - - timeouts { - create = "30m" - update = "30m" - delete = "30m" - } -} - -# --------------------------------------------------------------------------------------------------------------------- -# CREATE A CUSTOM SERVICE ACCOUNT TO USE WITH THE GKE CLUSTER -# --------------------------------------------------------------------------------------------------------------------- - -module "gke_service_account" { - # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you - # to a specific version of the modules, such as the following example: - # source = "git::git@github.com:gruntwork-io/terraform-google-gke.git//modules/gke-service-account?ref=v0.0.5" - source = "../../modules/gke-service-account" - - name = "${var.cluster_service_account_name}" - project = "${var.project}" - description = "${var.cluster_service_account_description}" -} - -# --------------------------------------------------------------------------------------------------------------------- -# CREATE A NETWORK TO DEPLOY THE CLUSTER TO -# --------------------------------------------------------------------------------------------------------------------- - -resource "random_string" "suffix" { - length = 4 - special = false - upper = false -} - -module "vpc_network" { - source = "git::git@github.com:gruntwork-io/terraform-google-network.git//modules/vpc-network?ref=v0.0.3" - - name_prefix = "${var.cluster_name}-network-${random_string.suffix.result}" - project = "${var.project}" - region = "${var.region}" - - cidr_block = "${var.vpc_cidr_block}" - secondary_cidr_block = "${var.vpc_secondary_cidr_block}" -} - -# --------------------------------------------------------------------------------------------------------------------- 
-# CONFIGURE KUBECTL AND RBAC ROLE PERMISSIONS -# --------------------------------------------------------------------------------------------------------------------- - -# configure kubectl with the credentials of the GKE cluster -resource "null_resource" "configure_kubectl" { - provisioner "local-exec" { - command = "gcloud beta container clusters get-credentials ${module.gke_cluster.name} --region ${var.region} --project ${var.project}" - } - - depends_on = ["google_container_node_pool.node_pool"] -} - -# Create a ServiceAccount for Tiller -resource "kubernetes_service_account" "tiller" { - metadata { - name = "tiller" - namespace = "${local.tiller_namespace}" - } -} - -resource "kubernetes_cluster_role_binding" "user" { - metadata { - name = "admin-user" - } - - role_ref { - kind = "ClusterRole" - name = "cluster-admin" - api_group = "rbac.authorization.k8s.io" - } - - subject { - kind = "User" - name = "${data.google_client_openid_userinfo.terraform_user.email}" - api_group = "rbac.authorization.k8s.io" - } - - # We give the Tiller ServiceAccount cluster admin status so that we can deploy anything in any namespace using this - # Tiller instance for testing purposes. In production, you might want to use a more restricted role. - subject { - # this is a workaround for https://github.com/terraform-providers/terraform-provider-kubernetes/issues/204. - # we have to set an empty api_group or the k8s call will fail. It will be fixed in v1.5.2 of the k8s provider. - api_group = "" - - kind = "ServiceAccount" - name = "${kubernetes_service_account.tiller.metadata.0.name}" - namespace = "${local.tiller_namespace}" - } - - subject { - kind = "Group" - name = "system:masters" - api_group = "rbac.authorization.k8s.io" - } -} - -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# GENERATE TLS CERTIFICATES FOR USE WITH TILLER -# This will use kubergrunt to generate TLS certificates, and upload them as Kubernetes Secrets that can then be used by -# Tiller. 
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -resource "null_resource" "tiller_tls_certs" { - provisioner "local-exec" { - command = <<-EOF - # Generate CA TLS certs - kubergrunt tls gen --ca --namespace kube-system --secret-name ${local.tls_ca_secret_name} --secret-label gruntwork.io/tiller-namespace=${local.tiller_namespace} --secret-label gruntwork.io/tiller-credentials=true --secret-label gruntwork.io/tiller-credentials-type=ca --tls-subject-json '${jsonencode(var.tls_subject)}' ${local.tls_algorithm_config} ${local.kubectl_auth_config} - - # Then use that CA to generate server TLS certs - kubergrunt tls gen --namespace ${local.tiller_namespace} --ca-secret-name ${local.tls_ca_secret_name} --ca-namespace kube-system --secret-name ${local.tls_secret_name} --secret-label gruntwork.io/tiller-namespace=${local.tiller_namespace} --secret-label gruntwork.io/tiller-credentials=true --secret-label gruntwork.io/tiller-credentials-type=server --tls-subject-json '${jsonencode(var.tls_subject)}' ${local.tls_algorithm_config} ${local.kubectl_auth_config} - EOF - - # Use environment variables for Kubernetes credentials to avoid leaking into the logs - environment = { - KUBECTL_SERVER_ENDPOINT = "${data.template_file.gke_host_endpoint.rendered}" - KUBECTL_CA_DATA = "${base64encode(data.template_file.cluster_ca_certificate.rendered)}" - KUBECTL_TOKEN = "${data.template_file.access_token.rendered}" - } - } -} - -# --------------------------------------------------------------------------------------------------------------------- -# DEPLOY TILLER TO THE GKE CLUSTER -# --------------------------------------------------------------------------------------------------------------------- - -module "tiller" { - source = "git::git@github.com:gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-tiller?ref=v0.3.0" - - tiller_service_account_name = "${kubernetes_service_account.tiller.metadata.0.name}" - tiller_service_account_token_secret_name = "${kubernetes_service_account.tiller.default_secret_name}" - tiller_tls_secret_name = "${local.tls_secret_name}" - namespace = "${local.tiller_namespace}" - tiller_image_version = "${local.tiller_version}" - - # Kubergrunt will store the private key under the key "tls.pem" in the corresponding Secret resource, which will be - # accessed as a file when mounted into the container. - tiller_tls_key_file_name = "tls.pem" - - dependencies = ["${null_resource.tiller_tls_certs.id}", "${kubernetes_cluster_role_binding.user.id}"] -} - -# The Deployment resources created in the module call to `k8s-tiller` will be complete creation before the rollout is -# complete. We use kubergrunt here to wait for the deployment to complete, so that when this resource is done creating, -# any resources that depend on this can assume Tiller is successfully deployed and up at that point. 
-resource "null_resource" "wait_for_tiller" { - provisioner "local-exec" { - command = "kubergrunt helm wait-for-tiller --tiller-namespace ${local.tiller_namespace} --tiller-deployment-name ${module.tiller.deployment_name} --expected-tiller-version ${local.tiller_version} ${local.kubectl_auth_config}" - - # Use environment variables for Kubernetes credentials to avoid leaking into the logs - environment = { - KUBECTL_SERVER_ENDPOINT = "${data.template_file.gke_host_endpoint.rendered}" - KUBECTL_CA_DATA = "${base64encode(data.template_file.cluster_ca_certificate.rendered)}" - KUBECTL_TOKEN = "${data.template_file.access_token.rendered}" - } - } -} - -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# CONFIGURE OPERATOR HELM CLIENT -# To allow usage of the helm client immediately, we grant access to the admin RBAC user and configure the local helm -# client. -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -resource "null_resource" "grant_and_configure_helm" { - provisioner "local-exec" { - command = <<-EOF - kubergrunt helm grant --tiller-namespace ${local.tiller_namespace} --tls-subject-json '${jsonencode(var.client_tls_subject)}' --rbac-user ${data.google_client_openid_userinfo.terraform_user.email} ${local.kubectl_auth_config} - - kubergrunt helm configure --helm-home ${pathexpand("~/.helm")} --tiller-namespace ${local.tiller_namespace} --resource-namespace ${local.resource_namespace} --rbac-user ${data.google_client_openid_userinfo.terraform_user.email} ${local.kubectl_auth_config} - EOF - } - - depends_on = ["null_resource.wait_for_tiller"] -} - -# --------------------------------------------------------------------------------------------------------------------- -# COMPUTATIONS -# These locals set constants and compute various useful information used throughout this Terraform module. -# --------------------------------------------------------------------------------------------------------------------- - -locals { - # For this example, we hardcode our tiller namespace to kube-system. In production, you might want to consider using a - # different Namespace. - tiller_namespace = "kube-system" - - # For this example, we setup Tiller to manage the default Namespace. - resource_namespace = "default" - - # We install an older version of Tiller to match the Helm library version used in the Terraform helm provider. - tiller_version = "v2.11.0" - - # We store the CA Secret in the kube-system Namespace, given that only cluster admins should access these. - tls_ca_secret_namespace = "kube-system" - - # We name the TLS Secrets to be compatible with the `kubergrunt helm grant` command - tls_ca_secret_name = "${local.tiller_namespace}-namespace-tiller-ca-certs" - tls_secret_name = "tiller-certs" - tls_algorithm_config = "--tls-private-key-algorithm ${var.private_key_algorithm} ${var.private_key_algorithm == "ECDSA" ? 
"--tls-private-key-ecdsa-curve ${var.private_key_ecdsa_curve}" : "--tls-private-key-rsa-bits ${var.private_key_rsa_bits}"}" - - # These will be filled in by the shell environment - kubectl_auth_config = "--kubectl-server-endpoint \"$KUBECTL_SERVER_ENDPOINT\" --kubectl-certificate-authority \"$KUBECTL_CA_DATA\" --kubectl-token \"$KUBECTL_TOKEN\"" -} - -# --------------------------------------------------------------------------------------------------------------------- -# WORKAROUNDS -# --------------------------------------------------------------------------------------------------------------------- - -# This is a workaround for the Kubernetes and Helm providers as Terraform doesn't currently support passing in module -# outputs to providers directly. -data "template_file" "gke_host_endpoint" { - template = "${module.gke_cluster.endpoint}" -} - -data "template_file" "access_token" { - template = "${data.google_client_config.client.access_token}" -} - -data "template_file" "cluster_ca_certificate" { - template = "${module.gke_cluster.cluster_ca_certificate}" -} diff --git a/examples/gke-private-tiller/outputs.tf b/examples/gke-private-tiller/outputs.tf deleted file mode 100644 index 51f473b..0000000 --- a/examples/gke-private-tiller/outputs.tf +++ /dev/null @@ -1,22 +0,0 @@ -output "cluster_endpoint" { - description = "The IP address of the cluster master." - sensitive = true - value = "${module.gke_cluster.endpoint}" -} - -output "client_certificate" { - description = "Public certificate used by clients to authenticate to the cluster endpoint." - value = "${module.gke_cluster.client_certificate}" -} - -output "client_key" { - description = "Private key used by clients to authenticate to the cluster endpoint." - sensitive = true - value = "${module.gke_cluster.client_key}" -} - -output "cluster_ca_certificate" { - description = "The public certificate that is the root of trust for the cluster." - sensitive = true - value = "${module.gke_cluster.cluster_ca_certificate}" -} diff --git a/examples/gke-private-tiller/variables.tf b/examples/gke-private-tiller/variables.tf deleted file mode 100644 index 9ffcb30..0000000 --- a/examples/gke-private-tiller/variables.tf +++ /dev/null @@ -1,122 +0,0 @@ -# --------------------------------------------------------------------------------------------------------------------- -# REQUIRED PARAMETERS -# These variables are expected to be passed in by the operator. -# --------------------------------------------------------------------------------------------------------------------- - -variable "project" { - description = "The project ID where all resources will be launched." -} - -variable "location" { - description = "The location (region or zone) of the GKE cluster." -} - -variable "region" { - description = "The region for the network. If the cluster is regional, this must be the same region. Otherwise, it should be the region of the zone." -} - -# --------------------------------------------------------------------------------------------------------------------- -# OPTIONAL PARAMETERS -# These parameters have reasonable defaults. -# --------------------------------------------------------------------------------------------------------------------- - -variable "cluster_name" { - description = "The name of the Kubernetes cluster." - default = "example-cluster" -} - -variable "cluster_service_account_name" { - description = "The name of the custom service account used for the GKE cluster. This parameter is limited to a maximum of 28 characters." 
- default = "example-cluster-sa" -} - -variable "cluster_service_account_description" { - description = "A description of the custom service account used for the GKE cluster." - default = "Example GKE Cluster Service Account managed by Terraform" -} - -# Tiller TLS settings - -variable "tls_subject" { - description = "The issuer information that contains the identifying information for the Tiller server. Used to generate the TLS certificate keypairs." - type = "map" - - default = { - common_name = "tiller" - org = "Gruntwork" - } - - # Expects the following keys - # - common_name (required) - # - org (required) - # - org_unit - # - city - # - state - # - country -} - -variable "client_tls_subject" { - description = "The issuer information that contains the identifying information for the helm client of the operator. Used to generate the TLS certificate keypairs." - type = "map" - - default = { - common_name = "admin" - org = "Gruntwork" - } - - # Expects the following keys - # - common_name (required) - # - org (required) - # - org_unit - # - city - # - state - # - country -} - -# TLS algorithm configuration - -variable "private_key_algorithm" { - description = "The name of the algorithm to use for private keys. Must be one of: RSA or ECDSA." - default = "ECDSA" -} - -variable "private_key_ecdsa_curve" { - description = "The name of the elliptic curve to use. Should only be used if var.private_key_algorithm is ECDSA. Must be one of P224, P256, P384 or P521." - default = "P256" -} - -variable "private_key_rsa_bits" { - description = "The size of the generated RSA key in bits. Should only be used if var.private_key_algorithm is RSA." - default = "2048" -} - -# Tiller undeploy options - -variable "force_undeploy" { - description = "If true, will remove the Tiller server resources even if there are releases deployed." - default = false -} - -variable "undeploy_releases" { - description = "If true, will delete deployed releases from the Tiller instance before undeploying Tiller." - default = false -} - -variable "master_ipv4_cidr_block" { - description = "The IP range in CIDR notation (size must be /28) to use for the hosted master network. This range will be used for assigning internal IP addresses to the master or set of masters, as well as the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network." - default = "10.5.0.0/28" -} - -# For the example, we recommend a /16 network for the VPC. Note that when changing the size of the network, -# you will have to adjust the 'cidr_subnetwork_width_delta' in the 'vpc_network' -module accordingly. -variable "vpc_cidr_block" { - description = "The IP address range of the VPC in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27." - default = "10.3.0.0/16" -} - -# For the example, we recommend a /16 network for the secondary range. Note that when changing the size of the network, -# you will have to adjust the 'cidr_subnetwork_width_delta' in the 'vpc_network' -module accordingly. -variable "vpc_secondary_cidr_block" { - description = "The IP address range of the VPC's secondary address range in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27." 
- default = "10.4.0.0/16" -} diff --git a/main.tf b/main.tf index 8b13789..73093b3 100644 --- a/main.tf +++ b/main.tf @@ -1 +1,399 @@ +# ----------------------------------------------------------------------------------------------------------------------- +# DEPLOY A GKE PRIVATE CLUSTER W/ TILLER IN GOOGLE CLOUD PLATFORM +# This is an example of how to use the gke-cluster module to deploy a private Kubernetes cluster in GCP +# ----------------------------------------------------------------------------------------------------------------------- +# Use Terraform 0.10.x so that we can take advantage of Terraform GCP functionality as a separate provider via +# https://github.com/terraform-providers/terraform-provider-google +terraform { + required_version = ">= 0.10.3" +} + +# ----------------------------------------------------------------------------------------------------------------------- +# PREPARE PROVIDERS +# ----------------------------------------------------------------------------------------------------------------------- + +provider "google" { + version = "~> 2.3.0" + project = "${var.project}" + region = "${var.region}" + + scopes = [ + # Default scopes + "https://www.googleapis.com/auth/compute", + + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite", + "https://www.googleapis.com/auth/devstorage.full_control", + + # Required for google_client_openid_userinfo + "https://www.googleapis.com/auth/userinfo.email", + ] +} + +provider "google-beta" { + version = "~> 2.3.0" + project = "${var.project}" + region = "${var.region}" + + scopes = [ + # Default scopes + "https://www.googleapis.com/auth/compute", + + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite", + "https://www.googleapis.com/auth/devstorage.full_control", + + # Required for google_client_openid_userinfo + "https://www.googleapis.com/auth/userinfo.email", + ] +} + +# We use this data provider to expose an access token for communicating with the GKE cluster. +data "google_client_config" "client" {} + +# Use this datasource to access the Terraform account's email for Kubernetes permissions. +data "google_client_openid_userinfo" "terraform_user" {} + +provider "kubernetes" { + load_config_file = false + + host = "${data.template_file.gke_host_endpoint.rendered}" + token = "${data.template_file.access_token.rendered}" + cluster_ca_certificate = "${data.template_file.cluster_ca_certificate.rendered}" +} + +provider "helm" { + # We don't install Tiller automatically, but instead use Kubergrunt, as it makes setting up the TLS certificates much easier. + install_tiller = false + + # Enable TLS so Helm can communicate with Tiller securely.
+ enable_tls = true + + kubernetes { + host = "${data.template_file.gke_host_endpoint.rendered}" + token = "${data.template_file.access_token.rendered}" + cluster_ca_certificate = "${data.template_file.cluster_ca_certificate.rendered}" + } +} + +# --------------------------------------------------------------------------------------------------------------------- +# DEPLOY A PRIVATE CLUSTER IN GOOGLE CLOUD PLATFORM +# --------------------------------------------------------------------------------------------------------------------- + +module "gke_cluster" { + # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you + # to a specific version of the modules, such as the following example: + # source = "git::git@github.com:gruntwork-io/terraform-google-gke.git//modules/gke-cluster?ref=v0.0.5" + source = "./modules/gke-cluster" + + name = "${var.cluster_name}" + + project = "${var.project}" + location = "${var.location}" + network = "${module.vpc_network.network}" + + # We're deploying the cluster in the 'public' subnetwork to allow outbound internet access + # See the network access tier table for full details: + # https://github.com/gruntwork-io/terraform-google-network/tree/master/modules/vpc-network#access-tier + subnetwork = "${module.vpc_network.public_subnetwork}" + + # When creating a private cluster, the 'master_ipv4_cidr_block' has to be defined and the size must be /28 + master_ipv4_cidr_block = "${var.master_ipv4_cidr_block}" + + # This setting will make the cluster private + enable_private_nodes = "true" + + # To make testing easier, we keep the public endpoint available. In production, we highly recommend restricting access to only within the network boundary, requiring your users to use a bastion host or VPN. + disable_public_endpoint = "false" + + # With a private cluster, it is highly recommended to restrict access to the cluster master + # However, for testing purposes we will allow all inbound traffic. + master_authorized_networks_config = [{ + cidr_blocks = [{ + cidr_block = "0.0.0.0/0" + display_name = "all-for-testing" + }] + }] + + cluster_secondary_range_name = "${module.vpc_network.public_subnetwork_secondary_range_name}" +} + +# --------------------------------------------------------------------------------------------------------------------- +# CREATE A NODE POOL +# --------------------------------------------------------------------------------------------------------------------- + +resource "google_container_node_pool" "node_pool" { + provider = "google-beta" + + name = "private-pool" + project = "${var.project}" + location = "${var.location}" + cluster = "${module.gke_cluster.name}" + + initial_node_count = "1" + + autoscaling { + min_node_count = "1" + max_node_count = "5" + } + + management { + auto_repair = "true" + auto_upgrade = "true" + } + + node_config { + image_type = "COS" + machine_type = "n1-standard-1" + + labels = { + private-pools-example = "true" + } + + # Add a private tag to the instances. 
See the network access tier table for full details: + # https://github.com/gruntwork-io/terraform-google-network/tree/master/modules/vpc-network#access-tier + tags = [ + "${module.vpc_network.private}", + "private-pool-example", + ] + + disk_size_gb = "30" + disk_type = "pd-standard" + preemptible = false + + service_account = "${module.gke_service_account.email}" + + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } + + lifecycle { + ignore_changes = ["initial_node_count"] + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } +} + +# --------------------------------------------------------------------------------------------------------------------- +# CREATE A CUSTOM SERVICE ACCOUNT TO USE WITH THE GKE CLUSTER +# --------------------------------------------------------------------------------------------------------------------- + +module "gke_service_account" { + # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you + # to a specific version of the modules, such as the following example: + # source = "git::git@github.com:gruntwork-io/terraform-google-gke.git//modules/gke-service-account?ref=v0.0.5" + source = "./modules/gke-service-account" + + name = "${var.cluster_service_account_name}" + project = "${var.project}" + description = "${var.cluster_service_account_description}" +} + +# --------------------------------------------------------------------------------------------------------------------- +# CREATE A NETWORK TO DEPLOY THE CLUSTER TO +# --------------------------------------------------------------------------------------------------------------------- + +resource "random_string" "suffix" { + length = 4 + special = false + upper = false +} + +module "vpc_network" { + source = "git::git@github.com:gruntwork-io/terraform-google-network.git//modules/vpc-network?ref=v0.0.3" + + name_prefix = "${var.cluster_name}-network-${random_string.suffix.result}" + project = "${var.project}" + region = "${var.region}" + + cidr_block = "${var.vpc_cidr_block}" + secondary_cidr_block = "${var.vpc_secondary_cidr_block}" +} + +# --------------------------------------------------------------------------------------------------------------------- +# CONFIGURE KUBECTL AND RBAC ROLE PERMISSIONS +# --------------------------------------------------------------------------------------------------------------------- + +# configure kubectl with the credentials of the GKE cluster +resource "null_resource" "configure_kubectl" { + provisioner "local-exec" { + command = "gcloud beta container clusters get-credentials ${module.gke_cluster.name} --region ${var.region} --project ${var.project}" + } + + depends_on = ["google_container_node_pool.node_pool"] +} + +# Create a ServiceAccount for Tiller +resource "kubernetes_service_account" "tiller" { + metadata { + name = "tiller" + namespace = "${local.tiller_namespace}" + } +} + +resource "kubernetes_cluster_role_binding" "user" { + metadata { + name = "admin-user" + } + + role_ref { + kind = "ClusterRole" + name = "cluster-admin" + api_group = "rbac.authorization.k8s.io" + } + + subject { + kind = "User" + name = "${data.google_client_openid_userinfo.terraform_user.email}" + api_group = "rbac.authorization.k8s.io" + } + + # We give the Tiller ServiceAccount cluster admin status so that we can deploy anything in any namespace using this + # Tiller instance for testing purposes. 
In production, you might want to use a more restricted role. + subject { + # this is a workaround for https://github.com/terraform-providers/terraform-provider-kubernetes/issues/204. + # we have to set an empty api_group or the k8s call will fail. It will be fixed in v1.5.2 of the k8s provider. + api_group = "" + + kind = "ServiceAccount" + name = "${kubernetes_service_account.tiller.metadata.0.name}" + namespace = "${local.tiller_namespace}" + } + + subject { + kind = "Group" + name = "system:masters" + api_group = "rbac.authorization.k8s.io" + } +} + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# GENERATE TLS CERTIFICATES FOR USE WITH TILLER +# This will use kubergrunt to generate TLS certificates, and upload them as Kubernetes Secrets that can then be used by +# Tiller. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +resource "null_resource" "tiller_tls_certs" { + provisioner "local-exec" { + command = <<-EOF + # Generate CA TLS certs + kubergrunt tls gen --ca --namespace kube-system --secret-name ${local.tls_ca_secret_name} --secret-label gruntwork.io/tiller-namespace=${local.tiller_namespace} --secret-label gruntwork.io/tiller-credentials=true --secret-label gruntwork.io/tiller-credentials-type=ca --tls-subject-json '${jsonencode(var.tls_subject)}' ${local.tls_algorithm_config} ${local.kubectl_auth_config} + + # Then use that CA to generate server TLS certs + kubergrunt tls gen --namespace ${local.tiller_namespace} --ca-secret-name ${local.tls_ca_secret_name} --ca-namespace kube-system --secret-name ${local.tls_secret_name} --secret-label gruntwork.io/tiller-namespace=${local.tiller_namespace} --secret-label gruntwork.io/tiller-credentials=true --secret-label gruntwork.io/tiller-credentials-type=server --tls-subject-json '${jsonencode(var.tls_subject)}' ${local.tls_algorithm_config} ${local.kubectl_auth_config} + EOF + + # Use environment variables for Kubernetes credentials to avoid leaking into the logs + environment = { + KUBECTL_SERVER_ENDPOINT = "${data.template_file.gke_host_endpoint.rendered}" + KUBECTL_CA_DATA = "${base64encode(data.template_file.cluster_ca_certificate.rendered)}" + KUBECTL_TOKEN = "${data.template_file.access_token.rendered}" + } + } +} + +# --------------------------------------------------------------------------------------------------------------------- +# DEPLOY TILLER TO THE GKE CLUSTER +# --------------------------------------------------------------------------------------------------------------------- + +module "tiller" { + source = "git::git@github.com:gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-tiller?ref=v0.3.0" + + tiller_service_account_name = "${kubernetes_service_account.tiller.metadata.0.name}" + tiller_service_account_token_secret_name = "${kubernetes_service_account.tiller.default_secret_name}" + tiller_tls_secret_name = "${local.tls_secret_name}" + namespace = "${local.tiller_namespace}" + tiller_image_version = "${local.tiller_version}" + + # Kubergrunt will store the private key under the key "tls.pem" in the corresponding Secret resource, which will be + # accessed as a file when mounted into the container. 
+ tiller_tls_key_file_name = "tls.pem" + + dependencies = ["${null_resource.tiller_tls_certs.id}", "${kubernetes_cluster_role_binding.user.id}"] +} + +# The Deployment resources created in the module call to `k8s-tiller` will complete creation before the rollout is +# complete. We use kubergrunt here to wait for the deployment to complete, so that when this resource is done creating, +# any resources that depend on this can assume Tiller is successfully deployed and up at that point. +resource "null_resource" "wait_for_tiller" { + provisioner "local-exec" { + command = "kubergrunt helm wait-for-tiller --tiller-namespace ${local.tiller_namespace} --tiller-deployment-name ${module.tiller.deployment_name} --expected-tiller-version ${local.tiller_version} ${local.kubectl_auth_config}" + + # Use environment variables for Kubernetes credentials to avoid leaking into the logs + environment = { + KUBECTL_SERVER_ENDPOINT = "${data.template_file.gke_host_endpoint.rendered}" + KUBECTL_CA_DATA = "${base64encode(data.template_file.cluster_ca_certificate.rendered)}" + KUBECTL_TOKEN = "${data.template_file.access_token.rendered}" + } + } +} + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# CONFIGURE OPERATOR HELM CLIENT +# To allow usage of the helm client immediately, we grant access to the admin RBAC user and configure the local helm +# client. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +resource "null_resource" "grant_and_configure_helm" { + provisioner "local-exec" { + command = <<-EOF + kubergrunt helm grant --tiller-namespace ${local.tiller_namespace} --tls-subject-json '${jsonencode(var.client_tls_subject)}' --rbac-user ${data.google_client_openid_userinfo.terraform_user.email} ${local.kubectl_auth_config} + + kubergrunt helm configure --helm-home ${pathexpand("~/.helm")} --tiller-namespace ${local.tiller_namespace} --resource-namespace ${local.resource_namespace} --rbac-user ${data.google_client_openid_userinfo.terraform_user.email} ${local.kubectl_auth_config} + EOF + } + + depends_on = ["null_resource.wait_for_tiller"] +} + +# ----------------------------------------------------------------------------------------------------------------------- +# COMPUTATIONS +# These locals set constants and compute various useful information used throughout this Terraform module. +# ----------------------------------------------------------------------------------------------------------------------- + +locals { + # For this example, we hardcode our tiller namespace to kube-system. In production, you might want to consider using a + # different Namespace. + tiller_namespace = "kube-system" + + # For this example, we set up Tiller to manage the default Namespace. + resource_namespace = "default" + + # We install an older version of Tiller to match the Helm library version used in the Terraform helm provider. + tiller_version = "v2.11.0" + + # We store the CA Secret in the kube-system Namespace, given that only cluster admins should access these. + tls_ca_secret_namespace = "kube-system" + + # We name the TLS Secrets to be compatible with the `kubergrunt helm grant` command + tls_ca_secret_name = "${local.tiller_namespace}-namespace-tiller-ca-certs" + tls_secret_name = "tiller-certs" + tls_algorithm_config = "--tls-private-key-algorithm ${var.private_key_algorithm} ${var.private_key_algorithm == "ECDSA" ?
"--tls-private-key-ecdsa-curve ${var.private_key_ecdsa_curve}" : "--tls-private-key-rsa-bits ${var.private_key_rsa_bits}"}" + + # These will be filled in by the shell environment + kubectl_auth_config = "--kubectl-server-endpoint \"$KUBECTL_SERVER_ENDPOINT\" --kubectl-certificate-authority \"$KUBECTL_CA_DATA\" --kubectl-token \"$KUBECTL_TOKEN\"" +} + +# --------------------------------------------------------------------------------------------------------------------- +# WORKAROUNDS +# --------------------------------------------------------------------------------------------------------------------- + +# This is a workaround for the Kubernetes and Helm providers as Terraform doesn't currently support passing in module +# outputs to providers directly. +data "template_file" "gke_host_endpoint" { + template = "${module.gke_cluster.endpoint}" +} + +data "template_file" "access_token" { + template = "${data.google_client_config.client.access_token}" +} + +data "template_file" "cluster_ca_certificate" { + template = "${module.gke_cluster.cluster_ca_certificate}" +} diff --git a/outputs.tf b/outputs.tf index 8b13789..51f473b 100644 --- a/outputs.tf +++ b/outputs.tf @@ -1 +1,22 @@ +output "cluster_endpoint" { + description = "The IP address of the cluster master." + sensitive = true + value = "${module.gke_cluster.endpoint}" +} +output "client_certificate" { + description = "Public certificate used by clients to authenticate to the cluster endpoint." + value = "${module.gke_cluster.client_certificate}" +} + +output "client_key" { + description = "Private key used by clients to authenticate to the cluster endpoint." + sensitive = true + value = "${module.gke_cluster.client_key}" +} + +output "cluster_ca_certificate" { + description = "The public certificate that is the root of trust for the cluster." + sensitive = true + value = "${module.gke_cluster.cluster_ca_certificate}" +} diff --git a/variables.tf b/variables.tf index 8b13789..9ffcb30 100644 --- a/variables.tf +++ b/variables.tf @@ -1 +1,122 @@ +# --------------------------------------------------------------------------------------------------------------------- +# REQUIRED PARAMETERS +# These variables are expected to be passed in by the operator. +# --------------------------------------------------------------------------------------------------------------------- +variable "project" { + description = "The project ID where all resources will be launched." +} + +variable "location" { + description = "The location (region or zone) of the GKE cluster." +} + +variable "region" { + description = "The region for the network. If the cluster is regional, this must be the same region. Otherwise, it should be the region of the zone." +} + +# --------------------------------------------------------------------------------------------------------------------- +# OPTIONAL PARAMETERS +# These parameters have reasonable defaults. +# --------------------------------------------------------------------------------------------------------------------- + +variable "cluster_name" { + description = "The name of the Kubernetes cluster." + default = "example-cluster" +} + +variable "cluster_service_account_name" { + description = "The name of the custom service account used for the GKE cluster. This parameter is limited to a maximum of 28 characters." + default = "example-cluster-sa" +} + +variable "cluster_service_account_description" { + description = "A description of the custom service account used for the GKE cluster." 
+ default = "Example GKE Cluster Service Account managed by Terraform" +} + +# Tiller TLS settings + +variable "tls_subject" { + description = "The issuer information that contains the identifying information for the Tiller server. Used to generate the TLS certificate keypairs." + type = "map" + + default = { + common_name = "tiller" + org = "Gruntwork" + } + + # Expects the following keys + # - common_name (required) + # - org (required) + # - org_unit + # - city + # - state + # - country +} + +variable "client_tls_subject" { + description = "The issuer information that contains the identifying information for the helm client of the operator. Used to generate the TLS certificate keypairs." + type = "map" + + default = { + common_name = "admin" + org = "Gruntwork" + } + + # Expects the following keys + # - common_name (required) + # - org (required) + # - org_unit + # - city + # - state + # - country +} + +# TLS algorithm configuration + +variable "private_key_algorithm" { + description = "The name of the algorithm to use for private keys. Must be one of: RSA or ECDSA." + default = "ECDSA" +} + +variable "private_key_ecdsa_curve" { + description = "The name of the elliptic curve to use. Should only be used if var.private_key_algorithm is ECDSA. Must be one of P224, P256, P384 or P521." + default = "P256" +} + +variable "private_key_rsa_bits" { + description = "The size of the generated RSA key in bits. Should only be used if var.private_key_algorithm is RSA." + default = "2048" +} + +# Tiller undeploy options + +variable "force_undeploy" { + description = "If true, will remove the Tiller server resources even if there are releases deployed." + default = false +} + +variable "undeploy_releases" { + description = "If true, will delete deployed releases from the Tiller instance before undeploying Tiller." + default = false +} + +variable "master_ipv4_cidr_block" { + description = "The IP range in CIDR notation (size must be /28) to use for the hosted master network. This range will be used for assigning internal IP addresses to the master or set of masters, as well as the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network." + default = "10.5.0.0/28" +} + +# For the example, we recommend a /16 network for the VPC. Note that when changing the size of the network, +# you will have to adjust the 'cidr_subnetwork_width_delta' in the 'vpc_network' -module accordingly. +variable "vpc_cidr_block" { + description = "The IP address range of the VPC in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27." + default = "10.3.0.0/16" +} + +# For the example, we recommend a /16 network for the secondary range. Note that when changing the size of the network, +# you will have to adjust the 'cidr_subnetwork_width_delta' in the 'vpc_network' -module accordingly. +variable "vpc_secondary_cidr_block" { + description = "The IP address range of the VPC's secondary address range in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27." + default = "10.4.0.0/16" +}
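The root module introduced by this change declares only three required inputs (`project`, `location`, and `region` in `variables.tf`); every other variable has a default. As a minimal sketch of what an operator might supply before running `terraform init`, `terraform plan`, and `terraform apply` from the repo root — the project ID and region below are hypothetical placeholder values, not part of this change:

```hcl
# terraform.tfvars -- placeholder values for illustration only
project  = "my-gcp-project-id" # GCP project ID to deploy into (hypothetical)
location = "europe-west3"      # region (regional cluster) or zone (zonal cluster) for the GKE cluster
region   = "europe-west3"      # region for the VPC network; the cluster's region, or the region containing its zone
```

With those set, optional inputs such as `cluster_name`, the VPC CIDR blocks, and the Tiller TLS subject maps can be left at the defaults defined in `variables.tf` for a test deployment.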