Skip to content
This repository has been archived by the owner on Mar 29, 2023. It is now read-only.

Private cluster #22

Merged
merged 10 commits into from
Apr 10, 2019
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
149 changes: 149 additions & 0 deletions examples/gke-private-cluster/main.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,149 @@
# ---------------------------------------------------------------------------------------------------------------------
# DEPLOY A GKE PRIVATE CLUSTER IN GOOGLE CLOUD
# This is an example of how to use the gke-cluster module to deploy a private Kubernetes cluster in GCP
# ---------------------------------------------------------------------------------------------------------------------

# Use Terraform 0.10.3 or above so that we can take advantage of Terraform GCP functionality as a separate provider via
# https://github.com/terraform-providers/terraform-provider-google
# Require Terraform 0.10.3 or newer so the separately-released Google provider
# plugin (linked above) can be used.
terraform {
  required_version = ">= 0.10.3"
}

# The main Google Cloud provider, pinned to the 2.3.x series for repeatable
# builds. Project and region are supplied by the operator via input variables.
provider "google" {
  version = "~> 2.3.0"
  project = "${var.project}"
  region  = "${var.region}"
}

# The google-beta provider exposes GCP features that are still in beta; the
# node pool resource below is created through it. Its version is pinned in
# lock-step with the main google provider.
provider "google-beta" {
  version = "~> 2.3.0"
  project = "${var.project}"
  region  = "${var.region}"
}

# ---------------------------------------------------------------------------------------------------------------------
# DEPLOY THE PRIVATE GKE CLUSTER
# ---------------------------------------------------------------------------------------------------------------------

module "gke_cluster" {
  # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
  # to a specific version of the modules, such as the following example:
  # source = "git::[email protected]:gruntwork-io/gke-cluster.git//modules/gke-cluster?ref=v0.0.1"
  source = "../../modules/gke-cluster"

  name = "${var.cluster_name}"

  project    = "${var.project}"
  location   = "${var.location}"
  network    = "${google_compute_network.main.name}"
  subnetwork = "${google_compute_subnetwork.main.self_link}"

  # When creating a private cluster, the 'master_ipv4_cidr_block' has to be defined and the size must be /28
  master_ipv4_cidr_block = "10.5.0.0/28"

  # This setting will make the cluster private
  enable_private_nodes = "true"

  # For simplicity's sake we're not disabling the public endpoint
  enable_private_endpoint = "false"

  # With a private cluster, it is highly recommended to restrict access to the cluster master.
  # However, for testing purposes we will allow all inbound traffic.
  master_authorized_networks_config = [{
    cidr_blocks = [{
      cidr_block   = "0.0.0.0/0"
      display_name = "all-for-testing"
    }]
  }]

  # Hand the subnetwork's secondary range to the cluster for pod IPs (VPC-native cluster)
  cluster_secondary_range_name = "${google_compute_subnetwork.main.secondary_ip_range.0.range_name}"
}

# Node Pool

// Node Pool Resource
# The node pool for the private cluster, attached to the cluster created by the
# gke_cluster module above. Created through the google-beta provider.
resource "google_container_node_pool" "node_pool" {
  provider = "google-beta"

  project  = "${var.project}"
  location = "${var.location}"
  cluster  = "${module.gke_cluster.name}"
  name     = "private-pool"

  # Start with a single node; the autoscaler adjusts the count from there.
  initial_node_count = "1"

  autoscaling {
    min_node_count = "1"
    max_node_count = "5"
  }

  management {
    auto_repair  = "true"
    auto_upgrade = "true"
  }

  node_config {
    machine_type = "n1-standard-1"
    image_type   = "COS"

    disk_type    = "pd-standard"
    disk_size_gb = "30"
    preemptible  = false

    labels = {
      private-pools-example = "true"
    }

    tags = ["private-pool-example"]

    # Run the nodes as the custom service account created by the module below.
    service_account = "${module.gke_service_account.email}"

    oauth_scopes = [
      "https://www.googleapis.com/auth/cloud-platform",
    ]
  }

  # The autoscaler changes the node count after creation, so ignore drift on
  # initial_node_count rather than letting Terraform try to reset it.
  lifecycle {
    ignore_changes = ["initial_node_count"]
  }

  timeouts {
    create = "30m"
    update = "30m"
    delete = "30m"
  }
}

# ---------------------------------------------------------------------------------------------------------------------
# CREATE A CUSTOM SERVICE ACCOUNT TO USE WITH THE GKE CLUSTER
# ---------------------------------------------------------------------------------------------------------------------

# Creates the custom service account that the node pool's nodes run as
# (referenced by node_config.service_account above).
module "gke_service_account" {
  # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
  # to a specific version of the modules, such as the following example:
  # source = "git::[email protected]:gruntwork-io/gke-cluster.git//modules/gke-service-account?ref=v0.0.1"
  source = "../../modules/gke-service-account"

  name        = "${var.cluster_service_account_name}"
  project     = "${var.project}"
  description = "${var.cluster_service_account_description}"
}

# TODO(rileykarson): Add proper VPC network config once we've made a VPC module
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@rileykarson Is this still valid? I thought v0.0.1 has everything necessary, or am I missing something?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@yorinasub17 nope, no longer valid. We should use @rileykarson 's module. Another thing on my pre-launch list.

# Random 4-character lowercase alphanumeric suffix used in the network and
# subnetwork names below — presumably to avoid name collisions between
# repeated deployments of this example.
resource "random_string" "suffix" {
  length  = 4
  special = false
  upper   = false
}

# The VPC network for the example. Subnetworks are defined explicitly below
# (auto-creation disabled) so the CIDR ranges stay under our control.
resource "google_compute_network" "main" {
  name                    = "${var.cluster_name}-network-${random_string.suffix.result}"
  auto_create_subnetworks = "false"
}

# The subnetwork that hosts the cluster nodes. Its secondary range is handed to
# the cluster as the secondary (pod) IP range via cluster_secondary_range_name.
resource "google_compute_subnetwork" "main" {
  name          = "${var.cluster_name}-subnetwork-${random_string.suffix.result}"
  ip_cidr_range = "10.3.0.0/17"
  region        = "${var.region}"
  network       = "${google_compute_network.main.self_link}"

  # Secondary range consumed by the cluster's ip_allocation_policy (VPC-native cluster)
  secondary_ip_range {
    range_name    = "private-cluster-pods"
    ip_cidr_range = "10.4.0.0/18"
  }
}
22 changes: 22 additions & 0 deletions examples/gke-private-cluster/outputs.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
output "cluster_endpoint" {
  description = "The IP address of the cluster master."
  sensitive   = true
  value       = "${module.gke_cluster.endpoint}"
}

output "client_certificate" {
  # Not marked sensitive: per the description this is the public half of the
  # client credential pair; the private half is the client_key output below.
  description = "Public certificate used by clients to authenticate to the cluster endpoint."
  value       = "${module.gke_cluster.client_certificate}"
}

output "client_key" {
  description = "Private key used by clients to authenticate to the cluster endpoint."
  sensitive   = true
  value       = "${module.gke_cluster.client_key}"
}

output "cluster_ca_certificate" {
  description = "The public certificate that is the root of trust for the cluster."
  sensitive   = true
  value       = "${module.gke_cluster.cluster_ca_certificate}"
}
36 changes: 36 additions & 0 deletions examples/gke-private-cluster/variables.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# ---------------------------------------------------------------------------------------------------------------------
# REQUIRED PARAMETERS
# These variables are expected to be passed in by the operator.
# ---------------------------------------------------------------------------------------------------------------------

variable "project" {
  description = "The name of the GCP Project where all resources will be launched."
  type        = "string"
}

variable "location" {
  description = "The location (region or zone) of the GKE cluster."
  type        = "string"
}

variable "region" {
  description = "The region for the network. If the cluster is regional, this must be the same region. Otherwise, it should be the region of the zone."
  type        = "string"
}

# ---------------------------------------------------------------------------------------------------------------------
# OPTIONAL PARAMETERS
# These parameters have reasonable defaults.
# ---------------------------------------------------------------------------------------------------------------------

variable "cluster_name" {
  description = "The name of the Kubernetes cluster."
  type        = "string"
  default     = "example-private-cluster"
}

variable "cluster_service_account_name" {
  description = "The name of the custom service account used for the GKE cluster. This parameter is limited to a maximum of 28 characters."
  type        = "string"
  default     = "example-private-cluster-sa"
}

variable "cluster_service_account_description" {
  description = "A description of the custom service account used for the GKE cluster."
  type        = "string"
  default     = "Example GKE Cluster Service Account managed by Terraform"
}
39 changes: 39 additions & 0 deletions modules/gke-cluster/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,45 @@ using a shared VPC network (a network from another GCP project) using an explici
See [considerations for cluster sizing](https://cloud.google.com/kubernetes-engine/docs/how-to/alias-ips#cluster_sizing)
for more information on sizing secondary ranges for your VPC-native cluster.

## What is a private cluster?

In a private cluster, the nodes have internal IP addresses only, which ensures that their workloads are isolated from the public Internet.
Private nodes do not have outbound Internet access, but Private Google Access provides private nodes and their workloads with
limited outbound access to Google Cloud Platform APIs and services over Google's private network.

If you want your cluster nodes to be able to access the Internet, for example to pull images from external container registries,
you will have to set up [Cloud NAT](https://cloud.google.com/nat/docs/overview).
See [Example GKE Setup](https://cloud.google.com/nat/docs/gke-example) for further information.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe one day we could include an example for this?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I believe our VPC module has Cloud NAT. Other option could be showcasing running one of Google's containers (gcloud container images list --project google-containers). I tested the private cluster with gcr.io/google-containers/nginx and exposed that with a load balancer.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Checked the module, and the NAT is only for the public subnet.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.


You can create a private cluster by setting `enable_private_nodes` to `true`. Note that with a private cluster, setting
the master CIDR range with `master_ipv4_cidr_block` is also required.

### How do I control access to the cluster master?

In a private cluster, the master has two endpoints:

* **Private endpoint:** This is the internal IP address of the master, behind an internal load balancer in the master's
VPC network. Nodes communicate with the master using the private endpoint. Any VM in your VPC network, and in the same
region as your private cluster, can use the private endpoint.

* **Public endpoint:** This is the external IP address of the master. You can disable access to the public endpoint by setting
`enable_private_endpoint` to `true`.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is slightly confusing. E.g enable_private_endpoint signals to me that this only controls the private endpoint, treating the public endpoint separately.

Should this be disable_public_endpoint instead?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think you're right... using disable_public_endpoint feels more intuitive.

Reason for naming it like this is because gcloud uses --enable-private-endpoint. On the other hand the web console uses "Access master using its external IP address" -checkbox. 😄

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

And terraform provider uses enable_private_endpoint 😄

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Makes sense. I am a bit on the fence about deviating from the google APIs, but I do feel that disable_public_endpoint is the better name...


You can relax the restrictions by authorizing certain address ranges to access the endpoints with the input variable
`master_authorized_networks_config`.

### Private cluster restrictions and limitations

Private clusters have the following restrictions and limitations:

* The size of the RFC 1918 block for the cluster master must be /28.
* The nodes in a private cluster must run Kubernetes version 1.8.14-gke.0 or later.
* You cannot convert an existing, non-private cluster to a private cluster.
* Each private cluster you create uses a unique VPC Network Peering.
* Deleting the VPC peering between the cluster master and the cluster nodes, deleting the firewall rules that allow
ingress traffic from the cluster master to nodes on port 10250, or deleting the default route to the default
Internet gateway, causes a private cluster to stop functioning.

## What IAM roles does this module configure? (unimplemented)

Given a service account, this module will enable the following IAM roles:
Expand Down
14 changes: 14 additions & 0 deletions modules/gke-cluster/main.tf
Original file line number Diff line number Diff line change
@@ -1,3 +1,8 @@
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# DEPLOY A GKE CLUSTER
# This module deploys a GKE cluster, a managed, production-ready environment for deploying containerized applications.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nice 👍

resource "google_container_cluster" "cluster" {
name = "${var.name}"
description = "${var.description}"
Expand All @@ -24,12 +29,21 @@ resource "google_container_cluster" "cluster" {

initial_node_count = 1

# ip_allocation_policy.use_ip_aliases defaults to true, since we define the block `ip_allocation_policy`
ip_allocation_policy {
// Choose the range, but let GCP pick the IPs within the range
cluster_secondary_range_name = "${var.cluster_secondary_range_name}"
services_secondary_range_name = "${var.cluster_secondary_range_name}"
}

# We can optionally control access to the cluster
# See https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters
private_cluster_config {
enable_private_endpoint = "${var.enable_private_endpoint}"
enable_private_nodes = "${var.enable_private_nodes}"
master_ipv4_cidr_block = "${var.master_ipv4_cidr_block}"
}

addons_config {
http_load_balancing {
disabled = "${var.http_load_balancing ? 0 : 1}"
Expand Down
15 changes: 15 additions & 0 deletions modules/gke-cluster/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,21 @@ variable "http_load_balancing" {
default = true
}

variable "enable_private_nodes" {
  description = "Control whether nodes have internal IP addresses only. If enabled, all nodes are given only RFC 1918 private addresses and communicate with the master via private networking."
  type        = "string"
  default     = "false"
}

variable "enable_private_endpoint" {
  description = "Control whether the master's internal IP address is used as the cluster endpoint. If set to 'true', the master can only be accessed from internal IP addresses."
  type        = "string"
  default     = "false"
}

variable "master_ipv4_cidr_block" {
  description = "The IP range in CIDR notation to use for the hosted master network. This range will be used for assigning internal IP addresses to the master or set of masters, as well as the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network."
  type        = "string"
  default     = ""
}

// TODO(robmorgan): Are we using these values below? We should understand them more fully before adding them to configs.

variable "network_project" {
Expand Down
Loading