From da1107e321c50ce0a072d41aed5fbe9dc9bccd94 Mon Sep 17 00:00:00 2001
From: Greg Weber
Date: Wed, 12 Jun 2019 12:00:12 -0700
Subject: [PATCH 1/3] Local SSD on GKE

* Add GKE SSD provisioning scripts that fix performance and capacity issues
* Document in the operation guide, and link documentation
* Don't use by default for PD. Benchmarking has not shown a significant improvement for PD.

---
 deploy/gcp/README.md                          |   7 +-
 deploy/gcp/main.tf                            |  10 +-
 deploy/gcp/manifests/gke                      |   1 +
 deploy/gcp/manifests/gke-storage.yml          |   1 -
 .../manifests/local-ssd/kustomization.yaml    |   8 ++
 .../terraform-local-ssd-provision.yaml        |  12 ++
 .../manifests/local-volume-provisioner.yaml   | 135 ------------------
 deploy/gcp/manifests/startup-script.yaml      |  55 -------
 .../templates/tidb-cluster-values.yaml.tpl    |   2 +-
 docs/google-kubernetes-tutorial.md            |   8 +-
 docs/operation-guide.md                       |  17 +++
 manifests/gke/local-ssd-optimize.yaml         |  57 ++++++++
 .../persistent-disk.yaml}                     |   1 +
 13 files changed, 115 insertions(+), 199 deletions(-)
 create mode 120000 deploy/gcp/manifests/gke
 delete mode 120000 deploy/gcp/manifests/gke-storage.yml
 create mode 100644 deploy/gcp/manifests/local-ssd/kustomization.yaml
 create mode 100644 deploy/gcp/manifests/local-ssd/overlays/terraform-local-ssd-provision.yaml
 delete mode 100644 deploy/gcp/manifests/local-volume-provisioner.yaml
 delete mode 100644 deploy/gcp/manifests/startup-script.yaml
 create mode 100644 manifests/gke/local-ssd-optimize.yaml
 rename manifests/{gke-storage.yml => gke/persistent-disk.yaml} (77%)

diff --git a/deploy/gcp/README.md b/deploy/gcp/README.md
index 8268c71f21f..cb20e57856f 100644
--- a/deploy/gcp/README.md
+++ b/deploy/gcp/README.md
@@ -8,7 +8,7 @@ First of all, make sure the following items are installed on your machine:
 
 * [Google Cloud SDK](https://cloud.google.com/sdk/install)
 * [terraform](https://www.terraform.io/downloads.html) >= 0.12
-* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) >= 1.11
+* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) >= 1.14
 * [helm](https://github.com/helm/helm/blob/master/docs/install.md#installing-the-helm-client) >= 2.9.0 and < 3.0.0
 * [jq](https://stedolan.github.io/jq/download/)
 
@@ -236,3 +236,8 @@ terraform destroy
 You have to manually delete disks in the Google Cloud Console, or with `gcloud` after running `terraform destroy` if you do not need the data anymore.
 
 > *Note*: When `terraform destroy` is running, an error with the following message might occur: `Error reading Container Cluster "my-cluster": Cluster "my-cluster" has status "RECONCILING" with message""`. This happens when GCP is upgrading the kubernetes master node, which it does automatically at times. While this is happening, it is not possible to delete the cluster. When it is done, run `terraform destroy` again.
+
+
+## More information
+
+Please view our [operation guide](./operation-guide.md).
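The kubectl requirement moves to >= 1.14, which lines up with the switch below to `kubectl apply -k`; built-in kustomize support first shipped in kubectl 1.14. A preflight check of the client tools before running `terraform apply` might look like this sketch (adjust the paths and expected versions to your environment):

```bash
# Preflight sketch: confirm the client tools meet the versions the GCP
# deployment now expects before running `terraform apply`.
set -euo pipefail

kubectl version --client --short   # want v1.14 or newer, needed for `kubectl apply -k`
terraform version                  # want 0.12 or newer
helm version --client --short      # want >= 2.9.0 and < 3.0.0
jq --version
gcloud config get-value account    # confirms the Google Cloud SDK is installed and authenticated
```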
diff --git a/deploy/gcp/main.tf b/deploy/gcp/main.tf index 46d72916b91..6c7677e08c8 100644 --- a/deploy/gcp/main.tf +++ b/deploy/gcp/main.tf @@ -120,8 +120,7 @@ resource "google_container_node_pool" "pd_pool" { node_config { machine_type = var.pd_instance_type - image_type = "UBUNTU" - local_ssd_count = 1 + local_ssd_count = 0 taint { effect = "NO_SCHEDULE" @@ -150,6 +149,8 @@ resource "google_container_node_pool" "tikv_pool" { node_config { machine_type = var.tikv_instance_type image_type = "UBUNTU" + // This value cannot be changed (instead a new node pool is needed) + // 1 SSD is 375 GiB local_ssd_count = 1 taint { @@ -316,9 +317,8 @@ resource "null_resource" "setup-env" { kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user $(gcloud config get-value account) kubectl create serviceaccount --namespace kube-system tiller kubectl apply -f manifests/crd.yaml -kubectl apply -f manifests/startup-script.yaml -kubectl apply -f manifests/local-volume-provisioner.yaml -kubectl apply -f manifests/gke-storage.yml +kubectl apply -k manifests/local-ssd +kubectl apply -f manifests/gke/persistent-disk.yaml kubectl apply -f manifests/tiller-rbac.yaml helm init --service-account tiller --upgrade --wait until helm ls; do diff --git a/deploy/gcp/manifests/gke b/deploy/gcp/manifests/gke new file mode 120000 index 00000000000..7833ab5c911 --- /dev/null +++ b/deploy/gcp/manifests/gke @@ -0,0 +1 @@ +../../../manifests/gke \ No newline at end of file diff --git a/deploy/gcp/manifests/gke-storage.yml b/deploy/gcp/manifests/gke-storage.yml deleted file mode 120000 index 4017696a5f2..00000000000 --- a/deploy/gcp/manifests/gke-storage.yml +++ /dev/null @@ -1 +0,0 @@ -../../../manifests/gke-storage.yml \ No newline at end of file diff --git a/deploy/gcp/manifests/local-ssd/kustomization.yaml b/deploy/gcp/manifests/local-ssd/kustomization.yaml new file mode 100644 index 00000000000..069a7957885 --- /dev/null +++ b/deploy/gcp/manifests/local-ssd/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +bases: + - ../../../../manifests/gke/local-ssd-provision + +patches: + - overlays/terraform-local-ssd-provision.yaml diff --git a/deploy/gcp/manifests/local-ssd/overlays/terraform-local-ssd-provision.yaml b/deploy/gcp/manifests/local-ssd/overlays/terraform-local-ssd-provision.yaml new file mode 100644 index 00000000000..2fc36396703 --- /dev/null +++ b/deploy/gcp/manifests/local-ssd/overlays/terraform-local-ssd-provision.yaml @@ -0,0 +1,12 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: local-volume-provisioner +spec: + template: + spec: + tolerations: + - operator: Exists + effect: "NoSchedule" + - operator: Exists + effect: "NoSchedule" diff --git a/deploy/gcp/manifests/local-volume-provisioner.yaml b/deploy/gcp/manifests/local-volume-provisioner.yaml deleted file mode 100644 index 9a8bec07d69..00000000000 --- a/deploy/gcp/manifests/local-volume-provisioner.yaml +++ /dev/null @@ -1,135 +0,0 @@ -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: "local-storage" -provisioner: "kubernetes.io/no-provisioner" -volumeBindingMode: "WaitForFirstConsumer" - ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: local-provisioner-config - namespace: kube-system -data: - storageClassMap: | - local-storage: - hostDir: /mnt/disks - mountDir: /mnt/disks - ---- -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: local-volume-provisioner - namespace: kube-system - labels: - app: 
local-volume-provisioner -spec: - selector: - matchLabels: - app: local-volume-provisioner - template: - metadata: - labels: - app: local-volume-provisioner - spec: - tolerations: - - key: dedicated - operator: Equal - value: pd - effect: "NoSchedule" - - key: dedicated - operator: Equal - value: tikv - effect: "NoSchedule" - serviceAccountName: local-storage-admin - containers: - - image: "quay.io/external_storage/local-volume-provisioner:v2.2.0" - name: provisioner - securityContext: - privileged: true - env: - - name: MY_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: MY_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: JOB_CONTAINER_IMAGE - value: "quay.io/external_storage/local-volume-provisioner:v2.2.0" - resources: - requests: - cpu: 100m - memory: 100m - limits: - cpu: 100m - memory: 100m - volumeMounts: - - mountPath: /etc/provisioner/config - name: provisioner-config - readOnly: true - # mounting /dev in DinD environment would fail - # - mountPath: /dev - # name: provisioner-dev - - mountPath: /mnt/disks - name: local-disks - mountPropagation: "HostToContainer" - volumes: - - name: provisioner-config - configMap: - name: local-provisioner-config - # - name: provisioner-dev - # hostPath: - # path: /dev - - name: local-disks - hostPath: - path: /mnt/disks - ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: local-storage-admin - namespace: kube-system - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: local-storage-provisioner-pv-binding - namespace: kube-system -subjects: -- kind: ServiceAccount - name: local-storage-admin - namespace: kube-system -roleRef: - kind: ClusterRole - name: system:persistent-volume-provisioner - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: local-storage-provisioner-node-clusterrole - namespace: kube-system -rules: -- apiGroups: [""] - resources: ["nodes"] - verbs: ["get"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: local-storage-provisioner-node-binding - namespace: kube-system -subjects: -- kind: ServiceAccount - name: local-storage-admin - namespace: kube-system -roleRef: - kind: ClusterRole - name: local-storage-provisioner-node-clusterrole - apiGroup: rbac.authorization.k8s.io diff --git a/deploy/gcp/manifests/startup-script.yaml b/deploy/gcp/manifests/startup-script.yaml deleted file mode 100644 index 0aece775bce..00000000000 --- a/deploy/gcp/manifests/startup-script.yaml +++ /dev/null @@ -1,55 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: local-ssd-startup - namespace: kube-system - labels: - app: local-ssd-startup -spec: - template: - metadata: - labels: - app: local-ssd-startup - spec: - hostPID: true - nodeSelector: - cloud.google.com/gke-os-distribution: ubuntu - containers: - - name: local-ssd-startup - image: gcr.io/google-containers/startup-script:v1 - securityContext: - privileged: true - env: - - name: STARTUP_SCRIPT - value: | - #!/usr/bin/env bash - set -euo pipefail - apt-get update - apt-get install -y software-properties-common - apt-add-repository universe - apt-get update - declare -a PKG_LIST=(python-google-compute-engine \ - python3-google-compute-engine \ - google-compute-engine-oslogin \ - gce-compute-image-packages) - for pkg in ${PKG_LIST[@]}; do - apt-get install -y $pkg || echo "Not available: $pkg" - done - mount | grep -v nobarrier | awk '/ssd/{print $1}' | xargs -i 
mount {} -o remount,nobarrier
-          cat <<EOF > /etc/security/limits.d/99-tidb.conf
-          root soft nofile 1000000
-          root hard nofile 1000000
-          root soft core unlimited
-          root soft stack 10240
-          EOF
-        volumeMounts:
-        - mountPath: /mnt/disks
-          name: local-ssd
-          mountPropagation: Bidirectional
-      tolerations:
-      - effect: NoSchedule
-        operator: Exists
-      volumes:
-      - name: local-ssd
-        hostPath:
-          path: /mnt/disks
\ No newline at end of file
diff --git a/deploy/gcp/templates/tidb-cluster-values.yaml.tpl b/deploy/gcp/templates/tidb-cluster-values.yaml.tpl
index 496c786f071..4229d746991 100644
--- a/deploy/gcp/templates/tidb-cluster-values.yaml.tpl
+++ b/deploy/gcp/templates/tidb-cluster-values.yaml.tpl
@@ -48,7 +48,7 @@ pd:
   # different classes might map to quality-of-service levels, or to backup policies,
   # or to arbitrary policies determined by the cluster administrators.
   # refer to https://kubernetes.io/docs/concepts/storage/storage-classes
-  storageClassName: local-storage
+  storageClassName: pd-ssd
 
   # Image pull policy.
   imagePullPolicy: IfNotPresent
diff --git a/docs/google-kubernetes-tutorial.md b/docs/google-kubernetes-tutorial.md
index 9024c5261fb..dee53f88c73 100644
--- a/docs/google-kubernetes-tutorial.md
+++ b/docs/google-kubernetes-tutorial.md
@@ -89,7 +89,7 @@ When you see `Running`, it's time to hit Ctrl+C and proceed
 The first TiDB component we are going to install is the TiDB Operator, using a Helm Chart. TiDB Operator is the management system that works with Kubernetes to bootstrap your TiDB cluster and keep it running. This step assumes you are in the `tidb-operator` working directory:
 
     kubectl apply -f ./manifests/crd.yaml &&
-    kubectl apply -f ./manifests/gke-storage.yml &&
+    kubectl apply -f ./manifests/gke/persistent-disk.yaml &&
     helm install ./charts/tidb-operator -n tidb-admin --namespace=tidb-admin
 
 We can watch the operator come up with:
@@ -177,3 +177,9 @@ The above commands only delete the running pods, the data is persistent. If you
 Once you have finished experimenting, you can delete the Kubernetes cluster with:
 
     gcloud container clusters delete tidb
+
+
+## More information
+
+For production deployments, view our [operation guide](./operation-guide.md), and look at the GKE section.
+We also have a simple [terraform based deployment](./deploy/gcp/README.md).
diff --git a/docs/operation-guide.md b/docs/operation-guide.md
index 17240910069..35f70ae9b33 100644
--- a/docs/operation-guide.md
+++ b/docs/operation-guide.md
@@ -66,6 +66,23 @@ TiDB Operator uses `values.yaml` as TiDB cluster configuration file. It provides
 
 For other settings, the variables in `values.yaml` are self-explanatory with comments. You can modify them according to your need before installing the charts.
 
+
+## GKE
+
+On GKE, each local SSD volume is limited to 375 GiB, and by default local SSD performs worse than persistent disk.
+
+For proper performance, you must:
+
+* install the Linux guest environment, which can only be done on the Ubuntu image, not the COS image
+* make sure the SSD is mounted with the `nobarrier` option.
+
+We have a [daemonset which does the above performance fixes](../manifests/gke/local-ssd-optimize.yaml).
+We also have a [daemonset that fixes performance and combines all SSD disks together with LVM](../manifests/gke/local-ssd-provision/local-ssd-provision.yaml).
+The terraform deployment automatically installs the latter.
+
+> **Note**: This setup, which combines all local SSDs into one volume, assumes you are running only one process that needs local SSD per VM.
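For clusters that are not managed by the terraform scripts, the manifests referenced above can be applied by hand. A minimal sketch, assuming kubectl >= 1.14 and a checkout of this repository as the working directory:

```bash
# Sketch: apply the GKE local SSD manifests directly. The terraform
# deployment applies an equivalent kustomize overlay automatically.

# Either: keep one volume per SSD, remount with nobarrier, and install
# the Linux guest environment on the Ubuntu nodes.
kubectl apply -f manifests/gke/local-ssd-optimize.yaml

# Or: additionally combine all local SSDs on each node into a single
# LVM volume and run the local-volume-provisioner against it.
kubectl apply -k manifests/gke/local-ssd-provision

# For the second option, check that the provisioner pods are running.
kubectl -n kube-system get pods -l app=local-volume-provisioner -o wide
```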
+ + ## Deploy TiDB cluster After TiDB Operator and Helm are deployed correctly and configuration completed, TiDB cluster can be deployed using following command: diff --git a/manifests/gke/local-ssd-optimize.yaml b/manifests/gke/local-ssd-optimize.yaml new file mode 100644 index 00000000000..edbdfa3da1e --- /dev/null +++ b/manifests/gke/local-ssd-optimize.yaml @@ -0,0 +1,57 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: local-ssd-startup + namespace: kube-system + labels: + app: local-ssd-startup +spec: + template: + metadata: + labels: + app: local-ssd-startup + spec: + hostPID: true + nodeSelector: + cloud.google.com/gke-os-distribution: ubuntu + cloud.google.com/gke-local-ssd: "true" + containers: + - name: local-ssd-startup + image: gcr.io/google-containers/startup-script:v1 + securityContext: + privileged: true + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 100m + memory: 100Mi + env: + - name: STARTUP_SCRIPT + value: | + #!/usr/bin/env bash + set -euo pipefail + apt-get update + apt-get install -y software-properties-common + apt-add-repository universe + apt-get update + declare -a PKG_LIST=(python-google-compute-engine \ + python3-google-compute-engine \ + google-compute-engine-oslogin \ + gce-compute-image-packages) + for pkg in ${PKG_LIST[@]}; do + apt-get install -y $pkg || echo "Not available: $pkg" + done + mount | grep -v nobarrier | awk '/ssd/{print $1}' | xargs -i mount {} -o remount,nobarrier + volumeMounts: + - mountPath: /mnt/disks + name: local-ssd + mountPropagation: Bidirectional + tolerations: + - effect: NoSchedule + operator: Exists + volumes: + - name: local-ssd + hostPath: + path: /mnt/disks diff --git a/manifests/gke-storage.yml b/manifests/gke/persistent-disk.yaml similarity index 77% rename from manifests/gke-storage.yml rename to manifests/gke/persistent-disk.yaml index 720a51e387b..81739db0b66 100644 --- a/manifests/gke-storage.yml +++ b/manifests/gke/persistent-disk.yaml @@ -3,5 +3,6 @@ kind: StorageClass metadata: name: "pd-ssd" provisioner: kubernetes.io/gce-pd +volumeBindingMode: WaitForFirstConsumer parameters: type: pd-ssd From 54aa2415bd56e711f8148604479afcda5c627084 Mon Sep 17 00:00:00 2001 From: Greg Weber Date: Thu, 13 Jun 2019 21:18:18 -0700 Subject: [PATCH 2/3] add local-ssd-provision manifest --- .../local-ssd-provision/kustomization.yaml | 5 + .../local-ssd-provision.yaml | 223 ++++++++++++++++++ 2 files changed, 228 insertions(+) create mode 100644 manifests/gke/local-ssd-provision/kustomization.yaml create mode 100644 manifests/gke/local-ssd-provision/local-ssd-provision.yaml diff --git a/manifests/gke/local-ssd-provision/kustomization.yaml b/manifests/gke/local-ssd-provision/kustomization.yaml new file mode 100644 index 00000000000..e511964e5d6 --- /dev/null +++ b/manifests/gke/local-ssd-provision/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - local-ssd-provision.yaml diff --git a/manifests/gke/local-ssd-provision/local-ssd-provision.yaml b/manifests/gke/local-ssd-provision/local-ssd-provision.yaml new file mode 100644 index 00000000000..1712131d7a9 --- /dev/null +++ b/manifests/gke/local-ssd-provision/local-ssd-provision.yaml @@ -0,0 +1,223 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: "local-storage" +provisioner: "kubernetes.io/no-provisioner" +volumeBindingMode: "WaitForFirstConsumer" + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: local-provisioner-config + namespace: kube-system 
+data: + storageClassMap: | + local-storage: + hostDir: /mnt/disks + mountDir: /mnt/disks + +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: local-volume-provisioner + namespace: kube-system + labels: + app: local-volume-provisioner +spec: + selector: + matchLabels: + app: local-volume-provisioner + template: + metadata: + labels: + app: local-volume-provisioner + spec: + hostPID: true + nodeSelector: + cloud.google.com/gke-os-distribution: ubuntu + cloud.google.com/gke-local-ssd: "true" + serviceAccountName: local-storage-admin + initContainers: + - name: local-ssd-startup + image: alpine + command: ['/bin/sh', '-c', 'nsenter -t 1 -m -u -i -n -p -- bash -c "${STARTUP_SCRIPT}"'] + securityContext: + privileged: true + volumeMounts: + - mountPath: /mnt/disks + name: local-disks + mountPropagation: Bidirectional + env: + - name: STARTUP_SCRIPT + value: | + #!/usr/bin/env bash + set -euo pipefail + set -x + + # Install the linux guest environment tools + cat /etc/apt/sources.list + apt-get update + apt-get install -y software-properties-common || echo "could not install software-properties-common" + apt-add-repository universe + apt-get update + declare -a PKG_LIST=(python-google-compute-engine \ + python3-google-compute-engine \ + google-compute-engine-oslogin \ + gce-compute-image-packages) + for pkg in ${PKG_LIST[@]}; do + apt-get install -y $pkg || echo "Not available: $pkg" + done + + apt-get install -y lvm2 + apt-get -y autoremove + + set -x + if ! findmnt -n -a -l | grep /mnt/disks/ssd ; then + if test -f /etc/ssd_mounts ; then + ssd_mounts=$(cat /etc/ssd_mounts) + else + echo "no ssds mounted yet" + exit 1 + fi + else + ssd_mounts=$(findmnt -n -a -l --nofsroot | grep /mnt/disks/ssd) + echo "$ssd_mounts" > /etc/ssd_mounts + fi + + # Re-mount all disks as a single logical volume + for ssd in $(findmnt -n -a -l --nofsroot | grep /mnt/disks/ssd | awk '{print $1}') ; do + umount "$ssd" + done + for ssd in $(echo "$ssd_mounts" | awk '{print $1}') ; do + if test -d "$ssd"; then + rm -r "$ssd" + fi + done + + if ! pvs | grep volume_all_ssds ; then + for dev in $(echo "$ssd_mounts" | awk '{print $2}') ; do + wipefs --all "$dev" + done + echo "$ssd_mounts" | awk '{print $2}' | xargs /sbin/pvcreate + fi + pvdisplay + pvs + if ! vgs | grep volume_all_ssds ; then + echo "$ssd_mounts" | awk '{print $2}' | xargs /sbin/vgcreate volume_all_ssds + fi + vgdisplay + vgs + if ! lvs | grep logical_all_ssds ; then + lvcreate -l 100%FREE -n logical_all_ssds volume_all_ssds + fi + lvdisplay + lvs + + if ! uuid=$(blkid -s UUID -o value /dev/volume_all_ssds/logical_all_ssds) ; then + mkfs.ext4 /dev/volume_all_ssds/logical_all_ssds + uuid=$(blkid -s UUID -o value /dev/volume_all_ssds/logical_all_ssds) + fi + + mnt_dir="/mnt/disks/$uuid" + mkdir -p "$mnt_dir" + + if ! 
grep "$uuid" /etc/fstab ; then + new_fstab=$(grep -v /mnt/disks/ssd /etc/fstab) + echo "$new_fstab" > /etc/fstab + echo "UUID=$uuid $mnt_dir ext4 rw,relatime,discard,nobarrier,data=ordered" >> /etc/fstab + fi + mount -a + containers: + - image: "quay.io/external_storage/local-volume-provisioner:v2.2.0" + name: provisioner + securityContext: + privileged: true + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 100m + memory: 100Mi + env: + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: MY_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: JOB_CONTAINER_IMAGE + value: "quay.io/external_storage/local-volume-provisioner:v2.2.0" + volumeMounts: + - mountPath: /etc/provisioner/config + name: provisioner-config + readOnly: true + # mounting /dev in DinD environment would fail + # - mountPath: /dev + # name: provisioner-dev + - mountPath: /mnt/disks + name: local-disks + mountPropagation: "HostToContainer" + tolerations: + - effect: NoSchedule + operator: Exists + volumes: + - name: provisioner-config + configMap: + name: local-provisioner-config + # - name: provisioner-dev + # hostPath: + # path: /dev + - name: local-disks + hostPath: + path: /mnt/disks + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: local-storage-admin + namespace: kube-system + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: local-storage-provisioner-pv-binding + namespace: kube-system +subjects: +- kind: ServiceAccount + name: local-storage-admin + namespace: kube-system +roleRef: + kind: ClusterRole + name: system:persistent-volume-provisioner + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: local-storage-provisioner-node-clusterrole + namespace: kube-system +rules: +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: local-storage-provisioner-node-binding + namespace: kube-system +subjects: +- kind: ServiceAccount + name: local-storage-admin + namespace: kube-system +roleRef: + kind: ClusterRole + name: local-storage-provisioner-node-clusterrole + apiGroup: rbac.authorization.k8s.io From be86d73b377c338a325464dce3431b4bbac25f7f Mon Sep 17 00:00:00 2001 From: Greg Weber Date: Thu, 13 Jun 2019 22:40:07 -0700 Subject: [PATCH 3/3] fix documentation links --- deploy/gcp/README.md | 2 +- docs/google-kubernetes-tutorial.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/gcp/README.md b/deploy/gcp/README.md index cb20e57856f..2ff61c067c3 100644 --- a/deploy/gcp/README.md +++ b/deploy/gcp/README.md @@ -240,4 +240,4 @@ You have to manually delete disks in the Google Cloud Console, or with `gcloud` ## More information -Please view our [operation guide](./operation-guide.md). +Please view our [operation guide](../../docs/operation-guide.md). diff --git a/docs/google-kubernetes-tutorial.md b/docs/google-kubernetes-tutorial.md index dee53f88c73..1d35a20521b 100644 --- a/docs/google-kubernetes-tutorial.md +++ b/docs/google-kubernetes-tutorial.md @@ -182,4 +182,4 @@ Once you have finished experimenting, you can delete the Kubernetes cluster with ## More information For production deployments, view our [operation guide](./operation-guide.md), and look at the GKE section. -We also have a simple [terraform based deployment](./deploy/gcp/README.md). 
+We also have a simple [terraform based deployment](../deploy/gcp/README.md).
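After the series is applied, a quick way to confirm that the provisioner picked up the combined local SSD volume might look like the sketch below; the `local-storage` and `pd-ssd` storage class names and the `volume_all_ssds` volume group come from the manifests above, and the node-level commands assume SSH access or a privileged debug shell on a TiKV node.

```bash
# Sketch: verify the storage classes and the local PVs created from the
# LVM-combined local SSDs once the manifests in this series are applied.
kubectl get storageclass local-storage pd-ssd

# One local PV per local SSD node should appear, backed by /mnt/disks/<uuid>.
kubectl get pv | grep local-storage

# On the node itself:
sudo vgs volume_all_ssds
sudo lvs volume_all_ssds
mount | grep /mnt/disks
```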