diff --git a/_redirects b/_redirects index 2344c6744c9b3..4234771f21367 100644 --- a/_redirects +++ b/_redirects @@ -4,453 +4,359 @@ # test at https://play.netlify.com/redirects # ############################################### -/docs/admin/addons /docs/concepts/cluster-administration/addons 301 -/docs/admin/apparmor /docs/tutorials/clusters/apparmor 301 -/docs/admin/audit /docs/tasks/debug-application-cluster/audit 301 -/docs/admin/cluster-components /docs/concepts/overview/components 301 -/docs/admin/cluster-management /docs/tasks/administer-cluster/cluster-management 301 -/docs/admin/cluster-troubleshooting /docs/tasks/debug-application-cluster/debug-cluster 301 -/docs/admin/daemons /docs/concepts/workloads/controllers/daemonset 301 -/docs/admin/disruptions /docs/concepts/workloads/pods/disruptions 301 -/docs/admin/dns /docs/concepts/services-networking/dns-pod-service 301 -/docs/admin/etcd /docs/tasks/administer-cluster/configure-upgrade-etcd 301 -/docs/admin/etcd_upgrade /docs/tasks/administer-cluster/configure-upgrade-etcd 301 -/docs/admin/federation/kubefed /docs/tasks/federation/set-up-cluster-federation-kubefed 301 -/docs/admin/garbage-collection /docs/concepts/cluster-administration/kubelet-garbage-collection 301 -/docs/admin/ha-master-gce /docs/tasks/administer-cluster/highly-available-master 301 -/docs/admin/ /docs/concepts/cluster-administration/cluster-administration-overview 301 -/docs/admin/kubeadm-upgrade-1-7 /docs/tasks/administer-cluster/kubeadm-upgrade-1-7 301 -/docs/admin/limitrange/ /docs/tasks/administer-cluster/cpu-memory-limit 301 -/docs/admin/master-node-communication /docs/concepts/architecture/master-node-communication 301 -/docs/admin/multi-cluster /docs/concepts/cluster-administration/federation 301 -/docs/admin/multiple-schedulers /docs/tasks/administer-cluster/configure-multiple-schedulers 301 -/docs/admin/namespaces /docs/tasks/administer-cluster/namespaces 301 -/docs/admin/namespaces/walkthrough /docs/tasks/administer-cluster/namespaces-walkthrough 301 -/docs/admin/network-plugins /docs/concepts/cluster-administration/network-plugins 301 -/docs/admin/networking /docs/concepts/cluster-administration/networking 301 -/docs/admin/node /docs/concepts/architecture/nodes 301 -/docs/admin/node-allocatable /docs/tasks/administer-cluster/reserve-compute-resources 301 -/docs/admin/node-problem /docs/tasks/debug-application-cluster/monitor-node-health 301 -/docs/admin/out-of-resource /docs/tasks/administer-cluster/out-of-resource 301 -/docs/admin/rescheduler /docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods 301 -/docs/admin/resourcequota/limitstorageconsumption /docs/tasks/administer-cluster/limit-storage-consumption 301 -/docs/admin/resourcequota/walkthrough /docs/tasks/administer-cluster/quota-api-object 301 -/docs/admin/static-pods /docs/tasks/administer-cluster/static-pod 301 -/docs/admin/sysctls /docs/concepts/cluster-administration/sysctl-cluster 301 -/docs/admin/upgrade-1-6 /docs/tasks/administer-cluster/upgrade-1-6 301 - -/docs/api /docs/concepts/overview/kubernetes-api 301 - -/docs/concepts/abstractions/controllers/garbage-collection /docs/concepts/workloads/controllers/garbage-collection 301 -/docs/concepts/abstractions/controllers/petsets /docs/concepts/workloads/controllers/statefulset 301 -/docs/concepts/abstractions/controllers/statefulsets /docs/concepts/workloads/controllers/statefulset 301 -/docs/concepts/abstractions/init-containers /docs/concepts/workloads/pods/init-containers 301 -/docs/concepts/abstractions/overview 
/docs/concepts/overview/working-with-objects/kubernetes-objects 301 -/docs/concepts/abstractions/pod /docs/concepts/workloads/pods/pod-overview 301 - -/docs/concepts/cluster-administration/access-cluster /docs/tasks/access-application-cluster/access-cluster 301 -/docs/concepts/cluster-administration/audit /docs/tasks/debug-application-cluster/audit 301 -/docs/concepts/cluster-administration/authenticate-across-clusters-kubeconfig /docs/tasks/access-application-cluster/authenticate-across-clusters-kubeconfig 301 -/docs/concepts/cluster-administration/cluster-management /docs/tasks/administer-cluster/cluster-management 301 -/docs/concepts/cluster-administration/configure-etcd /docs/tasks/administer-cluster/configure-upgrade-etcd 301 -/docs/concepts/cluster-administration/etcd-upgrade /docs/tasks/administer-cluster/configure-upgrade-etcd 301 -/docs/concepts/cluster-administration/federation-service-discovery /docs/tasks/federation/federation-service-discovery 301 -/docs/concepts/cluster-administration/guaranteed-scheduling-critical-addon-pods /docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods 301 -/docs/concepts/cluster-administration/master-node-communication /docs/concepts/architecture/master-node-communication 301 -/docs/concepts/cluster-administration/multiple-clusters /docs/concepts/cluster-administration/federation 301 -/docs/concepts/cluster-administration/out-of-resource /docs/tasks/administer-cluster/out-of-resource 301 -/docs/concepts/cluster-administration/resource-usage-monitoring /docs/tasks/debug-application-cluster/resource-usage-monitoring 301 -/docs/concepts/cluster-administration/static-pod /docs/tasks/administer-cluster/static-pod 301 - -/docs/concepts/clusters/logging /docs/concepts/cluster-administration/logging 301 -/docs/concepts/configuration/container-command-arg /docs/tasks/inject-data-application/define-command-argument-container/docs/concepts/ecosystem/thirdpartyresource 301 /docs/tasks/access-kubernetes-api/extend-api-third-party-resource -/docs/concepts/jobs/cron-jobs /docs/concepts/workloads/controllers/cron-jobs 301 -/docs/concepts/jobs/run-to-completion-finite-workloads /docs/concepts/workloads/controllers/jobs-run-to-completion 301 -/docs/concepts/nodes/node /docs/concepts/architecture/nodes 301 -/docs/concepts/storage/etcd-store-api-object /docs/tasks/administer-cluster/configure-upgrade-etcd 301 - -/docs/concepts/tools/kubectl/object-management-overview /docs/tutorials/object-management-kubectl/object-management 301 -/docs/concepts/tools/kubectl/object-management-using-declarative-config /docs/tutorials/object-management-kubectl/declarative-object-management-configuration 301 -/docs/concepts/tools/kubectl/object-management-using-imperative-commands /docs/tutorials/object-management-kubectl/imperative-object-management-command 301 -/docs/concepts/tools/kubectl/object-management-using-imperative-config /docs/tutorials/object-management-kubectl/imperative-object-management-configuration 301 - -/docs/getting-started-guides /docs/setup/pick-right-solution 301 -/docs/getting-started-guides/kubeadm /docs/setup/independent/create-cluster-kubeadm 301 -/docs/getting-started-guides/network-policy/calico /docs/tasks/administer-cluster/calico-network-policy 301 -/docs/getting-started-guides/network-policy/romana /docs/tasks/administer-cluster/romana-network-policy 301 -/docs/getting-started-guides/network-policy/walkthrough /docs/tasks/administer-cluster/declare-network-policy 301 -/docs/getting-started-guides/network-policy/weave 
/docs/tasks/administer-cluster/weave-network-policy 301 -/docs/getting-started-guides/running-cloud-controller /docs/tasks/administer-cluster/running-cloud-controller 301 -/docs/getting-started-guides/ubuntu/calico /docs/getting-started-guides/ubuntu/ 301 - -/docs/hellonode /docs/tutorials/stateless-application/hello-minikube 301 -/docs/ /docs/home/ 301 -/docs/samples /docs/tutorials/ 301 - -/docs/tasks/administer-cluster/apply-resource-quota-limit /docs/tasks/administer-cluster/quota-api-object 301 -/docs/tasks/administer-cluster/assign-pods-nodes /docs/tasks/configure-pod-container/assign-pods-nodes 301 -/docs/tasks/administer-cluster/overview /docs/concepts/cluster-administration/cluster-administration-overview 301 -/docs/tasks/administer-cluster/cpu-memory-limit /docs/tasks/administer-cluster/memory-default-namespace 301 -/docs/tasks/administer-cluster/share-configuration /docs/tasks/access-application-cluster/configure-access-multiple-clusters 301 - -/docs/tasks/configure-pod-container/apply-resource-quota-limit /docs/tasks/administer-cluster/apply-resource-quota-limit 301 -/docs/tasks/configure-pod-container/calico-network-policy /docs/tasks/administer-cluster/calico-network-policy 301 -/docs/tasks/configure-pod-container/communicate-containers-same-pod /docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume 301 -/docs/tasks/configure-pod-container/declare-network-policy /docs/tasks/administer-cluster/declare-network-policy 301 -/docs/tasks/configure-pod-container/define-environment-variable-container /docs/tasks/inject-data-application/define-environment-variable-container 301 -/docs/tasks/configure-pod-container/distribute-credentials-secure /docs/tasks/inject-data-application/distribute-credentials-secure 301 -/docs/tasks/configure-pod-container/downward-api-volume-expose-pod-information /docs/tasks/inject-data-application/downward-api-volume-expose-pod-information 301 -/docs/tasks/configure-pod-container/environment-variable-expose-pod-information /docs/tasks/inject-data-application/environment-variable-expose-pod-information 301 -/docs/tasks/configure-pod-container/limit-range /docs/tasks/administer-cluster/cpu-memory-limit 301 -/docs/tasks/configure-pod-container/romana-network-policy /docs/tasks/administer-cluster/romana-network-policy 301 -/docs/tasks/configure-pod-container/weave-network-policy /docs/tasks/administer-cluster/weave-network-policy 301 -/docs/tasks/configure-pod-container/assign-cpu-ram-container /docs/tasks/configure-pod-container/assign-memory-resource 301 - -/docs/tasks/kubectl/get-shell-running-container /docs/tasks/debug-application-cluster/get-shell-running-container 301 -/docs/tasks/kubectl/install /docs/tasks/tools/install-kubectl 301 -/docs/tasks/kubectl/list-all-running-container-images /docs/tasks/access-application-cluster/list-all-running-container-images 301 - -/docs/tasks/manage-stateful-set/debugging-a-statefulset /docs/tasks/debug-application-cluster/debug-stateful-set 301 -/docs/tasks/manage-stateful-set/delete-pods /docs/tasks/run-application/force-delete-stateful-set-pod 301 -/docs/tasks/manage-stateful-set/deleting-a-statefulset /docs/tasks/run-application/delete-stateful-set 301 -/docs/tasks/manage-stateful-set/scale-stateful-set /docs/tasks/run-application/scale-stateful-set 301 -/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set /docs/tasks/run-application/upgrade-pet-set-to-stateful-set 301 - -/docs/tasks/run-application/podpreset /docs/tasks/inject-data-application/podpreset 301 
-/docs/tasks/troubleshoot/debug-init-containers /docs/tasks/debug-application-cluster/debug-init-containers 301 -/docs/tasks/web-ui-dashboard /docs/tasks/access-application-cluster/web-ui-dashboard 301 -/docs/templatedemos /docs/home/contribute/page-templates 301 -/docs/tools/kompose /docs/tools/kompose/user-guide 301 - -/docs/tutorials/clusters/multiple-schedulers /docs/tasks/administer-cluster/configure-multiple-schedulers 301 -/docs/tutorials/connecting-apps/connecting-frontend-backend /docs/tasks/access-application-cluster/connecting-frontend-backend 301 -/docs/tutorials/federation/set-up-cluster-federation-kubefed /docs/tasks/federation/set-up-cluster-federation-kubefed 301 -/docs/tutorials/federation/set-up-coredns-provider-federation /docs/tasks/federation/set-up-coredns-provider-federation 301 -/docs/tutorials/federation/set-up-placement-policies-federation /docs/tasks/federation/set-up-placement-policies-federation 301 -/docs/tutorials/getting-started/create-cluster /docs/tutorials/kubernetes-basics/cluster-intro 301 -/docs/tutorials/stateful-application/run-replicated-stateful-application /docs/tasks/run-application/run-replicated-stateful-application 301 -/docs/tutorials/stateful-application/run-stateful-application /docs/tasks/run-application/run-single-instance-stateful-application 301 -/docs/tutorials/stateless-application/expose-external-ip-address-service /docs/tasks/access-application-cluster/service-access-application-cluster 301 -/docs/tutorials/stateless-application/run-stateless-ap-replication-controller /docs/tasks/run-application/run-stateless-application-deployment 301 -/docs/tutorials/stateless-application/run-stateless-application-deployment /docs/tasks/run-application/run-stateless-application-deployment 301 - -/docs/user-guide/accessing-the-cluster /docs/tasks/access-application-cluster/access-cluster 301 -/docs/user-guide/add-entries-to-pod-etc-hosts-with-host-aliases /docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases 301 -/docs/user-guide/annotations /docs/concepts/overview/working-with-objects/annotations 301 -/docs/user-guide/application-troubleshooting /docs/tasks/debug-application-cluster/debug-application 301 -/docs/user-guide/compute-resources /docs/concepts/configuration/manage-compute-resources-container 301 -/docs/user-guide/config-best-practices /docs/concepts/configuration/overview 301 -/docs/user-guide/configmap /docs/tasks/configure-pod-container/configmap 301 -/docs/user-guide/configuring-containers /docs/tasks/ 301 -/docs/user-guide/connecting-applications /docs/concepts/services-networking/connect-applications-service 301 -/docs/user-guide/connecting-to-applications-port-forward /docs/tasks/access-application-cluster/port-forward-access-application-cluster 301 -/docs/user-guide/connecting-to-applications-proxy /docs/tasks/access-kubernetes-api/http-proxy-access-api 301 -/docs/user-guide/container-environment /docs/concepts/containers/container-lifecycle-hooks 301 -/docs/user-guide/cron-jobs /docs/concepts/workloads/controllers/cron-jobs 301 - -/docs/user-guide/debugging-pods-and-replication-controllers/ /docs/tasks/debug-application-cluster/debug-pod-replication-controller/ 301 - -/docs/user-guide/debugging-services /docs/tasks/debug-application-cluster/debug-service 301 -/docs/user-guide/deploying-applications /docs/tasks/run-application/run-stateless-application-deployment 301 -/docs/user-guide/deployments /docs/concepts/workloads/controllers/deployment 301 -/docs/user-guide/downward-api 
/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information 301 -/docs/user-guide/downward-api/volume /docs/tasks/inject-data-application/downward-api-volume-expose-pod-information 301 -/docs/user-guide/environment-guide /docs/tasks/inject-data-application/environment-variable-expose-pod-information 301 -/docs/user-guide/federation/cluster /docs/tasks/administer-federation/cluster 301 -/docs/user-guide/federation/configmap /docs/tasks/administer-federation/configmap 301 -/docs/user-guide/federation/daemonsets /docs/tasks/administer-federation/daemonset 301 -/docs/user-guide/federation/deployment /docs/tasks/administer-federation/deployment 301 -/docs/user-guide/federation/events /docs/tasks/administer-federation/events 301 -/docs/user-guide/federation/federated-ingress /docs/tasks/administer-federation/ingress 301 -/docs/user-guide/federation/federated-services /docs/tasks/federation/federation-service-discovery 301 -/docs/user-guide/federation /docs/concepts/cluster-administration/federation 301 -/docs/user-guide/federation/namespaces /docs/tasks/administer-federation/namespaces 301 -/docs/user-guide/federation/replicasets /docs/tasks/administer-federation/replicaset 301 -/docs/user-guide/federation/secrets /docs/tasks/administer-federation/secret 301 -/docs/user-guide/garbage-collection /docs/concepts/workloads/controllers/garbage-collection 301 -/docs/user-guide/getting-into-containers /docs/tasks/debug-application-cluster/get-shell-running-container 301 -/docs/user-guide/gpus /docs/tasks/manage-gpus/scheduling-gpus 301 -/docs/user-guide/horizontal-pod-autoscaling /docs/tasks/run-application/horizontal-pod-autoscale 301 -/docs/user-guide/horizontal-pod-autoscaling/walkthrough /docs/tasks/run-application/horizontal-pod-autoscale-walkthrough 301 -/docs/user-guide/identifiers /docs/concepts/overview/working-with-objects/names 301 -/docs/user-guide/images /docs/concepts/containers/images 301 -/docs/user-guide /docs/home/ 301 -/docs/user-guide/ingress /docs/concepts/services-networking/ingress 301 -/docs/user-guide/introspection-and-debugging /docs/tasks/debug-application-cluster/debug-application-introspection 301 -/docs/user-guide/jobs /docs/concepts/workloads/controllers/jobs-run-to-completion 301 -/docs/user-guide/jobs/expansions /docs/tasks/job/parallel-processing-expansion 301 -/docs/user-guide/jobs/work-queue-1 /docs/tasks/job/coarse-parallel-processing-work-queue/ 301 -/docs/user-guide/jobs/work-queue-2 /docs/tasks/job/fine-parallel-processing-work-queue/ 301 -/docs/user-guide/kubeconfig-file /docs/tasks/access-application-cluster/authenticate-across-clusters-kubeconfig 301 -/docs/user-guide/labels /docs/concepts/overview/working-with-objects/labels 301 -/docs/user-guide/liveness /docs/tasks/configure-pod-container/configure-liveness-readiness-probes 301 -/docs/user-guide/load-balancer /docs/tasks/access-application-cluster/create-external-load-balancer 301 -/docs/user-guide/logging/elasticsearch /docs/tasks/debug-application-cluster/logging-elasticsearch-kibana 301 -/docs/user-guide/logging/overview /docs/concepts/cluster-administration/logging 301 -/docs/user-guide/logging/stackdriver /docs/tasks/debug-application-cluster/logging-stackdriver 301 -/docs/user-guide/managing-deployments /docs/concepts/cluster-administration/manage-deployment 301 -/docs/user-guide/monitoring /docs/tasks/debug-application-cluster/resource-usage-monitoring 301 -/docs/user-guide/namespaces /docs/concepts/overview/working-with-objects/namespaces 301 -/docs/user-guide/networkpolicies 
/docs/concepts/services-networking/network-policies 301 -/docs/user-guide/node-selection /docs/concepts/configuration/assign-pod-node 301 -/docs/user-guide/persistent-volumes /docs/concepts/storage/persistent-volumes 301 -/docs/user-guide/persistent-volumes/walkthrough /docs/tasks/configure-pod-container/configure-persistent-volume-storage 301 -/docs/user-guide/petset /docs/concepts/workloads/controllers/statefulset 301 -/docs/user-guide/petset/bootstrapping /docs/concepts/workloads/controllers/statefulset 301 -/docs/abstractions/controllers/petset /docs/concepts/workloads/controllers/statefulset 301 -/docs/concepts/workloads/controllers/petsets /docs/concepts/workloads/controllers/statefulset 301 -/docs/user-guide/pod-preset/ /docs/tasks/inject-data-application/podpreset 301 -/docs/user-guide/pod-security-policy/ /docs/concepts/policy/pod-security-policy 301 -/docs/user-guide/pod-states /docs/concepts/workloads/pods/pod-lifecycle 301 -/docs/user-guide/pod-templates /docs/concepts/workloads/pods/pod-overview 301 -/docs/user-guide/pods /docs/concepts/workloads/pods/pod 301 -/docs/user-guide/pods/init-container /docs/concepts/workloads/pods/init-containers 301 -/docs/user-guide/pods/multi-container /docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume 301 -/docs/user-guide/pods/single-container /docs/tasks/run-application/run-stateless-application-deployment 301 -/docs/user-guide/prereqs /docs/tasks/tools/install-kubectl 301 -/docs/user-guide/production-pods /docs/tasks/ 301 -/docs/user-guide/projected-volume /docs/tasks/configure-pod-container/configure-projected-volume-storage 301 -/docs/user-guide/quick-start /docs/tasks/access-application-cluster/service-access-application-cluster 301 -/docs/user-guide/replicasets /docs/concepts/workloads/controllers/replicaset 301 -/docs/user-guide/replication-controller /docs/concepts/workloads/controllers/replicationcontroller 301 -/docs/user-guide/rolling-updates /docs/tasks/run-application/rolling-update-replication-controller 301 -/docs/user-guide/secrets /docs/concepts/configuration/secret 301 -/docs/user-guide/secrets/walkthrough /docs/tasks/inject-data-application/distribute-credentials-secure 301 -/docs/user-guide/service-accounts /docs/tasks/configure-pod-container/configure-service-account 301 -/docs/user-guide/services-firewalls /docs/tasks/access-application-cluster/configure-cloud-provider-firewall 301 -/docs/user-guide/services /docs/concepts/services-networking/service 301 -/docs/user-guide/services/operations /docs/tasks/access-application-cluster/connecting-frontend-backend 301 -/docs/user-guide/sharing-clusters /docs/tasks/administer-cluster/share-configuration 301 -/docs/user-guide/simple-nginx /docs/tasks/run-application/run-stateless-application-deployment 301 -/docs/user-guide/thirdpartyresources /docs/tasks/access-kubernetes-api/extend-api-third-party-resource 301 -/docs/user-guide/ui /docs/tasks/access-application-cluster/web-ui-dashboard 301 -/docs/user-guide/update-demo /docs/tasks/run-application/rolling-update-replication-controller 301 -/docs/user-guide/volumes /docs/concepts/storage/volumes 301 -/docs/user-guide/working-with-resources /docs/tutorials/object-management-kubectl/object-management 301 - -/docs/whatisk8s /docs/concepts/overview/what-is-kubernetes 301 - - -############## -# address 404s -# -/concepts/containers/container-lifecycle-hooks /docs/concepts/containers/container-lifecycle-hooks 301 - -/docs/api-reference/apps/v1alpha1/definitions 
https://v1-4.docs.kubernetes.io/docs/api-reference/apps/v1alpha1/definitions 301 -/docs/api-reference/apps/v1beta1/operations https://v1-4.docs.kubernetes.io/docs/api-reference/apps/v1beta1/operations 301 -/docs/api-reference/authorization.k8s.io/v1beta1/definitions https://v1-4.docs.kubernetes.io/docs/api-reference/authorization.k8s.io/v1beta1/definitions 301 -/docs/api-reference/authorization.k8s.io/v1beta1/operations https://v1-4.docs.kubernetes.io/docs/api-reference/authorization.k8s.io/v1beta1/operations 301 -/docs/api-reference/autoscaling/v1/operations https://v1-4.docs.kubernetes.io/docs/api-reference/autoscaling/v1/operations 301 -/docs/api-reference/batch/v1/operations https://v1-4.docs.kubernetes.io/docs/api-reference/batch/v1/operations 301 -/docs/api-reference/batch/v2alpha1/definitions https://v1-4.docs.kubernetes.io/docs/api-reference/batch/v2alpha1/definitions 301 -/docs/api-reference/certificates.k8s.io/v1alpha1/definitions https://v1-4.docs.kubernetes.io/docs/api-reference/certificates.k8s.io/v1alpha1/definitions 301 -/docs/api-reference/certificates/v1alpha1/operations https://v1-4.docs.kubernetes.io/docs/api-reference/certificates/v1alpha1/operations 301 -/docs/api-reference/extensions/v1beta1/operations https://v1-4.docs.kubernetes.io/docs/api-reference/extensions/v1beta1/operations 301 -/docs/api-reference/policy/v1alpha1/definitions https://v1-4.docs.kubernetes.io/docs/api-reference/policy/v1alpha1/definitions 301 -/docs/api-reference/policy/v1beta1/definitions https://v1-4.docs.kubernetes.io/docs/api-reference/policy/v1beta1/definitions 301 -/docs/api-reference/README https://v1-4.docs.kubernetes.io/docs/api-reference/README 301 -/docs/api-reference/storage.k8s.io/v1beta1/operations https://v1-4.docs.kubernetes.io/docs/api-reference/storage.k8s.io/v1beta1/operations 301 - -/docs/api-reference/v1/definitions /docs/api-reference/v1.7 301 - -/docs/concepts/cluster /docs/concepts/cluster-administration/cluster-administration-overview/ 301 -/docs/concepts/object-metadata/annotations /docs/concepts/overview/working-with-objects/annotations 301 - -/docs/concepts/workloads/controllers/daemonset/docs/concepts/workloads/pods/pod /docs/concepts/workloads/pods/pod 301 -/docs/concepts/workloads/controllers/deployment/docs/concepts/workloads/pods/pod /docs/concepts/workloads/pods/pod 301 - -/docs/contribute/write-new-topic /docs/home/contribute/write-new-topic 301 - -/docs/getting-started-guides/coreos/azure /docs/getting-started-guides/coreos 301 -/docs/getting-started-guides/coreos/bare_metal_calico /docs/getting-started-guides/coreos 301 -/docs/getting-started-guides/juju /docs/getting-started-guides/ubuntu/installation 301 -/docs/getting-started-guides/kargo /docs/getting-started-guides/kubespray 301 -/docs/getting-started-guides/logging-elasticsearch /docs/tasks/debug-application-cluster/logging-elasticsearch-kibana 301 -/docs/getting-started-guides/logging /docs/concepts/cluster-administration/logging 301 -/docs/getting-started-guides/rackspace /docs/setup/pick-right-solution 301 -/docs/getting-started-guides/ubuntu-calico /docs/getting-started-guides/ubuntu 301 -/docs/getting-started-guides/ubuntu/automated /docs/getting-started-guides/ubuntu 301 -/docs/getting-started-guides/vagrant /docs/getting-started-guides/alternatives 301 -/docs/getting-started-guides/windows/While /docs/getting-started-guides/windows 301 - -/docs/federation/api-reference/extensions/v1beta1/definitions /docs/reference/federation/extensions/v1beta1/definitions 301 
-/docs/federation/api-reference/federation/v1beta1/definitions /docs/reference/federation/extensions/v1beta1/definitions 301 -/docs/federation/api-reference/README /docs/reference/federation 301 -/docs/federation/api-reference/v1/definitions /docs/reference/federation/v1/definitions 301 -/docs/reference/federation/v1beta1/definitions /docs/reference/federation/extensions/v1beta1/definitions 301 -/docs/reference/federation/v1beta1/operations /docs/reference/federation/extensions/v1beta1/operations 301 - -/docs/reporting-security-issues /security 301 - -/docs/stable/user-guide/labels /docs/concepts/overview/working-with-objects/labels 301 -/docs/tasks/access-application-cluster/access-cluster.md /docs/tasks/access-application-cluster/access-cluster 301 -/docs/tasks/access-application-cluster/authenticate-across-clusters-kubeconfig /docs/tasks/access-application-cluster/configure-access-multiple-clusters 301 -/docs/tasks/access-kubernetes-api/access-kubernetes-api/http-proxy-access-api /docs/tasks/access-kubernetes-api/http-proxy-access-api 301 -/docs/tasks/administer-cluster/reserve-compute-resources/out-of-resource.md /docs/tasks/administer-cluster/out-of-resource 301 -/docs/tasks/configure-pod-container/configure-pod-disruption-budget /docs/tasks/run-application/configure-pdb 301 -/docs/tasks/configure-pod-container/define-command-argument-container /docs/tasks/inject-data-application/define-command-argument-container 301 -/docs/tasks/debug-application-cluster/sematext-logging-monitoring https://sematext.com/kubernetes/ 301 -/docs/tasks/job/work-queue-1 /docs/concepts/workloads/controllers/jobs-run-to-completion 301 -/docs/tasks/manage-stateful-set/delete-pods /docs/tasks/run-application/delete-stateful-set 301 - -/docs/tutorials/getting-started/cluster-intro /docs/tutorials/kubernetes-basics/cluster-intro 301 -/docs/tutorials/getting-started/expose-intro /docs/tutorials/kubernetes-basics/expose-intro 301 -/docs/tutorials/getting-started/scale-app /docs/tutorials/kubernetes-basics/scale-interactive 301 -/docs/tutorials/getting-started/scale-intro /docs/tutorials/kubernetes-basics/scale-intro 301 -/docs/tutorials/getting-started/update-interactive /docs/tutorials/kubernetes-basics/update-interactive 301 -/docs/tutorials/getting-started/update-intro /docs/tutorials/kubernetes-basics/ 301 - -/docs/user-guide/containers /docs/tasks/inject-data-application/define-command-argument-container 301 -/docs/user-guide/horizontal-pod-autoscaling/walkthrough.md /docs/tasks/run-application/horizontal-pod-autoscale-walkthrough 301 -/docs/user-guide/ingress.md /docs/concepts/services-networking/ingress 301 -/docs/user-guide/replication-controller/operations /docs/concepts/workloads/controllers/replicationcontroller 301 -/docs/user-guide/resizing-a-replication-controller /docs/concepts/workloads/controllers/replicationcontroller 301 -/docs/user-guide/scheduled-jobs /docs/concepts/workloads/controllers/cron-jobs 301 -/docs/user-guide/security-context /docs/tasks/configure-pod-container/security-context 301 - -/kubernetes-bootcamp/2-1.html /docs/tutorials/kubernetes-basics 301 -/kubernetes-bootcamp/2-3-2.html /docs/tutorials/kubernetes-basics 301 -/kubernetes /docs 301 -/kubernetes/swagger-spec https://github.com/kubernetes/kubernetes/tree/master/api/swagger-spec 301 -/serviceaccount/token /docs/tasks/configure-pod-container/configure-service-account 301 - -/v1.1/docs/admin/networking.html /docs/concepts/cluster-administration/networking 301 -/v1.1/docs/getting-started-guides 
/docs/tutorials/kubernetes-basics/ 301 - - -############################ -# pattern matching redirects -# -/docs/user-guide/kubectl/kubectl_* /docs/user-guide/kubectl/v1.7/#:splat 200 - -/v1.1/docs/* /docs/ 301 - -/docs/user-guide/kubectl/1_5/* https://v1-5.docs.kubernetes.io/docs/user-guide/kubectl/v1.5 301 -/docs/user-guide/kubectl/v1.5/node_modules/* https://v1-5.docs.kubernetes.io/docs/user-guide/kubectl/v1.5 301 -/docs/resources-reference/1_5/* https://v1-5.docs.kubernetes.io/docs/resources-reference/v1.5 301 -/docs/resources-reference/v1.5/node_modules/* https://v1-5.docs.kubernetes.io/docs/resources-reference/v1.5 301 -/docs/api-reference/1_5/* https://v1-5.docs.kubernetes.io/docs/api-reference/v1.5 301 -/docs/api-reference/v1.5/node_modules/* https://v1-5.docs.kubernetes.io/docs/api-reference/v1.5 301 - -/docs/user-guide/kubectl/v1.6/node_modules/* https://v1-6.docs.kubernetes.io/docs/user-guide/kubectl/v1.6 301 -/docs/api-reference/v1.6/node_modules/* https://v1-6.docs.kubernetes.io/docs/api-reference/v1.6 301 - -/docs/api-reference/v1.7/node_modules/* /docs/api-reference/v1.7 301 - -/docs/getting-started-guides/docker-multinode/* /docs/setup/independent/create-cluster-kubeadm 301 - -/docs/admin/resourcequota/* /docs/concepts/policy/resource-quotas 301 - - -################################# -# redirects from /js/redirects.js -# -/resource-quota /docs/concepts/policy/resource-quotas 301 -/horizontal-pod-autoscaler /docs/tasks/run-application/horizontal-pod-autoscale 301 -/docs/roadmap https://github.com/kubernetes/kubernetes/milestones/ 301 -/api-ref https://github.com/kubernetes/kubernetes/milestones/ 301 -/kubernetes/third_party/swagger-ui /docs/reference 301 -/docs/user-guide/overview /docs/concepts/overview/what-is-kubernetes 301 -/docs/troubleshooting /docs/tasks/debug-application-cluster/troubleshooting 301 -/docs/concepts/services-networking/networkpolicies /docs/concepts/services-networking/network-policies 301 -/docs/getting-started-guides/meanstack https://medium.com/google-cloud/running-a-mean-stack-on-google-cloud-platform-with-kubernetes-149ca81c2b5d 301 -/docs/samples /docs/tutorials 301 -/v1.1 / 301 -/v1.0 / 301 - - -######################################################## -# Redirect users with chinese language preference to /cn -# -#/ /cn 302 Language=zh - - -########################### -# Fixed 404s from analytics - -/concepts/containers/container-lifecycle-hooks /docs/concepts/containers/container-lifecycle-hooks 301 -/docs/abstractions/controllers/petset /docs/concepts/workloads/controllers/petset 301 - -/docs/admin/add-ons /docs/concepts/cluster-administration/addons 301 -/docs/admin/limitrange/Limits /docs/tasks/administer-cluster/limit-storage-consumption/#limitrange-to-limit-requests-for-storage 301 - -/docs/api-reference/1_5/* /docs/api-reference/v1.5 301 - -/docs/concepts/cluster-administration/device-plugins /docs/concepts/cluster-administration/network-plugins 301 -/docs/concepts/configuration/container-command-args /docs/tasks/inject-data-application/define-command-argument-container 301 -/docs/concepts/ecosystem/thirdpartyresource /docs/tasks/access-kubernetes-api/extend-api-third-party-resource 301 -/docs/concepts/overview /docs/concepts/overview/what-is-kubernetes 301 -/docs/concepts/policy/container-capabilities /docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container 301 -/docs/concepts/policy/security-context /docs/tasks/configure-pod-container/security-context 301 
-/docs/concepts/storage/volumes/emptyDirapiVersion /docs/concepts/storage/volumes/#emptydir 301 -/docs/concepts/tools/kubectl/object-management-using-commands /docs/tutorials/object-management-kubectl/imperative-object-management-command 301 -/docs/concepts/workload/pods/pod-overview /docs/concepts/workloads/pods/pod-overview 301 -/docs/concepts/workloads/controllers/cron-jobs/deployment /docs/concepts/workloads/controllers/cron-jobs 301 -/docs/concepts/workloads/controllers/statefulsets /docs/concepts/workloads/controllers/statefulset 301 -/docs/concepts/workloads/pods/init-containers/Kubernetes /docs/concepts/workloads/pods/init-containers 301 - -/docs/consumer-guideline/pod-security-coverage /docs/concepts/policy/pod-security-policy 301 - -/docs/contribute/create-pull-request /docs/home/contribute/create-pull-request 301 -/docs/contribute/page-templates /docs/home/contribute/page-templates 301 -/docs/contribute/review-issues /docs/home/contribute/review-issues 301 -/docs/contribute/stage-documentation-changes /docs/home/contribute/stage-documentation-changes 301 -/docs/contribute/style-guide /docs/home/contribute/style-guide 301 - -/docs/deprecated /docs/reference/deprecation-policy 301 -/docs/deprecation-policy /docs/reference/deprecation-policy 301 - - -/docs/federation/api-reference /docs/reference/federation/v1/operations 301 -/docs/federation/api-reference/extensions/v1beta1/operations /docs/reference/federation/extensions/v1beta1/operations 301 -/docs/federation/api-reference/federation/v1beta1/operations /docs/reference/federation/extensions/v1beta1/operations 301 -/docs/federation/api-reference/v1/operations /docs/reference/federation/v1/operations 301 - -/docs/getting-started-guide/* /docs/setup 301 - -/docs/home/deprecation-policy /docs/reference/deprecation-policy 301 - -/docs/resources-reference/1_5/* /docs/resources-reference/v1.5 301 -/docs/resources-reference/1_6/* /docs/resources-reference/v1.6 301 -/docs/resources-reference/1_7/* /docs/resources-reference/v1.7 301 - -/docs/stable/user-guide/labels /docs/concepts/overview/working-with-objects/labels 301 - - -/docs/tasks/administer-cluster/apply-resource-quota-limit /docs/tasks/administer-cluster/quota-api-object 301 -/docs/tasks/administer-cluster/configure-namespace-isolation /docs/concepts/services-networking/network-policies 301 -/docs/tasks/administer-cluster/configure-pod-disruption-budget /docs/tasks/run-application/configure-pdb 301 - -/docs/tasks/administer-cluster/cpu-management-policies /docs/concepts/configuration/manage-compute-resources-container 301 -/docs/tasks/administer-cluster/default-cpu-request-limit /docs/tasks/configure-pod-container/assign-cpu-resource/#specify-a-cpu-request-and-a-cpu-limit 301 -/docs/tasks/administer-cluster/default-memory-request-limit /docs/tasks/configure-pod-container/assign-memory-resource/#specify-a-memory-request-and-a-memory-limit 301 - -/docs/tasks/configure-pod-container/cilium-network-policy /docs/tasks/administer-cluster/cilium-network-policy 301 -/docs/tasks/configure-pod-container/define-command-argument-container /docs/tasks/inject-data-application/define-command-argument-container 301 -/docs/tasks/configure-pod-container/projected-volume /docs/tasks/configure-pod-container/configure-projected-volume-storage 301 - -/docs/tasks/stateful-sets/deleting-pods /docs/tasks/run-application/force-delete-stateful-set-pod 301 - -/docs/templatedemos/* /docs/home/contribute/page-templates 301 - -/docs/tutorials/getting-started/* /docs/tutorials/kubernetes-basics 301 - 
-/docs/user-guide/federation/* /docs/concepts/cluster-administration/federation 301 -/docs/user-guide/garbage-collector /docs/concepts/workloads/controllers/garbage-collection 301 -/docs/user-guide/horizontal-pod-autoscaler/* /docs/tasks/run-application/horizontal-pod-autoscale 301 - - -/docs/user-guide/liveness /docs/tasks/configure-pod-container/configure-liveness-readiness-probes 301 -/docs/user-guide/logging /docs/concepts/cluster-administration/logging 301 -/docs/user-guide/replication-controller/operations /docs/concepts/workloads/controllers/replicationcontroller 301 -/docs/user-guide/service-accounts/working-with-resources /docs/tutorials/object-management-kubectl/object-management 301 -/docs/user-guide/StatefulSet /docs/concepts/workloads/controllers/statefulset 301 -/docs/user-guide/ui-access /docs/tasks/access-application-cluster/web-ui-dashboard 301 - -/kubernetes-bootcamp/* /docs/tutorials/kubernetes-basics 301 - -/latest/docs /docs/home 301 - -/kubernetes/swagger-spec https://github.com/kubernetes/kubernetes/tree/master/api/swagger-spec 301 -/swagger-spec/* https://github.com/kubernetes/kubernetes/tree/master/api/swagger-spec 301 -/third_party/swagger-ui/* /docs/reference 301 +/api-ref/ https://github.com/kubernetes/kubernetes/milestones/ 301 +/concepts/containers/container-lifecycle-hooks /docs/concepts/containers/container-lifecycle-hooks 301 +/docs/ /docs/home/ 301 +/docs/abstractions/controllers/petset /docs/concepts/workloads/controllers/statefulset 301 +/docs/admin/ /docs/concepts/cluster-administration/cluster-administration-overview 301 +/docs/admin/add-ons/ /docs/concepts/cluster-administration/addons/ 301 +/docs/admin/addons /docs/concepts/cluster-administration/addons 301 +/docs/admin/apparmor /docs/tutorials/clusters/apparmor 301 +/docs/admin/audit /docs/tasks/debug-application-cluster/audit 301 +/docs/admin/authorization/rbac.md /docs/admin/authorization/rbac/ 301 +/docs/admin/cluster-components /docs/concepts/overview/components 301 +/docs/admin/cluster-management /docs/tasks/administer-cluster/cluster-management 301 +/docs/admin/cluster-troubleshooting /docs/tasks/debug-application-cluster/debug-cluster 301 +/docs/admin/daemons /docs/concepts/workloads/controllers/daemonset 301 +/docs/admin/disruptions /docs/concepts/workloads/pods/disruptions 301 +/docs/admin/dns /docs/concepts/services-networking/dns-pod-service 301 +/docs/admin/etcd /docs/tasks/administer-cluster/configure-upgrade-etcd 301 +/docs/admin/etcd_upgrade /docs/tasks/administer-cluster/configure-upgrade-etcd 301 +/docs/admin/federation/kubefed /docs/tasks/federation/set-up-cluster-federation-kubefed 301 +/docs/admin/federation/kubefed.md /docs/tasks/federation/set-up-cluster-federation-kubefed/ 301 +/docs/admin/garbage-collection /docs/concepts/cluster-administration/kubelet-garbage-collection 301 +/docs/admin/ha-master-gce /docs/tasks/administer-cluster/highly-available-master 301 +/docs/admin/ha-master-gce.md /docs/tasks/administer-cluster/highly-available-master/ 301 +/docs/admin/kubeadm-upgrade-1-7 /docs/tasks/administer-cluster/kubeadm-upgrade-1-7 301 +/docs/admin/limitrange /docs/tasks/administer-cluster/cpu-memory-limit/ 301 +/docs/admin/limitrange/Limits/ /docs/tasks/administer-cluster/limit-storage-consumption/#limitrange-to-limit-requests-for-storage/ 301 +/docs/admin/master-node-communication /docs/concepts/architecture/master-node-communication 301 +/docs/admin/multi-cluster /docs/concepts/cluster-administration/federation 301 +/docs/admin/multiple-schedulers 
/docs/tasks/administer-cluster/configure-multiple-schedulers 301 +/docs/admin/namespaces /docs/tasks/administer-cluster/namespaces 301 +/docs/admin/namespaces/walkthrough /docs/tasks/administer-cluster/namespaces-walkthrough 301 +/docs/admin/network-plugins /docs/concepts/cluster-administration/network-plugins 301 +/docs/admin/networking /docs/concepts/cluster-administration/networking 301 +/docs/admin/node /docs/concepts/architecture/nodes 301 +/docs/admin/node-allocatable /docs/tasks/administer-cluster/reserve-compute-resources 301 +/docs/admin/node-allocatable.md /docs/tasks/administer-cluster/reserve-compute-resources/ 301 +/docs/admin/node-conformance.md /docs/admin/node-conformance/ 301 +/docs/admin/node-problem /docs/tasks/debug-application-cluster/monitor-node-health 301 +/docs/admin/out-of-resource /docs/tasks/administer-cluster/out-of-resource 301 +/docs/admin/rescheduler /docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods 301 +/docs/admin/resourcequota/* /docs/concepts/policy/resource-quotas/ 301 +/docs/admin/resourcequota/limitstorageconsumption /docs/tasks/administer-cluster/limit-storage-consumption 301 +/docs/admin/resourcequota/walkthrough /docs/tasks/administer-cluster/quota-api-object 301 +/docs/admin/static-pods /docs/tasks/administer-cluster/static-pod 301 +/docs/admin/sysctls /docs/concepts/cluster-administration/sysctl-cluster 301 +/docs/admin/upgrade-1-6 /docs/tasks/administer-cluster/upgrade-1-6 301 +/docs/api /docs/concepts/overview/kubernetes-api 301 +/docs/api-reference/1_5/* /docs/api-reference/v1.5/ 301 +/docs/api-reference/apps/v1alpha1/definitions https://v1-4.docs.kubernetes.io/docs/api-reference/apps/v1alpha1/definitions 301 +/docs/api-reference/apps/v1beta1/operations https://v1-4.docs.kubernetes.io/docs/api-reference/apps/v1beta1/operations 301 +/docs/api-reference/authorization.k8s.io/v1beta1/definitions https://v1-4.docs.kubernetes.io/docs/api-reference/authorization.k8s.io/v1beta1/definitions 301 +/docs/api-reference/authorization.k8s.io/v1beta1/operations https://v1-4.docs.kubernetes.io/docs/api-reference/authorization.k8s.io/v1beta1/operations 301 +/docs/api-reference/autoscaling/v1/operations https://v1-4.docs.kubernetes.io/docs/api-reference/autoscaling/v1/operations 301 +/docs/api-reference/batch/v1/operations https://v1-4.docs.kubernetes.io/docs/api-reference/batch/v1/operations 301 +/docs/api-reference/batch/v2alpha1/definitions https://v1-4.docs.kubernetes.io/docs/api-reference/batch/v2alpha1/definitions 301 +/docs/api-reference/certificates.k8s.io/v1alpha1/definitions https://v1-4.docs.kubernetes.io/docs/api-reference/certificates.k8s.io/v1alpha1/definitions 301 +/docs/api-reference/certificates/v1alpha1/operations https://v1-4.docs.kubernetes.io/docs/api-reference/certificates/v1alpha1/operations 301 +/docs/api-reference/extensions/v1beta1/operations https://v1-4.docs.kubernetes.io/docs/api-reference/extensions/v1beta1/operations 301 +/docs/api-reference/policy/v1alpha1/definitions https://v1-4.docs.kubernetes.io/docs/api-reference/policy/v1alpha1/definitions 301 +/docs/api-reference/policy/v1beta1/definitions https://v1-4.docs.kubernetes.io/docs/api-reference/policy/v1beta1/definitions 301 +/docs/api-reference/README https://v1-4.docs.kubernetes.io/docs/api-reference/README 301 +/docs/api-reference/storage.k8s.io/v1beta1/operations https://v1-4.docs.kubernetes.io/docs/api-reference/storage.k8s.io/v1beta1/operations 301 +/docs/api-reference/v1.5/node_modules/* https://v1-5.docs.kubernetes.io/docs/api-reference/v1.5 301 
+/docs/api-reference/v1.6/node_modules/* https://v1-6.docs.kubernetes.io/docs/api-reference/v1.6 301 +/docs/api-reference/v1.7/node_modules/* /docs/api-reference/v1.7/ 301 +/docs/api-reference/v1/definitions /docs/api-reference/v1.8 301 +/docs/api-reference/v1/operations /docs/api-reference/v1.8 301 +/docs/concepts/abstractions/controllers/garbage-collection /docs/concepts/workloads/controllers/garbage-collection 301 +/docs/concepts/abstractions/controllers/petsets /docs/concepts/workloads/controllers/statefulset 301 +/docs/concepts/abstractions/controllers/statefulsets /docs/concepts/workloads/controllers/statefulset 301 +/docs/concepts/abstractions/init-containers /docs/concepts/workloads/pods/init-containers 301 +/docs/concepts/abstractions/overview /docs/concepts/overview/working-with-objects/kubernetes-objects 301 +/docs/concepts/abstractions/pod /docs/concepts/workloads/pods/pod-overview 301 +/docs/concepts/cluster /docs/concepts/cluster-administration/cluster-administration-overview/ 301 +/docs/concepts/cluster-administration/access-cluster /docs/tasks/access-application-cluster/access-cluster 301 +/docs/concepts/cluster-administration/audit /docs/tasks/debug-application-cluster/audit 301 +/docs/concepts/cluster-administration/authenticate-across-clusters-kubeconfig /docs/tasks/access-application-cluster/authenticate-across-clusters-kubeconfig 301 +/docs/concepts/cluster-administration/cluster-management /docs/tasks/administer-cluster/cluster-management 301 +/docs/concepts/cluster-administration/configure-etcd /docs/tasks/administer-cluster/configure-upgrade-etcd 301 +/docs/concepts/cluster-administration/device-plugins/ /docs/concepts/cluster-administration/network-plugins/ 301 +/docs/concepts/cluster-administration/etcd-upgrade /docs/tasks/administer-cluster/configure-upgrade-etcd 301 +/docs/concepts/cluster-administration/federation-service-discovery /docs/tasks/federation/federation-service-discovery 301 +/docs/concepts/cluster-administration/guaranteed-scheduling-critical-addon-pods /docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods 301 +/docs/concepts/cluster-administration/master-node-communication /docs/concepts/architecture/master-node-communication 301 +/docs/concepts/cluster-administration/multiple-clusters /docs/concepts/cluster-administration/federation 301 +/docs/concepts/cluster-administration/out-of-resource /docs/tasks/administer-cluster/out-of-resource 301 +/docs/concepts/cluster-administration/resource-usage-monitoring /docs/tasks/debug-application-cluster/resource-usage-monitoring 301 +/docs/concepts/cluster-administration/static-pod /docs/tasks/administer-cluster/static-pod 301 +/docs/concepts/clusters/logging /docs/concepts/cluster-administration/logging 301 +/docs/concepts/configuration/container-command-arg /docs/tasks/inject-data-application/define-command-argument-container/docs/concepts/ecosystem/thirdpartyresource 301 /docs/tasks/access-kubernetes-api/extend-api-third-party-resource +/docs/concepts/ecosystem/thirdpartyresource/ /docs/tasks/access-kubernetes-api/extend-api-third-party-resource/ 301 +/docs/concepts/jobs/cron-jobs /docs/concepts/workloads/controllers/cron-jobs 301 +/docs/concepts/jobs/run-to-completion-finite-workloads /docs/concepts/workloads/controllers/jobs-run-to-completion 301 +/docs/concepts/nodes/node /docs/concepts/architecture/nodes 301 +/docs/concepts/object-metadata/annotations /docs/concepts/overview/working-with-objects/annotations 301 +/docs/concepts/overview/ /docs/concepts/overview/what-is-kubernetes/ 
301 +/docs/concepts/policy/container-capabilities/ /docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container/ 301 +/docs/concepts/policy/security-context/ /docs/tasks/configure-pod-container/security-context/ 301 +/docs/concepts/services-networking/networkpolicies/ /docs/concepts/services-networking/network-policies/ 301 +/docs/concepts/storage/etcd-store-api-object /docs/tasks/administer-cluster/configure-upgrade-etcd 301 +/docs/concepts/storage/volumes/emptyDirapiVersion/ /docs/concepts/storage/volumes/#emptydir/ 301 +/docs/concepts/tools/kubectl/object-management-overview /docs/tutorials/object-management-kubectl/object-management 301 +/docs/concepts/tools/kubectl/object-management-using-commands/ /docs/tutorials/object-management-kubectl/imperative-object-management-command/ 301 +/docs/concepts/tools/kubectl/object-management-using-declarative-config /docs/tutorials/object-management-kubectl/declarative-object-management-configuration 301 +/docs/concepts/tools/kubectl/object-management-using-imperative-commands /docs/tutorials/object-management-kubectl/imperative-object-management-command 301 +/docs/concepts/tools/kubectl/object-management-using-imperative-config /docs/tutorials/object-management-kubectl/imperative-object-management-configuration 301 +/docs/concepts/workload/pods/pod-overview/ /docs/concepts/workloads/pods/pod-overview 301 +/docs/concepts/workloads/controllers/cron-jobs/deployment/ /docs/concepts/workloads/controllers/cron-jobs/ 301 +/docs/concepts/workloads/controllers/daemonset/docs/concepts/workloads/pods/pod /docs/concepts/workloads/pods/pod 301 +/docs/concepts/workloads/controllers/deployment/docs/concepts/workloads/pods/pod /docs/concepts/workloads/pods/pod 301 +/docs/concepts/workloads/controllers/petsets /docs/concepts/workloads/controllers/statefulset 301 +/docs/concepts/workloads/controllers/statefulsets/ /docs/concepts/workloads/controllers/statefulset/ 301 +/docs/concepts/workloads/pods/init-containers/Kubernetes /docs/concepts/workloads/pods/init-containers/ 301 +/docs/consumer-guideline/pod-security-coverage/ /docs/concepts/policy/pod-security-policy/ 301 +/docs/contribute/create-pull-request/ /docs/home/contribute/create-pull-request 301 +/docs/contribute/page-templates/ /docs/home/contribute/page-templates 301 +/docs/contribute/review-issues/ /docs/home/contribute/review-issues 301 +/docs/contribute/stage-documentation-changes/ /docs/home/contribute/stage-documentation-changes/ 301 +/docs/contribute/style-guide/ /docs/home/contribute/style-guide 301 +/docs/contribute/write-new-topic /docs/home/contribute/write-new-topic 301 +/docs/deprecate/ /ddocs/reference/deprecation-policy/ 301 +/docs/deprecation-policy/ /docs/reference/deprecation-policy/ 301 +/docs/federation/api-reference/ /docs/reference/federation/v1/operations/ 301 +/docs/federation/api-reference/extensions/v1beta1/definitions /docs/reference/federation/extensions/v1beta1/definitions 301 +/docs/federation/api-reference/extensions/v1beta1/operations /docs/reference/federation/extensions/v1beta1/operations/ 301 +/docs/federation/api-reference/federation/v1beta1/definitions /docs/reference/federation/extensions/v1beta1/definitions 301 +/docs/federation/api-reference/federation/v1beta1/operations /docs/reference/federation/extensions/v1beta1/operations/ 301 +/docs/federation/api-reference/README /docs/reference/federation 301 +/docs/federation/api-reference/v1/definitions /docs/reference/federation/v1/definitions 301 +/docs/federation/api-reference/v1/operations/ 
/docs/reference/federation/v1/operations/ 301 +/docs/getting-started-guide/* /docs/setup/ 301 +/docs/getting-started-guides /docs/setup/pick-right-solution 301 +/docs/getting-started-guides/coreos/azure /docs/getting-started-guides/coreos 301 +/docs/getting-started-guides/coreos/bare_metal_calico /docs/getting-started-guides/coreos 301 +/docs/getting-started-guides/docker-multinode/* /docs/setup/independent/create-cluster-kubeadm/ 301 +/docs/getting-started-guides/juju /docs/getting-started-guides/ubuntu/installation 301 +/docs/getting-started-guides/kargo /docs/getting-started-guides/kubespray 301 +/docs/getting-started-guides/kubeadm /docs/setup/independent/create-cluster-kubeadm 301 +/docs/getting-started-guides/logging /docs/concepts/cluster-administration/logging 301 +/docs/getting-started-guides/logging-elasticsearch /docs/tasks/debug-application-cluster/logging-elasticsearch-kibana 301 +/docs/getting-started-guides/meanstack/ https://medium.com/google-cloud/running-a-mean-stack-on-google-cloud-platform-with-kubernetes-149ca81c2b5d/ 301 +/docs/getting-started-guides/network-policy/calico /docs/tasks/administer-cluster/calico-network-policy 301 +/docs/getting-started-guides/network-policy/romana /docs/tasks/administer-cluster/romana-network-policy 301 +/docs/getting-started-guides/network-policy/walkthrough /docs/tasks/administer-cluster/declare-network-policy 301 +/docs/getting-started-guides/network-policy/weave /docs/tasks/administer-cluster/weave-network-policy 301 +/docs/getting-started-guides/rackspace /docs/setup/pick-right-solution 301 +/docs/getting-started-guides/running-cloud-controller /docs/tasks/administer-cluster/running-cloud-controller 301 +/docs/getting-started-guides/ubuntu-calico /docs/getting-started-guides/ubuntu 301 +/docs/getting-started-guides/ubuntu/automated /docs/getting-started-guides/ubuntu 301 +/docs/getting-started-guides/ubuntu/calico /docs/getting-started-guides/ubuntu/ 301 +/docs/getting-started-guides/vagrant /docs/getting-started-guides/alternatives 301 +/docs/getting-started-guides/windows/While /docs/getting-started-guides/windows 301 +/docs/hellonode /docs/tutorials/stateless-application/hello-minikube 301 +/docs/home/coreos/ /docs/getting-started-guides/coreos/ 301 +/docs/home/deprecation-policy/ /docs/reference/deprecation-policy/ 301 +/docs/reference/federation/v1beta1/definitions /docs/reference/federation/extensions/v1beta1/definitions 301 +/docs/reference/federation/v1beta1/operations /docs/reference/federation/extensions/v1beta1/operations 301 +/docs/reporting-security-issues /security 301 +/docs/resources-reference/1_5/* /docs/resources-reference/v1.5/ 301 +/docs/resources-reference/1_5/* https://v1-5.docs.kubernetes.io/docs/resources-reference/v1.5/ 301 +/docs/resources-reference/1_6/* /docs/resources-reference/v1.6/ 301 +/docs/resources-reference/1_7/* /docs/resources-reference/v1.7/ 301 +/docs/resources-reference/v1.5/node_modules/* https://v1-5.docs.kubernetes.io/docs/resources-reference/v1.5/ 301 +/docs/roadmap/ https://github.com/kubernetes/kubernetes/milestones/ 301 +/docs/samples /docs/tutorials/ 301 +/docs/stable/user-guide/labels /docs/concepts/overview/working-with-objects/labels 301 +/docs/tasks/access-application-cluster/access-cluster.md /docs/tasks/access-application-cluster/access-cluster 301 +/docs/tasks/access-application-cluster/authenticate-across-clusters-kubeconfig /docs/tasks/access-application-cluster/configure-access-multiple-clusters 301 
+/docs/tasks/access-kubernetes-api/access-kubernetes-api/http-proxy-access-api /docs/tasks/access-kubernetes-api/http-proxy-access-api 301 +/docs/tasks/administer-cluster/apply-resource-quota-limit /docs/tasks/administer-cluster/quota-api-object 301 +/docs/tasks/administer-cluster/assign-pods-nodes /docs/tasks/configure-pod-container/assign-pods-nodes 301 +/docs/tasks/administer-cluster/configure-namespace-isolation/ /docs/concepts/services-networking/network-policies/ 301 +/docs/tasks/administer-cluster/configure-pod-disruption-budget/ /docs/tasks/run-application/configure-pdb/ 301 +/docs/tasks/administer-cluster/cpu-management-policies/ /docs/concepts/configuration/manage-compute-resources-container/ 301 +/docs/tasks/administer-cluster/cpu-memory-limit /docs/tasks/administer-cluster/memory-default-namespace 301 +/docs/tasks/administer-cluster/default-cpu-request-limit/ /docs/tasks/configure-pod-container/assign-cpu-resource/#specify-a-cpu-request-and-a-cpu-limit/ 301 +/docs/tasks/administer-cluster/default-memory-request-limit/ /docs/tasks/configure-pod-container/assign-memory-resource/#specify-a-memory-request-and-a-memory-limit/ 301 +/docs/tasks/administer-cluster/overview /docs/concepts/cluster-administration/cluster-administration-overview 301 +/docs/tasks/administer-cluster/reserve-compute-resources/out-of-resource.md /docs/tasks/administer-cluster/out-of-resource 301 +/docs/tasks/administer-cluster/share-configuration /docs/tasks/access-application-cluster/configure-access-multiple-clusters 301 +/docs/tasks/configure-pod-container/apply-resource-quota-limit /docs/tasks/administer-cluster/apply-resource-quota-limit 301 +/docs/tasks/configure-pod-container/assign-cpu-ram-container /docs/tasks/configure-pod-container/assign-memory-resource 301 +/docs/tasks/configure-pod-container/calico-network-policy /docs/tasks/administer-cluster/calico-network-policy 301 +/docs/tasks/configure-pod-container/cilium-network-policy/ /docs/tasks/administer-cluster/cilium-network-policy/ 301 +/docs/tasks/configure-pod-container/communicate-containers-same-pod /docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume 301 +/docs/tasks/configure-pod-container/configure-pod-disruption-budget /docs/tasks/run-application/configure-pdb 301 +/docs/tasks/configure-pod-container/declare-network-policy /docs/tasks/administer-cluster/declare-network-policy 301 +/docs/tasks/configure-pod-container/define-command-argument-container /docs/tasks/inject-data-application/define-command-argument-container 301 +/docs/tasks/configure-pod-container/define-environment-variable-container /docs/tasks/inject-data-application/define-environment-variable-container 301 +/docs/tasks/configure-pod-container/distribute-credentials-secure /docs/tasks/inject-data-application/distribute-credentials-secure 301 +/docs/tasks/configure-pod-container/downward-api-volume-expose-pod-information /docs/tasks/inject-data-application/downward-api-volume-expose-pod-information 301 +/docs/tasks/configure-pod-container/environment-variable-expose-pod-information /docs/tasks/inject-data-application/environment-variable-expose-pod-information 301 +/docs/tasks/configure-pod-container/limit-range /docs/tasks/administer-cluster/cpu-memory-limit 301 +/docs/tasks/configure-pod-container/projected-volume/ /docs/tasks/configure-pod-container/configure-projected-volume-storage/ 301 +/docs/tasks/configure-pod-container/romana-network-policy /docs/tasks/administer-cluster/romana-network-policy 301 
+/docs/tasks/configure-pod-container/weave-network-policy /docs/tasks/administer-cluster/weave-network-policy 301 +/docs/tasks/debug-application-cluster/sematext-logging-monitoring https://sematext.com/kubernetes/ 301 +/docs/tasks/federation/set-up-cluster-federation-kubefed.md /docs/tasks/federation/set-up-cluster-federation-kubefed/ 301 +/docs/tasks/job/work-queue-1 /docs/concepts/workloads/controllers/jobs-run-to-completion 301 +/docs/tasks/kubectl/get-shell-running-container /docs/tasks/debug-application-cluster/get-shell-running-container 301 +/docs/tasks/kubectl/install /docs/tasks/tools/install-kubectl 301 +/docs/tasks/kubectl/list-all-running-container-images /docs/tasks/access-application-cluster/list-all-running-container-images 301 +/docs/tasks/manage-stateful-set/debugging-a-statefulset /docs/tasks/debug-application-cluster/debug-stateful-set 301 +/docs/tasks/manage-stateful-set/delete-pods /docs/tasks/run-application/delete-stateful-set 301 +/docs/tasks/manage-stateful-set/deleting-a-statefulset /docs/tasks/run-application/delete-stateful-set 301 +/docs/tasks/manage-stateful-set/scale-stateful-set /docs/tasks/run-application/scale-stateful-set 301 +/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set /docs/tasks/run-application/upgrade-pet-set-to-stateful-set 301 +/docs/tasks/run-application/podpreset /docs/tasks/inject-data-application/podpreset 301 +/docs/tasks/stateful-sets/deleting-pods/ /docs/tasks/run-application/force-delete-stateful-set-pod/ 301 +/docs/tasks/troubleshoot/debug-init-containers /docs/tasks/debug-application-cluster/debug-init-containers 301 +/docs/tasks/web-ui-dashboard /docs/tasks/access-application-cluster/web-ui-dashboard 301 +/docs/templatedemos/* /docs/home/contribute/page-templates/ 301 +/docs/tools/kompose /docs/tools/kompose/user-guide 301 +/docs/troubleshooting/ /docs/tasks/debug-application-cluster/troubleshooting/ 301 +/docs/tutorials/clusters/multiple-schedulers /docs/tasks/administer-cluster/configure-multiple-schedulers 301 +/docs/tutorials/connecting-apps/connecting-frontend-backend /docs/tasks/access-application-cluster/connecting-frontend-backend 301 +/docs/tutorials/federation/set-up-cluster-federation-kubefed /docs/tasks/federation/set-up-cluster-federation-kubefed 301 +/docs/tutorials/federation/set-up-cluster-federation-kubefed.md /docs/tasks/federation/set-up-cluster-federation-kubefed/ 301 +/docs/tutorials/federation/set-up-coredns-provider-federation /docs/tasks/federation/set-up-coredns-provider-federation 301 +/docs/tutorials/federation/set-up-placement-policies-federation /docs/tasks/federation/set-up-placement-policies-federation 301 +/docs/tutorials/getting-started/cluster-intro /docs/tutorials/kubernetes-basics/cluster-intro 301 +/docs/tutorials/getting-started/create-cluster /docs/tutorials/kubernetes-basics/cluster-intro 301 +/docs/tutorials/getting-started/expose-intro /docs/tutorials/kubernetes-basics/expose-intro 301 +/docs/tutorials/getting-started/scale-app /docs/tutorials/kubernetes-basics/scale-interactive 301 +/docs/tutorials/getting-started/scale-intro /docs/tutorials/kubernetes-basics/scale-intro 301 +/docs/tutorials/getting-started/update-interactive /docs/tutorials/kubernetes-basics/update-interactive 301 +/docs/tutorials/getting-started/update-intro /docs/tutorials/kubernetes-basics/ 301 +/docs/tutorials/stateful-application/run-replicated-stateful-application /docs/tasks/run-application/run-replicated-stateful-application 301 +/docs/tutorials/stateful-application/run-stateful-application 
/docs/tasks/run-application/run-single-instance-stateful-application 301 +/docs/tutorials/stateless-application/expose-external-ip-address-service /docs/tasks/access-application-cluster/service-access-application-cluster 301 +/docs/tutorials/stateless-application/run-stateless-ap-replication-controller /docs/tasks/run-application/run-stateless-application-deployment 301 +/docs/tutorials/stateless-application/run-stateless-application-deployment /docs/tasks/run-application/run-stateless-application-deployment 301 +/docs/user-guide /docs/home/ 301 +/docs/user-guide/accessing-the-cluster /docs/tasks/access-application-cluster/access-cluster 301 +/docs/user-guide/add-entries-to-pod-etc-hosts-with-host-aliases /docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases 301 +/docs/user-guide/annotations /docs/concepts/overview/working-with-objects/annotations 301 +/docs/user-guide/application-troubleshooting /docs/tasks/debug-application-cluster/debug-application 301 +/docs/user-guide/compute-resources /docs/concepts/configuration/manage-compute-resources-container 301 +/docs/user-guide/config-best-practices /docs/concepts/configuration/overview 301 +/docs/user-guide/configmap /docs/tasks/configure-pod-container/configmap 301 +/docs/user-guide/configuring-containers /docs/tasks/ 301 +/docs/user-guide/connecting-applications /docs/concepts/services-networking/connect-applications-service 301 +/docs/user-guide/connecting-to-applications-port-forward /docs/tasks/access-application-cluster/port-forward-access-application-cluster 301 +/docs/user-guide/connecting-to-applications-proxy /docs/tasks/access-kubernetes-api/http-proxy-access-api 301 +/docs/user-guide/container-environment /docs/concepts/containers/container-lifecycle-hooks 301 +/docs/user-guide/containers /docs/tasks/inject-data-application/define-command-argument-container 301 +/docs/user-guide/cron-jobs /docs/concepts/workloads/controllers/cron-jobs 301 +/docs/user-guide/debugging-pods-and-replication-controllers/ /docs/tasks/debug-application-cluster/debug-pod-replication-controller/ 301 +/docs/user-guide/debugging-services /docs/tasks/debug-application-cluster/debug-service 301 +/docs/user-guide/deploying-applications /docs/tasks/run-application/run-stateless-application-deployment 301 +/docs/user-guide/deployments /docs/concepts/workloads/controllers/deployment 301 +/docs/user-guide/downward-api /docs/tasks/inject-data-application/downward-api-volume-expose-pod-information 301 +/docs/user-guide/downward-api/volume /docs/tasks/inject-data-application/downward-api-volume-expose-pod-information 301 +/docs/user-guide/environment-guide /docs/tasks/inject-data-application/environment-variable-expose-pod-information 301 +/docs/user-guide/federation /docs/concepts/cluster-administration/federation 301 +/docs/user-guide/federation/cluster /docs/tasks/administer-federation/cluster 301 +/docs/user-guide/federation/configmap /docs/tasks/administer-federation/configmap 301 +/docs/user-guide/federation/daemonsets /docs/tasks/administer-federation/daemonset 301 +/docs/user-guide/federation/deployment /docs/tasks/administer-federation/deployment 301 +/docs/user-guide/federation/events /docs/tasks/administer-federation/events 301 +/docs/user-guide/federation/federated-ingress /docs/tasks/administer-federation/ingress 301 +/docs/user-guide/federation/federated-services /docs/tasks/federation/federation-service-discovery 301 +/docs/user-guide/federation/namespaces /docs/tasks/administer-federation/namespaces 301 
+/docs/user-guide/federation/replicasets /docs/tasks/administer-federation/replicaset 301 +/docs/user-guide/federation/secrets /docs/tasks/administer-federation/secret 301 +/docs/user-guide/garbage-collection /docs/concepts/workloads/controllers/garbage-collection 301 +/docs/user-guide/garbage-collector/* /docs/concepts/workloads/controllers/garbage-collection/ 301 +/docs/user-guide/getting-into-containers /docs/tasks/debug-application-cluster/get-shell-running-container 301 +/docs/user-guide/gpus /docs/tasks/manage-gpus/scheduling-gpus 301 +/docs/user-guide/horizontal-pod-autoscaler/* /docs/tasks/run-application/horizontal-pod-autoscale/ 301 +/docs/user-guide/horizontal-pod-autoscaling /docs/tasks/run-application/horizontal-pod-autoscale 301 +/docs/user-guide/horizontal-pod-autoscaling/walkthrough /docs/tasks/run-application/horizontal-pod-autoscale-walkthrough 301 +/docs/user-guide/horizontal-pod-autoscaling/walkthrough.md /docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/ 301 +/docs/user-guide/identifiers /docs/concepts/overview/working-with-objects/names 301 +/docs/user-guide/images /docs/concepts/containers/images 301 +/docs/user-guide/ingress /docs/concepts/services-networking/ingress 301 +/docs/user-guide/ingress.md /docs/concepts/services-networking/ingress 301 +/docs/user-guide/introspection-and-debugging /docs/tasks/debug-application-cluster/debug-application-introspection 301 +/docs/user-guide/jobs /docs/concepts/workloads/controllers/jobs-run-to-completion 301 +/docs/user-guide/jobs/expansions /docs/tasks/job/parallel-processing-expansion 301 +/docs/user-guide/jobs/work-queue-1 /docs/tasks/job/coarse-parallel-processing-work-queue/ 301 +/docs/user-guide/jobs/work-queue-2 /docs/tasks/job/fine-parallel-processing-work-queue/ 301 +/docs/user-guide/kubeconfig-file /docs/tasks/access-application-cluster/authenticate-across-clusters-kubeconfig 301 +/docs/user-guide/kubectl/1_5/* https://v1-5.docs.kubernetes.io/docs/user-guide/kubectl/v1.5/ 301 +/docs/user-guide/kubectl/kubectl_*/ /docs/user-guide/kubectl/v1.7/#:splat 200 +/docs/user-guide/kubectl/v1.5/node_modules/* https://v1-5.docs.kubernetes.io/docs/user-guide/kubectl/v1.5/ 301 +/docs/user-guide/kubectl/v1.6/node_modules/* https://v1-6.docs.kubernetes.io/docs/user-guide/kubectl/v1.6/ 301 +/docs/user-guide/labels /docs/concepts/overview/working-with-objects/labels 301 +/docs/user-guide/liveness /docs/tasks/configure-pod-container/configure-liveness-readiness-probes 301 +/docs/user-guide/load-balancer /docs/tasks/access-application-cluster/create-external-load-balancer 301 +/docs/user-guide/logging/ /docs/concepts/cluster-administration/logging/ 301 +/docs/user-guide/logging/elasticsearch /docs/tasks/debug-application-cluster/logging-elasticsearch-kibana 301 +/docs/user-guide/logging/overview /docs/concepts/cluster-administration/logging 301 +/docs/user-guide/logging/stackdriver /docs/tasks/debug-application-cluster/logging-stackdriver 301 +/docs/user-guide/managing-deployments /docs/concepts/cluster-administration/manage-deployment 301 +/docs/user-guide/monitoring /docs/tasks/debug-application-cluster/resource-usage-monitoring 301 +/docs/user-guide/namespaces /docs/concepts/overview/working-with-objects/namespaces 301 +/docs/user-guide/networkpolicies /docs/concepts/services-networking/network-policies 301 +/docs/user-guide/node-selection /docs/concepts/configuration/assign-pod-node 301 +/docs/user-guide/overview/ /docs/concepts/overview/what-is-kubernetes/ 301 +/docs/user-guide/persistent-volumes 
/docs/concepts/storage/persistent-volumes 301 +/docs/user-guide/persistent-volumes/index /docs/concepts/storage/persistent-volumes/ 301 +/docs/user-guide/persistent-volumes/index.md /docs/concepts/storage/persistent-volumes/ 301 +/docs/user-guide/persistent-volumes/walkthrough /docs/tasks/configure-pod-container/configure-persistent-volume-storage 301 +/docs/user-guide/petset /docs/concepts/workloads/controllers/statefulset 301 +/docs/user-guide/petset/bootstrapping /docs/concepts/workloads/controllers/statefulset 301 +/docs/user-guide/pod-preset/ /docs/tasks/inject-data-application/podpreset 301 +/docs/user-guide/pod-security-policy/ /docs/concepts/policy/pod-security-policy 301 +/docs/user-guide/pod-states /docs/concepts/workloads/pods/pod-lifecycle 301 +/docs/user-guide/pod-templates /docs/concepts/workloads/pods/pod-overview 301 +/docs/user-guide/pods /docs/concepts/workloads/pods/pod 301 +/docs/user-guide/pods/init-container /docs/concepts/workloads/pods/init-containers 301 +/docs/user-guide/pods/multi-container /docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume 301 +/docs/user-guide/pods/single-container /docs/tasks/run-application/run-stateless-application-deployment 301 +/docs/user-guide/prereqs /docs/tasks/tools/install-kubectl 301 +/docs/user-guide/production-pods /docs/tasks/ 301 +/docs/user-guide/projected-volume /docs/tasks/configure-pod-container/configure-projected-volume-storage 301 +/docs/user-guide/quick-start /docs/tasks/access-application-cluster/service-access-application-cluster 301 +/docs/user-guide/replicasets /docs/concepts/workloads/controllers/replicaset 301 +/docs/user-guide/replication-controller /docs/concepts/workloads/controllers/replicationcontroller 301 +/docs/user-guide/replication-controller/operations /docs/concepts/workloads/controllers/replicationcontroller 301 +/docs/user-guide/resizing-a-replication-controller /docs/concepts/workloads/controllers/replicationcontroller 301 +/docs/user-guide/rolling-updates /docs/tasks/run-application/rolling-update-replication-controller 301 +/docs/user-guide/scheduled-jobs /docs/concepts/workloads/controllers/cron-jobs 301 +/docs/user-guide/secrets /docs/concepts/configuration/secret 301 +/docs/user-guide/secrets/walkthrough /docs/tasks/inject-data-application/distribute-credentials-secure 301 +/docs/user-guide/security-context /docs/tasks/configure-pod-container/security-context 301 +/docs/user-guide/service-accounts /docs/tasks/configure-pod-container/configure-service-account 301 +/docs/user-guide/service-accounts/working-with-resources/ /docs/tutorials/object-management-kubectl/object-management/ 301 +/docs/user-guide/services /docs/concepts/services-networking/service 301 +/docs/user-guide/services-firewalls /docs/tasks/access-application-cluster/configure-cloud-provider-firewall 301 +/docs/user-guide/services/operations /docs/tasks/access-application-cluster/connecting-frontend-backend 301 +/docs/user-guide/sharing-clusters /docs/tasks/administer-cluster/share-configuration 301 +/docs/user-guide/simple-nginx /docs/tasks/run-application/run-stateless-application-deployment 301 +/docs/user-guide/StatefulSet/ /docs/concepts/workloads/controllers/statefulset/ 301 +/docs/user-guide/thirdpartyresources /docs/tasks/access-kubernetes-api/extend-api-third-party-resource 301 +/docs/user-guide/ui /docs/tasks/access-application-cluster/web-ui-dashboard 301 +/docs/user-guide/ui-access/ /docs/tasks/access-application-cluster/web-ui-dashboard/ 301 +/docs/user-guide/update-dem/ 
/docs/tasks/run-application/rolling-update-replication-controller/ 301 +/docs/user-guide/update-demo /docs/tasks/run-application/rolling-update-replication-controller 301 +/docs/user-guide/volumes /docs/concepts/storage/volumes 301 +/docs/user-guide/working-with-resources /docs/tutorials/object-management-kubectl/object-management 301 +/docs/whatisk8s /docs/concepts/overview/what-is-kubernetes 301 +/gettingstarted/ /docs/home/ 301 +/horizontal-pod-autoscaler/ /docs/tasks/run-application/horizontal-pod-autoscale/ 301 +/kubernetes /docs/ 301 +/kubernetes-bootcamp/* /docs/tutorials/kubernetes-basics/ 301 +/kubernetes/swagger-spec https://github.com/kubernetes/kubernetes/tree/master/api/swagger-spec 301 +/kubernetes/third_party/swagger-ui/ /docs/reference/ 301 +/latest/docs/ /docs/home/ 301 +/resource-quota/ /docs/concepts/policy/resource-quotas/ 301 +/serviceaccount/token /docs/tasks/configure-pod-container/configure-service-account 301 +/swagger-spec/* https://github.com/kubernetes/kubernetes/tree/master/api/swagger-spec/ 301 +/third_party/swagger-ui/* /docs/reference/ 301 +/v1.1/docs/admin/networking.html /docs/concepts/cluster-administration/networking 301 +/v1.1/docs/getting-started-guides /docs/tutorials/kubernetes-basics/ 301 diff --git a/case-studies/index.html b/case-studies/index.html index be88512679a62..72d97413c04a4 100644 --- a/case-studies/index.html +++ b/case-studies/index.html @@ -38,36 +38,6 @@
A collection of users running Kubernetes in production.
Read about Ancestry.com -
- GolfNow -

"If you haven’t come from the Kubernetes world and I tell you this is what I’ve been doing, you wouldn’t believe me."

- - Read about GolfNow -
-
- Pearson -

"We chose Kubernetes because of its flexibility, ease of management and the way it improves our engineers' productivity."

- - Read about Pearson -
-
- Wikimedia -

"With Kubernetes, we're simplifying our environment and making it easier for developers to build the tools that make wikis run better."

- - Read about Wikimedia -
-
- eBay -

Inside eBay's shift to Kubernetes and containers atop OpenStack

- - Read about eBay -
-
- box -

"Kubernetes has the opportunity to be the new cloud platform. Because it’s a never-before-seen level of automation and intelligence surrounding infrastructure."

- - Read about Box -
@@ -75,18 +45,13 @@
A collection of users running Kubernetes in production.
- - - - -
- -

SAP's OpenStack, running on Kubernetes in production

- + +

"Kubernetes has the opportunity to be the new cloud platform. The amount of innovation that's going to come from being able to standardize on Kubernetes as a platform is incredibly exciting - more exciting than anything I've seen in the last 10 years of working on the cloud."

+
-
+
@@ -94,39 +59,42 @@

SAP's OpenStack, running on Kubernetes in production

Kubernetes Users

- New York Times
- OpenAI
- Goldman Sachs
- SAP
- Samsung SDS
- WePay
- SoundCloud
- UK Home Office
- Concur
- Amadeus
- Ancestry.com
- CCP Games
- LivePerson
- monzo
- Box
- Pokemon GO
- Yahoo! Japan
- Philips
- buffer
- Comcast
- Wikimedia
- Pearson
- zulily
- Ebay
- JD.COM
- Tell your story
+ Amadeus
+ Ancestry.com
+ box
+ Buffer
+ CCP Games
+ Comcast
+ Concur
+ Ebay
+ Goldman Sachs
+ GolfNow
+ JD.COM
+ LivePerson
+ monzo
+ New York Times
+ OpenAI
+ peardeck
+ Pearson
+ Philips
+ Pokemon GO
+ Samsung SDS
+ SAP
+ SoundCloud
+ UK Home Office
+ WePay
+ Wink
+ Wikimedia
+ Yahoo! Japan
+ zulily
+ Tell your story
- - + +
diff --git a/cn/_includes/templates/concept.md b/cn/_includes/templates/concept.md new file mode 100644 index 0000000000000..cfd2c8eae9f2e --- /dev/null +++ b/cn/_includes/templates/concept.md @@ -0,0 +1,32 @@ +{% if overview %} + +{{ overview }} + +{% else %} + +{% include templates/_errorthrower.md missing_block='overview' purpose='provides an overview of this concept.' %} + +{% endif %} + +* TOC +{:toc} + +{% if body %} + +{{ body }} + +{% else %} + +{% include templates/_errorthrower.md missing_block='body' purpose='supplies the body of the page content.' %} + +{% endif %} + + +{% if whatsnext %} + +## 开始下一步 + +{{ whatsnext }} + +{% endif %} + diff --git a/cn/docs/admin/accessing-the-api.md b/cn/docs/admin/accessing-the-api.md new file mode 100644 index 0000000000000..930cf620fd15a --- /dev/null +++ b/cn/docs/admin/accessing-the-api.md @@ -0,0 +1,135 @@ +--- +approvers: +- bgrant0607 +- erictune +- lavalamp +title: Kubernetes API访问控制 +--- + +用户通过 `kubectl`、客户端库或者通过发送REST请求[访问API](/docs/user-guide/accessing-the-cluster)。 用户(自然人)和[Kubernetes服务账户](/docs/tasks/configure-pod-container/configure-service-account/) 都可以被授权进行API访问。 +请求到达API服务器后会经过几个阶段,具体说明如图: + +![Diagram of request handling steps for Kubernetes API request](/images/docs/admin/access-control-overview.svg) + +## 传输层安全 + +在典型的Kubernetes集群中,API通过443端口提供服务。 +API服务器会提供一份证书。 该证书一般是自签名的, 所以用户机器上的 `$USER/.kube/config` 目录通常 +包含该API服务器证书的根证书,用来代替系统默认根证书。 当用户使用 `kube-up.sh` 创建集群时,该证书通常会被自动写入用户的`$USER/.kube/config`。 如果集群中存在多个用户,则创建者需要与其他用户共享证书。 + +## 认证 + +一旦 TLS 连接建立,HTTP请求就进入到了认证的步骤。即图中的步骤 **1** 。 +集群创建脚本或集群管理员会为API服务器配置一个或多个认证模块。 +更具体的认证相关的描述详见 [这里](/docs/admin/authentication/)。 + +认证步骤的输入是整个HTTP请求,但这里通常只是检查请求头和/或客户端证书。 + +认证模块支持客户端证书,密码和Plain Tokens, +Bootstrap Tokens,以及JWT Tokens (用于服务账户)。 + +(管理员)可以同时设置多种认证模块,在设置了多个认证模块的情况下,每个模块会依次尝试认证, +直到其中一个认证成功。 + +在 GCE 平台中,客户端证书,密码和Plain Tokens,Bootstrap Tokens,以及JWT Tokens同时被启用。 + +如果请求认证失败,则请求被拒绝,返回401状态码。 +如果认证成功,则被认证为具体的 `username`,该用户名可供随后的步骤中使用。一些认证模块还提供了用户的组成员关系,另一些则没有。 + +尽管Kubernetes使用 "用户名" 来进行访问控制和请求记录,但它实际上并没有 `user` 对象,也不存储用户名称或其他相关信息。 + +## 授权 + +当请求被认证为来自某个特定的用户后,该请求需要被授权。 即图中的步骤 **2** 。 + +请求须包含请求者的用户名,请求动作,以及该动作影响的对象。 如果存在相应策略,声明该用户具有进行相应操作的权限,则该请求会被授权。 + +例如,如果Bob有如下策略,那么他只能够读取`projectCaribou`命名空间下的pod资源: + +```json +{ + "apiVersion": "abac.authorization.kubernetes.io/v1beta1", + "kind": "Policy", + "spec": { + "user": "bob", + "namespace": "projectCaribou", + "resource": "pods", + "readonly": true + } +} +``` +如果Bob发起以下请求,那么请求能够通过授权,因为Bob被允许访问 `projectCaribou` 命名空间下的对象: + +```json +{ + "apiVersion": "authorization.k8s.io/v1beta1", + "kind": "SubjectAccessReview", + "spec": { + "resourceAttributes": { + "namespace": "projectCaribou", + "verb": "get", + "group": "unicorn.example.org", + "resource": "pods" + } + } +} +``` +如果Bob对 `projectCaribou` 命名空间下的对象发起一个写(`create` 或者 `update`)请求,那么它的授权会被拒绝。 如果Bob请求读取(`get`) 其他命名空间,例如 `projectFish`下的对象,其授权也会被拒绝。 + +Kubernetes的授权要求使用通用的REST属性与现有的组织或云服务提供商的访问控制系统进行交互。 采用REST格式是必要的,因为除Kubernetes外,这些访问控制系统还可能与其他的API进行交互。 + +Kubernetes 支持多种授权模块,例如ABAC模式,RBAC模式和 Webhook模式。 管理员创建集群时,会配置API服务器应用的授权模块。 如果多种授权模式同时被启用,Kubernetes将检查所有模块,如果其中一种通过授权,则请求授权通过。 如果所有的模块全部拒绝,则请求被拒绝(HTTP状态码403)。 + +要了解更多的Kubernetes授权相关信息,包括使用授权模块创建策略的具体说明等,可参考[授权概述](/docs/admin/authorization)。 + + +## 准入控制 + +准入控制模块是能够修改或拒绝请求的软件模块。 +作为授权模块的补充,准入控制模块会访问被创建或更新的对象的内容。 +它们作用于对象的创建,删除,更新和连接 (proxy)阶段,但不包括对象的读取。 + +可以同时配置多个准入控制器,它们会按顺序依次被调用。 + +即图中的步骤 **3** 。 + +与认证和授权模块不同的是,如果任一个准入控制器拒绝请求,那么整个请求会立即被拒绝。 + +除了拒绝请求外,准入控制器还可以为对象设置复杂的默认值。 + +可用的准入控制模块描述 
[如下](/docs/admin/admission-controllers/)。 + +一旦请求通过所有准入控制器,将使用对应API对象的验证流程对其进行验证,然后写入对象存储 (如步骤 **4**)。 + + +## API的端口和IP + +上述讨论适用于发送请求到API服务器的安全端口(典型情况)。 +实际上API服务器可以通过两个端口提供服务: + +默认情况下,API服务器在2个端口上提供HTTP服务: + + 1. `Localhost Port`: + + - 用于测试和启动,以及管理节点的其他组件 + (scheduler, controller-manager)与API的交互 + - 没有TLS + - 默认值为8080,可以通过 `--insecure-port` 标记来修改。 + - 默认的IP地址为localhost, 可以通过 `--insecure-bind-address`标记来修改。 + - 请求会 **绕过** 认证和鉴权模块。 + - 请求会被准入控制模块处理。 + - 其访问需要主机访问的权限。 + + 2. `Secure Port`: + + - 尽可能使用该端口访问 + - 应用 TLS。 可以通过 `--tls-cert-file` 设置证书, 通过 `--tls-private-key-file` 设置私钥。 + - 默认值为6443,可以通过 `--secure-port` 标记来修改。 + - 默认IP是首个非本地的网络接口地址,可以通过 `--bind-address` 标记来修改。 + - 请求会经过认证和鉴权模块处理。 + - 请求会被准入控制模块处理。 + - 要求认证和授权模块正常运行。 + +通过 `kube-up.sh`创建集群时, 对 Google Compute Engine (GCE) +和一些其他的云供应商来说, API通过443端口提供服务。 对 +GCE而言,项目上配置了防火墙规则,允许外部的HTTPS请求访问API,其他(厂商的)集群设置方法各不相同。 diff --git a/cn/docs/admin/authorization/abac.md b/cn/docs/admin/authorization/abac.md new file mode 100644 index 0000000000000..1db4103ed8c8c --- /dev/null +++ b/cn/docs/admin/authorization/abac.md @@ -0,0 +1,141 @@ +--- +assignees: +- erictune +- lavalamp +- deads2k +- liggitt +title: ABAC 模式 +--- + +{% capture overview %} + +基于属性的访问控制(Attribute-based access control - ABAC)定义了访问控制范例,其中通过使用将属性组合在一起的策略来向用户授予访问权限。 + +{% endcapture %} + +{% capture body %} + +## 策略文件格式 + +基于 `ABAC` 模式,可以这样指定策略文件 `--authorization-policy-file=SOME_FILENAME`。 + +此文件是 JSON 格式[每行都是一个JSON对象](http://jsonlines.org/),不应存在封闭的列表或映射,每行只有一个映射。 + +每一行都是一个 "策略对象",策略对象是具有以下映射的属性: + + - 版本控制属性: + - `apiVersion`,字符串类型: 有效值为"abac.authorization.kubernetes.io/v1beta1",允许版本控制和转换策略格式。 + - `kind`,字符串类型: 有效值为 "Policy",允许版本控制和转换策略格式。 + - `spec` 配置为具有以下映射的属性: + - 匹配属性: + - `user`,字符串类型; 来自 `--token-auth-file` 的用户字符串,如果你指定`user`,它必须与验证用户的用户名匹配。 + - `group`,字符串类型; 如果指定`group`,它必须与经过身份验证的用户的一个组匹配,`system:authenticated`匹配所有经过身份验证的请求。`system:unauthenticated`匹配所有未经过身份验证的请求。 + - 资源匹配属性: + - `apiGroup`,字符串类型; 一个 API 组。 + - 例: `extensions` + - 通配符: `*`匹配所有 API 组。 + - `namespace`,字符串类型; 一个命名空间。 + - 例如: `kube-system` + - 通配符: `*` 匹配所有资源请求。 + - `resource`,字符串类型; 资源类型。 + - 例:`pods` + - 通配符: `*`匹配所有资源请求。 + - 非资源匹配属性: + - `nonResourcePath`,字符串类型; 非资源请求路径。 + - 例如:`/version`或`/apis` + - 通配符: + - `*` 匹配所有非资源请求。 + - `/foo/*` 匹配`/foo/`的所有子路径。 + - `readonly`,键入 boolean,如果为 true,则表示该策略仅适用于 get,list 和 watch 操作。 + +**注意:** 未设置的属性与类型设置为零值的属性相同(例如空字符串,0、false),然而未知的应该可读性优先。 + +在将来,策略可能以 JSON 格式表示,并通过 REST 界面进行管理。 + +## 授权算法 + +请求具有与策略对象的属性对应的属性。 + +当接收到请求时,确定属性。 未知属性设置为其类型的零值(例如: 空字符串,0,false)。 + +设置为`“*"`的属性将匹配相应属性的任何值。 + +检查属性的元组,以匹配策略文件中的每个策略。 如果至少有一行匹配请求属性,则请求被授权(但可能会在稍后验证失败)。 + +要允许任何经过身份验证的用户执行某些操作,请将策略组属性设置为 `"system:authenticated“`。 + +要允许任何未经身份验证的用户执行某些操作,请将策略组属性设置为`"system:authentication“`。 + +要允许用户执行任何操作,请使用 apiGroup,命名空间, +资源和 nonResourcePath 属性设置为 `“*"`的策略. + +要允许用户执行任何操作,请使用设置为`“*”` 的 apiGroup,namespace,resource 和 nonResourcePath 属性编写策略。 + +## Kubectl + +Kubectl 使用 api-server 的 `/api` 和 `/apis` 端点进行协商客户端/服务器版本。 通过创建/更新来验证发送到API的对象操作,kubectl 查询某些 swagger 资源。 对于API版本"v1", 那就是`/swaggerapi/api/v1` & `/swaggerapi/ experimental/v1`。 + +当使用 ABAC 授权时,这些特殊资源必须明确通过策略中的 `nonResourcePath` 属性暴露出来(参见下面的[例子](#examples)): + +* `/api`,`/api/*`,`/apis`和`/apis/*` 用于 API 版本协商. +* `/version` 通过 `kubectl version` 检索服务器版本. +* `/swaggerapi/*` 用于创建/更新操作. + +要检查涉及到特定kubectl操作的HTTP调用,您可以调整详细程度: + + kubectl --v=8 version + +## 例子 + +1. 
Alice 可以对所有资源做任何事情: + + ```json + {"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user": "alice", "namespace": "*", "resource": "*", "apiGroup": "*"}} + ``` +2. Kubelet 可以读取任何pod: + + ```json + {"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user": "kubelet", "namespace": "*", "resource": "pods", "readonly": true}} + ``` +3. Kubelet 可以读写事件: + + ```json + {"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user": "kubelet", "namespace": "*", "resource": "events"}} + ``` +4. Bob 可以在命名空间“projectCaribou"中读取 pod: + + ```json + {"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user": "bob", "namespace": "projectCaribou", "resource": "pods", "readonly": true}} + ``` +5. 任何人都可以对所有非资源路径进行只读请求: + + ```json + {"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"group": "system:authenticated", "readonly": true, "nonResourcePath": "*"}} + {"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"group": "system:unauthenticated", "readonly": true, "nonResourcePath": "*"}} + ``` + +[完整文件示例](http://releases.k8s.io/{{page.githubbranch}}/pkg/auth/authorizer/abac/example_policy_file.jsonl) + +## 服务帐户的快速说明 + +服务帐户自动生成用户。 用户名是根据命名约定生成的: + +```shell +system:serviceaccount:: +``` +创建新的命名空间也会导致创建一个新的服务帐户: + +```shell +system:serviceaccount::default +``` + +例如,如果要将 API 的 kube-system 完整权限中的默认服务帐户授予,则可以将此行添加到策略文件中: + +```json +{"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"system:serviceaccount:kube-system:default","namespace":"*","resource":"*","apiGroup":"*"}} +``` + +需要重新启动 apitorver 以获取新的策略行. + +{% endcapture %} +{% include templates/concept.md %} diff --git a/cn/docs/admin/authorization/index.md b/cn/docs/admin/authorization/index.md new file mode 100644 index 0000000000000..fdf788fd1cd42 --- /dev/null +++ b/cn/docs/admin/authorization/index.md @@ -0,0 +1,155 @@ +--- +assignees: +- erictune +- lavalamp +- deads2k +- liggitt +title: 概述 +--- + +{% capture overview %} + +学习有关 Kubernetes 授权的更多信息,包括有关使用支持的授权模块创建策略的详细信息。 + +{% endcapture %} + +{% capture body %} + +在 Kubernetes 里,您必须经过身份验证(登录),才能授权您的请求(授予访问权限).。有关认证的信息,请参阅[访问控制概述](/docs/admin/access-the-api/)。 + +Kubernetes 提供通用的 REST API 请求。这意味着 Kubernetes 授权可以与现有的组织或云提供商的访问控制系统一起使用,该系统可以处理除 Kubernetes API 之外的其他 API。 + +## 确定请求是允许还是被拒绝 +Kubernetes 使用 API​​ 服务器授权 API 请求。它根据所有策略评估所有请求属性,并允许或拒绝请求。某些策略必须允许 API 请求的所有部分继续进行,这意味着默认情况下是拒绝权限。 + +(虽然 Kubernetes 使用 API ​​服务器,访问控制和依赖特定类型对象的特定领域策略由 Admission 控制器处理。) + +当配置多个授权模块时,按顺序检查每个模块,如果有任何模块授权请求,则可以继续执行该请求。如果所有模块拒绝请求,则拒绝该请求(HTTP状态代码403)。 + +## 查看您的请求属性 + +Kubernetes 仅查看以下API请求属性: + +* **user** - 验证期间提供的 `user` 字符串 +* **group** - 认证用户所属的组名列表 +* **“extra"** - 由认证层提供的任意字符串键到字符串值的映射 +* **API** - 指示请求是否用于API资源 +* **Request path** - 诸如`/api`或`/healthz`的其他非资源端点的路径(请参阅[kubectl](#kubectl)). +* **API request verb** - API 动词 `get`,`list`,`create`,`update`,`patch`,`watch`,`proxy`,`redirect`,`delete`和`deletecollection`用于资源请求。要确定资源 API 端点的请求动词,请参阅**确定下面的请求动词**. +* **HTTP request verb** - HTTP动词`get`,`post`,`put`和`delete`用于非资源请求 +* **Resource** - 正在访问的资源的ID或名称(仅适用于资源请求) + --* 对于使用`get`, `update`, `patch`, 和 `delete`动词的资源请求,您必须提供资源名称。 +* **Subresource** - 正在访问的子资源(仅用于资源请求) +* **Namespace** - 正在被访问的对象的命名空间(仅针对命名空间的资源请求) +* **API group** - 正在访问的API组(仅用于资源请求). 一个空字符串指定[核心 API 组](/docs/api/). 
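+
+作为补充示意(并非正式的 API 对象,字段名仅用于说明),下面粗略展示请求 `GET /api/v1/namespaces/default/pods/my-pod` 在通过认证之后,授权模块大致会看到的请求属性;具体取值以 API 服务器的实际行为为准:
+
+```yaml
+# 仅为示意:假设认证步骤得到的用户名为 "jane"
+user: "jane"                      # 认证步骤确定的用户名
+group: ["system:authenticated"]   # 认证用户所属的组
+verb: "get"                       # 针对单个资源的 HTTP GET 对应请求动词 get
+apiGroup: ""                      # 核心 API 组用空字符串表示
+resource: "pods"
+name: "my-pod"                    # 被访问资源的名称
+namespace: "default"
+```
+
+HTTP 动词与请求动词的对应关系见下一小节。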
+ +## 确定请求动词 + +要确定资源 API 端点的请求动词,请查看所使用的HTTP动词以及请求是否对单个资源或资源集合进行操作: + +HTTP动词| 请求动词 +---------- | --------------- +POST | 创建 +GET,HEAD | 获取(个人资源),列表(集合) +PUT | 更新 +PATCH | 补丁 +DELETE| 删除(个人资源),删除(收藏) + +Kubernetes 有时会使用专门的动词检查授权以获得额外的权限。例如: + +* [PodSecurityPolicy](/docs/concepts/policy/pod-security-policy/)在`extensions` API组中的`podsecuritypolicies`资源上检查`use`动词的授权。 +* [RBAC](/docs/admin/authorization/rbac/#privilege-escalation-prevention-and-bootstrapping) 在`rbac.authorization.k8s.io` API组中的`roles`和`clusterroles`资源上检查`bind`动词的授权。 +* [认证](/docs/admin/authentication/) 在核心API组中的`users`,`groups`和`serviceaccounts`上的`impersonate`动词的授权以及`authentication.k8s.io` API组中的`userextras`进行层次检查。 + +## 授权模块 +* **ABAC模式** - 基于属性的访问控制(ABAC)定义了访问控制范例,通过使用将属性组合在一起的策略来授予用户访问权限。策略可以使用任何类型的属性(用户属性,资源属性,对象,环境属性等)。要了解有关使用ABAC模式的更多信息,请参阅[ABAC模式](/docs/admin/authorization/abac/) +* **RBAC模式** - 基于角色的访问控制(RBAC)是一种根据企业内个人用户的角色来调整对计算机或网络资源的访问的方法。在这种情况下,访问是单个用户执行特定任务(例如查看,创建或修改文件)的能力。要了解有关使用RBAC模式的更多信息,请参阅[RBAC模式](/docs/admin/authorization/rbac/) +*当指定 "RBAC"(基于角色的访问控制)使用 "rbac.authorization.k8s.io" API组来驱动授权决定时,允许管理员通过Kubernetes API动态配置权限策略. +.. *截至1.6 RBAC模式是测试版. +.. *要启用RBAC,请使用 `--authorization-mode=RBAC` 启动 apiserver. +* **Webhook模式** - WebHook 是HTTP回调:发生事件时发生的HTTP POST; 通过HTTP POST简单的事件通知. 实施 WebHooks 的 Web 应用程序将在某些事情发生时向URL发送消息. 要了解有关使用Webhook模式的更多信息,请参阅[Webhook模式](/docs/admin/authorization/webhook/) +* **自定义模块** - 您可以创建使用Kubernetes的自定义模块. 要了解更多信息,请参阅下面的**自定义模块**。 + +### 自定义模块 +可以相当容易地开发其他实现,APIserver 调用 Authorizer 接口: + +```go +type Authorizer interface { + Authorize(a Attributes) error +} +``` + +以确定是否允许每个API操作. + +授权插件是实现此接口的模块.授权插件代码位于 `pkg/auth/authorizer/$MODULENAME` 中。 + +授权模块可以完全实现,也可以拨出远程授权服务。 授权模块可以实现自己的缓存,以减少具有相同或相似参数的重复授权调用的成本。 开发人员应该考虑缓存和撤销权限之间的交互。 + +#### 检查API访问 + +Kubernetes 将 `subjectaccessreviews.v1.authorization.k8s.io` 资源公开为允许外部访问API授权者决策的普通资源。 无论您选择使用哪个授权器,您都可以使用`SubjectAccessReview`发出一个`POST`,就像webhook授权器的`apis/authorization.k8s.io/v1/subjectaccessreviews` 端点一样,并回复一个响应。 例如: + + +```bash +kubectl create --v=8 -f - << __EOF__ +{ + "apiVersion": "authorization.k8s.io/v1", + "kind": "SubjectAccessReview", + "spec": { + "resourceAttributes": { + "namespace": "kittensandponies", + "verb": "get", + "group": "unicorn.example.org", + "resource": "pods" + }, + "user": "jane", + "group": [ + "group1", + "group2" + ], + "extra": { + "scopes": [ + "openid", + "profile" + ] + } + } +} +__EOF__ + +--- snip lots of output --- + +I0913 08:12:31.362873 27425 request.go:908] Response Body: {"kind":"SubjectAccessReview","apiVersion":"authorization.k8s.io/v1","metadata":{"creationTimestamp":null},"spec":{"resourceAttributes":{"namespace":"kittensandponies","verb":"GET","group":"unicorn.example.org","resource":"pods"},"user":"jane","group":["group1","group2"],"extra":{"scopes":["openid","profile"]}},"status":{"allowed":true}} +subjectaccessreview "" created +``` + +这对于调试访问问题非常有用,因为您可以使用此资源来确定授权者授予哪些访问权限。 + +## 为您的授权模块使用标志 + +您的策略中必须包含一个标志,以指出您的策略包含哪个授权模块: + +可以使用以下标志: + - `--authorization-mode=ABAC` 基于属性的访问控制(ABAC)模式允许您使用本地文件配置策略。 + - `--authorization-mode=RBAC` 基于角色的访问控制(RBAC)模式允许您使用Kubernetes API创建和存储策略. + - `--authorization-mode=Webhook` WebHook是一种HTTP回调模式,允许您使用远程REST管理授权。 + - `--authorization-mode=AlwaysDeny` 此标志阻止所有请求. 仅使用此标志进行测试。 + - `--authorization-mode=AlwaysAllow` 此标志允许所有请求. 只有在您不需要API请求授权的情况下才能使用此标志。 + +您可以选择多个授权模块. 
如果其中一种模式为 `AlwaysAllow`,则覆盖其他模式,并允许所有API请求。 + +## 版本控制 + +对于版本 1.2,配置了 kube-up.sh 创建的集群,以便任何请求都不需要授权。 + +从版本 1.3 开始,配置由 kube-up.sh 创建的集群,使得 ABAC 授权模块处于启用状态。但是,其输入文件最初设置为允许所有用户执行所有操作,集群管理员需要编辑该文件,或者配置不同的授权器来限制用户可以执行的操作。 + +{% endcapture %} +{% capture whatsnext %} + +* 要学习有关身份验证的更多信息,请参阅**身份验证**[控制访问 Kubernetes API](docs/admin/access-the-api/)。 +* 要了解有关入学管理的更多信息,请参阅[使用 Admission 控制器](docs/admin/admission-controllers/)。 +* +{% endcapture %} + +{% include templates/concept.md %} diff --git a/cn/docs/admin/multiple-zones.md b/cn/docs/admin/multiple-zones.md new file mode 100644 index 0000000000000..4eac17c49f682 --- /dev/null +++ b/cn/docs/admin/multiple-zones.md @@ -0,0 +1,292 @@ +--- +approvers: +- jlowdermilk +- justinsb +- quinton-hoole +title: 多区域运行 +--- + +## 介绍 + +Kubernetes 从v1.2开始支持将集群运行在多个故障域中。 +(GCE 中称其为 "区(Zones)", AWS 中称其为 "可用区(Availability Zones)",这里我们也称其为 "区")。 +它是广泛意义上的集群联邦特性的轻量级版本 (之前被称为 ["Ubernetes"](https://github.com/kubernetes/community/blob/{{page.githubbranch}}/contributors/design-proposals/federation/federation.md))。 +完整的集群联邦能够将多个分别运行在不同区或云供应商(或本地数据中心)的集群集中管理。 +然而,很多用户只是希望通过将单一云供应商上的Kubernetes集群运行在多个区域,来提高集群的可用性, +这就是1.2版本中提供的对多区域的支持。 +(之前被称为 "Ubernetes Lite")。 + +多区域的支持是有明确限制的: Kubernetes集群能够运行在多个区,但必须在同一个地域内 (云供应商也须一致)。 +目前只有GCE和AWS自动支持 (尽管在其他云甚至裸机上,也很容易通过为节点和卷添加合适的标签来实现类似的支持)。 + + +* TOC +{:toc} + +## 功能 + +节点启动时,Kubelet自动为其添加区信息的标签。 + +在单一区域的集群中,Kubernetes 会自动将副本管理器或服务的pod分布到各节点上 (以减轻单实例故障的影响)。 +在多区域的集群中,这种分布的行为扩展到了区域级别 +(以减少区域故障对整体的影响)。 (通过 `SelectorSpreadPriority` 来实现)。 +这种分发是尽力而为(best-effort)的,所以如果集群在各个区之间是异构的 +(比如,各区间的节点数量不同、节点类型不同、pod的资源需求不同等)可能导致pod无法完全均匀地分布。 +如果需要的话,用户可以使用同质的区(节点数量和节点类型相同)来减少区域之间分配不均匀的可能。 + +当卷被创建时, `PersistentVolumeLabel`准入控制器会自动为其添加区域的标签。 +调度器 (通过 `VolumeZonePredicate` 断言) 会确申领该卷的pod被调度到该卷对应的区域, +因为卷是不支持跨区挂载的。 + +## 限制 + +对多区的支持有一些重要的限制: + +* 我们假设不同的区域间在网络上离得很近,所以我们不做任何的区域感知路由。 特别是,通过服务的网络访问可能跨区域 (即使该服务后端pod的其中一些运行在与客户端相同的区域中),这可能导致额外的延迟和损耗。 + +* 卷的区域亲和性只对 `PersistentVolume`有效。 例如,如果你在pod的spec中直接指定一个EBS的卷,则不会生效。 + +* 集群不支持跨云平台或地域 (这些功能需要完整的集群联邦特性支持)。 + +* 尽管节点位于多区域,目前默认情况下 kube-up 创建的管理节点是单实例的。 所以尽管服务是高可用的,并且能够容忍跨区域的性能损耗,管理平面还是单区域的。 需要高可用的管理平面的用户可以按照 [高可用](/docs/admin/high-availability) 指导来操作。 + +* 目前StatefulSet的卷动态创建时的跨区域分配,与pod的亲和性/反亲和性不兼容。 + +* StatefulSet的名称包含破折号 ("-")时,可能影响到卷在区域间的均匀分布。 + +* 为deployment或pod指定多个PVC时,要求其StorageClass处于同一区域内,否则,相应的PV卷需要在一个区域中静态配置。 另一种方式是使用StatefulSet,这可以确保同一副本所挂载的卷位于同一区内。 + + +## 演练 + +接下来我们将介绍如何同时在 GCE 和 AWS 上创建和使用多区域的集群。 为此,你需要创建一个完整的集群 +(指定 `MULTIZONE=true`),然后再次执行 `kube-up`(指定 `KUBE_USE_EXISTING_MASTER=true`)来添加其他区域的节点。 + +### 创建集群 + +按正常方式创建集群,但是传入 MULTIZONE 来通知集群对多区域进行管理。 在 us-central1-a 区域创建节点。 + +GCE: + +```shell +curl -sS https://get.k8s.io | MULTIZONE=true KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-a NUM_NODES=3 bash +``` + +AWS: + +```shell +curl -sS https://get.k8s.io | MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a NUM_NODES=3 bash +``` + +该步骤按正常方式创建了集群,仍然运行在单个区域中。 +但 `MULTIZONE=true` 已经开启了多区域的能力。 + +### 标记节点 + +查看节点,你可以发现节点上打了区域信息的标签。 +节点位于 `us-central1-a` (GCE) 或者 `us-west-2a` (AWS)。 标签 `failure-domain.beta.kubernetes.io/region` 用于区分地域, +标签 `failure-domain.beta.kubernetes.io/zone` 用于区分区域。 + +```shell +> kubectl get nodes --show-labels + + +NAME STATUS AGE VERSION LABELS +kubernetes-master Ready,SchedulingDisabled 6m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-1,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-master +kubernetes-minion-87j9 Ready 6m 
v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-87j9 +kubernetes-minion-9vlv Ready 6m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv +kubernetes-minion-a12q Ready 6m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-a12q +``` + +### 添加其它区中的节点 + +接下来我们复用已有的管理节点,添加运行于其它区域 (us-central1-b或us-west-2b)中的节点。 +再次执行 kube-up, 通过指定 `KUBE_USE_EXISTING_MASTER=true`, +kube-up 不会创建新的管理节点,而是会复用之前创建的。 + +GCE: + +```shell +KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-b NUM_NODES=3 kubernetes/cluster/kube-up.sh +``` + +在 AWS 中我们还需要为新增的子网指定网络CIDR,还有管理节点的内部IP地址。 + +```shell +KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2b NUM_NODES=3 KUBE_SUBNET_CIDR=172.20.1.0/24 MASTER_INTERNAL_IP=172.20.0.9 kubernetes/cluster/kube-up.sh +``` + + +再次查看节点,3个新增的节点已经启动,并被标记为us-central1-b: + +```shell +> kubectl get nodes --show-labels + +NAME STATUS AGE VERSION LABELS +kubernetes-master Ready,SchedulingDisabled 16m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-1,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-master +kubernetes-minion-281d Ready 2m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-281d +kubernetes-minion-87j9 Ready 16m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-87j9 +kubernetes-minion-9vlv Ready 16m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv +kubernetes-minion-a12q Ready 17m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-a12q +kubernetes-minion-pp2f Ready 2m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-pp2f +kubernetes-minion-wf8i Ready 2m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-wf8i +``` + +### 卷的亲和性 + +使用动态创建卷的功能创建一个卷 (只有PV持久卷才支持区域亲和性): + +```json +kubectl create -f - < kubectl get pv --show-labels +NAME CAPACITY ACCESSMODES STATUS CLAIM REASON AGE LABELS +pv-gce-mj4gm 5Gi RWO Bound default/claim1 46s failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a +``` + +现在我们将创建使用这些PVC的pod。 +因为 GCE 的PD存储 / AWS 的EBS 卷 
不支持跨区域挂载, +这意味着相应的pod只能创建在卷所在的区域中。 + +```yaml +kubectl create -f - < kubectl describe pod mypod | grep Node +Node: kubernetes-minion-9vlv/10.240.0.5 +> kubectl get node kubernetes-minion-9vlv --show-labels +NAME STATUS AGE VERSION LABELS +kubernetes-minion-9vlv Ready 22m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv +``` + +### Pod的跨区域分布 + +副本管理器或服务的pod被自动创建在了不同的区域。 首先,在第三个区域内启动节点: + +GCE: + +```shell +KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-f NUM_NODES=3 kubernetes/cluster/kube-up.sh +``` + +AWS: + +```shell +KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2c NUM_NODES=3 KUBE_SUBNET_CIDR=172.20.2.0/24 MASTER_INTERNAL_IP=172.20.0.9 kubernetes/cluster/kube-up.sh +``` + +验证你现在在3个区域内拥有节点: + +```shell +kubectl get nodes --show-labels +``` + +创建 guestbook-go 示例应用, 它包含一个副本数为3的RC,运行一个简单的网络应用: + +```shell +find kubernetes/examples/guestbook-go/ -name '*.json' | xargs -I {} kubectl create -f {} +``` + +Pod应该分布在全部3个区域上: + +```shell +> kubectl describe pod -l app=guestbook | grep Node +Node: kubernetes-minion-9vlv/10.240.0.5 +Node: kubernetes-minion-281d/10.240.0.8 +Node: kubernetes-minion-olsh/10.240.0.11 + + > kubectl get node kubernetes-minion-9vlv kubernetes-minion-281d kubernetes-minion-olsh --show-labels +NAME STATUS AGE VERSION LABELS +kubernetes-minion-9vlv Ready 34m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv +kubernetes-minion-281d Ready 20m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-281d +kubernetes-minion-olsh Ready 3m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-f,kubernetes.io/hostname=kubernetes-minion-olsh +``` + + +负载平衡器覆盖集群中的所有区域; guestbook-go 示例包含一个 +负载均衡服务的例子: + +```shell +> kubectl describe service guestbook | grep LoadBalancer.Ingress +LoadBalancer Ingress: 130.211.126.21 + +> ip=130.211.126.21 + +> curl -s http://${ip}:3000/env | grep HOSTNAME + "HOSTNAME": "guestbook-44sep", + +> (for i in `seq 20`; do curl -s http://${ip}:3000/env | grep HOSTNAME; done) | sort | uniq + "HOSTNAME": "guestbook-44sep", + "HOSTNAME": "guestbook-hum5n", + "HOSTNAME": "guestbook-ppm40", +``` + +负载平衡器正确指向了所有的pod,即使它们位于不同的区域内。 + +### 停止集群 + +使用完成后,进行清理: + +GCE: + +```shell +KUBERNETES_PROVIDER=gce KUBE_USE_EXISTING_MASTER=true KUBE_GCE_ZONE=us-central1-f kubernetes/cluster/kube-down.sh +KUBERNETES_PROVIDER=gce KUBE_USE_EXISTING_MASTER=true KUBE_GCE_ZONE=us-central1-b kubernetes/cluster/kube-down.sh +KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-a kubernetes/cluster/kube-down.sh +``` + +AWS: + +```shell +KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2c kubernetes/cluster/kube-down.sh +KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2b kubernetes/cluster/kube-down.sh +KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a kubernetes/cluster/kube-down.sh +``` diff --git a/cn/docs/concepts/cluster-administration/federation.md 
b/cn/docs/concepts/cluster-administration/federation.md new file mode 100644 index 0000000000000..f9a51dfb6c779 --- /dev/null +++ b/cn/docs/concepts/cluster-administration/federation.md @@ -0,0 +1,116 @@ +--- +title: 联邦 +--- + +{% capture overview %} +本页面阐明了为何以及如何使用联邦创建Kubernetes集群。 +{% endcapture %} + +{% capture body %} +## 为何使用联邦 + +联邦可以使多个集群的管理简单化。它提供了两个主要构件模块: + + * 跨集群同步资源:联邦能够让资源在多个集群中同步。例如,你可以确保在多个集群中存在同样的部署。 + * 跨集群发现:联邦能够在所有集群的后端自动配置DNS服务和负载均衡。例如,通过多个集群的后端,你可以确保全局的VIP或DNS记录可用。 + +联邦技术的其他应用场景: + +* 高可用性:通过跨集群分摊负载,自动配置DNS服务和负载均衡,联邦将集群失败所带来的影响降到最低。 +* 避免供应商锁定:跨集群使迁移应用程序变得更容易,联邦服务避免了供应商锁定。 + + +只有在多个集群的场景下联邦服务才是有帮助的。这里列出了一些你会使用多个集群的原因: + +* 降低延迟:在多个区域含有集群,可使用离用户最近的集群来服务用户,从而最大限度降低延迟。 +* 故障隔离:对于故障隔离,也许有多个小的集群比有一个大的集群要更好一些(例如:一个云供应商的不同可用域里有多个集群)。详细信息请参阅[多集群指南](/docs/admin/multi-cluster)。 +* 可伸缩性:对于单个kubernetes集群是有伸缩性限制的(但对于大多数用户来说并非如此。更多细节参考[Kubernetes扩展和性能目标](https://git.k8s.io/community/sig-scalability/goals.md))。 +* [混合云](#混合云的能力):可以有多个集群,它们分别拥有不同的云供应商或者本地数据中心。 + +### 注意事项 + +虽然联邦有很多吸引人的场景,但这里还是有一些需要关注的事项: + +* 增加网络的带宽和损耗:联邦控制面会监控所有的集群,来确保集群的当前状态与预期一致。那么当这些集群运行在一个或者多个云提供者的不同区域中,则会带来重大的网络损耗。 +* 降低集群的隔离:当联邦控制面中存在一个故障时,会影响所有的集群。把联邦控制面的逻辑降到最小可以缓解这个问题。 无论何时,它都是kubernetes集群里控制面的代表。设计和实现也使其变得更安全,避免多集群运行中断。 +* 完整性:联邦项目相对较新,还不是很成熟。不是所有资源都可用,且很多资源才刚刚开始。[Issue 38893](https://github.com/kubernetes/kubernetes/issues/38893) 列举了一些团队正忙于解决的系统已知问题。 + +### 混合云的能力 + +Kubernetes集群里的联邦包括运行在不同云供应商上的集群(例如,谷歌云、亚马逊),和本地部署的集群(例如,OpenStack)。只需在适当的云供应商和/或位置创建所需的所有集群,并将每个集群的API endpoint和凭据注册到您的联邦API服务中(详情参考[联邦管理指南](/docs/admin/federation/))。 + +在此之后,您的[API资源](#api资源)就可以跨越不同的集群和云供应商。 + +## 建立联邦 + +若要能联合多个集群,首先需要建立一个联邦控制面。参照[安装指南](/docs/tutorials/federation/set-up-cluster-federation-kubefed/) 建立联邦控制面。 + +## API资源 + +控制面建立完成后,就可以开始创建联邦API资源了。 +以下指南详细介绍了一些资源: + +* [Cluster](/docs/tasks/administer-federation/cluster/) +* [ConfigMap](/docs/tasks/administer-federation/configmap/) +* [DaemonSets](/docs/tasks/administer-federation/daemonset/) +* [Deployment](/docs/tasks/administer-federation/deployment/) +* [Events](/docs/tasks/administer-federation/events/) +* [Ingress](/docs/tasks/administer-federation/ingress/) +* [Namespaces](/docs/tasks/administer-federation/namespaces/) +* [ReplicaSets](/docs/tasks/administer-federation/replicaset/) +* [Secrets](/docs/tasks/administer-federation/secret/) +* [Services](/docs/concepts/cluster-administration/federation-service-discovery/) + +[API参考文档](/docs/reference/federation/)列举了联邦API服务支持的所有资源。 + +## 级联删除 + +Kubernetes1.6版本支持联邦资源级联删除。使用级联删除,即当删除联邦控制面的一个资源时,也删除了所有底层集群中的相应资源。 + +当使用REST API时,级联删除功能不是默认开启的。若使用REST API从联邦控制面删除一个资源时,要开启级联删除功能,即需配置选项 `DeleteOptions.orphanDependents=false`。使用`kubectl delete`使级联删除功能默认开启。使用`kubectl delete --cascade=false`禁用级联删除功能。 + +注意:Kubernetes1.5版本开始支持联邦资源子集的级联删除。 + +## 单个集群的范围 + +对于IaaS供应商如谷歌计算引擎或亚马逊网络服务,一个虚拟机存在于一个[域](https://cloud.google.com/compute/docs/zones)或[可用域](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html)中。 +我们建议一个Kubernetes集群里的所有虚机应该在相同的可用域里,因为: + + - 与单一的全局Kubernetes集群对比,该方式有较少的单点故障。 + - 与跨可用域的集群对比,该方式更容易推断单区域集群的可用性属性。 + - 当Kubernetes开发者设计一个系统(例如,对延迟、带宽或相关故障进行假设),他们也会假设所有的机器都在一个单一的数据中心,或者以其他方式紧密相连。 + +每个可用区域里包含多个集群当然是可以的,但是总的来说我们认为集群数越少越好。 +偏爱较少集群数的原因是: + + - 在某些情况下,在一个集群里有更多的节点,可以改进Pods的装箱问题(更少的资源碎片)。 + - 减少操作开销(尽管随着OPS工具和流程的成熟而降低了这块的优势)。 + - 为每个集群的固定资源花费降低开销,例如,使用apiserver的虚拟机(但是在全体集群开销中,中小型集群的开销占比要小的多)。 + +多集群的原因包括: + + - 严格的安全性策略要求隔离一类工作与另一类工作(但是,请参见下面的集群分割)。 + - 测试集群或其他集群软件直至最优的新Kubernetes版本发布。 + +## 选择合适的集群数 + 
+Kubernetes集群数量选择也许是一个相对静止的选择,因为对其重新审核的情况很少。相比之下,一个集群中的节点数和一个服务中的pods数可能会根据负载和增长频繁变化。 + +选择集群的数量,首先,需要决定哪些区域对于将要运行在Kubernetes上的服务,可以有足够的时间到达所有的终端用户(如果使用内容分发网络,则不需要考虑CDN-hosted内容的延迟需求)。法律问题也可能影响这一点。例如,拥有全球客户群的公司可能会对于在美国、欧盟、亚太和南非地区拥有集群起到决定权。使用`R`代表区域的数量。 + +其次,决定有多少集群在同一时间不可用,而一些仍然可用。使用`U`代表不可用的数量。如果不确定,最好选择1。 + +如果允许负载均衡在集群故障发生时将通信引导到任何区域,那么至少需要较大的`R`或`U + 1`集群。若非如此(例如,若要在集群故障发生时确保所有用户的低延迟),则需要`R * (U + 1)`集群(在每一个`R`区域里都有`U + 1`)。在任何情况下,尝试将每个集群放在不同的区域中。 + +最后,如果你的集群需求超过一个Kubernetes集群推荐的最大节点数,那么你可能需要更多的集群。Kubernetes1.3版本支持多达1000个节点的集群规模。 + +{% endcapture %} + +{% capture whatsnext %} +* 进一步学习[联邦提案](https://github.com/kubernetes/community/blob/{{page.githubbranch}}/contributors/design-proposals/federation.md)。 +* 集群联邦参考该[配置指导](/docs/tutorials/federation/set-up-cluster-federation-kubefed/)。 +* 查看[Kubecon2016浅谈联邦](https://www.youtube.com/watch?v=pq9lbkmxpS8) +{% endcapture %} + +{% include templates/concept.md %} + diff --git a/cn/docs/concepts/cluster-administration/sysctl-cluster.md b/cn/docs/concepts/cluster-administration/sysctl-cluster.md new file mode 100644 index 0000000000000..9476750d551fa --- /dev/null +++ b/cn/docs/concepts/cluster-administration/sysctl-cluster.md @@ -0,0 +1,101 @@ +--- +approvers: +- sttts +title: Kubernetes集群中使用Sysctls +--- + +* TOC +{:toc} + +这篇文章描述了如何在Kubernetes集群中使用Sysctls。 + +## 什么是Sysctl? + +在Linux中,Sysctl接口允许管理员在内核运行时修改内核参数。这些可用参数都存在于虚拟进程文件系统中的`/proc/sys/`目录。这些内核参数作用于各种子系统中,例如: + +- 内核 (通用前缀:`kernel.`) +- 网络 (通用前缀:`net.`) +- 虚拟内存 (通用前缀:`vm.`) +- 设备专用 (通用前缀:`dev.`) +- 更多子系统描述见 [Kernel docs](https://www.kernel.org/doc/Documentation/sysctl/README). + +获取所有参数列表,可运行 + +``` +$ sudo sysctl -a +``` + +## 命名空间级vs.节点级Sysctls + +在今天的Linux内核系统中有一些Sysctls是 _命名空间级_ 的。这意味着他们在同节点的不同pod间是可配置成独立的。在kubernetes里,命名空间级是Sysctls的一个必要条件,以使其在一个pod语境里易于理解。 + +以下列出了Sysctls中已知的 _命名空间级_ : + +- `kernel.shm*`(内核中共享内存相关参数), +- `kernel.msg*`(内核中SystemV消息队列相关参数), +- `kernel.sem`(内核中信号量参数), +- `fs.mqueue.*`(内核中POSIX消息队列相关参数), +- `net.*`(内核中网络配置项相关参数)。 + +Sysctls中非命名空间级的被称为 _节点级_ ,其必须由集群管理员手动设置,要么通过节点的底层Linux分布方式(例如,通过 `/etc/sysctls.conf`),亦或在特权容器中使用Daemonset。 + +**注意**: 这是很好的做法,考虑在一个集群里给有特殊sysctl的节点设置为 _污点_ ,并且给他们安排仅需要这些sysctl设置的pods。 建议采用Kubernetes [_污点和容点_ +特征](/docs/user-guide/kubectl/{{page.version}}/#taint) 来实现。 + +## 安全的 vs. 不安全的 Sysctls + +Sysctls被分为 _安全的_ 和 _不安全的_ sysctls。同一节点上的pods间除了适当命名空间命名一个 _安全的_ sysctl,还必须适当的 _隔离_ 。 这意味着给一个pod设置一个 _安全的_ sysctl + +- 不能对相同节点上其他pod产生任何影响 +- 不能对节点的健康造成损害 +- 不能在pod资源限制以外获取更多的CPU和内存资源 + +目前看来,大多数的 _命名空间级_ sysctls 不一定被认为是 _安全的_ 。 + +在Kubernetes 1.4版本中,以下sysctls提供了 _安全的_ 配置: + +- `kernel.shm_rmid_forced`, +- `net.ipv4.ip_local_port_range`, +- `net.ipv4.tcp_syncookies`. + +该列表在未来的Kubernetes版本里还会继续扩充,当kubelet提供更好的隔离机制时。 + +所有 _安全的_ sysctls 都是默认启用的。 + +所有 _不安全的_ sysctls 默认是关闭的,且必须通过每个节点基础上的集群管理手动开启。禁用不安全的sysctls的Pods将会被计划,但不会启动。 + +**警告**: 由于他们的本质是 _不安全的_ ,使用 _不安全的_ sysctls是自担风险的,并且会导致严重的问题,例如容器的错误行为,资源短缺或者是一个节点的完全破损。 + +## 使能不安全的Sysctls + +牢记上面的警告, 在非常特殊的情况下,例如高性能指标或是实时应用程序优化,集群管理员可以允许 _不安全的_ +sysctls。 _不安全的_ sysctls 会打上kubelet标识,在逐节点的基础上被启用,例如: + +```shell +$ kubelet --experimental-allowed-unsafe-sysctls 'kernel.msg*,net.ipv4.route.min_pmtu' ... 
+``` + +只有 _命名空间级_ sysctls 可以使用该方法启用。 + +## 给Pod配置Sysctls + +在Kubernetes 1.4版本中,sysctl特性是一个alpha API。因此,sysctls被设置为在pods上使用注释。它们适用于同一个pod上的所有容器。 + +这里列举了一个例子, _安全的_ 和 _不安全的_ sysctls使用不同的注释: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: sysctl-example + annotations: + security.alpha.kubernetes.io/sysctls: kernel.shm_rmid_forced=1 + security.alpha.kubernetes.io/unsafe-sysctls: net.ipv4.route.min_pmtu=1000,kernel.msgmax=1 2 3 +spec: + ... +``` + +**注意**: 包含以上规定的 _不安全的_ sysctls的一个Pod, 将无法启动任何不能使这两个 _不安全的_ sysctls明确的节点。 推荐 +_节点级_ sysctls使用 [_容点和污点_ +特征](/docs/user-guide/kubectl/v1.6/#taint) or [taints on nodes](/docs/concepts/configuration/taint-and-toleration/) +来将这些pods分配到正确的nodes上。 diff --git a/cn/docs/concepts/containers/images.md b/cn/docs/concepts/containers/images.md new file mode 100644 index 0000000000000..a50c57b5a0f95 --- /dev/null +++ b/cn/docs/concepts/containers/images.md @@ -0,0 +1,296 @@ +--- +approvers: +- erictune +- thockin +title: 镜像 +--- + +{% capture overview %} + +在Kubernetes pod中引用镜像前,请创建Docker镜像,并将之推送到镜像仓库中。 +容器的“image”属性支持和Docker命令行相同的语法,包括私有仓库和标签。 + +{% endcapture %} + +{:toc} + +{% capture body %} + +## 升级镜像 +默认的镜像拉取策略是“IfNotPresent”,在镜像已经存在的情况下,kubelet将不在去拉取镜像。 +如果总是想要拉取镜像,必须设置拉取策略为“Always”或者设置镜像标签为“:latest”。 + +如果没有指定镜像的标签,它会被假定为“:latest”,同时拉取策略为“Always”。 + +注意应避免使用“:latest”标签,参见 [Best Practices for Configuration](/docs/concepts/configuration/overview/#container-images) 获取更多信息。 + +## 使用私有仓库 + +从私有仓库读取镜像时可能需要密钥。 +凭证可以用以下方式提供: + + - 使用Google Container Registry + - 每个集群分别配置 + - 在Google Compute Engine 或者 Google Container Engine上自动配置 + - 所有的pod都能读取项目的私有仓库 + - 使用 AWS EC2 Container Registry (ECR) + - 使用IAM角色和策略来控制对ECR仓库的访问 + - 自动刷新ECR的登录凭证 + - 使用 Azure Container Registry (ACR) + - 配置节点对私有仓库认证 + - 所有的pod都可以读取已配置的私有仓库 + - 需要集群管理员提供node的配置 + - 提前拉取镜像 + - 所有的pod都可以使用node上缓存的镜像 + - 需要以root进入node操作 + - pod上指定 ImagePullSecrets + - 只有提供了密钥的pod才能接入私有仓库 +下面将详细描述每一项 + + +### 使用 Google Container Registry +Kuberetes运行在Google Compute Engine (GCE)时原生支持[Google ContainerRegistry (GCR)] +(https://cloud.google.com/tools/container-registry/)。如果kubernetes集群运行在GCE +或者Google Container Engine (GKE)上,使用镜像全名(e.g. 
gcr.io/my_project/image:tag)即可。 + +集群中的所有pod都会有读取这个仓库中镜像的权限。 + +Kubelet将使用实例的Google service account向GCR认证。实例的service account拥有 +`https://www.googleapis.com/auth/devstorage.read_only`,所以它可以从项目的GCR拉取,但不能推送。 + +### 使用 AWS EC2 Container Registry + +当Node是AWS EC2实例时,Kubernetes原生支持[AWS EC2 ContainerRegistry](https://aws.amazon.com/ecr/)。 + +在pod定义中,使用镜像全名即可 (例如 `ACCOUNT.dkr.ecr.REGION.amazonaws.com/imagename:tag`) + +集群中可以创建pod的用户都可以使用ECR中的任意镜像运行pod。 + +Kubelet会获取并且定期刷新ECR的凭证。它需要以下权限 + +- `ecr:GetAuthorizationToken` +- `ecr:BatchCheckLayerAvailability` +- `ecr:GetDownloadUrlForLayer` +- `ecr:GetRepositoryPolicy` +- `ecr:DescribeRepositories` +- `ecr:ListImages` +- `ecr:BatchGetImage` + +要求: + +- 必须使用kubelet 1.2.0及以上版本 +- 如果node在区域A,而镜像仓库在另一个区域B,需要1.3.0及以上版本 +- 区域中必须提供ECR + +诊断 + +- 验证是否满足以上要求 +- 获取工作站的$REGION (例如 `us-west-2`)凭证,使用凭证SSH到主机手动运行docker,检查是否运行 +- 验证kublet是否使用参数`--cloud-provider=aws`运行 +- 检查kubelet日志(例如 `journalctl -u kubelet`),是否有类似的行 + - `plugins.go:56] Registering credential provider: aws-ecr-key` + - `provider.go:91] Refreshing cache for provider: *aws_credentials.ecrProvider` + +### 使用 Azure Container Registry (ACR) +当使用[Azure Container Registry](https://azure.microsoft.com/en-us/services/container-registry/)时,可以使用admin user或者service principal认证。 +任何一种情况,认证都通过标准的Dokcer authentication完成。本指南假设使用[azure-cli](https://github.com/azure/azure-cli) +命令行工具。 + +首先,需要创建仓库并获取凭证,完整的文档请参考 +[Azure container registry documentation](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-get-started-azure-cli)。 + +创建好容器仓库后,可以使用以下凭证登录: + + * `DOCKER_USER` : service principal, or admin username + * `DOCKER_PASSWORD`: service principal password, or admin user password + * `DOCKER_REGISTRY_SERVER`: `${some-registry-name}.azurecr.io` + * `DOCKER_EMAIL`: `${some-email-address}` + +填写以上变量后,就可以 +[configure a Kubernetes Secret and use it to deploy a Pod](/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod)。 + + +### 配置Nodes对私有仓库认证 + +**注意:** 如果在Google Container Engine (GKE)上运行集群,每个节点上都会有`.dockercfg`文件,它包含对Google Container Registry的凭证。 +不需要使用以下方法。 + +**注意:** 如果在AWS EC2上运行集群且准备使用EC2 Container Registry (ECR),每个node上的kubelet会管理和更新ECR的登录凭证。不需要使用以下方法。 + +**注意:** 该方法适用于能够对节点进行配置的情况。该方法在GCE及在其它能自动配置节点的云平台上并不适合。 + +Docker将私有仓库的密钥存放在`$HOME/.dockercfg`或`$HOME/.docker/config.json`文件中。Kubelet上,docker会使用root用户`$HOME`路径下的密钥。 + +推荐如下步骤来为node配置私有仓库。以下示例在PC或笔记本电脑中操作 + + 1.对于想要使用的每一种凭证,运行 `docker login [server]`,它会更新`$HOME/.docker/config.json`。 + 1.使用编辑器查看`$HOME/.docker/config.json`,保证文件中包含了想要使用的凭证 + 1.获取node列表,例如 + - 如果使用node名称,`nodes=$(kubectl get nodes -o jsonpath='{range.items[*].metadata}{.name} {end}')` + - 如果使用node IP ,`nodes=$(kubectl get nodes -o jsonpath='{range .items[*].status.addresses[?(@.type=="ExternalIP")]}{.address} {end}')` + 1.将本地的`.docker/config.json`拷贝到每个节点root用户目录下 + - 例如: `for n in $nodes; do scp ~/.docker/config.json root@$n:/root/.docker/config.json; done` + +创建使用私有仓库的pod来验证,例如: + +```yaml +$ cat < /tmp/private-image-test-1.yaml +apiVersion: v1 +kind: Pod +metadata: + name: private-image-test-1 +spec: + containers: + - name: uses-private-image + image: $PRIVATE_IMAGE_NAME + imagePullPolicy: Always + command: [ "echo", "SUCCESS" ] +EOF +$ kubectl create -f /tmp/private-image-test-1.yaml +pod "private-image-test-1" created +$ +``` + +如果一切正常,一段时间后,可以看到: + +```shell +$ kubectl logs private-image-test-1 +SUCCESS +``` + +如果失败,则可以看到: + +```shell +$ kubectl describe pods/private-image-test-1 | grep "Failed" + Fri, 26 Jun 2015 15:36:13 -0700 Fri, 26 Jun 2015 
15:39:13 -0700 19 {kubelet node-i2hq} spec.containers{uses-private-image} failed Failed to pull image "user/privaterepo:v1": Error: image user/privaterepo:v1 not found +``` + + +必须保证集群中所有的节点都有相同的`.docker/config.json`文件。否则,pod会在一些节点上正常运行而在另一些节点上无法启动 +例如,如果使用node自动弹缩,那么每个实例模板都需要包含`.docker/config.json`,或者挂载一个包含这个文件的驱动器。 + +在`.docker/config.json`中配置了私有仓库密钥后,所有pod都会能读取私有仓库中的镜像。 + +**该方法已在6月26日的docker私有仓库和kubernetes v0.19.3上测试通过,其他私有仓库,如quay.io应该也可以运行,但未测试过。** + +### 提前拉取镜像 + +**注意:** 如果在Google Container Engine (GKE)上运行集群,每个节点上都会有`.dockercfg`文件,它包含对Google Container Registry的凭证。 +不需要使用以下方法。 + +**注意:** 该方法适用于能够对节点进行配置的情况。该方法在GCE及在其它能自动配置节点的云平台上并不适合。 + +默认情况下,kubelet会尝试从指定的仓库拉取每一个镜像 +但是,如果容器属性`imagePullPolicy`设置为`IfNotPresent`或者`Never`, +则会使用本地镜像(优先、唯一、分别)。 + +如果依赖提前拉取镜像代替仓库认证, +必须保证集群所有的节点提前拉取的镜像是相同的。 + +可以用于提前载入指定的镜像以提高速度,或者作为私有仓库认证的一种替代方案 + +所有的pod都可以使用node上缓存的镜像 + +### 在pod上指定ImagePullSecrets + +**注意:** GKE,GCE及其他自动创建node的云平台上,推荐使用本方法。 + +Kuberentes支持在pod中指定仓库密钥。 + +#### 使用Docker Config创建Secret + +运行以下命令,将大写字母代替为合适的值 + +```shell +$ kubectl create secret docker-registry myregistrykey --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL +secret "myregistrykey" created. +``` + +如果需要接入多个仓库,可以为每个仓库创建一个secret。 +当为pod拉取镜像时,kubelet会将`imagePullSecrets`合入一个独立虚拟的`.docker/config.json`。 + +Pod只能引用和它相同namespace的ImagePullSecrets, +所以需要为每一个namespace做配置 + +#### 通过kubectl创建secret + +由于某种原因在一个`.docker/config.json`中需要多个项或者需要非上述命令给出的secret,可以[create a secret using +json or yaml](/docs/user-guide/secrets/#creating-a-secret-manually)。 + +请保证: + +- 设置data项的名称为`.dockerconfigjson` +- 使用base64对docker文件编码,并将字符准确黏贴到`data[".dockerconfigjson"]`里 +- 设置`type`为`kubernetes.io/dockerconfigjson` + +示例: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: myregistrykey + namespace: awesomeapps +data: + .dockerconfigjson: UmVhbGx5IHJlYWxseSByZWVlZWVlZWVlZWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWxsbGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxsbGx5eXl5eXl5eXl5eXl5eXl5eXl5eSBsbGxsbGxsbGxsbGxsbG9vb29vb29vb29vb29vb29vb29vb29vb29vb25ubm5ubm5ubm5ubm5ubm5ubm5ubm5ubmdnZ2dnZ2dnZ2dnZ2dnZ2dnZ2cgYXV0aCBrZXlzCg== +type: kubernetes.io/dockerconfigjson +``` + +如果收到错误消息`error: no objects passed to create`,可能是 base64 编码后的字符串非法。 +如果收到错误消息类似`Secret "myregistrykey" is invalid: data[.dockerconfigjson]: invalid value ...`, +说明数据已经解码成功,但是不满足`.docker/config.json`文件的语法。 + +#### 在pod中引用imagePullSecrets + +现在,在创建pod时,可以在pod定义中增加`imagePullSecrets`小节来引用secret + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: foo + namespace: awesomeapps +spec: + containers: + - name: foo + image: janedoe/awesomeapp:v1 + imagePullSecrets: + - name: myregistrykey +``` + +对每一个使用私有仓库的pod,都需要做以上操作。 + +也可以在[serviceAccount](/docs/user-guide/service-accounts) 资源中设置imagePullSecrets自动设置`imagePullSecrets` + +`imagePullSecrets`可以和每个node上的`.docker/config.json`一起使用,他们将共同生效。本方法在Google Container Engine (GKE) +也能正常工作。 + +### 使用场景 + +配置私有仓库有多种方案,以下是一些常用场景和建议的解决方案。 + +1. 集群运行非专有(例如 开源镜像)镜像。镜像不需要隐藏。 + - 使用Docker hub上的公有镜像 + - 无需配置 + - 在GCE/GKE上会自动使用高稳定性和高速的Docker hub的本地mirror +1. 集群运行一些专有镜像,这些镜像对外部公司需要隐藏,对集群用户可见 + - 使用自主的私有[Docker registry](https://docs.docker.com/registry/). + - 可以放置在[Docker Hub](https://hub.docker.com/account/signup/),或者其他地方。 + - 按照上面的描述,在每个节点手动配置.docker/config.json + - 或者,在防火墙内运行一个内置的私有仓库,并开放读取权限 + - 不需要配置Kubenretes + - 或者,在GCE/GKE上时,使用项目的Google Container Registry + - 使用集群自动伸缩比手动配置node工作的更好 + - 或者,在更改集群node配置不方便时,使用`imagePullSecrets` +1. 
使用专有镜像的集群,有更严格的访问控制 + - 保证[AlwaysPullImages admission controller](/docs/admin/admission-controllers/#alwayspullimages)开启。否则,所有的pod都可以使用镜像 + - 将敏感数据存储在"Secret"资源中,而不是打包在镜像里 +1. 多租户集群下,每个租户需要自己的私有仓库 + - 保证[AlwaysPullImages admission controller](/docs/admin/admission-controllers/#alwayspullimages)开启。否则,所有租户的所有的pod都可以使用镜像 + - 私有仓库开启认证 + - 为每个租户获取仓库凭证,放置在secret中,并发布到每个租户的namespace下 + - 租户将secret增加到每个namespace下的imagePullSecrets中 + +{% endcapture %} + +{% include templates/concept.md %} diff --git a/cn/docs/concepts/overview/components.md b/cn/docs/concepts/overview/components.md new file mode 100644 index 0000000000000..8275ba4df614c --- /dev/null +++ b/cn/docs/concepts/overview/components.md @@ -0,0 +1,124 @@ +--- +assignees: +- lavalamp +title: Kubernetes 组件 +redirect_from: +- "/docs/admin/cluster-components/" +- "/docs/admin/cluster-components.html" +--- +{% capture overview %} +本文档概述了 Kubernetes 所需的各种二进制组件, 用于提供齐全的功能。 +{% endcapture %} + +{% capture body %} + +## Master 组件 + +Master 组件提供的集群控制。Master 组件对集群做出全局性决策(例如:调度),以及检测和响应集群事件(副本控制器的`replicas`字段不满足时,启动新的副本)。 + +Master 组件可以在集群中的任何节点上运行。然而,为了简单起见,设置脚本通常会启动同一个虚拟机上所有 Master 组件,并且不会在此虚拟机上运行用户容器。请参阅[构建高可用性群集](/docs/admin/high-availability)示例对于多主机 VM 的设置。 + +### API服务器 + +[kube-apiserver](/docs/admin/kube-apiserver)对外暴露了Kubernetes API。它是的 Kubernetes 前端控制层。它被设计为水平扩展,即通过部署更多实例来缩放。请参阅[构建高可用性群集](/docs/admin/high-availability). + +### etcd + +[etcd](/docs/admin/etcd) 用于 Kubernetes 的后端存储。所有集群数据都存储在此处,始终为您的 Kubernetes 集群的 etcd 数据提供备份计划。 + +### kube-controller-manager + +[kube-controller-manager](/docs/admin/kube-controller-manager)运行控制器,它们是处理集群中常规任务的后台线程。逻辑上,每个控制器是一个单独的进程,但为了降低复杂性,它们都被编译成独立的可执行文件,并在单个进程中运行。 + +这些控制器包括: + +* 节点控制器: 当节点移除时,负责注意和响应。 +* 副本控制器: 负责维护系统中每个副本控制器对象正确数量的 Pod。 +* 端点控制器: 填充 端点(Endpoints) 对象(即连接 Services & Pods)。 +* 服务帐户和令牌控制器: 为新的命名空间创建默认帐户和 API 访问令牌. + +### 云控制器管理器-(cloud-controller-manager) + +cloud-controller-manager 是用于与底层云提供商交互的控制器。云控制器管理器二进制是 Kubernetes v1.6 版本中引入的 Alpha 功能。 + +cloud-controller-manager 仅运行云提供商特定的控制器循环。您必须在 kube-controller-manager 中禁用这些控制器循环,您可以通过在启动 kube-controller-manager 时将 `--cloud-provider` 标志设置为`external`来禁用控制器循环。 + +cloud-controller-manager 允许云供应商代码和 Kubernetes 核心彼此独立发展,在以前的版本中,Kubernetes 核心代码依赖于云提供商特定的功能代码。在未来的版本中,云供应商的特定代码应由云供应商自己维护,并与运行 Kubernetes 的云控制器管理器相关联。 + +以下控制器具有云提供商依赖关系: + +* 节点控制器: 用于检查云提供商以确定节点是否在云中停止响应后被删除 +* 路由控制器: 用于在底层云基础架构中设置路由 +* 服务控制器: 用于创建,更新和删除云提供商负载平衡器 +* 数据卷控制器: 用于创建,附加和装载卷,并与云提供商进行交互以协调卷 + +### 调度器 - (kube-scheduler) + +[kube-scheduler](/docs/admin/kube-scheduler)监视没有分配节点的新创建的 Pod,选择一个节点供他们运行。 + +### 插件(addons) + +插件是实现集群功能的 Pod 和 Service。 Pods 可以通过 Deployments,ReplicationControllers 管理。插件对象本身是受命名空间限制的,被创建于 `kube-system` 命名空间。 + +Addon 管理器用于创建和维护附加资源. 有关详细信息,请参阅[here](http://releases.k8s.io/HEAD/cluster/addons). 
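+
+作为一个极简的草图(示例中的名称均为假设,标签名与取值请以 addon 管理器的实际文档为准),下面展示插件通常如何以 Deployment 的形式部署在 `kube-system` 命名空间,并通过标签交由 addon 管理器维护:
+
+```yaml
+# 仅为示意:部署在 kube-system 命名空间中的插件 Deployment 骨架
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: example-addon                             # 假设的插件名称
+  namespace: kube-system
+  labels:
+    k8s-app: example-addon
+    addonmanager.kubernetes.io/mode: Reconcile    # 交由 addon 管理器协调(标签以其文档为准)
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        k8s-app: example-addon
+    spec:
+      containers:
+      - name: example-addon
+        image: example/addon:1.0                  # 假设的镜像
+```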
+ +#### DNS + +虽然其他插件并不是必需的,但所有 Kubernetes 集群都应该具有[Cluster DNS](/docs/concepts/services-networking/dns-pod-service/),许多示例依赖于它。 + +Cluster DNS 是一个 DNS 服务器,和您部署环境中的其他 DNS 服务器一起工作,为 Kubernetes 服务提供DNS记录。 + +Kubernetes 启动的容器自动将 DNS 服务器包含在 DNS 搜索中。 + +#### 用户界面 + +dashboard 提供了集群状态的只读概述。有关更多信息,请参阅[使用HTTP代理访问 Kubernetes API](/docs/tasks/access-kubernetes-api/http-proxy-access-api/) + + +#### 容器资源监控 + +[容器资源监控](/docs/user-guide/monitoring)将关于容器的一些常见的时间序列度量值保存到一个集中的数据库中,并提供用于浏览这些数据的界面。 + +#### 集群层面日志 + +[集群层面日志](/docs/user-guide/logging/overview) 机制负责将容器的日志数据保存到一个集中的日志存储中,该存储能够提供搜索和浏览接口。 + +## 节点组件 + +节点组件在每个节点上运行,维护运行的 Pod 并提供 Kubernetes 运行时环境。 + +### kubelet + +[kubelet](/docs/admin/kubelet)是主要的节点代理,它监测已分配给其节点的 Pod(通过 apiserver 或通过本地配置文件),提供如下功能: + +* 挂载 Pod 所需要的数据卷(Volume)。 +* 下载 Pod 的 secrets。 +* 通过 Docker 运行(或通过 rkt)运行 Pod 的容器。 +* 周期性的对容器生命周期进行探测。 +* 如果需要,通过创建 *镜像 Pod(Mirror Pod)* 将 Pod 的状态报告回系统的其余部分。 +* 将节点的状态报告回系统的其余部分。 + +### kube-proxy + +[kube-proxy](/docs/admin/kube-proxy)通过维护主机上的网络规则并执行连接转发,实现了Kubernetes服务抽象。 + + +### docker + +Docker 用于运行容器。 + +### rkt + +支持 rkt 运行容器作为 Docker 的试验性替代方案。 + +### supervisord + +supervisord 是一个轻量级的进程监控系统,可以用来保证 kubelet 和 docker 运行。 + +### fluentd + +fluentd 是一个守护进程,它有助于提供[集群层面日志](#cluster-level-logging) 集群层面的日志。 + +{% endcapture %} + +{% include templates/concept.md %} diff --git a/cn/docs/concepts/policy/resource-quotas.md b/cn/docs/concepts/policy/resource-quotas.md new file mode 100644 index 0000000000000..5054cb7f25c49 --- /dev/null +++ b/cn/docs/concepts/policy/resource-quotas.md @@ -0,0 +1,220 @@ +--- +approvers: +- derekwaynecarr +title: 资源配额 +--- + +当多个用户或团队共享具有固定数目节点的集群时,人们会担心有人使用的资源超出应有的份额。 + +资源配额是帮助管理员解决这一问题的工具。 + +资源配额, 通过 `ResourceQuota` 对象来定义, 对每个namespace的资源消耗总量提供限制。 它可以按类型限制namespace下可以创建的对象的数量,也可以限制可被该项目以资源形式消耗的计算资源的总量。 + +资源配额的工作方式如下: + +- 不同的团队在不同的namespace下工作。 目前这是自愿的, 但计划通过ACL (Access Control List 访问控制列表) + 使其变为强制性的。 +- 管理员为每个namespace创建一个或多个资源配额对象。 +- 用户在namespace下创建资源 (pods、 services等),同时配额系统会跟踪使用情况,来确保其不超过 + 资源配额中定义的硬性资源限额。 +- 如果资源的创建或更新违反了配额约束,则请求会失败,并返回 HTTP状态码 `403 FORBIDDEN` ,以及说明违反配额 + 约束的信息。 +- 如果namespace下的计算资源 (如 `cpu` 和 `memory`)的配额被启用,则用户必须为这些资源设定请求值(request) + 和约束值(limit),否则配额系统将拒绝Pod的创建。 + 提示: 可使用 LimitRange 准入控制器来为没有设置计算资源需求的Pod设置默认值。 + 作为示例,请参考 [演练](/docs/tasks/administer-cluster/quota-memory-cpu-namespace/) 来避免这个问题。 + +下面是使用namespace和配额构建策略的示例: + +- 在具有 32 GiB 内存 和 16 核CPU资源的集群中, 允许A团队使用 20 GiB 内存 和 10 核的CPU资源, + 允许B团队使用 10GiB 内存和 4 核的CPU资源, 并且预留 2GiB 内存和 2 核的CPU资源供将来分配。 +- 限制 "testing" namespace使用 1 核CPU资源和 1GiB 内存。 允许 "production" namespace使用任意数量。 + +在集群容量小于各namespace配额总和的情况下,可能存在资源竞争。 Kubernetes采用先到先服务的方式处理这类问题。 + +无论是资源竞争还是配额的变更都不会影响已经创建的资源。 + +## 启用资源配额 + +资源配额的支持在很多Kubernetes版本中是默认开启的。 当 apiserver 的 +`--admission-control=` 参数中包含 `ResourceQuota` 时,资源配额会被启用。 + +当namespace中存在一个 `ResourceQuota` 对象时,该namespace即开始实施资源配额管理。 +一个namespace中最多只应存在一个 `ResourceQuota` 对象 + +## 计算资源配额 + +用户可以对给定namespace下的 [计算资源](/docs/user-guide/compute-resources) 总量进行限制。 + +配额机制所支持的资源类型: + +| 资源名称 | 描述 | +| --------------------- | ----------------------------------------------------------- | +| `cpu` | 所有非终止状态的Pod中,其CPU需求总量不能超过该值。 | +| `limits.cpu` | 所有非终止状态的Pod中,其CPU限额总量不能超过该值。 | +| `limits.memory` | 所有非终止状态的Pod中,其内存限额总量不能超过该值。 | +| `memory` | 所有非终止状态的Pod中,其内存需求总量不能超过该值。 | +| `requests.cpu` | 所有非终止状态的Pod中,其CPU需求总量不能超过该值。 | +| `requests.memory` | 所有非终止状态的Pod中,其内存需求总量不能超过该值。 | + +## 存储资源配额 + +用户可以对给定namespace下的 [存储资源](/docs/user-guide/persistent-volumes) 总量进行限制。 + +此外,还可以根据相关的存储类(Storage Class)来限制存储资源的消耗。 + +| 资源名称 | 描述 | +| 
--------------------- | ----------------------------------------------------------- | +| `requests.storage` | 所有的PVC中,存储资源的需求不能超过该值。 | +| `persistentvolumeclaims` | namespace中所允许的 [PVC](/docs/user-guide/persistent-volumes/#persistentvolumeclaims) 总量。 | +| `.storageclass.storage.k8s.io/requests.storage` | 所有该storage-class-name相关的PVC中, 存储资源的需求不能超过该值。 | +| `.storageclass.storage.k8s.io/persistentvolumeclaims` | namespace中所允许的该storage-class-name相关的[PVC](/docs/user-guide/persistent-volumes/#persistentvolumeclaims)的总量。 | + +例如,如果一个操作人员针对 "黄金" 存储类型与 "铜" 存储类型设置配额,操作员可以 +定义配额如下: + +* `gold.storageclass.storage.k8s.io/requests.storage: 500Gi` +* `bronze.storageclass.storage.k8s.io/requests.storage: 100Gi` + +## 对象数量配额 + +给定类型的对象数量可以被限制。 支持以下类型: + +| 资源名称 | 描述 | +| ------------------------------- | ------------------------------------------------- | +| `configmaps` | namespace下允许存在的configmap的数量。 | +| `persistentvolumeclaims` | namespace下允许存在的[PVC](/docs/user-guide/persistent-volumes/#persistentvolumeclaims)的数量。 | +| `pods` | namespace下允许存在的非终止状态的pod数量。 如果pod 的 `status.phase 为 Failed 或 Succeeded` , 那么其处于终止状态。 | +| `replicationcontrollers` | namespace下允许存在的replication controllers的数量。 | +| `resourcequotas` | namespace下允许存在的 [resource quotas](/docs/admin/admission-controllers/#resourcequota) 的数量。 | +| `services` | namespace下允许存在的service的数量。 | +| `services.loadbalancers` | namespace下允许存在的load balancer类型的service的数量。 | +| `services.nodeports` | namespace下允许存在的node port类型的service的数量。 | +| `secrets` | namespace下允许存在的secret的数量。 | + +例如 `pods` 配额统计并保证单个namespace下创建 `pods` 的最大数量。 + +用户可能希望在namespace中为pod设置配额,来避免有用户创建很多小的pod,从而耗尽集群提供的pod IP地址。 + +## 配额作用域 + +每个配额都有一组相关的作用域(scope),配额只会对作用域内的资源生效。 + +当一个作用域被添加到配额中后,它会对作用域相关的资源数量作限制。 +如配额中指定了允许(作用域)集合之外的资源,会导致验证错误。 + +| 范围 | 描述 | +| ----- | ----------- | +| `Terminating` | 匹配 `spec.activeDeadlineSeconds >= 0` 的pod。 | +| `NotTerminating` | 匹配 `spec.activeDeadlineSeconds is nil` 的pod。 | +| `BestEffort` | 匹配"尽力而为(best effort)"服务类型的pod。 | +| `NotBestEffort` | 匹配非"尽力而为(best effort)"服务类型的pod。 | + +`BestEffort` 作用域限制配额跟踪以下资源: `pods` + +`Terminating`、 `NotTerminating` 和 `NotBestEffort` 限制配额跟踪以下资源: + +* `cpu` +* `limits.cpu` +* `limits.memory` +* `memory` +* `pods` +* `requests.cpu` +* `requests.memory` + +## 请求/约束 + +分配计算资源时,每个容器可以为CPU或内存指定请求和约束。 +也可以设置两者中的任何一个。 + +如果配额中指定了 `requests.cpu` 或 `requests.memory` 的值,那么它要求每个进来的容器针对这些资源有明确的请求。 如果配额中指定了 `limits.cpu` 或 `limits.memory`的值,那么它要求每个进来的容器针对这些资源指定明确的约束。 + +## 查看和设置配额 + +Kubectl 支持创建、更新和查看配额: + +```shell +$ kubectl create namespace myspace + +$ cat < compute-resources.yaml +apiVersion: v1 +kind: ResourceQuota +metadata: + name: compute-resources +spec: + hard: + pods: "4" + requests.cpu: "1" + requests.memory: 1Gi + limits.cpu: "2" + limits.memory: 2Gi +EOF +$ kubectl create -f ./compute-resources.yaml --namespace=myspace + +$ cat < object-counts.yaml +apiVersion: v1 +kind: ResourceQuota +metadata: + name: object-counts +spec: + hard: + configmaps: "10" + persistentvolumeclaims: "4" + replicationcontrollers: "20" + secrets: "10" + services: "10" + services.loadbalancers: "2" +EOF +$ kubectl create -f ./object-counts.yaml --namespace=myspace + +$ kubectl get quota --namespace=myspace +NAME AGE +compute-resources 30s +object-counts 32s + +$ kubectl describe quota compute-resources --namespace=myspace +Name: compute-resources +Namespace: myspace +Resource Used Hard +-------- ---- ---- +limits.cpu 0 2 +limits.memory 0 2Gi +pods 0 4 +requests.cpu 0 1 +requests.memory 0 1Gi + +$ kubectl describe quota object-counts 
--namespace=myspace +Name: object-counts +Namespace: myspace +Resource Used Hard +-------- ---- ---- +configmaps 0 10 +persistentvolumeclaims 0 4 +replicationcontrollers 0 20 +secrets 1 10 +services 0 10 +services.loadbalancers 0 2 +``` + +## 配额和集群容量 + +配额对象是独立于集群容量的。它们通过绝对的单位来表示。 所以,为集群添加节点, *不会* +自动赋予每个namespace消耗更多资源的能力。 + +有时可能需要更复杂的策略,比如: + + - 在几个团队中按比例划分总的集群资源。 + - 允许每个租户根据需要增加资源使用量,但要有足够的限制以防止意外资源耗尽。 + - 在namespace中添加节点、提高配额的额外需求。 + +这些策略可以基于 ResourceQuota,通过编写一个检测配额使用,并根据其他信号调整各namespace下的配额硬性限制的 "控制器" 来实现。 + +注意:资源配额对集群资源总体进行划分,但它对节点没有限制:来自多个namespace的Pod可能在同一节点上运行。 + +## 示例 + +查看 [如何使用资源配额的详细示例](/docs/tasks/administer-cluster/quota-api-object/)。 + +## 更多信息 + +查看 [资源配额设计文档](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_resource_quota.md) 了解更多信息。 diff --git a/cn/docs/concepts/services-networking/network-policies.md b/cn/docs/concepts/services-networking/network-policies.md new file mode 100644 index 0000000000000..ae8ccf69ea44b --- /dev/null +++ b/cn/docs/concepts/services-networking/network-policies.md @@ -0,0 +1,104 @@ +--- +approvers: +- thockin +- caseydavenport +- danwinship +title: 网络策略 +--- + +* TOC +{:toc} + +网络策略(NetworkPolicy)是一种关于pod间及pod与其他网络端点间所允许的通信规则的规范。 + +`NetworkPolicy` 资源使用标签选择pod,并定义选定pod所允许的通信规则。 + +## 前提 + +网络策略通过网络插件来实现,所以用户必须使用支持 `NetworkPolicy` 的网络解决方案 - 简单地创建资源对象,而没有控制器来使它生效的话,是没有任何作用的。 + +## 隔离和非隔离的Pod + +默认情况下,Pod是非隔离的,它们接受任何来源的流量。 + +Pod可以通过相关的网络策略进行隔离。一旦命名空间中有网络策略选择了特定的Pod,该Pod会拒绝网络策略所不允许的连接。 (命名空间下其他未被网络策略所选择的Pod会继续接收所有的流量) + +## `NetworkPolicy` 资源 + +通过[api参考](/docs/api-reference/{{page.version}}/#networkpolicy-v1-networking)来了解资源定义。 + +下面是一个 `NetworkPolicy` 的示例: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: test-network-policy + namespace: default +spec: + podSelector: + matchLabels: + role: db + ingress: + - from: + - namespaceSelector: + matchLabels: + project: myproject + - podSelector: + matchLabels: + role: frontend + ports: + - protocol: TCP + port: 6379 +``` + +除非选择支持网络策略的网络解决方案,否则将上述示例发送到API服务器没有任何效果。 + +__必填字段__: 与所有其他的Kubernetes配置一样,`NetworkPolicy` 需要 `apiVersion`、 `kind`和 `metadata` 字段。 关于配置文件操作的一般信息,请参考 [这里](/docs/user-guide/simple-yaml)、 [这里](/docs/user-guide/configuring-containers)和 [这里](/docs/user-guide/working-with-resources)。 + +__spec__: `NetworkPolicy` [spec](https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status) 中包含了在一个命名空间中定义特定网络策略所需的所有信息 + +__podSelector__: 每个 `NetworkPolicy` 都包括一个 `podSelector` ,它对该策略所应用的一组Pod进行选择。因为 `NetworkPolicy` 目前只支持定义 `ingress` 规则,这里的 `podSelector` 本质上是为该策略定义 "目标pod" 。示例中的策略选择带有 "role=db" 标签的pod。空的 `podSelector` 选择命名空间下的所有pod。 + +__ingress__: 每个 `NetworkPolicy` 包含一个 `ingress` 规则的白名单列表。 (其中的)规则允许同时匹配 `from` 和 `ports` 部分的流量。示例策略中包含一条简单的规则: 它匹配一个单一的端口,来自两个来源中的一个, 第一个通过 `namespaceSelector` 指定,第二个通过 `podSelector` 指定。 + +所以,示例网络策略: + +1. 隔离 "default" 命名空间下 "role=db" 的pod (如果它们不是已经被隔离的话)。 +2. 允许从 "default" 命名空间下带有 "role=frontend" 标签的pod到 "default" 命名空间下的pod的6379 TCP端口的连接。 +3. 
允许从带有 "project=myproject" 标签的命名空间下的任何pod到 "default" 命名空间下的pod的6379 TCP端口的连接。 + +查看 [网络策略入门指南](/docs/getting-started-guides/network-policy/walkthrough) 了解更多示例。 + +## 默认策略 + +用户可以通过创建一个选择所有Pod,但是不允许任何通信的网络策略,来为一个命名空间创建 "默认的" 隔离策略: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default-deny +spec: + podSelector: +``` + +这可以确保即使Pod在未被其他任何网络策略所选择的情况下仍能被隔离。 + +或者,如果用户希望允许一个命名空间下的所有Pod的所有通信 (即使已经添加了策略,使得一些pod被 "隔离"),仍可以创建一个明确允许所有通信的策略: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-all +spec: + podSelector: + ingress: + - {} +``` + +## 下一步呢? + +- 查看 [声明网络策略](/docs/tasks/administer-cluster/declare-network-policy/) + 来进行更多的示例演练 diff --git a/cn/docs/tasks/administer-cluster/static-pod.md b/cn/docs/tasks/administer-cluster/static-pod.md new file mode 100644 index 0000000000000..7c32e2d26d487 --- /dev/null +++ b/cn/docs/tasks/administer-cluster/static-pod.md @@ -0,0 +1,125 @@ +--- +approvers: +- jsafrane +title: 静态Pods +--- + +**如果你正在运行Kubernetes集群并且使用静态pods在每个节点上起一个pod,那么最好使用[DaemonSet](/cn/docs/concepts/workloads/controllers/daemonset/)!** + +*静态pods*直接由特定节点上的kubelet进程来管理,不通过主控节点上的API服务器。静态pod不关联任何replicationcontroller,它由kubelet进程自己来监控,当pod崩溃时重启该pod。对于静态pod没有健康检查。静态pod始终绑定在某一个kubelet,并且始终运行在同一个节点上。 + +Kubelet自动为每一个静态pod在Kubernetes的API服务器上创建一个镜像Pod(Mirror Pod),因此可以在API服务器查询到该pod,但是不被API服务器控制(例如不能删除)。 + +## 静态pod创建 + +静态pod有两种创建方式:用配置文件或者通过HTTP。 + +### 配置文件 + +配置文件就是放在特定目录下的标准的JSON或YAML格式的pod定义文件。用`kubelet --pod-manifest-path=`来启动kubelet进程,kubelet将会周期扫描这个目录,根据这个目录下出现或消失的YAML/JSON文件来创建或删除静态pod。 + +下面例子用静态pod的方式启动一个nginx的Web服务器: + +1. 选择一个节点来运行静态pod。这个例子中就是`my-node1`。 + + ``` + [joe@host ~] $ ssh my-node1 + ``` + +2. 选择一个目录,例如/etc/kubelet.d,把web服务器的pod定义文件放在这个目录下,例如`/etc/kubelet.d/static-web.yaml`: + + ``` + [root@my-node1 ~] $ mkdir /etc/kubelet.d/ + [root@my-node1 ~] $ cat </etc/kubelet.d/static-web.yaml + apiVersion: v1 + kind: Pod + metadata: + name: static-web + labels: + role: myrole + spec: + containers: + - name: web + image: nginx + ports: + - name: web + containerPort: 80 + protocol: TCP + EOF + ``` + +3.配置节点上的kubelet使用这个目录,kubelet启动时增加`--pod-manifest-path=/etc/kubelet.d/`参数。如果是Fedora系统,在Kubelet配置文件/etc/kubernetes/kubelet中添加下面这行: + + ``` + KUBELET_ARGS="--cluster-dns=10.254.0.10 --cluster-domain=kube.local --pod-manifest-path=/etc/kubelet.d/" + ``` + +如果是其它Linux发行版或者其它Kubernetes安装方式,配置方法可能会不一样。 + +4. 重启kubelet。如果是Fedora系统,就是: + + ``` + [root@my-node1 ~] $ systemctl restart kubelet + ``` + +## 通过HTTP创建静态Pods + +Kubelet周期地从--manifest-url=参数指定的地址下载文件,并且把它翻译成JSON/YAML格式的pod定义。此后的操作方式与--pod-manifest-path=相同,kubelet会不时地重新下载该文件,当文件变化时对应地终止或启动静态pod(如下)。 + +## 静态pods的动作行为 + +kubelet启动时,由`--pod-manifest-path=` or `--manifest-url=`参数指定的目录下定义的所有pod都会自动创建,例如,我们示例中的static-web。 (可能要花些时间拉取nginx镜像,耐心等待...) 
+ +```shell +[joe@my-node1 ~] $ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +f6d05272b57e nginx:latest "nginx" 8 minutes ago Up 8 minutes k8s_web.6f802af4_static-web-fk-node1_default_67e24ed9466ba55986d120c867395f3c_378e5f3c +``` + +如果我们查看Kubernetes的API服务器(运行在主机 `my-master`),可以看到这里创建了一个新的镜像Pod: + +```shell +[joe@host ~] $ ssh my-master +[joe@my-master ~] $ kubectl get pods +NAME READY STATUS RESTARTS AGE +static-web-my-node1 1/1 Running 0 2m +``` + +静态pod的标签会传递给镜像Pod,可以用来过滤或筛选。 + +需要注意的是,我们不能通过API服务器来删除静态pod(例如,通过 [`kubectl`](/docs/user-guide/kubectl/) 命令),kebelet不会删除它。 + +```shell +[joe@my-master ~] $ kubectl delete pod static-web-my-node1 +pods/static-web-my-node1 +[joe@my-master ~] $ kubectl get pods +NAME READY STATUS RESTARTS AGE +static-web-my-node1 1/1 Running 0 12s +``` + +返回`my-node1`主机,我们尝试手动终止容器,可以看到kubelet很快就会自动重启容器。 + +```shell +[joe@host ~] $ ssh my-node1 +[joe@my-node1 ~] $ docker stop f6d05272b57e +[joe@my-node1 ~] $ sleep 20 +[joe@my-node1 ~] $ docker ps +CONTAINER ID IMAGE COMMAND CREATED ... +5b920cbaf8b1 nginx:latest "nginx -g 'daemon of 2 seconds ago ... +``` + +## 静态pods的动态增加和删除 + +运行中的kubelet周期扫描配置的目录(我们这个例子中就是`/etc/kubelet.d`)下文件的变化,当这个目录中有文件出现或消失时创建或删除pods。 + +```shell +[joe@my-node1 ~] $ mv /etc/kubelet.d/static-web.yaml /tmp +[joe@my-node1 ~] $ sleep 20 +[joe@my-node1 ~] $ docker ps +// no nginx container is running +[joe@my-node1 ~] $ mv /tmp/static-web.yaml /etc/kubelet.d/ +[joe@my-node1 ~] $ sleep 20 +[joe@my-node1 ~] $ docker ps +CONTAINER ID IMAGE COMMAND CREATED ... +e7a62e3427f1 nginx:latest "nginx -g 'daemon of 27 seconds ago +``` diff --git a/cn/docs/tasks/debug-application-cluster/debug-application.md b/cn/docs/tasks/debug-application-cluster/debug-application.md new file mode 100644 index 0000000000000..11538f35de87e --- /dev/null +++ b/cn/docs/tasks/debug-application-cluster/debug-application.md @@ -0,0 +1,182 @@ +--- +title: 应用故障排查 +--- + +本指南帮助用户来调试kubernetes上那些没有正常运行的应用。 +本指南*不能*调试集群。如果想调试集群的话,请参阅[这里](/docs/admin/cluster-troubleshooting)。 + +* TOC +{:toc} + +## FAQ + +强烈建议用户参考我们的[FAQ](https://github.com/kubernetes/kubernetes/wiki/User-FAQ). + +## 诊断问题 + +故障排查的第一步是先给问题分下类。这个问题是什么?Pods,Replication Controller或者Service? + + * [Debugging Pods](#debugging-pods) + * [Debugging Replication Controllers](#debugging-replication-controllers) + * [Debugging Services](#debugging-services) + +### Debugging Pods + +调试pod的第一步是看一下这个pod的信息,用如下命令查看一下pod的当前状态和最近的事件: + +```shell +$ kubectl describe pods ${POD_NAME} +``` + +查看一下pod中的容器所处的状态。这些容器的状态都是`Running`吗?最近有没有重启过? + +后面的调试都是要依靠pods的状态的。 + +#### pod停留在pending状态 + +如果一个pod卡在`Pending`状态,则表示这个pod没有被调度到一个节点上。通常这是因为资源不足引起的。 +敲一下`kubectl describe ...`这个命令,输出的信息里面应该有显示为什么没被调度的原因。 +常见原因如下: + +* **资源不足**: +你可能耗尽了集群上所有的CPU和内存,此时,你需要删除pods,调整资源请求,或者增加节点。 +更多信息请参阅[Compute Resources document](/docs/user-guide/compute-resources/#my-pods-are-pending-with-event-message-failedscheduling) + +* **使用了`hostPort`**: +如果绑定一个pod到`hostPort`,那么能创建的pod个数就有限了。 +多数情况下,`hostPort`是非必要的,而应该采用服务来暴露pod。 +如果确实需要使用`hostPort`,那么能创建的pod的数量就是节点的个数。 + + +#### pod停留在waiting状态 + +如果一个pod卡在`Waiting`状态,则表示这个pod已经调试到节点上,但是没有运行起来。 +再次敲一下`kubectl describe ...`这个命令来查看相关信息。 +最常见的原因是拉取镜像失败。可以通过以下三种方式来检查: + +* 使用的镜像名字正确吗? +* 镜像仓库里有没有这个镜像? 
+* 用`docker pull `命令手动拉下镜像试试。 + +#### pod处于crashing状态或者unhealthy + +首先,看一下容器的log: + +```shell +$ kubectl logs ${POD_NAME} ${CONTAINER_NAME} +``` + +如果容器是crashed的,用如下命令可以看到crash的log: + +```shell +$ kubectl logs --previous ${POD_NAME} ${CONTAINER_NAME} +``` + +或者,用`exec`在容器内运行一些命令: + +```shell +$ kubectl exec ${POD_NAME} -c ${CONTAINER_NAME} -- ${CMD} ${ARG1} ${ARG2} ... ${ARGN} +``` + +注意:当一个pod内只有一个容器时,可以不带参数`-c ${CONTAINER_NAME}`。 + +例如,名为Cassandra的pod,处于running态,要查看它的log,可运行如下命令: + +```shell +$ kubectl exec cassandra -- cat /var/log/cassandra/system.log +``` + +如果以上方法都不起作用,找到这个pod所在的节点并用SSH登录进去做进一步的分析。 +通常情况下,是不需要在Kubernetes API中再给出另外的工具的。 +因此,如果你发现需要ssh进一个主机来分析问题时,请在GitHub上提一个特性请求,描述一个你的场景并说明为什么已经提供的工具不能满足需求。 + + +#### pod处于running态,但是没有正常工作 + +如果创建的pod不符合预期,那么创建pod的描述文件应该是存在某种错误的,并且这个错误在创建pod时被忽略掉。 +通常pod的定义中,章节被错误的嵌套,或者一个字段名字被写错,都可能会引起被忽略掉。 +例如,希望在pod中用命令行执行某个命令,但是将`command`写成`commnd`,pod虽然可以创建,但命令并没有执行。 + +如何查出来哪里出错? +首先,删掉这个pod再重新创建一个,重创时,像下面这样带着`--validate`这个参数: +`kubectl create --validate -f mypod.yaml`,`command`写成`commnd`的拼写错误就会打印出来了。 + +```shell +I0805 10:43:25.129850 46757 schema.go:126] unknown field: commnd +I0805 10:43:25.129973 46757 schema.go:129] this may be a false alarm, see https://github.com/kubernetes/kubernetes/issues/6842 +pods/mypod +``` + + + +如果上面方法没有看到相关异常的信息,那么接下来就要验证从apiserver获取到的pod是否与期望的一致,比如创建Pod的yaml文件是mypod.yaml。 + +运行如下命令来获取apiserver创建的pod信息并保存成一个文件: +`kubectl get pods/mypod -o yaml > mypod-on-apiserver.yaml`。 + +然后手动对这两个文件进行比较: +apiserver获得的yaml文件中的一些行,不在创建pod的yaml文件内,这是正常的。 +如果创建Pod的yaml文件内的一些行,在piserver获得的yaml文件中不存在,可以说明创建pod的yaml中的定义有问题。 + + +### Debugging Replication Controllers + +RC相当简单。他们要么能创建pod,要么不能。如果不能创建pod,请参阅上述[Debugging Pods](#debugging-pods)。 + +也可以使用`kubectl describe rc ${CONTROLLER_NAME}`命令来监视RC相关的事件。 + +### Debugging Services + +服务提供了多个Pod之间的负载均衡功能。 +有一些常见的问题可以造成服务无法正常工作。以下说明将有助于调试服务的问题。 + +首先,验证服务是否有端点。对于每一个Service对像,apiserver使`endpoints`资源可用。 + +通过如下命令可以查看endpoints资源: + +```shell +$ kubectl get endpoints ${SERVICE_NAME} +``` + +确保endpoints与服务内容器个数一致。 +例如,如果你创建了一个nginx服务,它有3个副本,那么你就会在这个服务的endpoints中看到3个不同的IP地址。 + +#### 服务缺少endpoints + +如果缺少endpoints,请尝试使用服务的labels列出所有的pod。 +假如有一个服务,有如下的label: + +```yaml +... +spec: + - selector: + name: nginx + type: frontend +``` + +你可以使用如下命令列出与selector相匹配的pod,并验证这些pod是否归属于创建的服务: + +```shell +$ kubectl get pods --selector=name=nginx,type=frontend +``` + +如果pod列表附合预期,但是endpoints仍然为空,那么可能没有暴露出正确的端口。 +如果服务指定了`containerPort`,但是列表中的Pod没有列出该端口,则不会将其添加到端口列表。 + +验证该pod的`containerPort`与服务的`containerPort`是否匹配。 + +#### 网络业务不工作 + +如果可以连接到服务上,但是连接立即被断开了,并且在endpoints列表中有endpoints,可能是代理和pods之间不通。 + +确认以下3件事情: + + * Pods工作是否正常? 
看一下重启计数,并参阅[Debugging Pods](#debugging-pods); + * 可以直接连接到pod上吗?获取pod的IP地址,然后尝试直接连接到该IP上; + * 应用是否在配置的端口上进行服务?Kubernetes不进行端口重映射,所以如果应用在8080端口上服务,那么`containerPort`字段就需要设定为8080。 + +#### 更多信息 + +如果上述都不能解决你的问题,请按照[Debugging Service document](/docs/user-guide/debugging-services)中的介绍来确保你的`Service`处于running态,有`Endpoints`,`Pods`真正的在服务;你有DNS在工作,安装了iptables规则,kube-proxy也没有异常行为。 + +你也可以访问[troubleshooting document](/docs/troubleshooting/)来获取更多信息。 diff --git a/cn/docs/tasks/inject-data-application/dapi-envars-container.yaml b/cn/docs/tasks/inject-data-application/dapi-envars-container.yaml new file mode 100644 index 0000000000000..8b3b3a39d3c1b --- /dev/null +++ b/cn/docs/tasks/inject-data-application/dapi-envars-container.yaml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: Pod +metadata: + name: dapi-envars-resourcefieldref +spec: + containers: + - name: test-container + image: gcr.io/google_containers/busybox:1.24 + command: [ "sh", "-c"] + args: + - while true; do + echo -en '\n'; + printenv MY_CPU_REQUEST MY_CPU_LIMIT; + printenv MY_MEM_REQUEST MY_MEM_LIMIT; + sleep 10; + done; + resources: + requests: + memory: "32Mi" + cpu: "125m" + limits: + memory: "64Mi" + cpu: "250m" + env: + - name: MY_CPU_REQUEST + valueFrom: + resourceFieldRef: + containerName: test-container + resource: requests.cpu + - name: MY_CPU_LIMIT + valueFrom: + resourceFieldRef: + containerName: test-container + resource: limits.cpu + - name: MY_MEM_REQUEST + valueFrom: + resourceFieldRef: + containerName: test-container + resource: requests.memory + - name: MY_MEM_LIMIT + valueFrom: + resourceFieldRef: + containerName: test-container + resource: limits.memory + restartPolicy: Never diff --git a/cn/docs/tasks/inject-data-application/dapi-envars-pod.yaml b/cn/docs/tasks/inject-data-application/dapi-envars-pod.yaml new file mode 100644 index 0000000000000..00762373b3e89 --- /dev/null +++ b/cn/docs/tasks/inject-data-application/dapi-envars-pod.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Pod +metadata: + name: dapi-envars-fieldref +spec: + containers: + - name: test-container + image: gcr.io/google_containers/busybox + command: [ "sh", "-c"] + args: + - while true; do + echo -en '\n'; + printenv MY_NODE_NAME MY_POD_NAME MY_POD_NAMESPACE; + printenv MY_POD_IP MY_POD_SERVICE_ACCOUNT; + sleep 10; + done; + env: + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + restartPolicy: Never diff --git a/cn/docs/tasks/inject-data-application/dapi-volume-resources.yaml b/cn/docs/tasks/inject-data-application/dapi-volume-resources.yaml new file mode 100644 index 0000000000000..65770f283f0cd --- /dev/null +++ b/cn/docs/tasks/inject-data-application/dapi-volume-resources.yaml @@ -0,0 +1,54 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kubernetes-downwardapi-volume-example-2 +spec: + containers: + - name: client-container + image: gcr.io/google_containers/busybox:1.24 + command: ["sh", "-c"] + args: + - while true; do + echo -en '\n'; + if [[ -e /etc/cpu_limit ]]; then + echo -en '\n'; cat /etc/cpu_limit; fi; + if [[ -e /etc/cpu_request ]]; then + echo -en '\n'; cat /etc/cpu_request; fi; + if [[ -e /etc/mem_limit ]]; then + echo -en '\n'; cat /etc/mem_limit; fi; + if [[ -e /etc/mem_request ]]; then + echo -en 
'\n'; cat /etc/mem_request; fi; + sleep 5; + done; + resources: + requests: + memory: "32Mi" + cpu: "125m" + limits: + memory: "64Mi" + cpu: "250m" + volumeMounts: + - name: podinfo + mountPath: /etc + readOnly: false + volumes: + - name: podinfo + downwardAPI: + items: + - path: "cpu_limit" + resourceFieldRef: + containerName: client-container + resource: limits.cpu + - path: "cpu_request" + resourceFieldRef: + containerName: client-container + resource: requests.cpu + - path: "mem_limit" + resourceFieldRef: + containerName: client-container + resource: limits.memory + - path: "mem_request" + resourceFieldRef: + containerName: client-container + resource: requests.memory + diff --git a/cn/docs/tasks/inject-data-application/dapi-volume.yaml b/cn/docs/tasks/inject-data-application/dapi-volume.yaml new file mode 100644 index 0000000000000..7126cefae5be6 --- /dev/null +++ b/cn/docs/tasks/inject-data-application/dapi-volume.yaml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kubernetes-downwardapi-volume-example + labels: + zone: us-est-coast + cluster: test-cluster1 + rack: rack-22 + annotations: + build: two + builder: john-doe +spec: + containers: + - name: client-container + image: gcr.io/google_containers/busybox + command: ["sh", "-c"] + args: + - while true; do + if [[ -e /etc/labels ]]; then + echo -en '\n\n'; cat /etc/labels; fi; + if [[ -e /etc/annotations ]]; then + echo -en '\n\n'; cat /etc/annotations; fi; + sleep 5; + done; + volumeMounts: + - name: podinfo + mountPath: /etc + readOnly: false + volumes: + - name: podinfo + downwardAPI: + items: + - path: "labels" + fieldRef: + fieldPath: metadata.labels + - path: "annotations" + fieldRef: + fieldPath: metadata.annotations + diff --git a/cn/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md b/cn/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md new file mode 100644 index 0000000000000..bbb7162888535 --- /dev/null +++ b/cn/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md @@ -0,0 +1,209 @@ +--- +title: 通过文件将Pod信息呈现给容器 +--- + +{% capture overview %} + +此页面描述Pod如何使用DownwardAPIVolumeFile把自己的信息呈现给pod中运行的容器。DownwardAPIVolumeFile可以呈现pod的字段和容器字段。 + +{% endcapture %} + + +{% capture prerequisites %} + +{% include task-tutorial-prereqs.md %} + +{% endcapture %} + +{% capture steps %} + +## Downward API + +有两种方式可以将Pod和Container字段呈现给运行中的容器: + +* [环境变量](/docs/tasks/configure-pod-container/environment-variable-expose-pod-information/) +* DownwardAPIVolumeFile + +这两种呈现Pod和Container字段的方式都称为*Downward API*。 + +## 存储Pod字段 + +在这个练习中,你将创建一个包含一个容器的pod。这是该pod的配置文件: + +{% include code.html language="yaml" file="dapi-volume.yaml" ghlink="/cn/docs/tasks/inject-data-application/dapi-volume.yaml" %} + +在配置文件中,你可以看到Pod有一个`downwardAPI`类型的Volume,并且挂载到容器中的`/etc`。 + +查看`downwardAPI`下面的`items`数组。每个数组元素都是一个[DownwardAPIVolumeFile](/docs/resources-reference/{{page.version}}/#downwardapivolumefile-v1-core)。 +第一个元素指示Pod的`metadata.labels`字段的值保存在名为`labels`的文件中。 +第二个元素指示Pod的`annotations`字段的值保存在名为`annotations`的文件中。 + +**注意:** 本示例中的字段是Pod字段,不是Pod中容器的字段。 +{: .note} + +创建Pod: + +```shell +kubectl create -f https://k8s.io/cn/docs/tasks/inject-data-application/dapi-volume.yaml +``` + +验证Pod中的容器运行正常: + +```shell +kubectl get pods +``` + +查看容器的日志: + +```shell +kubectl logs kubernetes-downwardapi-volume-example +``` + +输出显示`labels`和`annotations`文件的内容: + +```shell +cluster="test-cluster1" +rack="rack-22" +zone="us-est-coast" + +build="two" +builder="john-doe" +``` + 
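+
+由于配置文件中的容器每隔 5 秒就会重新输出一次(参见其中的 `while` 循环),如果想持续跟踪输出,也可以给 `kubectl logs` 加上 `-f` 参数(下面沿用上文的 Pod 名称,仅作演示):
+
+```shell
+kubectl logs -f kubernetes-downwardapi-volume-example
+```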
+进入Pod中运行的容器,打开一个shell: + +``` +kubectl exec -it kubernetes-downwardapi-volume-example -- sh +``` + +在该shell中,查看`labels`文件: + +```shell +/# cat /etc/labels +``` + +输出显示Pod的所有labels都已写入`labels`文件。 + +```shell +cluster="test-cluster1" +rack="rack-22" +zone="us-est-coast" +``` + +同样,查看`annotations`文件: + +```shell +/# cat /etc/annotations +``` + +查看`/etc`目录下的文件: + +```shell +/# ls -laR /etc +``` + +在输出中可以看到,`labels` 和 `annotations`文件都在一个临时子目录中:这个例子,`..2982_06_02_21_47_53.299460680`。在`/etc`目录中,`..data`是一个指向临时子目录 +的符号链接。`/etc`目录中,`labels` 和 `annotations`也是符号链接。 + +``` +drwxr-xr-x ... Feb 6 21:47 ..2982_06_02_21_47_53.299460680 +lrwxrwxrwx ... Feb 6 21:47 ..data -> ..2982_06_02_21_47_53.299460680 +lrwxrwxrwx ... Feb 6 21:47 annotations -> ..data/annotations +lrwxrwxrwx ... Feb 6 21:47 labels -> ..data/labels + +/etc/..2982_06_02_21_47_53.299460680: +total 8 +-rw-r--r-- ... Feb 6 21:47 annotations +-rw-r--r-- ... Feb 6 21:47 labels +``` + +用符号链接可实现元数据的动态原子刷新;更新将写入一个新的临时目录,然后`..data`符号链接完成原子更新,通过使用[rename(2)](http://man7.org/linux/man-pages/man2/rename.2.html)。 + +退出shell: + +```shell +/# exit +``` + +## 存储容器字段 + +前面的练习中,你将Pod字段保存到DownwardAPIVolumeFile中。接下来这个练习,你将存储容器字段。这里是包含一个容器的pod的配置文件: + +{% include code.html language="yaml" file="dapi-volume-resources.yaml" ghlink="/cn/docs/tasks/inject-data-application/dapi-volume-resources.yaml" %} + +在这个配置文件中,你可以看到Pod有一个`downwardAPI`类型的Volume,并且挂载到容器的`/etc`目录。 + +查看`downwardAPI`下面的`items`数组。每个数组元素都是一个DownwardAPIVolumeFile。 + +第一个元素指定名为`client-container`的容器中`limits.cpu`字段的值应保存在名为`cpu_limit`的文件中。 + +创建Pod: + +```shell +kubectl create -f https://k8s.io/cn/docs/tasks/inject-data-application/dapi-volume-resources.yaml +``` + +进入Pod中运行的容器,打开一个shell: + +``` +kubectl exec -it kubernetes-downwardapi-volume-example-2 -- sh +``` + +在shell中,查看`cpu_limit`文件: + +```shell +/# cat /etc/cpu_limit +``` +你可以使用同样的命令查看`cpu_request`, `mem_limit` 和`mem_request` 文件. + +{% endcapture %} + +{% capture discussion %} + +## Capabilities of the Downward API + +下面这些信息可以通过环境变量和DownwardAPIVolumeFiles提供给容器: + +* 节点名称 +* 节点IP +* Pod名称 +* Pod名字空间 +* Pod IP地址 +* Pod服务帐号名称 +* Pod的UID +* 容器的CPU约束 +* 容器的CPU请求值 +* 容器的内存约束 +* 容器的内存请求值 + +此外,以下信息可通过DownwardAPIVolumeFiles获得: + +* Pod的标签 +* Pod的注释 + +**Note:** 如果容器未指定CPU和memory limits,则Downward API默认为节点可分配值。 +{: .note} + +## 投射密钥到指定路径并且指定文件权限 + +你可以将密钥投射到指定路径并且指定每个文件的访问权限。更多信息,请参阅[Secrets](/docs/concepts/configuration/secret/). 
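+
+下面是一个示意性的片段(假设名为 `mysecret` 的 Secret 已经存在并包含 `username` 这个键;Pod 名称和数据卷名称也仅作演示),展示如何把某个键投射到指定路径,并为生成的文件设置访问权限:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: secret-projection-demo
+spec:
+  containers:
+  - name: test-container
+    image: gcr.io/google_containers/busybox
+    command: ["sh", "-c", "cat /etc/foo/my-group/my-username && sleep 3600"]
+    volumeMounts:
+    - name: secret-volume
+      mountPath: /etc/foo
+      readOnly: true
+  volumes:
+  - name: secret-volume
+    secret:
+      secretName: mysecret          # 假设该 Secret 已存在
+      items:
+      - key: username               # Secret 中的键
+        path: my-group/my-username  # 投射到挂载点下的相对路径
+        mode: 0400                  # 该文件的访问权限
+```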
+ +## Downward API的动机 + +对于容器来说,有时候拥有自己的信息是很有用的,可避免与Kubernetes过度耦合。Downward API使得容器使用自己或者集群的信息,而不必通过Kubernetes客户端或API服务器。 + +一个例子是有一个现有的应用假定要用一个非常熟悉的环境变量来保存一个唯一标识。一种可能是给应用增加处理层,但这样是冗余和易出错的,而且它违反了低耦合的目标。更好的选择是使用Pod名称作为标识,把Pod名称注入这个环境变量中。 +{% endcapture %} + + +{% capture whatsnext %} + +* [PodSpec](/docs/resources-reference/{{page.version}}/#podspec-v1-core) +* [Volume](/docs/resources-reference/{{page.version}}/#volume-v1-core) +* [DownwardAPIVolumeSource](/docs/resources-reference/{{page.version}}/#downwardapivolumesource-v1-core) +* [DownwardAPIVolumeFile](/docs/resources-reference/{{page.version}}/#downwardapivolumefile-v1-core) +* [ResourceFieldSelector](/docs/resources-reference/{{page.version}}/#resourcefieldselector-v1-core) + +{% endcapture %} + +{% include templates/task.md %} diff --git a/cn/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md b/cn/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md new file mode 100644 index 0000000000000..1eb2c074e1b51 --- /dev/null +++ b/cn/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md @@ -0,0 +1,153 @@ +--- +title: 通过环境变量将Pod信息呈现给容器 +--- + +{% capture overview %} + +此页面显示了Pod如何使用环境变量把自己的信息呈现给pod中运行的容器。环境变量可以呈现pod的字段和容器字段。 + +有两种方式可以将Pod和Container字段呈现给运行中的容器: +环境变量 和[DownwardAPIVolumeFiles](/docs/resources-reference/{{page.version}}/#downwardapivolumefile-v1-core). +这两种呈现Pod和Container字段的方式都称为*Downward API*。 + +{% endcapture %} + + +{% capture prerequisites %} + +{% include task-tutorial-prereqs.md %} + +{% endcapture %} + + +{% capture steps %} + +## Downward API + +有两种方式可以将Pod和Container字段呈现给运行中的容器: + +* 环境变量 +* [DownwardAPIVolumeFiles](/docs/resources-reference/{{page.version}}/#downwardapivolumefile-v1-core) + +这两种呈现Pod和Container字段的方式都称为*Downward API*。 + + +## 用Pod字段作为环境变量的值 + +在这个练习中,你将创建一个包含一个容器的pod。这是该pod的配置文件: + +{% include code.html language="yaml" file="dapi-envars-pod.yaml" ghlink="/cn/docs/tasks/inject-data-application/dapi-envars-pod.yaml" %} + +这个配置文件中,你可以看到五个环境变量。`env`字段是一个[EnvVars](/docs/resources-reference/{{page.version}}/#envvar-v1-core)类型的数组。 +数组中第一个元素指定`MY_NODE_NAME`这个环境变量从Pod的`spec.nodeName`字段获取变量值。同样,其它环境变量也是从Pod的字段获取它们的变量值。 + +**注意:** 本示例中的字段是Pod字段,不是Pod中容器的字段。 +{: .note} + +创建Pod: + +```shell +kubectl create -f https://k8s.io/cn/docs/tasks/inject-data-application/dapi-envars-pod.yaml +``` + +验证Pod中的容器运行正常: + +``` +kubectl get pods +``` + +查看容器日志: + +``` +kubectl logs dapi-envars-fieldref +``` + +输出信息显示了所选择的环境变量的值: + +``` +minikube +dapi-envars-fieldref +default +172.17.0.4 +default +``` + +要了解为什么这些值在日志中,请查看配置文件中的`command` 和 `args`字段。 当容器启动时,它将五个环境变量的值写入stdout。每十秒重复执行一次。 + +接下来,进入Pod中运行的容器,打开一个shell: + +``` +kubectl exec -it dapi-envars-fieldref -- sh +``` + +在shell中,查看环境变量: + +``` +/# printenv +``` + +输出信息显示环境变量已经指定为Pod的字段的值。 + +``` +MY_POD_SERVICE_ACCOUNT=default +... +MY_POD_NAMESPACE=default +MY_POD_IP=172.17.0.4 +... +MY_NODE_NAME=minikube +... 
+MY_POD_NAME=dapi-envars-fieldref +``` + +## 用容器字段作为环境变量的值 + +前面的练习中,你将Pod字段作为环境变量的值。接下来这个练习,你将用容器字段作为环境变量的值。这里是包含一个容器的pod的配置文件: + +{% include code.html language="yaml" file="dapi-envars-container.yaml" ghlink="/cn/docs/tasks/inject-data-application/dapi-envars-container.yaml" %} + +这个配置文件中,你可以看到四个环境变量。`env`字段是一个[EnvVars](/docs/resources-reference/{{page.version}}/#envvar-v1-core) +类型的数组。数组中第一个元素指定`MY_CPU_REQUEST`这个环境变量从容器的`requests.cpu`字段获取变量值。同样,其它环境变量也是从容器的字段获取它们的变量值。 + +创建Pod: + +```shell +kubectl create -f https://k8s.io/cn/docs/tasks/inject-data-application/dapi-envars-container.yaml +``` + +验证Pod中的容器运行正常: + +``` +kubectl get pods +``` + +查看容器日志: + +``` +kubectl logs dapi-envars-resourcefieldref +``` + +输出信息显示了所选择的环境变量的值: + +``` +1 +1 +33554432 +67108864 +``` + +{% endcapture %} + +{% capture whatsnext %} + +* [给容器定义环境变量](/docs/tasks/configure-pod-container/define-environment-variable-container/) +* [PodSpec](/docs/resources-reference/{{page.version}}/#podspec-v1-core) +* [Container](/docs/resources-reference/{{page.version}}/#container-v1-core) +* [EnvVar](/docs/resources-reference/{{page.version}}/#envvar-v1-core) +* [EnvVarSource](/docs/resources-reference/{{page.version}}/#envvarsource-v1-core) +* [ObjectFieldSelector](/docs/resources-reference/{{page.version}}/#objectfieldselector-v1-core) +* [ResourceFieldSelector](/docs/resources-reference/{{page.version}}/#resourcefieldselector-v1-core) + +{% endcapture %} + + +{% include templates/task.md %} diff --git a/cn/docs/tutorials/stateful-application/basic-stateful-set.md b/cn/docs/tutorials/stateful-application/basic-stateful-set.md index 3c45196561118..7290344cf329c 100644 --- a/cn/docs/tutorials/stateful-application/basic-stateful-set.md +++ b/cn/docs/tutorials/stateful-application/basic-stateful-set.md @@ -532,7 +532,7 @@ web-2 gcr.io/google_containers/nginx-slim:0.7 Patch `web` StatefulSet 来执行 `RollingUpdate` 更新策略。 ```shell -kubectl patch statefulset web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate"}}} +kubectl patch statefulset web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate"}}}' statefulset "web" patched ``` diff --git a/cn/docs/user-guide/kubectl-overview.md b/cn/docs/user-guide/kubectl-overview.md index ef90b69247aa6..704999df096c7 100644 --- a/cn/docs/user-guide/kubectl-overview.md +++ b/cn/docs/user-guide/kubectl-overview.md @@ -2,7 +2,7 @@ approvers: - bgrant0607 - hw-qiaolei -title:kubectl概述 +title: kubectl概述 --- kubectl是用于针对Kubernetes集群运行命令的命令行接口。本概述涵盖`kubectl`语法,描述命令操作,并提供常见的示例。有关每个命令的详细信息,包括所有支持的flags和子命令,请参考[kubectl](/docs/user-guide/kubectl)相关文档。有关安装说明,请参阅[安装kubectl](/docs/tasks/kubectl/install/)。 @@ -22,19 +22,19 @@ kubectl [command] [TYPE] [NAME] [flags] $ kubectl get pod pod1 $ kubectl get pods pod1 $ kubectl get po pod1 - + `NAME`:指定资源的名称。名称区分大小写。如果省略名称,则会显示所有资源的详细信息,比如`$ kubectl get pods`。 在多个资源上执行操作时,可以按类型和名称指定每个资源,或指定一个或多个文件: * 按类型和名称指定资源: - + * 要分组资源,如果它们都是相同的类型:`TYPE1 name1 name2 name<#>`.
例: `$ kubectl get pod example-pod1 example-pod2` * 要分别指定多种资源类型: `TYPE1/name1 TYPE1/name2 TYPE2/name3 TYPE<#>/name<#>`.
例: `$ kubectl get pod/example-pod1 replicationcontroller/example-rc1` - + 使用一个或多个文件指定资源: `-f file1 -f file2 -f file<#>` 使用[YAML而不是JSON](/docs/concepts/configuration/overview/#general-config-tips),因为YAML往往更加用户友好,特别是对于配置文件。
例:$ kubectl get pod -f ./pod.yaml @@ -286,4 +286,4 @@ $ kubectl logs -f ## 下一步 -开始使用[kubectl](/docs/user-guide/kubectl)命令。 \ No newline at end of file +开始使用[kubectl](/docs/user-guide/kubectl)命令。 diff --git a/docs/admin/accessing-the-api.md b/docs/admin/accessing-the-api.md index 03447f2488940..a844b0f5fdbd7 100644 --- a/docs/admin/accessing-the-api.md +++ b/docs/admin/accessing-the-api.md @@ -97,7 +97,7 @@ Kubernetes authorization requires that you use common REST attributes to interac Kubernetes supports multiple authorization modules, such as ABAC mode, RBAC Mode, and Webhook mode. When an administrator creates a cluster, they configured the authorization modules that should be used in the API server. If more than one authorization modules are configured, Kubernetes checks each module, and if any module authorizes the request, then the request can proceed. If all of the modules deny the request, then the request is denied (HTTP status code 403). -To learn more about Kubernetes authorization, including details about creating policies using the supported authorization modules, see [Authorization Overview](/docs/admin/authorization). +To learn more about Kubernetes authorization, including details about creating policies using the supported authorization modules, see [Authorization Overview](/docs/admin/authorization/). ## Admission Control diff --git a/docs/admin/admission-controllers.md b/docs/admin/admission-controllers.md index a5deca8ad5d26..05229844adcc1 100644 --- a/docs/admin/admission-controllers.md +++ b/docs/admin/admission-controllers.md @@ -71,7 +71,7 @@ class is marked as default, it rejects any creation of `PersistentVolumeClaim` w must revisit `StorageClass` objects and mark only one as default. This plugin ignores any `PersistentVolumeClaim` updates; it acts only on creation. -See [persistent volume](/docs/user-guide/persistent-volumes) documentation about persistent volume claims and +See [persistent volume](/docs/concepts/storage/persistent-volumes/) documentation about persistent volume claims and storage classes and how to mark a storage class as default. ### DefaultTolerationSeconds diff --git a/docs/admin/apparmor/deny-write.profile b/docs/admin/apparmor/deny-write.profile deleted file mode 100644 index c2653c7112865..0000000000000 --- a/docs/admin/apparmor/deny-write.profile +++ /dev/null @@ -1,10 +0,0 @@ -#include - -profile k8s-apparmor-example-deny-write flags=(attach_disconnected) { - #include - - file, - - # Deny all file writes. - deny /** w, -} diff --git a/docs/admin/apparmor/hello-apparmor-pod.yaml b/docs/admin/apparmor/hello-apparmor-pod.yaml deleted file mode 100644 index 3e9b3b2a9c6be..0000000000000 --- a/docs/admin/apparmor/hello-apparmor-pod.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: hello-apparmor - annotations: - # Tell Kubernetes to apply the AppArmor profile "k8s-apparmor-example-deny-write". - # Note that this is ignored if the Kubernetes node is not running version 1.4 or greater. - container.apparmor.security.beta.kubernetes.io/hello: localhost/k8s-apparmor-example-deny-write -spec: - containers: - - name: hello - image: busybox - command: [ "sh", "-c", "echo 'Hello AppArmor!' && sleep 1h" ] diff --git a/docs/admin/authentication.md b/docs/admin/authentication.md index a0ab2d9a89c67..10f3ce64dd5f9 100644 --- a/docs/admin/authentication.md +++ b/docs/admin/authentication.md @@ -435,7 +435,7 @@ the authentication webhook queries the remote service with a review object containing the token. 
Kubernetes will not challenge a request that lacks such a header. -Note that webhook API objects are subject to the same [versioning compatibility rules](/docs/api/) +Note that webhook API objects are subject to the same [versioning compatibility rules](/docs/concepts/overview/kubernetes-api/) as other Kubernetes API objects. Implementers should be aware of looser compatibility promises for beta objects and check the "apiVersion" field of the request to ensure correct deserialization. Additionally, the API server must diff --git a/docs/admin/authorization/index.md b/docs/admin/authorization/index.md index 7b51bedc0df95..54ccdc63507a6 100644 --- a/docs/admin/authorization/index.md +++ b/docs/admin/authorization/index.md @@ -37,7 +37,7 @@ Kubernetes reviews only the following API request attributes: --* For resource requests using `get`, `update`, `patch`, and `delete` verbs, you must provide the resource name. * **Subresource** - The subresource that is being accessed (for resource requests only). * **Namespace** - The namespace of the object that is being accessed (for namespaced resource requests only). - * **API group** - The API group being accessed (for resource requests only). An empty string designates the [core API group](/docs/api/). + * **API group** - The API group being accessed (for resource requests only). An empty string designates the [core API group](/docs/concepts/overview/kubernetes-api/). ## Determine the Request Verb To determine the request verb for a resource API endpoint, review the HTTP verb used and whether or not the request acts on an individual resource or a collection of resources: diff --git a/docs/admin/authorization/rbac.md b/docs/admin/authorization/rbac.md index 56260e13d9461..a7d17a9265ac5 100644 --- a/docs/admin/authorization/rbac.md +++ b/docs/admin/authorization/rbac.md @@ -521,7 +521,7 @@ This is commonly used by add-on API servers for unified authentication and autho system:persistent-volume-provisioner None -Allows access to the resources required by most dynamic volume provisioners. +Allows access to the resources required by most dynamic volume provisioners. diff --git a/docs/admin/authorization/webhook.md b/docs/admin/authorization/webhook.md index b88fef6357f3a..8a807bc35bbbc 100644 --- a/docs/admin/authorization/webhook.md +++ b/docs/admin/authorization/webhook.md @@ -58,7 +58,7 @@ action. This object contains fields describing the user attempting to make the request, and either details about the resource being accessed or requests attributes. -Note that webhook API objects are subject to the same [versioning compatibility rules](/docs/api/) +Note that webhook API objects are subject to the same [versioning compatibility rules](/docs/concepts/overview/kubernetes-api/) as other Kubernetes API objects. Implementers should be aware of looser compatibility promises for beta objects and check the "apiVersion" field of the request to ensure correct deserialization. Additionally, the API Server must diff --git a/docs/admin/federation/index.md b/docs/admin/federation/index.md index ecdcca87d974b..d4c524a9261a6 100644 --- a/docs/admin/federation/index.md +++ b/docs/admin/federation/index.md @@ -134,7 +134,7 @@ existing Kubernetes cluster. 
It also starts a [`type: LoadBalancer`](/docs/concepts/services-networking/service/#type-loadbalancer) [`Service`](/docs/concepts/services-networking/service/) for the `federation-apiserver` and a -[`PVC`](/docs/concepts/storage/persistent-volumes/) backed +[`PVC`](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims/) backed by a dynamically provisioned [`PV`](/docs/concepts/storage/persistent-volumes/) for `etcd`. All these components are created in the `federation` namespace. diff --git a/docs/api-reference/apps/v1beta1/definitions.html b/docs/api-reference/apps/v1beta1/definitions.html index 2f15ecf9070e7..7043dfcba4b78 100755 --- a/docs/api-reference/apps/v1beta1/definitions.html +++ b/docs/api-reference/apps/v1beta1/definitions.html @@ -367,7 +367,7 @@

v1.PersistentVolumeClaimSpec

accessModes

-

AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1

+

AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes-1

false

v1.PersistentVolumeAccessMode array

@@ -381,7 +381,7 @@

v1.PersistentVolumeClaimSpec

resources

-

Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources

+

Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#resources

false

v1.ResourceRequirements

@@ -1160,7 +1160,7 @@

v1.Container

resources

-

Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources

+

Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#resources

false

v1.ResourceRequirements

@@ -1896,14 +1896,14 @@

v1.PersistentVolumeClaim

spec

-

Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims

+

Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims

false

v1.PersistentVolumeClaimSpec

status

-

Status represents the current information/status of a persistent volume claim. Read-only. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims

+

Status represents the current information/status of a persistent volume claim. Read-only. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims

false

v1.PersistentVolumeClaimStatus

@@ -2006,7 +2006,7 @@

v1.PersistentVolumeClaimVolumeSource

claimName

-

ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims

+

ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims

true

string

@@ -2194,7 +2194,7 @@

v1.PersistentVolumeClaimStatus

accessModes

-

AccessModes contains the actual access modes the volume backing the PVC has. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1

+

AccessModes contains the actual access modes the volume backing the PVC has. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes-1

false

v1.PersistentVolumeAccessMode array

@@ -2988,7 +2988,7 @@

v1.Volume

persistentVolumeClaim

-

PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims

+

PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims

false

v1.PersistentVolumeClaimVolumeSource

diff --git a/docs/api-reference/batch/v1/definitions.html b/docs/api-reference/batch/v1/definitions.html index 50f6f28e449bb..20ecb4cf031d1 100755 --- a/docs/api-reference/batch/v1/definitions.html +++ b/docs/api-reference/batch/v1/definitions.html @@ -1066,7 +1066,7 @@

v1.Container

resources

-

Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources

+

Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#resources

false

v1.ResourceRequirements

@@ -1857,7 +1857,7 @@

v1.PersistentVolumeClaimVolumeSource

claimName

-

ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims

+

ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims

true

string

@@ -2977,7 +2977,7 @@

v1.Volume

persistentVolumeClaim

-

PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims

+

PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims

false

v1.PersistentVolumeClaimVolumeSource

diff --git a/docs/api-reference/extensions/v1beta1/definitions.html b/docs/api-reference/extensions/v1beta1/definitions.html index 262b7aed95ca1..7830bf56664e4 100755 --- a/docs/api-reference/extensions/v1beta1/definitions.html +++ b/docs/api-reference/extensions/v1beta1/definitions.html @@ -1938,7 +1938,7 @@

v1.PersistentVolumeClaimVolumeSource

claimName

-

ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims

+

ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims

true

string

@@ -2749,7 +2749,7 @@

v1.Volume

persistentVolumeClaim

-

PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims

+

PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims

false

v1.PersistentVolumeClaimVolumeSource

@@ -4993,7 +4993,7 @@

v1.Container

resources

-

Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources

+

Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#resources

false

v1.ResourceRequirements

diff --git a/docs/api-reference/v1.5/index.html b/docs/api-reference/v1.5/index.html index 71b333af828c7..de6a3b57dacce 100644 --- a/docs/api-reference/v1.5/index.html +++ b/docs/api-reference/v1.5/index.html @@ -179,7 +179,7 @@

Container v1

resources
ResourceRequirements -Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources +Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#resources securityContext
SecurityContext @@ -23125,11 +23125,11 @@

PersistentVolumeClaim v1

spec
PersistentVolumeClaimSpec -Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims status
PersistentVolumeClaimStatus -Status represents the current information/status of a persistent volume claim. Read-only. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +Status represents the current information/status of a persistent volume claim. Read-only. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims @@ -23147,11 +23147,11 @@

PersistentVolumeClaimSpec v1

accessModes
string array -AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 +AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes-1 resources
ResourceRequirements -Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources +Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#resources selector
LabelSelector @@ -23177,7 +23177,7 @@

PersistentVolumeClaimStatus v1

accessModes
string array -AccessModes contains the actual access modes the volume backing the PVC has. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 +AccessModes contains the actual access modes the volume backing the PVC has. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes-1 capacity
object @@ -23204,7 +23204,7 @@

PersistentVolumeClaimList v1

items
PersistentVolumeClaim array -A list of persistent volume claims. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +A list of persistent volume claims. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims kind
string @@ -25413,7 +25413,7 @@

Volume v1

persistentVolumeClaim
PersistentVolumeClaimVolumeSource -PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims photonPersistentDisk
PhotonPersistentDiskVolumeSource @@ -36717,7 +36717,7 @@

NodeStatus v1

capacity
object -Capacity represents the total resources of a node. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details. +Capacity represents the total resources of a node. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#capacity for more details. conditions
NodeCondition array @@ -39039,7 +39039,7 @@

PersistentVolume v1

-

PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://kubernetes.io/docs/user-guide/persistent-volumes

+

PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/

@@ -39065,11 +39065,11 @@

PersistentVolume v1

spec
PersistentVolumeSpec -Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes +Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes status
PersistentVolumeStatus -Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes +Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes @@ -39087,7 +39087,7 @@

PersistentVolumeSpec v1

accessModes
string array -AccessModes contains all ways the volume can be mounted. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes +AccessModes contains all ways the volume can be mounted. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes awsElasticBlockStore
AWSElasticBlockStoreVolumeSource @@ -39103,7 +39103,7 @@

PersistentVolumeSpec v1

capacity
object -A description of the persistent volume's resources and capacity. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity +A description of the persistent volume's resources and capacity. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#capacity cephfs
CephFSVolumeSource @@ -39115,7 +39115,7 @@

PersistentVolumeSpec v1

claimRef
ObjectReference -ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding +ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#binding fc
FCVolumeSource @@ -39151,7 +39151,7 @@

PersistentVolumeSpec v1

persistentVolumeReclaimPolicy
string -What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy +What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#recycling-policy photonPersistentDisk
PhotonPersistentDiskVolumeSource @@ -39189,7 +39189,7 @@

PersistentVolumeStatus v1

phase
string -Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#phase +Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#phase reason
string @@ -39212,7 +39212,7 @@

PersistentVolumeList v1

items
PersistentVolume array -List of persistent volumes. More info: http://kubernetes.io/docs/user-guide/persistent-volumes +List of persistent volumes. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/ kind
string @@ -50004,7 +50004,7 @@

PersistentVolumeClaimVolumeSource claimName
string -ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims readOnly
boolean diff --git a/docs/api-reference/v1.6/index.html b/docs/api-reference/v1.6/index.html index 64322a85620c0..37d5d7f9c26af 100644 --- a/docs/api-reference/v1.6/index.html +++ b/docs/api-reference/v1.6/index.html @@ -183,7 +183,7 @@

Container v1 core

resources
ResourceRequirements -Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources +Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#resources securityContext
SecurityContext @@ -23025,11 +23025,11 @@

PersistentVolumeClaim v1 core

spec
PersistentVolumeClaimSpec -Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims status
PersistentVolumeClaimStatus -Status represents the current information/status of a persistent volume claim. Read-only. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +Status represents the current information/status of a persistent volume claim. Read-only. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims @@ -23047,11 +23047,11 @@

PersistentVolumeClaimSpec v1 core

accessModes
string array -AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 +AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes-1 resources
ResourceRequirements -Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources +Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#resources selector
LabelSelector @@ -23059,7 +23059,7 @@

PersistentVolumeClaimSpec v1 core

storageClassName
string -Name of the StorageClass required by the claim. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1 +Name of the StorageClass required by the claim. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 volumeName
string @@ -23081,7 +23081,7 @@

PersistentVolumeClaimStatus v1 core accessModes
string array -AccessModes contains the actual access modes the volume backing the PVC has. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 +AccessModes contains the actual access modes the volume backing the PVC has. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes-1 capacity
object @@ -23108,7 +23108,7 @@

PersistentVolumeClaimList v1 core

items
PersistentVolumeClaim array -A list of persistent volume claims. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +A list of persistent volume claims. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims kind
string @@ -25328,7 +25328,7 @@

Volume v1 core

persistentVolumeClaim
PersistentVolumeClaimVolumeSource -PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims photonPersistentDisk
PhotonPersistentDiskVolumeSource @@ -38720,7 +38720,7 @@

NodeStatus v1 core

capacity
object -Capacity represents the total resources of a node. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details. +Capacity represents the total resources of a node. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#capacity for more details. conditions
NodeCondition array @@ -41046,7 +41046,7 @@

PersistentVolume v1 core

-

PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://kubernetes.io/docs/user-guide/persistent-volumes

+

PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/

@@ -41072,11 +41072,11 @@

PersistentVolume v1 core

spec
PersistentVolumeSpec -Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes +Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes status
PersistentVolumeStatus -Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes +Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes @@ -41094,7 +41094,7 @@

PersistentVolumeSpec v1 core

accessModes
string array -AccessModes contains all ways the volume can be mounted. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes +AccessModes contains all ways the volume can be mounted. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes awsElasticBlockStore
AWSElasticBlockStoreVolumeSource @@ -41110,7 +41110,7 @@

PersistentVolumeSpec v1 core

capacity
object -A description of the persistent volume's resources and capacity. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity +A description of the persistent volume's resources and capacity. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#capacity cephfs
CephFSVolumeSource @@ -41122,7 +41122,7 @@

PersistentVolumeSpec v1 core

claimRef
ObjectReference -ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding +ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#binding fc
FCVolumeSource @@ -41158,7 +41158,7 @@

PersistentVolumeSpec v1 core

persistentVolumeReclaimPolicy
string -What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy +What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#recycling-policy photonPersistentDisk
PhotonPersistentDiskVolumeSource @@ -41208,7 +41208,7 @@

PersistentVolumeStatus v1 core

phase
string -Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#phase +Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#phase reason
string @@ -41231,7 +41231,7 @@

PersistentVolumeList v1 core

items
PersistentVolume array -List of persistent volumes. More info: http://kubernetes.io/docs/user-guide/persistent-volumes +List of persistent volumes. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/ kind
string @@ -52657,7 +52657,7 @@

PersistentVolumeClaimVolumeSo claimName
string -ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims readOnly
boolean diff --git a/docs/concepts/api-extension/custom-resources.md b/docs/concepts/api-extension/custom-resources.md index dce7da2b157ff..aded0427a533d 100644 --- a/docs/concepts/api-extension/custom-resources.md +++ b/docs/concepts/api-extension/custom-resources.md @@ -53,8 +53,12 @@ This frees you from writing your own API server to handle the custom resource, but the generic nature of the implementation means you have less flexibility than with [API server aggregation](#api-server-aggregation). -CRD is the successor to the deprecated *ThirdPartyResource* (TPR) API, and is available as of -Kubernetes 1.7. +Refer to the [Custom Resource Example](https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/apiextensions-apiserver/examples/client-go) +for a demonstration of how to register a new custom resource, work with instances of your new resource type, +and setup a controller to handle events. + +**Note:** CRD is the successor to the deprecated *ThirdPartyResource* (TPR) API, and is available as of Kubernetes 1.7. +{: .note} ## API server aggregation diff --git a/docs/concepts/cluster-administration/cloud-providers.md b/docs/concepts/cluster-administration/cloud-providers.md index 4c0034efb9108..e31c0acf23fff 100644 --- a/docs/concepts/cluster-administration/cloud-providers.md +++ b/docs/concepts/cluster-administration/cloud-providers.md @@ -13,7 +13,7 @@ This section describes all the possible configurations which can be used when running Kubernetes on Amazon Web Services. ## Load Balancers -You can setup [external load balancers](/docs/tasks/access-application-cluster/create-external-load-balancer) +You can setup [external load balancers](/docs/tasks/access-application-cluster/create-external-load-balancer/) to use specific features in AWS by configuring the annotations as shown below. ```yaml diff --git a/docs/concepts/cluster-administration/cluster-administration-overview.md b/docs/concepts/cluster-administration/cluster-administration-overview.md index 9f6a9018c077c..97c07725e361f 100644 --- a/docs/concepts/cluster-administration/cluster-administration-overview.md +++ b/docs/concepts/cluster-administration/cluster-administration-overview.md @@ -18,23 +18,23 @@ See the guides in [Picking the Right Solution](/docs/setup/pick-right-solution/) Before choosing a guide, here are some considerations: - Do you just want to try out Kubernetes on your computer, or do you want to build a high-availability, multi-node cluster? Choose distros best suited for your needs. - - **If you are designing for high-availability**, learn about configuring [clusters in multiple zones](/docs/admin/multi-cluster). + - **If you are designing for high-availability**, learn about configuring [clusters in multiple zones](/docs/admin/multi-cluster/). - Will you be using **a hosted Kubernetes cluster**, such as [Google Container Engine (GKE)](https://cloud.google.com/container-engine/), or **hosting your own cluster**? - Will your cluster be **on-premises**, or **in the cloud (IaaS)**? Kubernetes does not directly support hybrid clusters. Instead, you can set up multiple clusters. - - **If you are configuring Kubernetes on-premises**, consider which [networking model](/docs/admin/networking) fits best. One option for custom networking is [*OpenVSwitch GRE/VxLAN networking*](/docs/admin/ovs-networking/), which uses OpenVSwitch to set up networking between pods across Kubernetes nodes. 
+ - **If you are configuring Kubernetes on-premises**, consider which [networking model](/docs/admin/networking/) fits best. One option for custom networking is [*OpenVSwitch GRE/VxLAN networking*](/docs/admin/ovs-networking/), which uses OpenVSwitch to set up networking between pods across Kubernetes nodes. - Will you be running Kubernetes on **"bare metal" hardware** or on **virtual machines (VMs)**? - Do you **just want to run a cluster**, or do you expect to do **active development of Kubernetes project code**? If the latter, choose an actively-developed distro. Some distros only use binary releases, but offer a greater variety of choices. - - Familiarize yourself with the [components](/docs/admin/cluster-components) needed to run a cluster. + - Familiarize yourself with the [components](/docs/admin/cluster-components/) needed to run a cluster. Note: Not all distros are actively maintained. Choose distros which have been tested with a recent version of Kubernetes. -If you are using a guide involving Salt, see [Configuring Kubernetes with Salt](/docs/admin/salt). +If you are using a guide involving Salt, see [Configuring Kubernetes with Salt](/docs/admin/salt/). ## Managing a cluster -* [Managing a cluster](/docs/concepts/cluster-administration/cluster-management/) describes several topics related to the lifecycle of a cluster: creating a new cluster, upgrading your cluster’s master and worker nodes, performing node maintenance (e.g. kernel upgrades), and upgrading the Kubernetes API version of a running cluster.. +* [Managing a cluster](/docs/concepts/cluster-administration/cluster-management/) describes several topics related to the lifecycle of a cluster: creating a new cluster, upgrading your cluster’s master and worker nodes, performing node maintenance (e.g. kernel upgrades), and upgrading the Kubernetes API version of a running cluster. * Learn how to [manage nodes](/docs/concepts/nodes/node/). @@ -44,20 +44,20 @@ If you are using a guide involving Salt, see [Configuring Kubernetes with Salt]( * [Kubernetes Container Environment](/docs/concepts/containers/container-environment-variables/) describes the environment for Kubelet managed containers on a Kubernetes node. -* [Controlling Access to the Kubernetes API](/docs/admin/accessing-the-api) describes how to set up permissions for users and service accounts. +* [Controlling Access to the Kubernetes API](/docs/admin/accessing-the-api/) describes how to set up permissions for users and service accounts. -* [Authenticating](/docs/admin/authentication) explains authentication in Kubernetes, including the various authentication options. +* [Authenticating](/docs/admin/authentication/) explains authentication in Kubernetes, including the various authentication options. -* [Authorization](/docs/admin/authorization) is separate from authentication, and controls how HTTP calls are handled. +* [Authorization](/docs/admin/authorization/) is separate from authentication, and controls how HTTP calls are handled. -* [Using Admission Controllers](/docs/admin/admission-controllers) explains plug-ins which intercepts requests to the Kubernetes API server after authentication and authorization. +* [Using Admission Controllers](/docs/admin/admission-controllers/) explains plug-ins which intercept requests to the Kubernetes API server after authentication and authorization.
* [Using Sysctls in a Kubernetes Cluster](/docs/concepts/cluster-administration/sysctl-cluster/) describes to an administrator how to use the `sysctl` command-line tool to set kernel parameters . * [Auditing](/docs/tasks/debug-application-cluster/audit/) describes how to interact with Kubernetes' audit logs. ### Securing the kubelet - * [Master-Node communication](/docs/concepts/cluster-administration/master-node-communication/) + * [Master-Node communication](/docs/concepts/architecture/master-node-communication/) * [TLS bootstrapping](/docs/admin/kubelet-tls-bootstrapping/) * [Kubelet authentication/authorization](/docs/admin/kubelet-authentication-authorization/) diff --git a/docs/concepts/cluster-administration/kubelet-garbage-collection.md b/docs/concepts/cluster-administration/kubelet-garbage-collection.md index 0a1036cd69ca1..068ee6bd2ab0c 100644 --- a/docs/concepts/cluster-administration/kubelet-garbage-collection.md +++ b/docs/concepts/cluster-administration/kubelet-garbage-collection.md @@ -72,4 +72,4 @@ Including: | `--low-diskspace-threshold-mb` | `--eviction-hard` or `eviction-soft` | eviction generalizes disk thresholds to other resources | | `--outofdisk-transition-frequency` | `--eviction-pressure-transition-period` | eviction generalizes disk pressure transition to other resources | -See [Configuring Out Of Resource Handling](/docs/concepts/cluster-administration/out-of-resource/) for more details. +See [Configuring Out Of Resource Handling](/docs/tasks/administer-cluster/out-of-resource/) for more details. diff --git a/docs/concepts/cluster-administration/manage-deployment.md b/docs/concepts/cluster-administration/manage-deployment.md index 4a946071255b8..c89990c04421b 100644 --- a/docs/concepts/cluster-administration/manage-deployment.md +++ b/docs/concepts/cluster-administration/manage-deployment.md @@ -256,7 +256,7 @@ my-nginx-2035384211-u3t6x 1/1 Running 0 23m fe This outputs all "app=nginx" pods, with an additional label column of pods' tier (specified with `-L` or `--label-columns`). -For more information, please see [labels](/docs/user-guide/labels/) and [kubectl label](/docs/user-guide/kubectl/{{page.version}}/#label) document. +For more information, please see [labels](/docs/concepts/overview/working-with-objects/labels/) and [kubectl label](/docs/user-guide/kubectl/{{page.version}}/#label) document. ## Updating annotations diff --git a/docs/concepts/cluster-administration/proxies.md b/docs/concepts/cluster-administration/proxies.md index 41e29d6cef799..13f73e8bbac82 100644 --- a/docs/concepts/cluster-administration/proxies.md +++ b/docs/concepts/cluster-administration/proxies.md @@ -27,7 +27,7 @@ There are several different proxies you may encounter when using Kubernetes: - proxy to target may use HTTP or HTTPS as chosen by proxy using available information - can be used to reach a Node, Pod, or Service - does load balancing when used to reach a Service - 1. The [kube proxy](/docs/user-guide/services/#ips-and-vips): + 1. 
The [kube proxy](/docs/concepts/services-networking/service/#ips-and-vips): - runs on each node - proxies UDP and TCP - does not understand HTTP diff --git a/docs/concepts/configuration/manage-compute-resources-container.md b/docs/concepts/configuration/manage-compute-resources-container.md index 2b5c67b02215e..475d8590441bf 100644 --- a/docs/concepts/configuration/manage-compute-resources-container.md +++ b/docs/concepts/configuration/manage-compute-resources-container.md @@ -26,7 +26,7 @@ CPU and memory are collectively referred to as *compute resources*, or just *resources*. Compute resources are measurable quantities that can be requested, allocated, and consumed. They are distinct from -[API resources](/docs/api/). API resources, such as Pods and +[API resources](/docs/concepts/overview/kubernetes-api/). API resources, such as Pods and [Services](/docs/user-guide/services) are objects that can be read and modified through the Kubernetes API server. diff --git a/docs/concepts/configuration/organize-cluster-access-kubeconfig.md b/docs/concepts/configuration/organize-cluster-access-kubeconfig.md index 78a63da117d35..3709914bd5ec2 100644 --- a/docs/concepts/configuration/organize-cluster-access-kubeconfig.md +++ b/docs/concepts/configuration/organize-cluster-access-kubeconfig.md @@ -37,16 +37,20 @@ in a variety of ways. For example: - Administrators might have sets of certificates that they provide to individual users. With kubeconfig files, you can organize your clusters, users, and namespaces. -And you can define contexts that enable users to quickly and easily switch between +You can also define contexts to quickly and easily switch between clusters and namespaces. ## Context -A kubeconfig file can have *context* elements. Each context is a triple -(cluster, namespace, user). You can use `kubectl config use-context` to set -the current context. The `kubectl` command-line tool communicates with the -cluster and namespace listed in the current context. And it uses the -credentials of the user listed in the current context. +A *context* element in a kubeconfig file is used to group access parameters +under a convenient name. Each context has three parameters: cluster, namespace, and user. +By default, the `kubectl` command-line tool uses parameters from +the *current context* to communicate with the cluster. + +To choose the current context: +``` +kubectl config use-context +``` ## The KUBECONFIG environment variable diff --git a/docs/concepts/configuration/overview.md b/docs/concepts/configuration/overview.md index 3690f36ea6e35..61149cd3cad93 100644 --- a/docs/concepts/configuration/overview.md +++ b/docs/concepts/configuration/overview.md @@ -50,15 +50,15 @@ This is a living document. If you think of something that is not on this list bu If you only need access to the port for debugging purposes, you can use the [kubectl proxy and apiserver proxy](/docs/tasks/access-kubernetes-api/http-proxy-access-api/) or [kubectl port-forward](/docs/tasks/access-application-cluster/port-forward-access-application-cluster/). You can use a [Service](/docs/concepts/services-networking/service/) object for external service access. - If you explicitly need to expose a pod's port on the host machine, consider using a [NodePort](/docs/user-guide/services/#type-nodeport) service before resorting to `hostPort`. + If you explicitly need to expose a pod's port on the host machine, consider using a [NodePort](/docs/concepts/services-networking/service/#type-nodeport) service before resorting to `hostPort`. 
- Avoid using `hostNetwork`, for the same reasons as `hostPort`. -- Use _headless services_ for easy service discovery when you don't need kube-proxy load balancing. See [headless services](/docs/user-guide/services/#headless-services). +- Use _headless services_ for easy service discovery when you don't need kube-proxy load balancing. See [headless services](/docs/concepts/services-networking/service/#headless-services). ## Using Labels -- Define and use [labels](/docs/user-guide/labels/) that identify __semantic attributes__ of your application or deployment. For example, instead of attaching a label to a set of pods to explicitly represent some service (For example, `service: myservice`), or explicitly representing the replication controller managing the pods (for example, `controller: mycontroller`), attach labels that identify semantic attributes, such as `{ app: myapp, tier: frontend, phase: test, deployment: v3 }`. This will let you select the object groups appropriate to the context— for example, a service for all "tier: frontend" pods, or all "test" phase components of app "myapp". See the [guestbook](https://github.com/kubernetes/examples/tree/{{page.githubbranch}}/guestbook/) app for an example of this approach. +- Define and use [labels](/docs/concepts/overview/working-with-objects/labels/) that identify __semantic attributes__ of your application or deployment. For example, instead of attaching a label to a set of pods to explicitly represent some service (For example, `service: myservice`), or explicitly representing the replication controller managing the pods (for example, `controller: mycontroller`), attach labels that identify semantic attributes, such as `{ app: myapp, tier: frontend, phase: test, deployment: v3 }`. This will let you select the object groups appropriate to the context— for example, a service for all "tier: frontend" pods, or all "test" phase components of app "myapp". See the [guestbook](https://github.com/kubernetes/examples/tree/{{page.githubbranch}}/guestbook/) app for an example of this approach. A service can be made to span multiple deployments, such as is done across [rolling updates](/docs/tasks/run-application/rolling-update-replication-controller/), by simply omitting release-specific labels from its selector, rather than updating a service's selector to match the replication controller's selector fully. @@ -84,7 +84,7 @@ This is a living document. If you think of something that is not on this list bu - Use `kubectl delete` rather than `stop`. `Delete` has a superset of the functionality of `stop`, and `stop` is deprecated. -- Use kubectl bulk operations (via files and/or labels) for get and delete. See [label selectors](/docs/user-guide/labels/#label-selectors) and [using labels effectively](/docs/concepts/cluster-administration/manage-deployment/#using-labels-effectively). +- Use kubectl bulk operations (via files and/or labels) for get and delete. See [label selectors](/docs/concepts/overview/working-with-objects/labels/#label-selectors) and [using labels effectively](/docs/concepts/cluster-administration/manage-deployment/#using-labels-effectively). - Use `kubectl run` and `expose` to quickly create and expose single container Deployments. See the [quick start guide](/docs/user-guide/quick-start/) for an example. 
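As a rough sketch of the `kubectl run` and `expose` workflow mentioned just above (the `my-nginx` name and `nginx` image are illustrative assumptions, not taken from this change):

```shell
# Create a single-container Deployment (name and image are illustrative)
kubectl run my-nginx --image=nginx --port=80

# Expose it inside the cluster as a Service on port 80
kubectl expose deployment my-nginx --port=80

# Verify what was created
kubectl get deployments,services my-nginx
```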
diff --git a/docs/concepts/containers/container-environment-variables.md b/docs/concepts/containers/container-environment-variables.md index 07f2e2caba383..513b09cb46f22 100644 --- a/docs/concepts/containers/container-environment-variables.md +++ b/docs/concepts/containers/container-environment-variables.md @@ -19,7 +19,7 @@ This page describes the resources available to Containers in the Container envir The Kubernetes Container environment provides several important resources to Containers: -* A filesystem, which is a combination of an [image](/docs/concepts/containers/images) and one or more [volumes](/docs/concepts/storage/volumes). +* A filesystem, which is a combination of an [image](/docs/concepts/containers/images/) and one or more [volumes](/docs/concepts/storage/volumes/). * Information about the Container itself. * Information about other objects in the cluster. @@ -31,7 +31,7 @@ It is available through the `hostname` command or the function call in libc. The Pod name and namespace are available as environment variables through the -[downward API](/docs/tasks/configure-pod-container/downward-api-volume-expose-pod-information). +[downward API](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/). User defined environment variables from the Pod definition are also available to the Container, as are any environment variables specified statically in the Docker image. diff --git a/docs/concepts/containers/images.md b/docs/concepts/containers/images.md index 6d06187dbb972..6519f42a2e282 100644 --- a/docs/concepts/containers/images.md +++ b/docs/concepts/containers/images.md @@ -25,7 +25,7 @@ you can do one of the following: - set the `imagePullPolicy` of the container to `Always`; - use `:latest` as the tag for the image to use; -- enable the [AllwaysPullImages](/docs/admin/admission-controllers/#alwayspullimages) admission controller. +- enable the [AlwaysPullImages](/docs/admin/admission-controllers/#alwayspullimages) admission controller. If you did not specify tag of your image, it will be assumed as `:latest`, with pull image policy of `Always` correspondingly. diff --git a/docs/concepts/overview/components.md b/docs/concepts/overview/components.md index 3cf64483ca1a8..52fcd42175367 100644 --- a/docs/concepts/overview/components.md +++ b/docs/concepts/overview/components.md @@ -18,20 +18,20 @@ cluster (for example, scheduling), and detecting and responding to cluster event Master components can be run on any node in the cluster. However, for simplicity, set up scripts typically start all master components on the same VM, and do not run user containers on this VM. See -[Building High-Availability Clusters](/docs/admin/high-availability) for an example multi-master-VM setup. +[Building High-Availability Clusters](/docs/admin/high-availability/) for an example multi-master-VM setup. ### kube-apiserver -[kube-apiserver](/docs/admin/kube-apiserver) exposes the Kubernetes API. It is the front-end for the -Kubernetes control plane. It is designed to scale horizontally -- that is, it scales by deploying more instances. See [Building High-Availability Clusters](/docs/admin/high-availability). +[kube-apiserver](/docs/admin/kube-apiserver/) exposes the Kubernetes API. It is the front-end for the +Kubernetes control plane. It is designed to scale horizontally -- that is, it scales by deploying more instances. See [Building High-Availability Clusters](/docs/admin/high-availability/). 
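Circling back to the `imagePullPolicy` guidance in the images.md hunk above, a minimal sketch of a pod that always pulls its image might look like the following; the pod name and image are illustrative assumptions.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: always-pull-example      # illustrative name
spec:
  containers:
  - name: app
    image: example/app:latest    # a ":latest" tag already implies a pull policy of Always
    imagePullPolicy: Always      # pull the image every time the pod starts
```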
### etcd -[etcd](/docs/tasks/administer-cluster/configure-upgrade-etcd) is used as Kubernetes' backing store. All cluster data is stored here. Always have a backup plan for etcd's data for your Kubernetes cluster. +[etcd](/docs/tasks/administer-cluster/configure-upgrade-etcd/) is used as Kubernetes' backing store. All cluster data is stored here. Always have a backup plan for etcd's data for your Kubernetes cluster. ### kube-controller-manager -[kube-controller-manager](/docs/admin/kube-controller-manager) runs controllers, which are the background threads that handle routine tasks in the cluster. Logically, each controller is a separate process, but to reduce complexity, they are all compiled into a single binary and run in a single process. +[kube-controller-manager](/docs/admin/kube-controller-manager/) runs controllers, which are the background threads that handle routine tasks in the cluster. Logically, each controller is a separate process, but to reduce complexity, they are all compiled into a single binary and run in a single process. These controllers include: @@ -58,7 +58,7 @@ The following controllers have cloud provider dependencies: ### kube-scheduler -[kube-scheduler](/docs/admin/kube-scheduler) watches newly created pods that have no node assigned, and +[kube-scheduler](/docs/admin/kube-scheduler/) watches newly created pods that have no node assigned, and selects a node for them to run on. ### addons @@ -84,12 +84,12 @@ Containers started by Kubernetes automatically include this DNS server in their #### Container Resource Monitoring -[Container Resource Monitoring](/docs/tasks/debug-application-cluster/resource-usage-monitoring) records generic time-series metrics +[Container Resource Monitoring](/docs/tasks/debug-application-cluster/resource-usage-monitoring/) records generic time-series metrics about containers in a central database, and provides a UI for browsing that data. #### Cluster-level Logging -A [Cluster-level logging](/docs/concepts/cluster-administration/logging) mechanism is responsible for +A [Cluster-level logging](/docs/concepts/cluster-administration/logging/) mechanism is responsible for saving container logs to a central log store with search/browsing interface. ## Node components @@ -98,7 +98,7 @@ Node components run on every node, maintaining running pods and providing the Ku ### kubelet -[kubelet](/docs/admin/kubelet) is the primary node agent. It watches for pods that have been assigned to its node (either by apiserver or via local configuration file) and: +[kubelet](/docs/admin/kubelet/) is the primary node agent. It watches for pods that have been assigned to its node (either by apiserver or via local configuration file) and: * Mounts the pod's required volumes. * Downloads the pod's secrets. @@ -109,7 +109,7 @@ Node components run on every node, maintaining running pods and providing the Ku ### kube-proxy -[kube-proxy](/docs/admin/kube-proxy) enables the Kubernetes service abstraction by maintaining +[kube-proxy](/docs/admin/kube-proxy/) enables the Kubernetes service abstraction by maintaining network rules on the host and performing connection forwarding. 
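Picking up the note above that you should always have a backup plan for etcd's data, one hedged sketch of taking a snapshot with the etcd v3 API follows; the endpoint and output path are illustrative, and a TLS-secured cluster would also need `--cacert`/`--cert`/`--key` flags.

```shell
# Take a point-in-time snapshot of the etcd keyspace (etcd v3 API)
ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \
  snapshot save /var/lib/etcd-backup/snapshot.db
```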
diff --git a/docs/concepts/overview/kubernetes-api.md b/docs/concepts/overview/kubernetes-api.md index a6d38679a59d6..b74650ff8db42 100644 --- a/docs/concepts/overview/kubernetes-api.md +++ b/docs/concepts/overview/kubernetes-api.md @@ -18,7 +18,7 @@ Kubernetes itself is decomposed into multiple components, which interact through ## API changes -In our experience, any system that is successful needs to grow and change as new use cases emerge or existing ones change. Therefore, we expect the Kubernetes API to continuously change and grow. However, we intend to not break compatibility with existing clients, for an extended period of time. In general, new API resources and new resource fields can be expected to be added frequently. Elimination of resources or fields will require following a deprecation process. The precise deprecation policy for eliminating features is TBD, but once we reach our 1.0 milestone, there will be a specific policy. +In our experience, any system that is successful needs to grow and change as new use cases emerge or existing ones change. Therefore, we expect the Kubernetes API to continuously change and grow. However, we intend to not break compatibility with existing clients, for an extended period of time. In general, new API resources and new resource fields can be expected to be added frequently. Elimination of resources or fields will require following the [API deprecation policy](https://kubernetes.io/docs/reference/deprecation-policy/). What constitutes a compatible change and how to change the API are detailed by the [API change document](https://git.k8s.io/community/contributors/devel/api_changes.md). diff --git a/docs/concepts/overview/what-is-kubernetes.md b/docs/concepts/overview/what-is-kubernetes.md index 0596b10b266a4..93cff0b445464 100644 --- a/docs/concepts/overview/what-is-kubernetes.md +++ b/docs/concepts/overview/what-is-kubernetes.md @@ -93,7 +93,7 @@ Even though Kubernetes provides a lot of functionality, there are always new sce Additionally, the [Kubernetes control plane](/docs/concepts/overview/components/) is built upon the same [APIs](/docs/reference/api-overview/) that are available to developers and users. Users can write their own controllers, such as [schedulers](https://github.com/kubernetes/community/blob/{{page.githubbranch}}/contributors/devel/scheduler.md), with [their own APIs](https://github.com/kubernetes/community/blob/{{page.githubbranch}}/contributors/design-proposals/api-machinery/extending-api.md) that can be targeted by a general-purpose [command-line tool](/docs/user-guide/kubectl-overview/). -This [design](https://github.com/kubernetes/community/blob/{{page.githubbranch}}/contributors/design-proposals/architecture/principles.md) has enabled a number of other systems to build atop Kubernetes. +This [design](https://git.k8s.io/community/contributors/design-proposals/architecture/principles.md) has enabled a number of other systems to build atop Kubernetes. #### What Kubernetes is not diff --git a/docs/concepts/overview/working-with-objects/annotations.md b/docs/concepts/overview/working-with-objects/annotations.md index 2bb89e17e5a50..e0b844325328c 100644 --- a/docs/concepts/overview/working-with-objects/annotations.md +++ b/docs/concepts/overview/working-with-objects/annotations.md @@ -55,7 +55,7 @@ and the like. {% endcapture %} {% capture whatsnext %} -Learn more about [Labels and Selectors](/docs/user-guide/labels/). 
+Learn more about [Labels and Selectors](/docs/concepts/overview/working-with-objects/labels/). {% endcapture %} {% include templates/concept.md %} diff --git a/docs/concepts/overview/working-with-objects/kubernetes-objects.md b/docs/concepts/overview/working-with-objects/kubernetes-objects.md index b462fb5f0b7e3..bb8358f617643 100644 --- a/docs/concepts/overview/working-with-objects/kubernetes-objects.md +++ b/docs/concepts/overview/working-with-objects/kubernetes-objects.md @@ -56,7 +56,7 @@ In the `.yaml` file for the Kubernetes object you want to create, you'll need to * `kind` - What kind of object you want to create * `metadata` - Data that helps uniquely identify the object, including a `name` string, UID, and optional `namespace` -You'll also need to provide the object `spec` field. The precise format of the object `spec` is different for every Kubernetes object, and contains nested fields specific to that object. The [Kubernetes API reference](/docs/api/) can help you find the spec format for all of the objects you can create using Kubernetes. +You'll also need to provide the object `spec` field. The precise format of the object `spec` is different for every Kubernetes object, and contains nested fields specific to that object. The [Kubernetes API reference](/docs/concepts/overview/kubernetes-api/) can help you find the spec format for all of the objects you can create using Kubernetes. {% endcapture %} diff --git a/docs/concepts/policy/pod-security-policy.md b/docs/concepts/policy/pod-security-policy.md index b408a845081ee..2ea42a4de33e0 100644 --- a/docs/concepts/policy/pod-security-policy.md +++ b/docs/concepts/policy/pod-security-policy.md @@ -8,7 +8,7 @@ Objects of type `PodSecurityPolicy` govern the ability to make requests on a pod that affect the `SecurityContext` that will be applied to a pod and container. -See [PodSecurityPolicy proposal](https://git.k8s.io/community/contributors/design-proposals/auth/security-context-constraints.md) for more information. +See [PodSecurityPolicy proposal](https://git.k8s.io/community/contributors/design-proposals/auth/pod-security-policy.md) for more information. * TOC {:toc} diff --git a/docs/concepts/policy/resource-quotas.md b/docs/concepts/policy/resource-quotas.md index fb9b07e3d6f83..11a51a76f2533 100644 --- a/docs/concepts/policy/resource-quotas.md +++ b/docs/concepts/policy/resource-quotas.md @@ -67,16 +67,16 @@ The following resource types are supported: ## Storage Resource Quota -You can limit the total sum of [storage resources](/docs/user-guide/persistent-volumes) that can be requested in a given namespace. +You can limit the total sum of [storage resources](/docs/concepts/storage/persistent-volumes/) that can be requested in a given namespace. In addition, you can limit consumption of storage resources based on associated storage-class. | Resource Name | Description | | --------------------- | ----------------------------------------------------------- | | `requests.storage` | Across all persistent volume claims, the sum of storage requests cannot exceed this value. | -| `persistentvolumeclaims` | The total number of [persistent volume claims](/docs/user-guide/persistent-volumes/#persistentvolumeclaims) that can exist in the namespace. | +| `persistentvolumeclaims` | The total number of [persistent volume claims](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) that can exist in the namespace. 
| | `<storage-class-name>.storageclass.storage.k8s.io/requests.storage` | Across all persistent volume claims associated with the storage-class-name, the sum of storage requests cannot exceed this value. | -| `<storage-class-name>.storageclass.storage.k8s.io/persistentvolumeclaims` | Across all persistent volume claims associated with the storage-class-name, the total number of [persistent volume claims](/docs/user-guide/persistent-volumes/#persistentvolumeclaims) that can exist in the namespace. | +| `<storage-class-name>.storageclass.storage.k8s.io/persistentvolumeclaims` | Across all persistent volume claims associated with the storage-class-name, the total number of [persistent volume claims](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) that can exist in the namespace. | For example, if an operator wants to quota storage with `gold` storage class separate from `bronze` storage class, the operator can define a quota as follows: @@ -99,7 +99,7 @@ are supported: | Resource Name | Description | | ------------------------------- | ------------------------------------------------- | | `configmaps` | The total number of config maps that can exist in the namespace. | -| `persistentvolumeclaims` | The total number of [persistent volume claims](/docs/user-guide/persistent-volumes/#persistentvolumeclaims) that can exist in the namespace. | +| `persistentvolumeclaims` | The total number of [persistent volume claims](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) that can exist in the namespace. | | `pods` | The total number of pods in a non-terminal state that can exist in the namespace. A pod is in a terminal state if `status.phase in (Failed, Succeeded)` is true. | | `replicationcontrollers` | The total number of replication controllers that can exist in the namespace. | | `resourcequotas` | The total number of [resource quotas](/docs/admin/admission-controllers/#resourcequota) that can exist in the namespace. | @@ -244,4 +244,4 @@ See a [detailed example for how to use resource quota](/docs/tasks/administer-cl ## Read More -See [ResourceQuota design doc](https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md) for more information. +See [ResourceQuota design doc](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_resource_quota.md) for more information. diff --git a/docs/concepts/services-networking/connect-applications-service.md b/docs/concepts/services-networking/connect-applications-service.md index b69c831383480..7079cfbeea8e3 100644 --- a/docs/concepts/services-networking/connect-applications-service.md +++ b/docs/concepts/services-networking/connect-applications-service.md @@ -94,7 +94,7 @@ NAME ENDPOINTS AGE my-nginx 10.244.2.5:80,10.244.3.4:80 1m ``` -You should now be able to curl the nginx Service on `<CLUSTER-IP>:<PORT>` from any node in your cluster. Note that the Service IP is completely virtual, it never hits the wire, if you're curious about how this works you can read more about the [service proxy](/docs/user-guide/services/#virtual-ips-and-service-proxies). +You should now be able to curl the nginx Service on `<CLUSTER-IP>:<PORT>` from any node in your cluster. Note that the Service IP is completely virtual, it never hits the wire, if you're curious about how this works you can read more about the [service proxy](/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies).
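To make the storage-class-scoped quota rows from the resource-quotas.md table above concrete, a `ResourceQuota` for the `gold` class used in that example might look roughly like this; the object name and quantities are illustrative assumptions.

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: storage-quota                                              # illustrative name
spec:
  hard:
    requests.storage: 500Gi                                        # total storage requested in the namespace
    persistentvolumeclaims: "10"                                   # total number of PVCs in the namespace
    gold.storageclass.storage.k8s.io/requests.storage: 300Gi       # storage requested through the "gold" class
    gold.storageclass.storage.k8s.io/persistentvolumeclaims: "5"   # PVCs using the "gold" class
```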
## Accessing the Service diff --git a/docs/concepts/services-networking/ingress.md b/docs/concepts/services-networking/ingress.md index 115903c240209..6914c898676a1 100644 --- a/docs/concepts/services-networking/ingress.md +++ b/docs/concepts/services-networking/ingress.md @@ -292,7 +292,7 @@ Please track the [L7 and Ingress proposal](https://github.com/kubernetes/kuberne You can expose a Service in multiple ways that don't directly involve the Ingress resource: -* Use [Service.Type=LoadBalancer](/docs/user-guide/services/#type-loadbalancer) -* Use [Service.Type=NodePort](/docs/user-guide/services/#type-nodeport) +* Use [Service.Type=LoadBalancer](/docs/concepts/services-networking/service/#type-loadbalancer) +* Use [Service.Type=NodePort](/docs/concepts/services-networking/service/#type-nodeport) * Use a [Port Proxy](https://git.k8s.io/contrib/for-demos/proxy-to-service) * Deploy the [Service loadbalancer](https://git.k8s.io/contrib/service-loadbalancer). This allows you to share a single IP among multiple Services and achieve more advanced loadbalancing through Service Annotations. diff --git a/docs/concepts/services-networking/network-policies.md b/docs/concepts/services-networking/network-policies.md index ee03ad0715fb5..e88049922d944 100644 --- a/docs/concepts/services-networking/network-policies.md +++ b/docs/concepts/services-networking/network-policies.md @@ -44,6 +44,10 @@ spec: - Egress ingress: - from: + - ipBlock: + cidr: 172.17.0.0/16 + except: + - 172.17.1.0/24 - namespaceSelector: matchLabels: project: myproject @@ -76,6 +80,11 @@ __ingress__: Each `NetworkPolicy` may include a list of whitelist `ingress` rule __egress__: Each `NetworkPolicy` may include a list of whitelist `egress` rules. Each rule allows traffic which matches both the `to` and `ports` sections. The example policy contains a single rule, which matches traffic on a single port to any destination in `10.0.0.0/24`. +__ipBlock__: `ipBlock` describes a particular CIDR that is allowed to +the pods matched by a NetworkPolicySpec's podSelector. The `except` entry +is a slice of CIDRs that should not be included within an IP Block. Except +values will be rejected if they are outside the CIDR range. + So, the example NetworkPolicy: 1. isolates "role=db" pods in the "default" namespace for both ingress and egress traffic (if they weren't already isolated) diff --git a/docs/concepts/services-networking/service.md b/docs/concepts/services-networking/service.md index 653e10aeb3bd6..7bb7ecbba43aa 100644 --- a/docs/concepts/services-networking/service.md +++ b/docs/concepts/services-networking/service.md @@ -120,7 +120,7 @@ subsets: NOTE: Endpoint IPs may not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast (224.0.0.0/24). -Accessing a `Service` without a selector works the same as if it had selector. +Accessing a `Service` without a selector works the same as if it had a selector. The traffic will be routed to endpoints defined by the user (`1.2.3.4:9376` in this example). @@ -176,7 +176,9 @@ or `Services` or `Pods`. By default, the choice of backend is round robin. Client-IP based session affinity can be selected by setting `service.spec.sessionAffinity` to `"ClientIP"` (the -default is `"None"`). +default is `"None"`), and you can set the max session sticky time by setting the field +`service.spec.sessionAffinityConfig.clientIP.timeoutSeconds` if you have already set +`service.spec.sessionAffinity` to `"ClientIP"` (the default is "10800"). 
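A short sketch of a Service using the client-IP session affinity and the `sessionAffinityConfig` timeout described above; the service name, selector, ports, and timeout value are illustrative assumptions.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service          # illustrative name
spec:
  selector:
    app: MyApp              # illustrative selector
  ports:
  - port: 80
    targetPort: 9376
  sessionAffinity: ClientIP # route requests from a given client IP to the same backend
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 3600  # keep the stickiness for up to an hour (the default is 10800)
```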
![Services overview diagram for userspace proxy](/images/docs/services-userspace-overview.svg) @@ -191,7 +193,9 @@ select a backend `Pod`. By default, the choice of backend is random. Client-IP based session affinity can be selected by setting `service.spec.sessionAffinity` to `"ClientIP"` (the -default is `"None"`). +default is `"None"`), and you can set the max session sticky time by setting the field +`service.spec.sessionAffinityConfig.clientIP.timeoutSeconds` if you have already set +`service.spec.sessionAffinity` to `"ClientIP"` (the default is "10800"). As with the userspace proxy, the net result is that any traffic bound for the `Service`'s IP:Port is proxied to an appropriate backend without the clients diff --git a/docs/concepts/storage/persistent-volumes.md b/docs/concepts/storage/persistent-volumes.md index f0db8975b49a0..df42021572230 100644 --- a/docs/concepts/storage/persistent-volumes.md +++ b/docs/concepts/storage/persistent-volumes.md @@ -35,7 +35,7 @@ administrators. Kubernetes itself is unopinionated about what classes represent. This concept is sometimes called "profiles" in other storage systems. -Please see the [detailed walkthrough with working examples](/docs/user-guide/persistent-volumes/walkthrough/). +Please see the [detailed walkthrough with working examples](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/). ## Lifecycle of a volume and claim @@ -334,7 +334,7 @@ Claims, like pods, can request specific quantities of a resource. In this case, ### Selector -Claims can specify a [label selector](/docs/user-guide/labels/#label-selectors) to further filter the set of volumes. Only the volumes whose labels match the selector can be bound to the claim. The selector can consist of two fields: +Claims can specify a [label selector](/docs/concepts/overview/working-with-objects/labels/#label-selectors) to further filter the set of volumes. Only the volumes whose labels match the selector can be bound to the claim. The selector can consist of two fields: * matchLabels - the volume must have a label with this value * matchExpressions - a list of requirements made by specifying key, list of values, and operator that relates the key and values. Valid operators include In, NotIn, Exists, and DoesNotExist. diff --git a/docs/concepts/storage/volumes.md b/docs/concepts/storage/volumes.md index 56bf83acd0c60..bd03bb9196d8c 100644 --- a/docs/concepts/storage/volumes.md +++ b/docs/concepts/storage/volumes.md @@ -462,7 +462,7 @@ Secrets are described in more detail [here](/docs/user-guide/secrets). ### persistentVolumeClaim A `persistentVolumeClaim` volume is used to mount a -[PersistentVolume](/docs/user-guide/persistent-volumes) into a pod. PersistentVolumes are a +[PersistentVolume](/docs/concepts/storage/persistent-volumes/) into a pod. PersistentVolumes are a way for users to "claim" durable storage (such as a GCE PersistentDisk or an iSCSI volume) without knowing the details of the particular cloud environment. @@ -474,7 +474,7 @@ details. A `downwardAPI` volume is used to make downward API data available to applications. It mounts a directory and writes the requested data in plain text files. -See the [`downwardAPI` volume example](/docs/tasks/configure-pod-container/downward-api-volume-expose-pod-information/) for more details. +See the [`downwardAPI` volume example](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/) for more details. 
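As a minimal sketch of the `downwardAPI` volume just described (the pod name, label, and mount path are illustrative assumptions):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: downward-api-example        # illustrative name
  labels:
    zone: us-east-coast             # illustrative label, exposed to the container below
spec:
  containers:
  - name: client-container
    image: busybox
    command: ["sh", "-c", "cat /etc/podinfo/labels; sleep 3600"]
    volumeMounts:
    - name: podinfo
      mountPath: /etc/podinfo
  volumes:
  - name: podinfo
    downwardAPI:
      items:
      - path: "labels"              # written as a plain-text file under the mount path
        fieldRef:
          fieldPath: metadata.labels
```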
### projected @@ -686,8 +686,8 @@ More details and examples can be found [here](https://github.com/kubernetes/exam ### ScaleIO ScaleIO is a software-based storage platform that can use existing hardware to create clusters of scalable shared block networked storage. The ScaleIO volume plugin allows deployed pods to access existing ScaleIO -volumes or it can dynamically provision new volumes, see -[ScaleIO Persistent Volumes](/docs/user-guide/persistent-volumes/#scaleio). +volumes (or it can dynamically provision new volumes for persistent volume claims, see +[ScaleIO Persistent Volumes](/docs/concepts/storage/persistent-volumes/#scaleio)). **Important:** You must have an existing ScaleIO cluster already setup and running with the volumes created before you can use them. {: .caution} diff --git a/docs/concepts/workloads/controllers/daemonset.md b/docs/concepts/workloads/controllers/daemonset.md index 69920f92e9d5f..30f1b33248833 100644 --- a/docs/concepts/workloads/controllers/daemonset.md +++ b/docs/concepts/workloads/controllers/daemonset.md @@ -138,8 +138,8 @@ Some possible patterns for communicating with Pods in a DaemonSet are: - **Push**: Pods in the DaemonSet are configured to send updates to another service, such as a stats database. They do not have clients. -- **NodeIP and Known Port**: Pods in the DaemonSet can use a `hostPort`, so that the Pods are reachable via the node IPs. Clients know the list of node IPs somehow, and know the port by convention. -- **DNS**: Create a [headless service](/docs/user-guide/services/#headless-services) with the same Pod selector, +- **NodeIP and Known Port**: Pods in the DaemonSet can use a `hostPort`, so that the pods are reachable via the node IPs. Clients know the list of node IPs somehow, and know the port by convention. +- **DNS**: Create a [headless service](/docs/concepts/services-networking/service/#headless-services) with the same pod selector, and then discover DaemonSets using the `endpoints` resource or retrieve multiple A records from DNS. - **Service**: Create a service with the same Pod selector, and use the service to reach a diff --git a/docs/concepts/workloads/controllers/jobs-run-to-completion.md b/docs/concepts/workloads/controllers/jobs-run-to-completion.md index 401368d0356ba..f71008a343a10 100644 --- a/docs/concepts/workloads/controllers/jobs-run-to-completion.md +++ b/docs/concepts/workloads/controllers/jobs-run-to-completion.md @@ -92,9 +92,7 @@ $ kubectl logs $pods ## Writing a Job Spec -As with all other Kubernetes config, a Job needs `apiVersion`, `kind`, and `metadata` fields. For -general information about working with config files, see [here](/docs/user-guide/simple-yaml), -[here](/docs/user-guide/configuring-containers), and [here](/docs/user-guide/working-with-resources). +As with all other Kubernetes config, a Job needs `apiVersion`, `kind`, and `metadata` fields. A Job also needs a [`.spec` section](https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status). diff --git a/docs/concepts/workloads/controllers/petset.md b/docs/concepts/workloads/controllers/petset.md new file mode 100644 index 0000000000000..ea4a16b653085 --- /dev/null +++ b/docs/concepts/workloads/controllers/petset.md @@ -0,0 +1,441 @@ +--- +approvers: +- bprashanth +- enisoc +- erictune +- foxish +- janetkuo +- kow3ns +- smarterclayton +title: PetSets +--- + +__Warning:__ Starting in Kubernetes version 1.5, PetSet has been renamed to [StatefulSet](/docs/concepts/abstractions/controllers/statefulsets). 
To use (or continue to use) PetSet in Kubernetes 1.5, you _must_ [migrate](/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/) your existing PetSets to StatefulSets. For information on working with StatefulSet, see the tutorial on [how to run replicated stateful applications](/docs/tasks/run-application/run-replicated-stateful-application/). + +__This document has been deprecated__, but can still apply if you're using + Kubernetes version 1.4 or earlier. + +* TOC +{:toc} + +__Terminology__ + +Throughout this doc you will see a few terms that are sometimes used interchangeably elsewhere, that might cause confusion. This section attempts to clarify them. + +* Node: A single virtual or physical machine in a Kubernetes cluster. +* Cluster: A group of nodes in a single failure domain, unless mentioned otherwise. +* Persistent Volume Claim (PVC): A request for storage, typically a [persistent volume](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/). +* Host name: The hostname attached to the UTS namespace of the pod, i.e. the output of `hostname` in the pod. +* DNS/Domain name: A *cluster local* domain name resolvable using standard methods (e.g.: [gethostbyname](http://linux.die.net/man/3/gethostbyname)). +* Ordinality: the property of being "ordinal", or occupying a position in a sequence. +* Pet: a single member of a PetSet; more generally, a stateful application. +* Peer: a process running a server, capable of communicating with other such processes. + +__Prerequisites__ + +This doc assumes familiarity with the following Kubernetes concepts: + +* [Pods](/docs/user-guide/pods/single-container/) +* [Cluster DNS](/docs/concepts/services-networking/dns-pod-service/) +* [Headless Services](/docs/concepts/services-networking/service/#headless-services) +* [Persistent Volumes](/docs/concepts/storage/persistent-volumes/) +* [Persistent Volume Provisioning](https://github.com/kubernetes/examples/tree/{{page.githubbranch}}/staging/persistent-volume-provisioning/README.md) + +You need a working Kubernetes cluster at version >= 1.3, with a healthy DNS [cluster addon](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/README.md) at version >= 15. You cannot use PetSet on a hosted Kubernetes provider that has disabled `alpha` resources. + +## What is a PetSet? + +In Kubernetes, most pod management abstractions group them into disposable units of work that compose a micro service. Replication controllers for example, are designed with a weak guarantee - that there should be N replicas of a particular pod template. The pods are treated as stateless units, if one of them is unhealthy or superseded by a newer version, the system just disposes it. + +``` + foo.default.svc.cluster.local + |service| + / \ + | pod-asdf | | pod-zxcv | +``` + +A PetSet, in contrast, is a group of stateful pods that require a stronger notion of identity. The document refers to these as "clustered applications". + +``` + *.foo.default.svc.cluster.local + | mysql-0 | <-> | mysql-1 | + [pv 0] [pv 1] +``` + +The co-ordinated deployment of clustered applications is notoriously hard. They require stronger notions of identity and membership, which they use in opaque internal protocols, and are especially prone to race conditions and deadlock. Traditionally administrators have deployed these applications by leveraging nodes as stable, long-lived entities with persistent storage and static ips. 
+ +The goal of PetSet is to decouple this dependency by assigning identities to individual instances of an application that are not anchored to the underlying physical infrastructure. For the rest of this document we will refer to these entities as "Pets". Our use of this term is predated by the "Pets vs Cattle" analogy. + +__Relationship between Pets and Pods__: PetSet requires there be {0..N-1} Pets. Each Pet has a deterministic name - PetSetName-Ordinal, and a unique identity. Each Pet has at most one pod, and each PetSet has at most one Pet with a given identity. + +## When to use PetSet? + +A PetSet ensures that a specified number of "pets" with unique identities are running at any given time. The identity of a Pet is comprised of: + +* a stable hostname, available in DNS +* an ordinal index +* stable storage: linked to the ordinal & hostname + +These properties are useful in deploying stateful applications. However most stateful applications are also clustered, meaning they form groups with strict membership requirements that rely on stored state. PetSet also helps with the 2 most common problems encountered managing such clustered applications: + +* discovery of peers for quorum +* startup/teardown ordering + +Only use PetSet if your application requires some or all of these properties. Managing pods as stateless replicas is vastly easier. + +Example workloads for PetSet: + +* Databases like MySQL or PostgreSQL that require a single instance attached to an NFS persistent volume at any time +* Clustered software like Zookeeper, Etcd, or Elasticsearch that require stable membership. + +## Alpha limitations + +Before you start deploying applications as PetSets, there are a few limitations you should understand. + +* PetSet is an *alpha* resource, not available in any Kubernetes release prior to 1.3. +* As with all alpha/beta resources, it can be disabled through the `--runtime-config` option passed to the apiserver, and in fact most likely will be disabled on hosted offerings of Kubernetes. +* The only updatable field on a PetSet is `replicas`. +* The storage for a given pet must either be provisioned by a [persistent volume provisioner](https://github.com/kubernetes/examples/tree/{{page.githubbranch}}/staging/persistent-volume-provisioning/README.md) based on the requested `storage class`, or pre-provisioned by an admin. Note that persistent volume provisioning is also currently in alpha. +* Deleting and/or scaling a PetSet down will *not* delete the volumes associated with the PetSet. This is done to ensure safety first, your data is more valuable than an auto purge of all related PetSet resources. **Deleting the Persistent Volume Claims will result in a deletion of the associated volumes**. +* All PetSets currently require a "governing service", or a Service responsible for the network identity of the pets. The user is responsible for this Service. +* Updating an existing PetSet is currently a manual process, meaning you either need to deploy a new PetSet with the new image version, or orphan Pets one by one, update their image, and join them back to the cluster. + +## Example PetSet + +We'll create a basic PetSet to demonstrate how Pets are assigned unique and "sticky" identities. 
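For orientation, a minimal manifest of the kind used in this example might look roughly like the following. This is a sketch only; the authoritative file is the `petset.yaml` included below, and details such as the alpha `apps/v1alpha1` API group and the storage-class annotation may differ in your cluster.

```shell
# Sketch only -- see the included petset.yaml for the authoritative manifest.
cat > petset.yaml <<'EOF'
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None          # headless: no virtual IP, pets get DNS subdomains
  selector:
    app: nginx
---
apiVersion: apps/v1alpha1  # PetSet is an alpha resource
kind: PetSet
metadata:
  name: web
spec:
  serviceName: "nginx"     # the governing service defined above
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      containers:
      - name: nginx
        image: gcr.io/google_containers/nginx-slim:0.8
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
      annotations:
        volume.alpha.kubernetes.io/storage-class: anything
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
EOF
```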
+ +{% include code.html language="yaml" file="petset.yaml" ghlink="/docs/concepts/workloads/controllers/petset.yaml" %} + +Saving this config into `petset.yaml` and submitting it to a Kubernetes cluster should create the defined PetSet and Pets it manages: + +```shell +$ kubectl create -f petset.yaml +service "nginx" created +petset "web" created +``` + +## Pet Identity + +The identity of a Pet sticks to it, regardless of which node it's (re) scheduled on. We can examine the identity of the pets we just created. + +### Ordinal index + +you should see 2 pods with predictable names formatted thus: `$(petset name)-$(ordinal index assigned by petset controller)` + +```shell +$ kubectl get po +NAME READY STATUS RESTARTS AGE +web-0 1/1 Running 0 10m +web-1 1/1 Running 0 10m +``` + +### Stable storage + +2 persistent volumes, one per pod. This is auto created by the PetSet based on the `volumeClaimTemplate` field + +```shell +$ kubectl get pv +NAME CAPACITY ACCESSMODES STATUS CLAIM REASON AGE +pvc-90234946-3717-11e6-a46e-42010af00002 1Gi RWO Bound default/www-web-0 11m +pvc-902733c2-3717-11e6-a46e-42010af00002 1Gi RWO Bound default/www-web-1 11m +``` + +### Network identity + +The network identity has 2 parts. First, we created a headless Service that controls the domain within which we create Pets. The domain managed by this Service takes the form: `$(service name).$(namespace).svc.cluster.local`, where "cluster.local" is the [cluster domain](/docs/concepts/services-networking/dns-pod-service/). As each pet is created, it gets a matching DNS subdomain, taking the form: `$(petname).$(governing service domain)`, where the governing service is defined by the `serviceName` field on the PetSet. + +Here are some examples of choices for Cluster Domain, Service name, PetSet name, and how that affects the DNS names for the Pets and the hostnames in the Pet's pods: + +Cluster Domain | Service (ns/name) | PetSet (ns/name) | PetSet Domain | Pet DNS | Pet Hostname | +-------------- | ----------------- | ----------------- | -------------- | ------- | ------------ | + cluster.local | default/nginx | default/web | nginx.default.svc.cluster.local | web-{0..N-1}.nginx.default.svc.cluster.local | web-{0..N-1} | + cluster.local | foo/nginx | foo/web | nginx.foo.svc.cluster.local | web-{0..N-1}.nginx.foo.svc.cluster.local | web-{0..N-1} | + kube.local | foo/nginx | foo/web | nginx.foo.svc.kube.local | web-{0..N-1}.nginx.foo.svc.kube.local | web-{0..N-1} | + +Note that Cluster Domain will be set to `cluster.local` unless [otherwise configured](https://github.com/kubernetes/kubernetes/blob/master/examples/cluster-dns/README.md). + +Let's verify our assertion with a simple test. + +```shell +$ kubectl get svc +NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE +nginx None 80/TCP 12m +... 
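# Note: CLUSTER-IP is "None" because nginx is a headless service. Instead of a
# virtual IP, each pet gets its own DNS subdomain of the form
# $(petname).nginx.default.svc.cluster.local, which the next commands verify.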
+``` + +First, the PetSet provides a stable hostname: + +```shell +$ for i in 0 1; do kubectl exec web-$i -- sh -c 'hostname'; done +web-0 +web-1 +``` + +And the hostname is linked to the in-cluster DNS address: + +```shell +$ kubectl run -i --tty --image busybox dns-test --restart=Never /bin/sh +dns-test # nslookup web-0.nginx +Server: 10.0.0.10 +Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local + +Name: web-0.nginx +Address 1: 10.180.3.5 + +dns-test # nslookup web-1.nginx +Server: 10.0.0.10 +Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local + +Name: web-1.nginx +Address 1: 10.180.0.9 +``` + +The containers are running nginx webservers, which by default will look for an index.html file in `/usr/share/nginx/html/index.html`. That directory is backed by a `PersistentVolume` created by the PetSet. So let's write our hostname there: + +```shell +$ for i in 0 1; do + kubectl exec web-$i -- sh -c 'echo $(hostname) > /usr/share/nginx/html/index.html'; +done +``` + +And verify each webserver serves its own hostname: + +```shell +$ for i in 0 1; do kubectl exec -it web-$i -- curl localhost; done +web-0 +web-1 +``` + +Now delete all pods in the petset: + +```shell +$ kubectl delete po -l app=nginx +pod "web-0" deleted +pod "web-1" deleted +``` + +Wait for them to come back up, and try to retrieve the previously written hostname through the DNS name of the peer. They match, because the storage, DNS name, and hostname stick to the Pet no matter where it gets scheduled: + +```shell +$ kubectl exec -it web-1 -- curl web-0.nginx +web-0 +$ kubectl exec -it web-0 -- curl web-1.nginx +web-1 +``` + +## Peer discovery + +A pet can piece together its own identity: + +1. Use the [downward api](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/) to find its pod name +2. Run `hostname` to find its DNS name +3. Run `mount` or `df` to find its volumes (usually this is unnecessary) + +It's not necessary to "discover" the governing Service of a PetSet, since it's known at creation time you can simply pass it down through an [environment variable](/docs/user-guide/environment-guide). + +Usually pets also need to find their peers. In the previous nginx example, we just used `kubectl` to get the names of existing pods, and as humans, we could tell which ones belonged to a given PetSet. Another way to find peers is by contacting the API server, just like `kubectl`, but that has several disadvantages (you end up implementing a Kubernetes specific init system that runs as pid 1 in your application container). + +PetSet gives you a way to discover your peers using DNS records. To illustrate this we can use the previous example (note: one usually doesn't `apt-get` in a container). + +```shell +$ kubectl exec -it web-0 /bin/sh +web-0 # apt-get update && apt-get install -y dnsutils +... + +web-0 # nslookup -type=srv nginx.default +Server: 10.0.0.10 +Address: 10.0.0.10#53 + +nginx.default.svc.cluster.local service = 10 50 0 web-1.ub.default.svc.cluster.local. +nginx.default.svc.cluster.local service = 10 50 0 web-0.ub.default.svc.cluster.local. +``` + +## Updating a PetSet + +You cannot update any field of the PetSet except `spec.replicas` and the `containers` in the podTemplate. Updating `spec.replicas` will scale the PetSet, updating `containers` will not have any effect till a Pet is deleted, at which time it is recreated with the modified podTemplate. + +## Scaling a PetSet + +You can scale a PetSet by updating the "replicas" field. 
Note however that the controller will only: + +1. Create one pet at a time, in order from {0..N-1}, and wait till each one is in [Running and Ready](/docs/user-guide/pod-states) before creating the next +2. Delete one pet at a time, in reverse order from {N-1..0}, and wait till each one is completely shutdown (past its [terminationGracePeriodSeconds](/docs/concepts/workloads/pods/pod/#termination-of-pods) before deleting the next + +```shell +$ kubectl get po +NAME READY STATUS RESTARTS AGE +web-0 1/1 Running 0 30s +web-1 1/1 Running 0 36s + +$ kubectl patch petset web -p '{"spec":{"replicas":3}}' +petset "web" patched + +$ kubectl get po +NAME READY STATUS RESTARTS AGE +web-0 1/1 Running 0 40s +web-1 1/1 Running 0 46s +web-2 1/1 Running 0 8s +``` + +You can also use the `kubectl scale` command: + +```shell +$ kubectl get petset +NAME DESIRED CURRENT AGE +web 3 3 24m + +$ kubectl scale petset web --replicas=5 +petset "web" scaled + +$ kubectl get po --watch-only +NAME READY STATUS RESTARTS AGE +web-0 1/1 Running 0 10m +web-1 1/1 Running 0 27m +web-2 1/1 Running 0 10m +web-3 1/1 Running 0 3m +web-4 0/1 ContainerCreating 0 48s + +$ kubectl get petset web +NAME DESIRED CURRENT AGE +web 5 5 30m +``` + +Note however, that scaling up to N and back down to M *will not* delete the volumes of the M-N pets, as described in the section on [deletion](#deleting-a-petset), i.e. scaling back up to M creates new pets that use the same volumes. To see this in action, scale the PetSet back down to 3: + +```shell +$ kubectl get po --watch-only +web-4 1/1 Terminating 0 4m +web-4 1/1 Terminating 0 4m +web-3 1/1 Terminating 0 6m +web-3 1/1 Terminating 0 6m +``` + +Note that we still have 5 pvcs: + +```shell +$ kubectl get pvc +NAME STATUS VOLUME CAPACITY ACCESSMODES AGE +www-web-0 Bound pvc-42ca5cef-8113-11e6-82f6-42010af00002 1Gi RWO 32m +www-web-1 Bound pvc-42de30af-8113-11e6-82f6-42010af00002 1Gi RWO 32m +www-web-2 Bound pvc-ba416413-8115-11e6-82f6-42010af00002 1Gi RWO 14m +www-web-3 Bound pvc-ba45f19c-8115-11e6-82f6-42010af00002 1Gi RWO 14m +www-web-4 Bound pvc-ba47674a-8115-11e6-82f6-42010af00002 1Gi RWO 14m +``` + +This allows you to upgrade the image of a petset and have it come back up with the same data, as described in the next section. + +## Image upgrades + +PetSet currently *does not* support automated image upgrade as noted in the section on [limitations](#alpha-limitations), however you can update the `image` field of any container in the podTemplate and delete Pets one by one, the PetSet controller will recreate it with the new image. 
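One way to make that edit is with `kubectl patch`; the following is a sketch (a JSON patch against the first container of the pod template), and `kubectl edit petset web` achieves the same thing interactively:

```shell
# Illustrative only: switch the pod template's first container to the new image.
kubectl patch petset web --type='json' \
  -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "gcr.io/google_containers/nginx-slim:0.7"}]'
```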
+ +Edit the image on the PetSet to `gcr.io/google_containers/nginx-slim:0.7` and delete 1 Pet: + +```shell{% raw %} +$ for p in 0 1 2; do kubectl get po web-$p --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done +gcr.io/google_containers/nginx-slim:0.8 +gcr.io/google_containers/nginx-slim:0.8 +gcr.io/google_containers/nginx-slim:0.8 + +$ kubectl delete po web-0 +pod "web-0" deleted + +$ for p in 0 1 2; do kubectl get po web-$p --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done +gcr.io/google_containers/nginx-slim:0.7 +gcr.io/google_containers/nginx-slim:0.8 +gcr.io/google_containers/nginx-slim:0.8 +{% endraw %}``` + +Delete the remaining 2: + +```shell +$ kubectl delete po web-1 web-2 +pod "web-1" deleted +pod "web-2" deleted +``` + +Wait till the PetSet is stable and check the images: + +```shell{% raw %} +$ for p in 0 1 2; do kubectl get po web-$p --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done +gcr.io/google_containers/nginx-slim:0.7 +gcr.io/google_containers/nginx-slim:0.7 +gcr.io/google_containers/nginx-slim:0.7 +{% endraw %}``` + +## Deleting a PetSet + +Deleting a PetSet through kubectl will scale it down to 0, thereby deleting all the Pets. If you wish to delete just the PetSet and not the Pets, use `--cascade=false`: + +```shell +$ kubectl delete -f petset.yaml --cascade=false +petset "web" deleted + +$ kubectl get po -l app=nginx +NAME READY STATUS RESTARTS AGE +web-0 1/1 Running 0 21h +web-1 1/1 Running 0 21h + +$ kubectl delete po -l app=nginx +pod "web-0" deleted +pod "web-1" deleted +``` + +Deleting the pods will *not* delete the volumes. Until we finalize the recycle policy for these volumes they will have to get cleaned up by an admin. This is to ensure that you have the chance to copy data off the volume before deleting it. Simply deleting the PVC after the pods have left the [terminating state](/docs/concepts/workloads/pods/pod/#termination-of-pods) should trigger deletion of the backing Persistent Volumes. + +**Note: you will lose all your data once the PVC is deleted, do this with caution.** + +```shell +$ kubectl get po -l app=nginx +$ kubectl get pvc -l app=nginx +NAME STATUS VOLUME CAPACITY ACCESSMODES AGE +www-web-0 Bound pvc-62d271cd-3822-11e6-b1b7-42010af00002 0 21h +www-web-1 Bound pvc-62d6750e-3822-11e6-b1b7-42010af00002 0 21h + +$ kubectl delete pvc -l app=nginx +$ kubectl get pv +``` + +If you simply want to clean everything: + +```shell{% raw %} +$ grace=$(kubectl get po web-0 --template '{{.spec.terminationGracePeriodSeconds}}') +$ kubectl delete petset,po -l app=nginx +$ sleep $grace +$ kubectl delete pvc -l app=nginx +{% endraw %} +``` + +## Troubleshooting + +You might have noticed an `annotations` field in all the PetSets shown above. + +```yaml +annotations: + pod.alpha.kubernetes.io/initialized: "true" +``` + +This field is a debugging hook. It pauses any scale up/down operations on the entire PetSet. If you'd like to pause a petset after each pet, set it to `false` in the template, wait for each pet to come up, verify it has initialized correctly, and then set it to `true` using `kubectl edit` on the pet (setting it to `false` on *any pet* is enough to pause the PetSet). If you don't need it, create the PetSet with it set to `true` as shown. This is surprisingly useful in debugging bootstrapping race conditions. + +## Future Work + +There are a LOT of planned improvements since PetSet is still in alpha. 
+ +* Data gravity and local storage +* Richer notification events +* Public network identities +* WAN cluster deployments (multi-AZ/region/cloud provider) +* Image and node upgrades + +This list goes on, if you have examples, ideas or thoughts, please contribute. + +## Alternatives + +Deploying one RC of size 1/Service per pod is a popular alternative, as is simply deploying a DaemonSet that utilizes the identity of a Node. + +## Next steps + +* Learn about [StatefulSet](/docs/concepts/abstractions/controllers/statefulsets/), + the replacement for PetSet introduced in Kubernetes version 1.5. +* [Migrate your existing PetSets to StatefulSets](/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/) + when upgrading to Kubernetes version 1.5 or higher. + diff --git a/docs/concepts/workloads/controllers/replicaset.md b/docs/concepts/workloads/controllers/replicaset.md index 224b922473b53..7f91398263118 100644 --- a/docs/concepts/workloads/controllers/replicaset.md +++ b/docs/concepts/workloads/controllers/replicaset.md @@ -12,7 +12,7 @@ ReplicaSet is the next-generation Replication Controller. The only difference between a _ReplicaSet_ and a [_Replication Controller_](/docs/concepts/workloads/controllers/replicationcontroller/) right now is the selector support. ReplicaSet supports the new set-based selector requirements -as described in the [labels user guide](/docs/user-guide/labels/#label-selectors) +as described in the [labels user guide](/docs/concepts/overview/working-with-objects/labels/#label-selectors) whereas a Replication Controller only supports equality-based selector requirements. {% endcapture %} diff --git a/docs/concepts/workloads/controllers/replicationcontroller.md b/docs/concepts/workloads/controllers/replicationcontroller.md index 42f929317f34a..12a37bc4456a7 100644 --- a/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/docs/concepts/workloads/controllers/replicationcontroller.md @@ -129,7 +129,7 @@ different, and the `.metadata.labels` do not affect the behavior of the Replicat ### Pod Selector -The `.spec.selector` field is a [label selector](/docs/user-guide/labels/#label-selectors). A ReplicationController +The `.spec.selector` field is a [label selector](/docs/concepts/overview/working-with-objects/labels/#label-selectors). A ReplicationController manages all the pods with labels that match the selector. It does not distinguish between pods that it created or deleted and pods that another person or process created or deleted. This allows the ReplicationController to be replaced without affecting the running pods. @@ -243,7 +243,7 @@ object](/docs/api-reference/{{page.version}}/#replicationcontroller-v1-core). ### ReplicaSet -[`ReplicaSet`](/docs/concepts/workloads/controllers/replicaset/) is the next-generation ReplicationController that supports the new [set-based label selector](/docs/user-guide/labels/#set-based-requirement). +[`ReplicaSet`](/docs/concepts/workloads/controllers/replicaset/) is the next-generation ReplicationController that supports the new [set-based label selector](/docs/concepts/overview/working-with-objects/labels/#set-based-requirement). It’s mainly used by [`Deployment`](/docs/concepts/workloads/controllers/deployment/) as a mechanism to orchestrate pod creation, deletion and updates. Note that we recommend using Deployments instead of directly using Replica Sets, unless you require custom update orchestration or don’t require updates at all. 
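To make the selector distinction concrete, here is a sketch of the two styles as `kubectl` accepts them. The label keys and values are illustrative; `kubectl` itself understands both forms, and the difference is which form a resource's `spec.selector` can express.

```shell
# Equality-based requirements (the only form a ReplicationController's selector supports):
kubectl get pods -l environment=production,tier=frontend

# Set-based requirements (expressible in a ReplicaSet's spec.selector):
kubectl get pods -l 'environment in (production, qa),tier notin (backend)'
```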
diff --git a/docs/concepts/workloads/controllers/statefulset.md b/docs/concepts/workloads/controllers/statefulset.md index 841d11119ac57..7e2e0fcd872db 100644 --- a/docs/concepts/workloads/controllers/statefulset.md +++ b/docs/concepts/workloads/controllers/statefulset.md @@ -163,7 +163,7 @@ The StatefulSet should not specify a `pod.Spec.TerminationGracePeriodSeconds` of When the nginx example above is created, three Pods will be deployed in the order web-0, web-1, web-2. web-1 will not be deployed before web-0 is -[Running and Ready](/docs/user-guide/pod-states), and web-2 will not be deployed until +[Running and Ready](/docs/user-guide/pod-states/), and web-2 will not be deployed until web-1 is Running and Ready. If web-0 should fail, after web-1 is Running and Ready, but before web-2 is launched, web-2 will not be launched until web-0 is successfully relaunched and becomes Running and Ready. @@ -228,7 +228,7 @@ update, roll out a canary, or perform a phased roll out. {% endcapture %} {% capture whatsnext %} -* Follow an example of [deploying a stateful application](/docs/tutorials/stateful-application/basic-stateful-set). +* Follow an example of [deploying a stateful application](/docs/tutorials/stateful-application/basic-stateful-set/). * Follow an example of [deploying Cassandra with Stateful Sets](/docs/tutorials/stateful-application/cassandra/). {% endcapture %} diff --git a/docs/getting-started-guides/coreos/bare_metal_offline.md b/docs/getting-started-guides/coreos/bare_metal_offline.md index c7684f73b22a0..35824a4f03fd9 100644 --- a/docs/getting-started-guides/coreos/bare_metal_offline.md +++ b/docs/getting-started-guides/coreos/bare_metal_offline.md @@ -631,7 +631,7 @@ Reboot these servers to get the images PXEd and ready for running containers! Now that the CoreOS with Kubernetes installed is up and running lets spin up some Kubernetes pods to demonstrate the system. -See [a simple nginx example](/docs/user-guide/simple-nginx) to try out your new cluster. +See [a simple nginx example](/docs/user-guide/simple-nginx/) to try out your new cluster. For more complete applications, please look in the [examples directory](https://github.com/kubernetes/examples/tree/{{page.githubbranch}}/). @@ -683,6 +683,6 @@ for i in `kubectl get pods | awk '{print $1}'`; do kubectl delete pod $i; done IaaS Provider | Config. Mgmt | OS | Networking | Docs | Conforms | Support Level -------------------- | ------------ | ------ | ---------- | --------------------------------------------- | ---------| ---------------------------- -Bare-metal (Offline) | CoreOS | CoreOS | flannel | [docs](/docs/getting-started-guides/coreos/bare_metal_offline) | | Community ([@jeffbean](https://github.com/jeffbean)) +Bare-metal (Offline) | CoreOS | CoreOS | flannel | [docs](/docs/getting-started-guides/coreos/bare_metal_offline/) | | Community ([@jeffbean](https://github.com/jeffbean)) -For support level information on all solutions, see the [Table of solutions](/docs/getting-started-guides/#table-of-solutions) chart. +For support level information on all solutions, see the [Table of solutions](/docs/getting-started-guides/#table-of-solutions/) chart. 
diff --git a/docs/getting-started-guides/dcos.md b/docs/getting-started-guides/dcos.md index 07aa56c002f0e..23ad7912dd65d 100644 --- a/docs/getting-started-guides/dcos.md +++ b/docs/getting-started-guides/dcos.md @@ -1,143 +1,16 @@ --- approvers: -- karlkfi -title: DCOS +- smugcloud +title: Kubernetes on DCOS --- -{% assign for_k8s_version="1.6" %}{% include feature-state-deprecated.md %} +Mesosphere provides an easy option to provision Kubernetes onto [DC/OS](https://mesosphere.com/product/), offering: -This guide will walk you through installing [Kubernetes-Mesos](https://github.com/mesosphere/kubernetes-mesos) on [Datacenter Operating System (DCOS)](https://mesosphere.com/product/) with the [DCOS CLI](https://github.com/mesosphere/dcos-cli) and operating Kubernetes with the [DCOS Kubectl plugin](https://github.com/mesosphere/dcos-kubectl). +* Pure upstream Kubernetes +* Single-click cluster provisioning +* Highly available and secure by default +* Kubernetes running alongside fast-data platforms (e.g. Akka, Cassandra, Kafka, Spark) -* TOC -{:toc} +## Official Mesosphere Guide - -## About Kubernetes on DCOS - -DCOS is system software that manages computer cluster hardware and software resources and provides common services for distributed applications. Among other services, it provides [Apache Mesos](http://mesos.apache.org/) as its cluster kernel and [Marathon](https://mesosphere.github.io/marathon/) as its init system. With DCOS CLI, Mesos frameworks like [Kubernetes-Mesos](https://github.com/mesosphere/kubernetes-mesos) can be installed with a single command. - -Another feature of the DCOS CLI is that it allows plugins like the [DCOS Kubectl plugin](https://github.com/mesosphere/dcos-kubectl). This allows for easy access to a version-compatible Kubectl without having to manually download or install. - -Further information about the benefits of installing Kubernetes on DCOS can be found in the [Kubernetes-Mesos documentation](https://releases.k8s.io/{{page.githubbranch}}/contrib/mesos/README.md). - -For more details about the Kubernetes DCOS packaging, see the [Kubernetes-Mesos project](https://github.com/mesosphere/kubernetes-mesos). - -Since Kubernetes-Mesos is still alpha, it is a good idea to familiarize yourself with the [current known issues](https://releases.k8s.io/{{page.githubbranch}}/contrib/mesos/docs/issues.md) which may limit or modify the behavior of Kubernetes on DCOS. - -If you have problems completing the steps below, please [file an issue against the kubernetes-mesos project](https://github.com/mesosphere/kubernetes-mesos/issues). - - -## Resources - -Explore the following resources for more information about Kubernetes, Kubernetes on Mesos/DCOS, and DCOS itself. 
- -- [DCOS Documentation](https://docs.mesosphere.com/) -- [Managing DCOS Services](https://docs.mesosphere.com/services/kubernetes/) -- [Kubernetes Examples](https://github.com/kubernetes/examples/tree/{{page.githubbranch}}/) -- [Kubernetes on Mesos Documentation](https://github.com/kubernetes-incubator/kube-mesos-framework/blob/master/README.md) -- [Kubernetes on Mesos Release Notes](https://github.com/mesosphere/kubernetes-mesos/releases) -- [Kubernetes on DCOS Package Source](https://github.com/mesosphere/kubernetes-mesos) - - -## Prerequisites - -- A running [DCOS cluster](https://mesosphere.com/product/) - - [DCOS Community Edition](https://docs.mesosphere.com/1.7/archived-dcos-enterprise-edition/installing-enterprise-edition-1-6/cloud/) is currently available on [AWS](https://mesosphere.com/amazon/). - - [DCOS Enterprise Edition](https://mesosphere.com/product/) can be deployed on virtual or bare metal machines. Contact sales@mesosphere.com for more info and to set up an engagement. -- [DCOS CLI](https://docs.mesosphere.com/install/cli/) installed locally - - -## Install - -1. Configure and validate the [Mesosphere Multiverse](https://github.com/mesosphere/multiverse) as a package source repository - - ```shell -$ dcos config prepend package.sources https://github.com/mesosphere/multiverse/archive/version-1.x.zip - $ dcos package update --validate - ``` -2. Install etcd - - By default, the Kubernetes DCOS package starts a single-node etcd. In order to avoid state loss in the event of Kubernetes component container failure, install an HA [etcd-mesos](https://github.com/mesosphere/etcd-mesos) cluster on DCOS. - - ```shell -$ dcos package install etcd - ``` -3. Verify that etcd is installed and healthy - - The etcd cluster takes a short while to deploy. Verify that `/etcd` is healthy before going on to the next step. - - ```shell -$ dcos marathon app list - ID MEM CPUS TASKS HEALTH DEPLOYMENT CONTAINER CMD - /etcd 128 0.2 1/1 1/1 --- DOCKER None - ``` -4. Create Kubernetes installation configuration - - Configure Kubernetes to use the HA etcd installed on DCOS. - - ```shell -$ cat >/tmp/options.json <Container v1

resources
ResourceRequirements -Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources +Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#resources securityContext
SecurityContext @@ -2268,11 +2268,11 @@

PersistentVolumeClaim v1

spec
PersistentVolumeClaimSpec -Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims status
PersistentVolumeClaimStatus -Status represents the current information/status of a persistent volume claim. Read-only. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +Status represents the current information/status of a persistent volume claim. Read-only. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims @@ -2290,11 +2290,11 @@

PersistentVolumeClaimSpec v1

accessModes
string array -AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 +AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes-1 resources
ResourceRequirements -Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources +Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#resources selector
LabelSelector @@ -2320,7 +2320,7 @@

PersistentVolumeClaimStatus v1

accessModes
string array -AccessModes contains the actual access modes the volume backing the PVC has. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 +AccessModes contains the actual access modes the volume backing the PVC has. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes-1 capacity
object @@ -2347,7 +2347,7 @@

PersistentVolumeClaimList v1

items
PersistentVolumeClaim array -A list of persistent volume claims. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +A list of persistent volume claims. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims kind
string @@ -2543,7 +2543,7 @@

Volume v1

persistentVolumeClaim
PersistentVolumeClaimVolumeSource -PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims photonPersistentDisk
PhotonPersistentDiskVolumeSource @@ -3913,7 +3913,7 @@

NodeStatus v1

capacity
object -Capacity represents the total resources of a node. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details. +Capacity represents the total resources of a node. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#capacity for more details. conditions
NodeCondition array @@ -3995,7 +3995,7 @@

PersistentVolume v1

-

PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://kubernetes.io/docs/user-guide/persistent-volumes

+

PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/

@@ -4021,11 +4021,11 @@

PersistentVolume v1

spec
PersistentVolumeSpec -Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes +Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes status
PersistentVolumeStatus -Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes +Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes @@ -4043,7 +4043,7 @@

PersistentVolumeSpec v1

accessModes
string array -AccessModes contains all ways the volume can be mounted. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes +AccessModes contains all ways the volume can be mounted. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes awsElasticBlockStore
AWSElasticBlockStoreVolumeSource @@ -4059,7 +4059,7 @@

PersistentVolumeSpec v1

capacity
object -A description of the persistent volume's resources and capacity. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity +A description of the persistent volume's resources and capacity. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#capacity cephfs
CephFSVolumeSource @@ -4071,7 +4071,7 @@

PersistentVolumeSpec v1

claimRef
ObjectReference -ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding +ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#binding fc
FCVolumeSource @@ -4107,7 +4107,7 @@

PersistentVolumeSpec v1

persistentVolumeReclaimPolicy
string -What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy +What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#recycling-policy photonPersistentDisk
PhotonPersistentDiskVolumeSource @@ -4145,7 +4145,7 @@

PersistentVolumeStatus v1

phase
string -Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#phase +Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#phase reason
string @@ -4168,7 +4168,7 @@

PersistentVolumeList v1

items
PersistentVolume array -List of persistent volumes. More info: http://kubernetes.io/docs/user-guide/persistent-volumes +List of persistent volumes. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/ kind
string @@ -8515,7 +8515,7 @@

PersistentVolumeClaimVolumeSource claimName
string -ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims readOnly
boolean diff --git a/docs/resources-reference/v1.6/index.html b/docs/resources-reference/v1.6/index.html index 4c69ee05eb547..563418f892221 100644 --- a/docs/resources-reference/v1.6/index.html +++ b/docs/resources-reference/v1.6/index.html @@ -116,7 +116,7 @@

Container v1 core

resources
ResourceRequirements -Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources +Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#resources securityContext
SecurityContext @@ -2357,11 +2357,11 @@

PersistentVolumeClaim v1 core

spec
PersistentVolumeClaimSpec -Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims status
PersistentVolumeClaimStatus -Status represents the current information/status of a persistent volume claim. Read-only. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +Status represents the current information/status of a persistent volume claim. Read-only. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims @@ -2379,11 +2379,11 @@

PersistentVolumeClaimSpec v1 core

accessModes
string array -AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 +AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes-1 resources
ResourceRequirements -Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources +Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#resources selector
LabelSelector @@ -2391,7 +2391,7 @@

PersistentVolumeClaimSpec v1 core

storageClassName
string -Name of the StorageClass required by the claim. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1 +Name of the StorageClass required by the claim. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 volumeName
string @@ -2413,7 +2413,7 @@

PersistentVolumeClaimStatus v1 core accessModes
string array -AccessModes contains the actual access modes the volume backing the PVC has. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 +AccessModes contains the actual access modes the volume backing the PVC has. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes-1 capacity
object @@ -2440,7 +2440,7 @@

PersistentVolumeClaimList v1 core

items
PersistentVolumeClaim array -A list of persistent volume claims. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +A list of persistent volume claims. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims kind
string @@ -2639,7 +2639,7 @@

Volume v1 core

persistentVolumeClaim
PersistentVolumeClaimVolumeSource -PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims photonPersistentDisk
PhotonPersistentDiskVolumeSource @@ -4299,7 +4299,7 @@

NodeStatus v1 core

capacity
object -Capacity represents the total resources of a node. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details. +Capacity represents the total resources of a node. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#capacity for more details. conditions
NodeCondition array @@ -4381,7 +4381,7 @@

PersistentVolume v1 core

-

PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://kubernetes.io/docs/user-guide/persistent-volumes

+

PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/

@@ -4407,11 +4407,11 @@

PersistentVolume v1 core

spec
PersistentVolumeSpec -Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes +Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes status
PersistentVolumeStatus -Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes +Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes @@ -4429,7 +4429,7 @@

PersistentVolumeSpec v1 core

accessModes
string array -AccessModes contains all ways the volume can be mounted. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes +AccessModes contains all ways the volume can be mounted. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes awsElasticBlockStore
AWSElasticBlockStoreVolumeSource @@ -4445,7 +4445,7 @@

PersistentVolumeSpec v1 core

capacity
object -A description of the persistent volume's resources and capacity. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity +A description of the persistent volume's resources and capacity. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#capacity cephfs
CephFSVolumeSource @@ -4457,7 +4457,7 @@

PersistentVolumeSpec v1 core

claimRef
ObjectReference -ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding +ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#binding fc
FCVolumeSource @@ -4493,7 +4493,7 @@

PersistentVolumeSpec v1 core

persistentVolumeReclaimPolicy
string -What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy +What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#recycling-policy photonPersistentDisk
PhotonPersistentDiskVolumeSource @@ -4543,7 +4543,7 @@

PersistentVolumeStatus v1 core

phase
string -Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#phase +Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#phase reason
string @@ -4566,7 +4566,7 @@

PersistentVolumeList v1 core

items
PersistentVolume array -List of persistent volumes. More info: http://kubernetes.io/docs/user-guide/persistent-volumes +List of persistent volumes. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/ kind
string @@ -9523,7 +9523,7 @@

PersistentVolumeClaimVolumeSo claimName
string -ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims +ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims readOnly
boolean diff --git a/docs/setup/independent/create-cluster-kubeadm.md b/docs/setup/independent/create-cluster-kubeadm.md index b579fd8e22839..e26bf009510fe 100644 --- a/docs/setup/independent/create-cluster-kubeadm.md +++ b/docs/setup/independent/create-cluster-kubeadm.md @@ -258,7 +258,6 @@ kubectl apply -f https://raw.githubusercontent.com/projectcalico/canal/master/k8 ```shell kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel-rbac.yml ``` {% endcapture %} diff --git a/docs/setup/independent/install-kubeadm.md b/docs/setup/independent/install-kubeadm.md index 69a1d682a59ec..445c4a3108ea1 100644 --- a/docs/setup/independent/install-kubeadm.md +++ b/docs/setup/independent/install-kubeadm.md @@ -39,7 +39,7 @@ This page shows how to use install kubeadm. |-------------|---------------------------------| | 10250 | Kubelet API | | 10255 | Read-only Kubelet API (Heapster)| -| 30000-32767 | Default port range for [NodePort Services](/docs/concepts/services-networking/service). Typically, these ports would need to be exposed to external load-balancers, or other external consumers of the application itself. | +| 30000-32767 | Default port range for [NodePort Services](/docs/concepts/services-networking/service/). Typically, these ports would need to be exposed to external load-balancers, or other external consumers of the application itself. | Any port numbers marked with * are overridable, so you will need to ensure any custom ports you provide are also open. @@ -127,7 +127,8 @@ example. You have to do this until SELinux support is improved in the kubelet. {% capture whatsnext %} -* [Using kubeadm to Create a Cluster](/docs/getting-started-guides/kubeadm/) +* [Using kubeadm to Create a + Cluster](/docs/setup/independent/create-cluster-kubeadm/) {% endcapture %} diff --git a/docs/setup/pick-right-solution.md b/docs/setup/pick-right-solution.md index 5c65f1a38cf6d..8a624c15236e6 100644 --- a/docs/setup/pick-right-solution.md +++ b/docs/setup/pick-right-solution.md @@ -29,7 +29,7 @@ a Kubernetes cluster from scratch. * [Minikube](/docs/getting-started-guides/minikube/) is the recommended method for creating a local, single-node Kubernetes cluster for development and testing. Setup is completely automated and doesn't require a cloud provider account. -* [Ubuntu on LXD](/docs/getting-started-guides/ubuntu/local) supports a nine-instance deployment on localhost. +* [Ubuntu on LXD](/docs/getting-started-guides/ubuntu/local/) supports a nine-instance deployment on localhost. * [IBM Cloud private-ce (Community Edition)](https://www.ibm.com/support/knowledgecenter/en/SSBS6K/product_welcome_cloud_private.html) can use VirtualBox on your machine to deploy Kubernetes to one or more VMs for dev and test scenarios. Scales to full multi-node cluster. Free version of the enterprise solution. @@ -62,11 +62,11 @@ a Kubernetes cluster from scratch. These solutions allow you to create Kubernetes clusters on a range of Cloud IaaS providers with only a few commands. These solutions are actively developed and have active community support. 
-* [Google Compute Engine (GCE)](/docs/getting-started-guides/gce) -* [AWS](/docs/getting-started-guides/aws) -* [Azure](/docs/getting-started-guides/azure) +* [Google Compute Engine (GCE)](/docs/getting-started-guides/gce/) +* [AWS](/docs/getting-started-guides/aws/) +* [Azure](/docs/getting-started-guides/azure/) * [Tectonic by CoreOS](https://coreos.com/tectonic) -* [CenturyLink Cloud](/docs/getting-started-guides/clc) +* [CenturyLink Cloud](/docs/getting-started-guides/clc/) * [IBM Bluemix](https://github.com/patrocinio/kubernetes-softlayer) * [Stackpoint.io](/docs/getting-started-guides/stackpoint/) * [KUBE2GO.io](https://kube2go.io/) @@ -80,7 +80,7 @@ base operating systems. If you can find a guide below that matches your needs, use it. It may be a little out of date, but it will be easier than starting from scratch. If you do want to start from scratch, either because you have special requirements, or just because you want to understand what is underneath a Kubernetes -cluster, try the [Getting Started from Scratch](/docs/getting-started-guides/scratch) guide. +cluster, try the [Getting Started from Scratch](/docs/getting-started-guides/scratch/) guide. If you are interested in supporting Kubernetes on a new platform, see [Writing a Getting Started Guide](https://git.k8s.io/community/contributors/devel/writing-a-getting-started-guide.md). @@ -95,40 +95,40 @@ with a single command per machine. These solutions are combinations of cloud providers and operating systems not covered by the above solutions. -* [CoreOS on AWS or GCE](/docs/getting-started-guides/coreos) +* [CoreOS on AWS or GCE](/docs/getting-started-guides/coreos/) * [Kubernetes on Ubuntu](/docs/getting-started-guides/ubuntu/) * [Kubespray](/docs/getting-started-guides/kubespray/) ## On-Premises VMs -* [Vagrant](/docs/getting-started-guides/coreos) (uses CoreOS and flannel) -* [CloudStack](/docs/getting-started-guides/cloudstack) (uses Ansible, CoreOS and flannel) -* [Vmware vSphere](/docs/getting-started-guides/vsphere) (uses Debian) -* [Vmware Photon Controller](/docs/getting-started-guides/photon-controller) (uses Debian) +* [Vagrant](/docs/getting-started-guides/coreos/) (uses CoreOS and flannel) +* [CloudStack](/docs/getting-started-guides/cloudstack/) (uses Ansible, CoreOS and flannel) +* [Vmware vSphere](/docs/getting-started-guides/vsphere/) (uses Debian) +* [Vmware Photon Controller](/docs/getting-started-guides/photon-controller/) (uses Debian) * [Vmware vSphere, OpenStack, or Bare Metal](/docs/getting-started-guides/ubuntu/) (uses Juju, Ubuntu and flannel) -* [Vmware](/docs/getting-started-guides/coreos) (uses CoreOS and flannel) -* [CoreOS on libvirt](/docs/getting-started-guides/libvirt-coreos) (uses CoreOS) -* [oVirt](/docs/getting-started-guides/ovirt) -* [OpenStack Heat](/docs/getting-started-guides/openstack-heat) (uses CentOS and flannel) -* [Fedora (Multi Node)](/docs/getting-started-guides/fedora/flannel_multi_node_cluster) (uses Fedora and flannel) +* [Vmware](/docs/getting-started-guides/coreos/) (uses CoreOS and flannel) +* [CoreOS on libvirt](/docs/getting-started-guides/libvirt-coreos/) (uses CoreOS) +* [oVirt](/docs/getting-started-guides/ovirt/) +* [OpenStack Heat](/docs/getting-started-guides/openstack-heat/) (uses CentOS and flannel) +* [Fedora (Multi Node)](/docs/getting-started-guides/fedora/flannel_multi_node_cluster/) (uses Fedora and flannel) ## Bare Metal -* [Offline](/docs/getting-started-guides/coreos/bare_metal_offline) (no internet required. 
Uses CoreOS and Flannel) -* [Fedora via Ansible](/docs/getting-started-guides/fedora/fedora_ansible_config) -* [Fedora (Single Node)](/docs/getting-started-guides/fedora/fedora_manual_config) -* [Fedora (Multi Node)](/docs/getting-started-guides/fedora/flannel_multi_node_cluster) -* [CentOS](/docs/getting-started-guides/centos/centos_manual_config) +* [Offline](/docs/getting-started-guides/coreos/bare_metal_offline/) (no internet required. Uses CoreOS and Flannel) +* [Fedora via Ansible](/docs/getting-started-guides/fedora/fedora_ansible_config/) +* [Fedora (Single Node)](/docs/getting-started-guides/fedora/fedora_manual_config/) +* [Fedora (Multi Node)](/docs/getting-started-guides/fedora/flannel_multi_node_cluster/) +* [CentOS](/docs/getting-started-guides/centos/centos_manual_config/) * [Kubernetes on Ubuntu](/docs/getting-started-guides/ubuntu/) -* [CoreOS on AWS or GCE](/docs/getting-started-guides/coreos) +* [CoreOS on AWS or GCE](/docs/getting-started-guides/coreos/) ## Integrations These solutions provide integration with third-party schedulers, resource managers, and/or lower level platforms. -* [Kubernetes on Mesos](/docs/getting-started-guides/mesos) +* [Kubernetes on Mesos](/docs/getting-started-guides/mesos/) * Instructions specify GCE, but are generic enough to be adapted to most existing Mesos clusters -* [DCOS](/docs/getting-started-guides/dcos) +* [DCOS](/docs/getting-started-guides/dcos/) * Community Edition DCOS uses AWS * Enterprise Edition DCOS supports cloud hosting, on-premises VMs, and bare metal @@ -146,37 +146,37 @@ KUBE2GO.io | | multi-support | multi-support | [docs](http Madcore.Ai | Jenkins DSL | Ubuntu | flannel | [docs](https://madcore.ai) | Community ([@madcore-ai](https://github.com/madcore-ai)) Platform9 | | multi-support | multi-support | [docs](https://platform9.com/managed-kubernetes/) | Commercial Giant Swarm | | CoreOS | flannel and/or Calico | [docs](https://docs.giantswarm.io/) | Commercial -GCE | Saltstack | Debian | GCE | [docs](/docs/getting-started-guides/gce) | Project +GCE | Saltstack | Debian | GCE | [docs](/docs/getting-started-guides/gce/) | Project Azure Container Service | | Ubuntu | Azure | [docs](https://azure.microsoft.com/en-us/services/container-service/) | Commercial -Azure (IaaS) | | Ubuntu | Azure | [docs](/docs/getting-started-guides/azure) | [Community (Microsoft)](https://github.com/Azure/acs-engine) -Bare-metal | Ansible | Fedora | flannel | [docs](/docs/getting-started-guides/fedora/fedora_ansible_config) | Project -Bare-metal | custom | Fedora | _none_ | [docs](/docs/getting-started-guides/fedora/fedora_manual_config) | Project -Bare-metal | custom | Fedora | flannel | [docs](/docs/getting-started-guides/fedora/flannel_multi_node_cluster) | Community ([@aveshagarwal](https://github.com/aveshagarwal)) -libvirt | custom | Fedora | flannel | [docs](/docs/getting-started-guides/fedora/flannel_multi_node_cluster) | Community ([@aveshagarwal](https://github.com/aveshagarwal)) -KVM | custom | Fedora | flannel | [docs](/docs/getting-started-guides/fedora/flannel_multi_node_cluster) | Community ([@aveshagarwal](https://github.com/aveshagarwal)) -Mesos/Docker | custom | Ubuntu | Docker | [docs](/docs/getting-started-guides/mesos-docker) | Community ([Kubernetes-Mesos Authors](https://github.com/mesosphere/kubernetes-mesos/blob/master/AUTHORS.md)) -Mesos/GCE | | | | [docs](/docs/getting-started-guides/mesos) | Community ([Kubernetes-Mesos 
Authors](https://github.com/mesosphere/kubernetes-mesos/blob/master/AUTHORS.md)) -DCOS | Marathon | CoreOS/Alpine | custom | [docs](/docs/getting-started-guides/dcos) | Community ([Kubernetes-Mesos Authors](https://github.com/mesosphere/kubernetes-mesos/blob/master/AUTHORS.md)) -AWS | CoreOS | CoreOS | flannel | [docs](/docs/getting-started-guides/aws) | Community -GCE | CoreOS | CoreOS | flannel | [docs](/docs/getting-started-guides/coreos) | Community ([@pires](https://github.com/pires)) -Vagrant | CoreOS | CoreOS | flannel | [docs](/docs/getting-started-guides/coreos) | Community ([@pires](https://github.com/pires), [@AntonioMeireles](https://github.com/AntonioMeireles)) -Bare-metal (Offline) | CoreOS | CoreOS | flannel | [docs](/docs/getting-started-guides/coreos/bare_metal_offline) | Community ([@jeffbean](https://github.com/jeffbean)) -CloudStack | Ansible | CoreOS | flannel | [docs](/docs/getting-started-guides/cloudstack) | Community ([@sebgoa](https://github.com/sebgoa)) -Vmware vSphere | Saltstack | Debian | OVS | [docs](/docs/getting-started-guides/vsphere) | Community ([@imkin](https://github.com/imkin)) -Vmware Photon | Saltstack | Debian | OVS | [docs](/docs/getting-started-guides/photon-controller) | Community ([@alainroy](https://github.com/alainroy)) -Bare-metal | custom | CentOS | flannel | [docs](/docs/getting-started-guides/centos/centos_manual_config) | Community ([@coolsvap](https://github.com/coolsvap)) +Azure (IaaS) | | Ubuntu | Azure | [docs](/docs/getting-started-guides/azure/) | [Community (Microsoft)](https://github.com/Azure/acs-engine) +Bare-metal | Ansible | Fedora | flannel | [docs](/docs/getting-started-guides/fedora/fedora_ansible_config/) | Project +Bare-metal | custom | Fedora | _none_ | [docs](/docs/getting-started-guides/fedora/fedora_manual_config/) | Project +Bare-metal | custom | Fedora | flannel | [docs](/docs/getting-started-guides/fedora/flannel_multi_node_cluster/) | Community ([@aveshagarwal](https://github.com/aveshagarwal)) +libvirt | custom | Fedora | flannel | [docs](/docs/getting-started-guides/fedora/flannel_multi_node_cluster/) | Community ([@aveshagarwal](https://github.com/aveshagarwal)) +KVM | custom | Fedora | flannel | [docs](/docs/getting-started-guides/fedora/flannel_multi_node_cluster/) | Community ([@aveshagarwal](https://github.com/aveshagarwal)) +Mesos/Docker | custom | Ubuntu | Docker | [docs](/docs/getting-started-guides/mesos-docker/) | Community ([Kubernetes-Mesos Authors](https://github.com/mesosphere/kubernetes-mesos/blob/master/AUTHORS.md)) +Mesos/GCE | | | | [docs](/docs/getting-started-guides/mesos/) | Community ([Kubernetes-Mesos Authors](https://github.com/mesosphere/kubernetes-mesos/blob/master/AUTHORS.md)) +DCOS | Marathon | CoreOS/Alpine | custom | [docs](/docs/getting-started-guides/dcos/) | Community ([Kubernetes-Mesos Authors](https://github.com/mesosphere/kubernetes-mesos/blob/master/AUTHORS.md)) +AWS | CoreOS | CoreOS | flannel | [docs](/docs/getting-started-guides/aws/) | Community +GCE | CoreOS | CoreOS | flannel | [docs](/docs/getting-started-guides/coreos/) | Community ([@pires](https://github.com/pires)) +Vagrant | CoreOS | CoreOS | flannel | [docs](/docs/getting-started-guides/coreos/) | Community ([@pires](https://github.com/pires), [@AntonioMeireles](https://github.com/AntonioMeireles)) +Bare-metal (Offline) | CoreOS | CoreOS | flannel | 
[docs](/docs/getting-started-guides/coreos/bare_metal_offline/) | Community ([@jeffbean](https://github.com/jeffbean)) +CloudStack | Ansible | CoreOS | flannel | [docs](/docs/getting-started-guides/cloudstack/) | Community ([@sebgoa](https://github.com/sebgoa)) +Vmware vSphere | Saltstack | Debian | OVS | [docs](/docs/getting-started-guides/vsphere/) | Community ([@imkin](https://github.com/imkin)) +Vmware Photon | Saltstack | Debian | OVS | [docs](/docs/getting-started-guides/photon-controller/) | Community ([@alainroy](https://github.com/alainroy)) +Bare-metal | custom | CentOS | flannel | [docs](/docs/getting-started-guides/centos/centos_manual_config/) | Community ([@coolsvap](https://github.com/coolsvap)) AWS | Juju | Ubuntu | flannel | [docs](/docs/getting-started-guides/ubuntu/) | [Commercial](http://www.ubuntu.com/cloud/kubernetes) and [Community](https://github.com/juju-solutions/bundle-canonical-kubernetes) ( [@matt](https://github.com/mbruzek), [@chuck](https://github.com/chuckbutler) ) GCE | Juju | Ubuntu | flannel | [docs](/docs/getting-started-guides/ubuntu/) | [Commercial](http://www.ubuntu.com/cloud/kubernetes) and [Community](https://github.com/juju-solutions/bundle-canonical-kubernetes) ( [@matt](https://github.com/mbruzek), [@chuck](https://github.com/chuckbutler) ) Bare Metal | Juju | Ubuntu | flannel | [docs](/docs/getting-started-guides/ubuntu/) | [Commercial](http://www.ubuntu.com/cloud/kubernetes) and [Community](https://github.com/juju-solutions/bundle-canonical-kubernetes) ( [@matt](https://github.com/mbruzek), [@chuck](https://github.com/chuckbutler) ) Rackspace | Juju | Ubuntu | flannel | [docs](/docs/getting-started-guides/ubuntu/) | [Commercial](http://www.ubuntu.com/cloud/kubernetes) and [Community](https://github.com/juju-solutions/bundle-canonical-kubernetes) ( [@matt](https://github.com/mbruzek), [@chuck](https://github.com/chuckbutler) ) Vmware vSphere | Juju | Ubuntu | flannel | [docs](/docs/getting-started-guides/ubuntu/) | [Commercial](http://www.ubuntu.com/cloud/kubernetes) and [Community](https://github.com/juju-solutions/bundle-canonical-kubernetes) ( [@matt](https://github.com/mbruzek), [@chuck](https://github.com/chuckbutler) ) -AWS | Saltstack | Debian | AWS | [docs](/docs/getting-started-guides/aws) | Community ([@justinsb](https://github.com/justinsb)) -AWS | kops | Debian | AWS | [docs](https://github.com/kubernetes/kops) | Community ([@justinsb](https://github.com/justinsb)) -Bare-metal | custom | Ubuntu | flannel | [docs](/docs/getting-started-guides/ubuntu) | Community ([@resouer](https://github.com/resouer), [@WIZARD-CXY](https://github.com/WIZARD-CXY)) -libvirt/KVM | CoreOS | CoreOS | libvirt/KVM | [docs](/docs/getting-started-guides/libvirt-coreos) | Community ([@lhuard1A](https://github.com/lhuard1A)) -oVirt | | | | [docs](/docs/getting-started-guides/ovirt) | Community ([@simon3z](https://github.com/simon3z)) -OpenStack Heat | Saltstack | CentOS | Neutron + flannel hostgw | [docs](/docs/getting-started-guides/openstack-heat) | Community ([@FujitsuEnablingSoftwareTechnologyGmbH](https://github.com/FujitsuEnablingSoftwareTechnologyGmbH)) -any | any | any | any | [docs](/docs/getting-started-guides/scratch) | Community ([@erictune](https://github.com/erictune)) +AWS | Saltstack | Debian | AWS | 
[docs](/docs/getting-started-guides/aws/) | Community ([@justinsb](https://github.com/justinsb)) +AWS | kops | Debian | AWS | [docs](https://github.com/kubernetes/kops/) | Community ([@justinsb](https://github.com/justinsb)) +Bare-metal | custom | Ubuntu | flannel | [docs](/docs/getting-started-guides/ubuntu/) | Community ([@resouer](https://github.com/resouer), [@WIZARD-CXY](https://github.com/WIZARD-CXY)) +libvirt/KVM | CoreOS | CoreOS | libvirt/KVM | [docs](/docs/getting-started-guides/libvirt-coreos/) | Community ([@lhuard1A](https://github.com/lhuard1A)) +oVirt | | | | [docs](/docs/getting-started-guides/ovirt/) | Community ([@simon3z](https://github.com/simon3z)) +OpenStack Heat | Saltstack | CentOS | Neutron + flannel hostgw | [docs](/docs/getting-started-guides/openstack-heat/) | Community ([@FujitsuEnablingSoftwareTechnologyGmbH](https://github.com/FujitsuEnablingSoftwareTechnologyGmbH)) +any | any | any | any | [docs](/docs/getting-started-guides/scratch/) | Community ([@erictune](https://github.com/erictune)) any | any | any | any | [docs](http://docs.projectcalico.org/v2.2/getting-started/kubernetes/installation/) | Commercial and Community **Note**: The above table is ordered by version test/used in nodes, followed by support level. diff --git a/docs/tasks/access-application-cluster/access-cluster.md b/docs/tasks/access-application-cluster/access-cluster.md index a8c0abcf894e1..297ff597d71d4 100644 --- a/docs/tasks/access-application-cluster/access-cluster.md +++ b/docs/tasks/access-application-cluster/access-cluster.md @@ -136,7 +136,7 @@ If the application is deployed as a Pod in the cluster, please refer to the [nex To use [Python client](https://github.com/kubernetes-incubator/client-python), run the following command: `pip install kubernetes`. See [Python Client Library page](https://github.com/kubernetes-incubator/client-python) for more installation options. -The Python client can use the same [kubeconfig file](/docs/user-guide/kubeconfig-file) +The Python client can use the same [kubeconfig file](/docs/concepts/cluster-administration/authenticate-across-clusters-kubeconfig/) as the kubectl CLI does to locate and authenticate to the apiserver. See this [example](https://github.com/kubernetes-incubator/client-python/tree/master/examples/example1.py). #### Other languages @@ -308,7 +308,7 @@ There are several different proxies you may encounter when using Kubernetes: - proxy to target may use HTTP or HTTPS as chosen by proxy using available information - can be used to reach a Node, Pod, or Service - does load balancing when used to reach a Service - 1. The [kube proxy](/docs/user-guide/services/#ips-and-vips): + 1. The [kube proxy](/docs/concepts/services-networking/service/#ips-and-vips): - runs on each node - proxies UDP and TCP - does not understand HTTP diff --git a/docs/tasks/access-application-cluster/connecting-frontend-backend.md b/docs/tasks/access-application-cluster/connecting-frontend-backend.md index c1ba86065e00e..7b2798a0e1ad1 100644 --- a/docs/tasks/access-application-cluster/connecting-frontend-backend.md +++ b/docs/tasks/access-application-cluster/connecting-frontend-backend.md @@ -29,7 +29,7 @@ frontend and backend are connected using a Kubernetes Service object. [Services with external load balancers](/docs/tasks/access-application-cluster/create-external-load-balancer/), which require a supported environment. 
If your environment does not support this, you can use a Service of type - [NodePort](/docs/user-guide/services/#type-nodeport) instead. + [NodePort](/docs/concepts/services-networking/service/#type-nodeport) instead. {% endcapture %} diff --git a/docs/tasks/access-application-cluster/create-external-load-balancer.md b/docs/tasks/access-application-cluster/create-external-load-balancer.md index effd07dd87d21..1ac15d13b2ebd 100644 --- a/docs/tasks/access-application-cluster/create-external-load-balancer.md +++ b/docs/tasks/access-application-cluster/create-external-load-balancer.md @@ -25,7 +25,7 @@ cluster nodes _provided your cluster runs in a supported environment and is conf ## Configuration file To create an external load balancer, add the following line to your -[service configuration file](/docs/user-guide/services/operations/#service-configuration-file): +[service configuration file](/docs/concepts/services-networking/service/#type-loadbalancer): ```json "type": "LoadBalancer" @@ -68,7 +68,7 @@ resource (in the case of the example above, a replication controller named `example`). For more information, including optional flags, refer to the -[`kubectl expose` reference](/docs/user-guide/kubectl/v1.6/#expose). +[`kubectl expose` reference](/docs/user-guide/kubectl/{{page.version}}/#expose). ## Finding your IP address diff --git a/docs/tasks/access-application-cluster/load-balance-access-application-cluster.md b/docs/tasks/access-application-cluster/load-balance-access-application-cluster.md index 91de9ece98ce5..458dc2062e028 100644 --- a/docs/tasks/access-application-cluster/load-balance-access-application-cluster.md +++ b/docs/tasks/access-application-cluster/load-balance-access-application-cluster.md @@ -101,7 +101,7 @@ load-balanced access to an application running in a cluster. ## Using a service configuration file As an alternative to using `kubectl expose`, you can use a -[service configuration file](/docs/user-guide/services/operations) +[service configuration file](/docs/concepts/services-networking/service/) to create a Service. diff --git a/docs/tasks/access-application-cluster/service-access-application-cluster.md b/docs/tasks/access-application-cluster/service-access-application-cluster.md index 46bb4c983345d..212e1be9e6826 100644 --- a/docs/tasks/access-application-cluster/service-access-application-cluster.md +++ b/docs/tasks/access-application-cluster/service-access-application-cluster.md @@ -117,7 +117,7 @@ provides load balancing for an application that has two running instances. ## Using a service configuration file As an alternative to using `kubectl expose`, you can use a -[service configuration file](/docs/user-guide/services/operations) +[service configuration file](/docs/concepts/services-networking/service/) to create a Service. {% endcapture %} diff --git a/docs/tasks/access-application-cluster/web-ui-dashboard.md b/docs/tasks/access-application-cluster/web-ui-dashboard.md index d650f69545401..a6bd934444684 100644 --- a/docs/tasks/access-application-cluster/web-ui-dashboard.md +++ b/docs/tasks/access-application-cluster/web-ui-dashboard.md @@ -20,7 +20,7 @@ Dashboard also provides information on the state of Kubernetes resources in your The Dashboard UI is not deployed by default. 
To deploy it, run the following command: ``` -kubectl create -f https://rawgit.com/kubernetes/dashboard/master/src/deploy/kubernetes-dashboard.yaml +kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml ``` ## Accessing the Dashboard UI @@ -64,7 +64,7 @@ To access the deploy wizard from the Welcome page, click the respective button. The deploy wizard expects that you provide the following information: -- **App name** (mandatory): Name for your application. A [label](/docs/user-guide/labels/) with the name will be added to the Deployment and Service, if any, that will be deployed. +- **App name** (mandatory): Name for your application. A [label](/docs/concepts/overview/working-with-objects/labels/) with the name will be added to the Deployment and Service, if any, that will be deployed. The application name must be unique within the selected Kubernetes [namespace](/docs/tasks/administer-cluster/namespaces/). It must start with a lowercase character, and end with a lowercase character or a number, and contain only lowercase letters, numbers and dashes (-). It is limited to 24 characters. Leading and trailing spaces are ignored. @@ -84,7 +84,7 @@ If needed, you can expand the **Advanced options** section where you can specify - **Description**: The text you enter here will be added as an [annotation](/docs/concepts/overview/working-with-objects/annotations/) to the Deployment and displayed in the application's details. -- **Labels**: Default [labels](/docs/user-guide/labels/) to be used for your application are application name and version. You can specify additional labels to be applied to the Deployment, Service (if any), and Pods, such as release, environment, tier, partition, and release track. +- **Labels**: Default [labels](/docs/concepts/overview/working-with-objects/labels/) to be used for your application are application name and version. You can specify additional labels to be applied to the Deployment, Service (if any), and Pods, such as release, environment, tier, partition, and release track. Example: diff --git a/docs/tasks/administer-cluster/access-cluster-api.md b/docs/tasks/administer-cluster/access-cluster-api.md index f1fd4ea5c1630..0eb4d059ca3a0 100644 --- a/docs/tasks/administer-cluster/access-cluster-api.md +++ b/docs/tasks/administer-cluster/access-cluster-api.md @@ -147,7 +147,7 @@ If the application is deployed as a Pod in the cluster, please refer to the [nex To use [Python client](https://github.com/kubernetes-incubator/client-python), run the following command: `pip install kubernetes` See [Python Client Library page](https://github.com/kubernetes-incubator/client-python) for more installation options. -The Python client can use the same [kubeconfig file](/docs/user-guide/kubeconfig-file) +The Python client can use the same [kubeconfig file](/docs/concepts/cluster-administration/authenticate-across-clusters-kubeconfig/) as the kubectl CLI does to locate and authenticate to the apiserver. See this [example](https://github.com/kubernetes-incubator/client-python/tree/master/examples/example1.py): ```python diff --git a/docs/tasks/administer-cluster/calico-network-policy.md b/docs/tasks/administer-cluster/calico-network-policy.md index 4543aa7069743..424ffea4df518 100644 --- a/docs/tasks/administer-cluster/calico-network-policy.md +++ b/docs/tasks/administer-cluster/calico-network-policy.md @@ -15,7 +15,7 @@ This page shows how to use Calico for NetworkPolicy. 
{% capture steps %} ## Deploying a cluster using Calico -You can deploy a cluster using Calico for network policy in the default [GCE deployment](/docs/getting-started-guides/gce) using the following set of commands: +You can deploy a cluster using Calico for network policy in the default [GCE deployment](/docs/getting-started-guides/gce/) using the following set of commands: ```shell export NETWORK_POLICY_PROVIDER=calico diff --git a/docs/tasks/administer-cluster/change-default-storage-class.md b/docs/tasks/administer-cluster/change-default-storage-class.md index 8326d49f4fa00..625d599dfcf68 100644 --- a/docs/tasks/administer-cluster/change-default-storage-class.md +++ b/docs/tasks/administer-cluster/change-default-storage-class.md @@ -22,7 +22,7 @@ Depending on the installation method, your Kubernetes cluster may be deployed wi an existing StorageClass that is marked as default. This default StorageClass is then used to dynamically provision storage for PersistentVolumeClaims that do not require any specific storage class. See -[PersistentVolumeClaim documentation](/docs/user-guide/persistent-volumes/#class-1) +[PersistentVolumeClaim documentation](/docs/concepts/storage/persistent-volumes/#class-1) for details. The pre-installed default StorageClass may not fit well with your expected workload; diff --git a/docs/tasks/administer-cluster/change-pv-reclaim-policy.md b/docs/tasks/administer-cluster/change-pv-reclaim-policy.md index 43080789ed324..3cbb8a76426db 100644 --- a/docs/tasks/administer-cluster/change-pv-reclaim-policy.md +++ b/docs/tasks/administer-cluster/change-pv-reclaim-policy.md @@ -68,7 +68,7 @@ the corresponding `PersistentVolume` is not be deleted. Instead, it is moved to {% capture whatsnext %} * Learn more about [PersistentVolumes](/docs/concepts/storage/persistent-volumes/). -* Learn more about [PersistentVolumeClaims](/docs/user-guide/persistent-volumes/#persistentvolumeclaims). +* Learn more about [PersistentVolumeClaims](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims). ### Reference diff --git a/docs/tasks/administer-cluster/configure-upgrade-etcd.md b/docs/tasks/administer-cluster/configure-upgrade-etcd.md index b672375a9ee19..c5b7037185f65 100644 --- a/docs/tasks/administer-cluster/configure-upgrade-etcd.md +++ b/docs/tasks/administer-cluster/configure-upgrade-etcd.md @@ -184,7 +184,7 @@ etcd supports restoring from snapshots that are taken from an etcd process of th Before starting the restore operation, a snapshot file must be present. It can either be a snapshot file from a previous backup operation, or from a remaining [data directory](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/configuration.md#--data-dir). `datadir` is located at `$DATA_DIR/member/snap/db`. For more information and examples on restoring a cluster from a snapshot file, see [etcd disaster recovery documentation](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/recovery.md#restoring-a-cluster). -If the access URLs of the restored cluster is changed from the previous cluster, the Kubernetes API server must be reconfigured accordingly. In this case, restart Kubernetes API server with the flag `--etcd-servers=$NEW_ETCD_CLUSTER` instead of the flag `--etcd-servers=$OLD__ETCD_CLUSTER`. Replace `$NEW_ETCD_CLUSTER` and `$OLD__ETCD_CLUSTER` with the respective IP addresses. If a load balancer is used in front of an etcd cluster, you might need to update the load balancer instead. 
+If the access URLs of the restored cluster is changed from the previous cluster, the Kubernetes API server must be reconfigured accordingly. In this case, restart Kubernetes API server with the flag `--etcd-servers=$NEW_ETCD_CLUSTER` instead of the flag `--etcd-servers=$OLD_ETCD_CLUSTER`. Replace `$NEW_ETCD_CLUSTER` and `$OLD_ETCD_CLUSTER` with the respective IP addresses. If a load balancer is used in front of an etcd cluster, you might need to update the load balancer instead. If the majority of etcd members have permanently failed, the etcd cluster is considered failed. In this scenario, Kubernetes cannot make any changes to its current state. Although the scheduled pods might continue to run, no new pods can be scheduled. In such cases, recover the etcd cluster and potentially reconfigure Kubernetes API server to fix the issue. diff --git a/docs/tasks/administer-cluster/cpu-constraint-namespace.md b/docs/tasks/administer-cluster/cpu-constraint-namespace.md index 89f771794ede6..25681d77d7027 100644 --- a/docs/tasks/administer-cluster/cpu-constraint-namespace.md +++ b/docs/tasks/administer-cluster/cpu-constraint-namespace.md @@ -195,7 +195,7 @@ resources: Because your Container did not specify its own CPU request and limit, it was given the [default CPU request and limit](/docs/tasks/administer-cluster/cpu-default-namespace/) from the LimitRange. -* [Configure Memory and CPU Quotas for a Namespace](/docs/tasks/administer-cluster/quota-memory-cpu-namespace) + At this point, your Container might be running or it might not be running. Recall that a prerequisite for this task is that your Nodes have at least 1 CPU. If each of your Nodes has only 1 CPU, then there might not be enough allocatable CPU on any Node to accommodate a request @@ -219,12 +219,12 @@ Pods that were created previously. As a cluster administrator, you might want to impose restrictions on the CPU resources that Pods can use. For example: -* Each Node in a cluster has 2 cpu. You do not want to accept any Pod that requests -more than 2 cpu, because no Node in the cluster can support the request. +* Each Node in a cluster has 2 CPU. You do not want to accept any Pod that requests +more than 2 CPU, because no Node in the cluster can support the request. * A cluster is shared by your production and development departments. -You want to allow production workloads to consume up to 3 cpu, but you want development workloads to be limited -to 1 cpu. You create separate namespaces for production and development, and you apply CPU constraints to +You want to allow production workloads to consume up to 3 CPU, but you want development workloads to be limited +to 1 CPU. You create separate namespaces for production and development, and you apply CPU constraints to each namespace. ## Clean up diff --git a/docs/tasks/administer-cluster/declare-network-policy.md b/docs/tasks/administer-cluster/declare-network-policy.md index a1cffa197bd1c..d1b141c607347 100644 --- a/docs/tasks/administer-cluster/declare-network-policy.md +++ b/docs/tasks/administer-cluster/declare-network-policy.md @@ -15,7 +15,7 @@ You'll need to have a Kubernetes cluster in place, with network policy support. 
* [Cilium](/docs/tasks/administer-cluster/cilium-network-policy/) * [Kube-router](/docs/tasks/administer-cluster/kube-router-network-policy/) * [Romana](/docs/tasks/configure-pod-container/romana-network-policy/) -* [Weave Net](/docs/tasks/configure-pod-container/weave-network-policy/) +* [Weave Net](/docs/tasks/administer-cluster/weave-network-policy/) **Note**: The above list is sorted alphabetically by product name, not by recommendation or preference. This example is valid for a Kubernetes cluster using any of these providers. {% endcapture %} diff --git a/docs/tasks/administer-cluster/out-of-resource.md b/docs/tasks/administer-cluster/out-of-resource.md index a86f70ddf2c4d..02098595df1fc 100644 --- a/docs/tasks/administer-cluster/out-of-resource.md +++ b/docs/tasks/administer-cluster/out-of-resource.md @@ -49,7 +49,7 @@ container, and if users use the [node allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) feature, out of resource decisions are made local to the end user pod part of the cgroup hierarchy as well as the root node. This -[script](/docs/concepts/cluster-administration/out-of-resource/memory-available.sh) +[script](/docs/tasks/administer-cluster/out-of-resource/memory-available.sh) reproduces the same set of steps that the `kubelet` performs to calculate `memory.available`. The `kubelet` excludes inactive_file (i.e. # of bytes of file-backed memory on inactive LRU list) from its calculation as it assumes that diff --git a/docs/tasks/administer-cluster/securing-a-cluster.md b/docs/tasks/administer-cluster/securing-a-cluster.md index 2885a27da9c89..b967a01138089 100644 --- a/docs/tasks/administer-cluster/securing-a-cluster.md +++ b/docs/tasks/administer-cluster/securing-a-cluster.md @@ -66,7 +66,7 @@ being terminated and recreated on other nodes. The out of the box roles represen between flexibility and the common use cases, but more limited roles should be carefully reviewed to prevent accidental escalation. You can make roles specific to your use case if the out-of-box ones don't meet your needs. -Consult the [authorization reference section](/docs/admin/authorization) for more information. +Consult the [authorization reference section](/docs/admin/authorization/) for more information. ## Controlling the capabilities of a workload or user at runtime @@ -82,7 +82,7 @@ resources granted to a namespace. This is most often used to limit the amount of or persistent disk a namespace can allocate, but can also control how many pods, services, or volumes exist in each namespace. -[Limit ranges](/docs/admin/limitrange) restrict the maximum or minimum size of some of the +[Limit ranges](/docs/tasks/administer-cluster/memory-default-namespace/) restrict the maximum or minimum size of some of the resources above, to prevent users from requesting unreasonably high or low values for commonly reserved resources like memory, or to provide default limits when none are specified. diff --git a/docs/tasks/configure-pod-container/assign-cpu-resource.md b/docs/tasks/configure-pod-container/assign-cpu-resource.md index 083b43855b43d..81da766f9827d 100644 --- a/docs/tasks/configure-pod-container/assign-cpu-resource.md +++ b/docs/tasks/configure-pod-container/assign-cpu-resource.md @@ -219,7 +219,7 @@ could use all of the CPU resources available on the Node where it is running. * The Container is running in a namespace that has a default CPU limit, and the Container is automatically assigned the default limit. 
Cluster administrators can use a -[LimitRange](https://kubernetes.io/docs/api-reference/v1.6/) +[LimitRange](https://kubernetes.io/docs/api-reference/v1.7/#limitrange-v1-core/) to specify a default value for the CPU limit. ## Motivation for CPU requests and limits diff --git a/docs/tasks/configure-pod-container/assign-memory-resource.md b/docs/tasks/configure-pod-container/assign-memory-resource.md index d717828345c21..bc3a4bf90857b 100644 --- a/docs/tasks/configure-pod-container/assign-memory-resource.md +++ b/docs/tasks/configure-pod-container/assign-memory-resource.md @@ -313,7 +313,7 @@ could use all of the memory available on the Node where it is running. * The Container is running in a namespace that has a default memory limit, and the Container is automatically assigned the default limit. Cluster administrators can use a -[LimitRange](https://kubernetes.io/docs/api-reference/v1.6/) +[LimitRange](https://kubernetes.io/docs/api-reference/v1.7/#limitrange-v1-core) to specify a default value for the memory limit. ## Motivation for memory requests and limits diff --git a/docs/tasks/configure-pod-container/assign-pods-nodes.md b/docs/tasks/configure-pod-container/assign-pods-nodes.md index 06a29e575ac68..613c731a0ef40 100644 --- a/docs/tasks/configure-pod-container/assign-pods-nodes.md +++ b/docs/tasks/configure-pod-container/assign-pods-nodes.md @@ -75,7 +75,7 @@ a `disktype=ssd` label. {% capture whatsnext %} Learn more about -[labels and selectors](/docs/user-guide/labels/). +[labels and selectors](/docs/concepts/overview/working-with-objects/labels/). {% endcapture %} {% include templates/task.md %} diff --git a/docs/tasks/configure-pod-container/configmap.md b/docs/tasks/configure-pod-container/configmap.md index 352ea85c11bd7..8236fd4ceba71 100644 --- a/docs/tasks/configure-pod-container/configmap.md +++ b/docs/tasks/configure-pod-container/configmap.md @@ -23,7 +23,7 @@ This page shows you how to configure an application using a ConfigMap. ConfigMap ## Use kubectl to create a ConfigMap -Use the `kubectl create configmap` command to create configmaps from [directories](#creating-configmaps-from-directories), [files](#creating-configmaps-from-files), or [literal values](#creating-configmaps-from-literal-values): +Use the `kubectl create configmap` command to create configmaps from [directories](#create-configmaps-from-directories), [files](#create-configmaps-from-files), or [literal values](#create-configmaps-from-literal-values): ```shell kubectl create configmap diff --git a/docs/tasks/configure-pod-container/configure-pod-configmap.md b/docs/tasks/configure-pod-container/configure-pod-configmap.md index 6349af7a404ec..7286accd9ad38 100644 --- a/docs/tasks/configure-pod-container/configure-pod-configmap.md +++ b/docs/tasks/configure-pod-container/configure-pod-configmap.md @@ -124,7 +124,7 @@ This page provides a series of usage examples demonstrating how to configure Pod SPECIAL_TYPE: charm ``` -1. Use `env-from` to define all of the ConfigMap's data as Pod environment variables. The key from the ConfigMap becomes the environment variable name in the Pod. +1. Use `envFrom` to define all of the ConfigMap's data as Pod environment variables. The key from the ConfigMap becomes the environment variable name in the Pod. 
```yaml apiVersion: v1 @@ -185,7 +185,7 @@ very charm ## Add ConfigMap data to a Volume -As explained in [Configure Containers Using a ConfigMap](/docs/tasks/configure-pod-container/configmap.html), when you create a ConfigMap using ``--from-file``, the filename becomes a key stored in the `data` section of the ConfigMap. The file contents become the key's value. +As explained in [Configure Containers Using a ConfigMap](/docs/tasks/configure-pod-container/configmap/), when you create a ConfigMap using ``--from-file``, the filename becomes a key stored in the `data` section of the ConfigMap. The file contents become the key's value. The examples in this section refer to a ConfigMap named special-config, shown below. diff --git a/docs/tasks/configure-pod-container/configure-pod-initialization.md b/docs/tasks/configure-pod-container/configure-pod-initialization.md index be18f82e4dbb9..4147b54854ac0 100644 --- a/docs/tasks/configure-pod-container/configure-pod-initialization.md +++ b/docs/tasks/configure-pod-container/configure-pod-initialization.md @@ -81,7 +81,7 @@ The output shows that nginx is serving the web page that was written by the init {% capture whatsnext %} * Learn more about -[communicating between Containers running in the same Pod](/docs/tasks/configure-pod-container/communicate-containers-same-pod/). +[communicating between Containers running in the same Pod](/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume/). * Learn more about [Init Containers](/docs/concepts/workloads/pods/init-containers/). * Learn more about [Volumes](/docs/concepts/storage/volumes/). * Learn more about [Debugging Init Containers](/docs/tasks/debug-application-cluster/debug-init-containers/) diff --git a/docs/tasks/configure-pod-container/configure-service-account.md b/docs/tasks/configure-pod-container/configure-service-account.md index 55e115bd100b7..65a69ae8b4361 100644 --- a/docs/tasks/configure-pod-container/configure-service-account.md +++ b/docs/tasks/configure-pod-container/configure-service-account.md @@ -9,7 +9,7 @@ title: Configure Service Accounts for Pods A service account provides an identity for processes that run in a Pod. *This is a user introduction to Service Accounts. See also the -[Cluster Admin Guide to Service Accounts](/docs/admin/service-accounts-admin).* +[Cluster Admin Guide to Service Accounts](/docs/admin/service-accounts-admin/).* **Note:** This document describes how service accounts behave in a cluster set up as recommended by the Kubernetes project. Your cluster administrator may have @@ -146,18 +146,19 @@ Any tokens for non-existent service accounts will be cleaned up by the token con ```shell $ kubectl describe secrets/build-robot-secret -Name: build-robot-secret -Namespace: default -Labels: -Annotations: kubernetes.io/service-account.name=build-robot,kubernetes.io/service-account.uid=870ef2a5-35cf-11e5-8d06-005056b45392 +Name: build-robot-secret +Namespace: default +Labels: +Annotations: kubernetes.io/service-account.name=build-robot + kubernetes.io/service-account.uid=da68f9c6-9d26-11e7-b84e-002dc52800da -Type: kubernetes.io/service-account-token +Type: kubernetes.io/service-account-token Data ==== -ca.crt: 1220 bytes -token: ... -namespace: 7 bytes +ca.crt: 1338 bytes +namespace: 7 bytes +token: ... ``` **Note:** The content of `token` is elided here. 
diff --git a/docs/tasks/debug-application-cluster/debug-application-introspection.md b/docs/tasks/debug-application-cluster/debug-application-introspection.md index 55c4c24c7f8ce..292e86a36305c 100644 --- a/docs/tasks/debug-application-cluster/debug-application-introspection.md +++ b/docs/tasks/debug-application-cluster/debug-application-introspection.md @@ -379,7 +379,7 @@ Learn about additional debugging tools, including: * [Logging](/docs/user-guide/logging/overview) * [Monitoring](/docs/user-guide/monitoring) * [Getting into containers via `exec`](/docs/user-guide/getting-into-containers) -* [Connecting to containers via proxies](/docs/user-guide/connecting-to-applications-proxy) +* [Connecting to containers via proxies](/docs/tasks/access-kubernetes-api/http-proxy-access-api/) * [Connecting to containers via port forwarding](/docs/user-guide/connecting-to-applications-port-forward) diff --git a/docs/tasks/federation/federation-service-discovery.md b/docs/tasks/federation/federation-service-discovery.md index 3068ce9ae34ee..a30910af72837 100644 --- a/docs/tasks/federation/federation-service-discovery.md +++ b/docs/tasks/federation/federation-service-discovery.md @@ -125,9 +125,9 @@ underlying Kubernetes services (once these have been allocated - this may take a few seconds). For inter-cluster and inter-cloud-provider networking between service shards to work correctly, your services need to have an externally visible IP address. [Service Type: -Loadbalancer](/docs/user-guide/services/#type-loadbalancer) +Loadbalancer](/docs/concepts/services-networking/service/#type-loadbalancer) is typically used for this, although other options -(e.g. [External IP's](/docs/user-guide/services/#external-ips)) exist. +(e.g. [External IP's](/docs/concepts/services-networking/service/#external-ips)) exist. Note also that we have not yet provisioned any backend Pods to receive the network traffic directed to these addresses (i.e. 'Service diff --git a/docs/tasks/federation/set-up-cluster-federation-kubefed.md b/docs/tasks/federation/set-up-cluster-federation-kubefed.md index c613f1e03d8d9..8f6b970dde776 100644 --- a/docs/tasks/federation/set-up-cluster-federation-kubefed.md +++ b/docs/tasks/federation/set-up-cluster-federation-kubefed.md @@ -261,11 +261,11 @@ kubefed init fellowship \ `kubefed init` exposes the federation API server as a Kubernetes [service](/docs/concepts/services-networking/service/) on the host cluster. By default, this service is exposed as a -[load balanced service](/docs/user-guide/services/#type-loadbalancer). +[load balanced service](/docs/concepts/services-networking/service/#type-loadbalancer). Most on-premises and bare-metal environments, and some cloud environments lack support for load balanced services. `kubefed init` allows exposing the federation API server as a -[`NodePort` service](/docs/user-guide/services/#type-nodeport) on +[`NodePort` service](/docs/concepts/services-networking/service/#type-nodeport) on such environments. This can be accomplished by passing the `--api-server-service-type=NodePort` flag. You can also specify the preferred address to advertise the federation API server by @@ -289,17 +289,17 @@ Federation control plane stores its state in [`etcd`](https://coreos.com/etcd/docs/latest/) data must be stored in a persistent storage volume to ensure correct operation across federation control plane restarts. 
On host clusters that support -[dynamic provisioning of storage volumes](/docs/user-guide/persistent-volumes/#dynamic), +[dynamic provisioning of storage volumes](/docs/concepts/storage/persistent-volumes/#dynamic), `kubefed init` dynamically provisions a -[`PersistentVolume`](/docs/user-guide/persistent-volumes/#persistent-volumes) +[`PersistentVolume`](/docs/concepts/storage/persistent-volumes/#persistent-volumes) and binds it to a -[`PersistentVolumeClaim`](/docs/user-guide/persistent-volumes/#persistentvolumeclaims) +[`PersistentVolumeClaim`](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) to store [`etcd`](https://coreos.com/etcd/docs/latest/) data. If your host cluster doesn't support dynamic provisioning, you can also statically provision a -[`PersistentVolume`](/docs/user-guide/persistent-volumes/#persistent-volumes). +[`PersistentVolume`](/docs/concepts/storage/persistent-volumes/#persistent-volumes). `kubefed init` creates a -[`PersistentVolumeClaim`](/docs/user-guide/persistent-volumes/#persistentvolumeclaims) +[`PersistentVolumeClaim`](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) that has the following configuration: ```yaml @@ -321,12 +321,12 @@ spec: ``` To statically provision a -[`PersistentVolume`](/docs/user-guide/persistent-volumes/#persistent-volumes), +[`PersistentVolume`](/docs/concepts/storage/persistent-volumes/#persistent-volumes), you must ensure that the -[`PersistentVolume`](/docs/user-guide/persistent-volumes/#persistent-volumes) +[`PersistentVolume`](/docs/concepts/storage/persistent-volumes/#persistent-volumes) that you create has the matching storage class, access mode and at least as much capacity as the requested -[`PersistentVolumeClaim`](/docs/user-guide/persistent-volumes/#persistentvolumeclaims). +[`PersistentVolumeClaim`](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims). Alternatively, you can disable persistent storage completely by passing `--etcd-persistent-storage=false` to `kubefed init`. @@ -342,7 +342,7 @@ kubefed init fellowship \ ``` `kubefed init` still doesn't support attaching an existing -[`PersistentVolumeClaim`](/docs/user-guide/persistent-volumes/#persistentvolumeclaims) +[`PersistentVolumeClaim`](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) to the federation control plane that it bootstraps. We are planning to support this in a future version of `kubefed`. @@ -373,7 +373,7 @@ For more information see Once you've deployed a federation control plane, you'll need to make that control plane aware of the clusters it should manage. You can add -a cluster to your federation by using the [`kubefed join`](/docs/admin/kubefed_join) +a cluster to your federation by using the [`kubefed join`](/docs/admin/kubefed_join/) command. 
To use `kubefed join`, you'll need to provide the name of the cluster @@ -468,7 +468,7 @@ as described in the ## Removing a cluster from a federation -To remove a cluster from a federation, run the [`kubefed unjoin`](/docs/admin/kubefed_unjoin) +To remove a cluster from a federation, run the [`kubefed unjoin`](/docs/admin/kubefed_unjoin/) command with the cluster name and the federation's `--host-cluster-context`: diff --git a/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md b/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md index 391b58ee54518..0b652dd52d689 100644 --- a/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md +++ b/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md @@ -161,7 +161,7 @@ The output shows the values of selected environment variables: {% capture whatsnext %} -* [Defining Environment Variables for a Container](/docs/tasks/configure-pod-container/define-environment-variable-container/) +* [Defining Environment Variables for a Container](/docs/tasks/inject-data-application/define-environment-variable-container/) * [PodSpec](/docs/resources-reference/{{page.version}}/#podspec-v1-core) * [Container](/docs/resources-reference/{{page.version}}/#container-v1-core) * [EnvVar](/docs/resources-reference/{{page.version}}/#envvar-v1-core) diff --git a/docs/tasks/inject-data-application/podpreset.md b/docs/tasks/inject-data-application/podpreset.md index 5b6b3549c00ef..6e1026a5df707 100644 --- a/docs/tasks/inject-data-application/podpreset.md +++ b/docs/tasks/inject-data-application/podpreset.md @@ -123,7 +123,7 @@ metadata: app: website role: frontend annotations: - podpreset.admission.kubernetes.io/allow-database: "resource version" + podpreset.admission.kubernetes.io/podpreset-allow-database: "resource version" spec: containers: - name: website @@ -229,7 +229,7 @@ metadata: app: website role: frontend annotations: - podpreset.admission.kubernetes.io/allow-database: "resource version" + podpreset.admission.kubernetes.io/podpreset-allow-database: "resource version" spec: containers: - name: website @@ -325,34 +325,35 @@ spec: **Pod spec after admission controller:** ```yaml +apiVersion: v1 kind: Pod - metadata: - labels: - app: guestbook - tier: frontend - annotations: - podpreset.admission.kubernetes.io/allow-database: "resource version" - spec: - containers: - - name: php-redis - image: gcr.io/google_samples/gb-frontend:v3 - resources: - requests: - cpu: 100m - memory: 100Mi - volumeMounts: - - mountPath: /cache - name: cache-volume - env: - - name: GET_HOSTS_FROM - value: dns - - name: DB_PORT - value: "6379" - ports: - - containerPort: 80 - volumes: - - name: cache-volume - emptyDir: {} +metadata: + labels: + app: guestbook + tier: frontend + annotations: + podpreset.admission.kubernetes.io/podpreset-allow-database: "resource version" +spec: + containers: + - name: php-redis + image: gcr.io/google_samples/gb-frontend:v3 + resources: + requests: + cpu: 100m + memory: 100Mi + volumeMounts: + - mountPath: /cache + name: cache-volume + env: + - name: GET_HOSTS_FROM + value: dns + - name: DB_PORT + value: "6379" + ports: + - containerPort: 80 + volumes: + - name: cache-volume + emptyDir: {} ``` ### Multiple PodPreset Example @@ -432,8 +433,8 @@ metadata: app: website role: frontend annotations: - podpreset.admission.kubernetes.io/allow-database: "resource version" - podpreset.admission.kubernetes.io/proxy: "resource version" + 
podpreset.admission.kubernetes.io/podpreset-allow-database: "resource version" + podpreset.admission.kubernetes.io/podpreset-proxy: "resource version" spec: containers: - name: website @@ -538,7 +539,7 @@ $ kubectl describe ... .... Events: FirstSeen LastSeen Count From SubobjectPath Reason Message - Tue, 07 Feb 2017 16:56:12 -0700 Tue, 07 Feb 2017 16:56:12 -0700 1 {podpreset.admission.kubernetes.io/allow-database } conflict Conflict on pod preset. Duplicate mountPath /cache. + Tue, 07 Feb 2017 16:56:12 -0700 Tue, 07 Feb 2017 16:56:12 -0700 1 {podpreset.admission.kubernetes.io/podpreset-allow-database } conflict Conflict on pod preset. Duplicate mountPath /cache. ``` ## Deleting a Pod Preset diff --git a/docs/tasks/job/parallel-processing-expansion.md b/docs/tasks/job/parallel-processing-expansion.md index f8fac8066ec0f..7feb9c7602a4f 100644 --- a/docs/tasks/job/parallel-processing-expansion.md +++ b/docs/tasks/job/parallel-processing-expansion.md @@ -109,7 +109,7 @@ Processing item cherry In the first example, each instance of the template had one parameter, and that parameter was also used as a label. However label keys are limited in [what characters they can -contain](/docs/user-guide/labels/#syntax-and-character-set). +contain](/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set). This slightly more complex example uses the jinja2 template language to generate our objects. We will use a one-line python script to convert the template to a file. diff --git a/docs/tasks/manage-daemon/update-daemon-set.md b/docs/tasks/manage-daemon/update-daemon-set.md index 653eec57a145c..46a5823218b6e 100644 --- a/docs/tasks/manage-daemon/update-daemon-set.md +++ b/docs/tasks/manage-daemon/update-daemon-set.md @@ -159,7 +159,7 @@ causes: The rollout is stuck because new DaemonSet pods can't be scheduled on at least one node. This is possible when the node is -[running out of resources](/docs/concepts/cluster-administration/out-of-resource/). +[running out of resources](/docs/tasks/administer-cluster/out-of-resource/). When this happens, find the nodes that don't have the DaemonSet pods scheduled on by comparing the output of `kubectl get nodes` and the output of: diff --git a/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index 4a7efc9d94ff3..b13ea7a0ff8ea 100644 --- a/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -18,13 +18,13 @@ This document walks you through an example of enabling Horizontal Pod Autoscalin This example requires a running Kubernetes cluster and kubectl, version 1.2 or later. [Heapster](https://github.com/kubernetes/heapster) monitoring needs to be deployed in the cluster as Horizontal Pod Autoscaler uses it to collect metrics -(if you followed [getting started on GCE guide](/docs/getting-started-guides/gce), +(if you followed [getting started on GCE guide](/docs/getting-started-guides/gce/), heapster monitoring will be turned-on by default). To specify multiple resource metrics for a Horizontal Pod Autoscaler, you must have a Kubernetes cluster and kubectl at version 1.6 or later. Furthermore, in order to make use of custom metrics, your cluster must be able to communicate with the API server providing the custom metrics API. -See the [Horizontal Pod Autoscaling user guide](/docs/user-guide/horizontal-pod-autoscaling/#support-for-custom-metrics) for more details. 
+See the [Horizontal Pod Autoscaling user guide](/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics) for more details. ## Step One: Run & expose php-apache server diff --git a/docs/tasks/run-application/run-replicated-stateful-application.md b/docs/tasks/run-application/run-replicated-stateful-application.md index 5241d555b08db..73e6c1ce9ff03 100644 --- a/docs/tasks/run-application/run-replicated-stateful-application.md +++ b/docs/tasks/run-application/run-replicated-stateful-application.md @@ -148,7 +148,7 @@ properties to perform orderly startup of MySQL replication. ### Generating configuration Before starting any of the containers in the Pod spec, the Pod first runs any -[Init Containers](/docs/user-guide/production-pods/#handling-initialization) +[Init Containers](/docs/concepts/workloads/pods/init-containers/) in the order defined. The first Init Container, named `init-mysql`, generates special MySQL config @@ -168,7 +168,7 @@ Because the example topology consists of a single MySQL master and any number of slaves, the script simply assigns ordinal `0` to be the master, and everyone else to be slaves. Combined with the StatefulSet controller's -[deployment order guarantee](/docs/concepts/abstractions/controllers/statefulsets/#deployment-and-scaling-guarantee), +[deployment order guarantee](/docs/concepts/workloads/controllers/statefulset/#deployment-and-scaling-guarantees/), this ensures the MySQL master is Ready before creating slaves, so they can begin replicating. @@ -292,7 +292,7 @@ running while you force a Pod out of the Ready state. ### Break the Readiness Probe -The [readiness probe](/docs/user-guide/production-pods/#liveness-and-readiness-probes-aka-health-checks) +The [readiness probe](/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#define-readiness-probes) for the `mysql` container runs the command `mysql -h 127.0.0.1 -e 'SELECT 1'` to make sure the server is up and able to execute queries. @@ -410,7 +410,7 @@ With MySQL replication, you can scale your read query capacity by adding slaves. With StatefulSet, you can do this with a single command: ```shell -kubectl scale --replicas=5 statefulset mysql +kubectl scale statefulset mysql --replicas=5 ``` Watch the new Pods come up by running: @@ -443,7 +443,7 @@ pod "mysql-client" deleted Scaling back down is also seamless: ```shell -kubectl scale --replicas=3 statefulset mysql +kubectl scale statefulset mysql --replicas=3 ``` Note, however, that while scaling up creates new PersistentVolumeClaims diff --git a/docs/tasks/run-application/run-single-instance-stateful-application.md b/docs/tasks/run-application/run-single-instance-stateful-application.md index aa073b15867b0..3880efb568769 100644 --- a/docs/tasks/run-application/run-single-instance-stateful-application.md +++ b/docs/tasks/run-application/run-single-instance-stateful-application.md @@ -149,7 +149,7 @@ specific to stateful apps: * Don't scale the app. This setup is for single-instance apps only. The underlying PersistentVolume can only be mounted to one Pod. For clustered stateful apps, see the - [StatefulSet documentation](/docs/concepts/workloads/controllers/statefulset.md). + [StatefulSet documentation](/docs/concepts/workloads/controllers/statefulset/). * Use `strategy:` `type: Recreate` in the Deployment configuration YAML file. This instructs Kubernetes to _not_ use rolling updates. 
Rolling updates will not work, as you cannot have more than diff --git a/docs/tools/kompose/user-guide.md b/docs/tools/kompose/user-guide.md index 483ea22ea066c..2aa6666bde449 100644 --- a/docs/tools/kompose/user-guide.md +++ b/docs/tools/kompose/user-guide.md @@ -427,7 +427,7 @@ $ kompose up --provider openshift --build build-config ## Alternative Conversions -The default `kompose` transformation will generate Kubernetes [Deployments](http://kubernetes.io/docs/user-guide/deployments/) and [Services](http://kubernetes.io/docs/user-guide/services/), in yaml format. You have alternative option to generate json with `-j`. Also, you can alternatively generate [Replication Controllers](http://kubernetes.io/docs/user-guide/replication-controller/) objects, [Deamon Sets](http://kubernetes.io/docs/admin/daemons/), or [Helm](https://github.com/helm/helm) charts. +The default `kompose` transformation will generate Kubernetes [Deployments](http://kubernetes.io/docs/user-guide/deployments/) and [Services](http://kubernetes.io/docs/concepts/services-networking/service/), in yaml format. You have alternative option to generate json with `-j`. Also, you can alternatively generate [Replication Controllers](http://kubernetes.io/docs/user-guide/replication-controller/) objects, [Deamon Sets](http://kubernetes.io/docs/admin/daemons/), or [Helm](https://github.com/helm/helm) charts. ```sh $ kompose convert -j @@ -572,4 +572,4 @@ Please note that changing service name might break some `docker-compose` files. Kompose supports Docker Compose versions: 1, 2 and 3. We have limited support on versions 2.1 and 3.2 due to their experimental nature. -A full list on compatibility between all three versions is listed in our [conversion document](/docs/conversion.md) including a list of all incompatible Docker Compose keys. +A full list on compatibility between all three versions is listed in our [conversion document](https://github.com/kubernetes/kompose/blob/master/docs/conversion.md) including a list of all incompatible Docker Compose keys. diff --git a/docs/tutorials/clusters/apparmor.md b/docs/tutorials/clusters/apparmor.md index b1c60fc596ae8..81301a9b8a8bb 100644 --- a/docs/tutorials/clusters/apparmor.md +++ b/docs/tutorials/clusters/apparmor.md @@ -192,20 +192,7 @@ Next, we'll run a simple "Hello AppArmor" pod with the deny-write profile: {% include code.html language="yaml" file="hello-apparmor-pod.yaml" ghlink="/docs/tutorials/clusters/hello-apparmor-pod.yaml" %} ```shell -$ kubectl create -f /dev/stdin < Annotations: container.apparmor.security.beta.kubernetes.io/hello=localhost/k8s-apparmor-example-allow-write -Status: Failed +Status: Pending Reason: AppArmor Message: Pod Cannot enforce AppArmor: profile "k8s-apparmor-example-allow-write" is not loaded IP: diff --git a/docs/tutorials/index.md b/docs/tutorials/index.md index e0a9b24d95aeb..09c9ad91ea8b6 100644 --- a/docs/tutorials/index.md +++ b/docs/tutorials/index.md @@ -31,7 +31,7 @@ each of which has a sequence of steps. 
* [Running a Single-Instance Stateful Application](/docs/tutorials/stateful-application/run-stateful-application/) -* [Running a Replicated Stateful Application](/docs/tutorials/stateful-application/run-replicated-stateful-application/) +* [Running a Replicated Stateful Application](/docs/tasks/run-application/run-replicated-stateful-application/) * [Example: WordPress and MySQL with Persistent Volumes](/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/) diff --git a/docs/tutorials/services/source-ip.md b/docs/tutorials/services/source-ip.md index bab393d37efca..ba9f8139f054d 100644 --- a/docs/tutorials/services/source-ip.md +++ b/docs/tutorials/services/source-ip.md @@ -53,7 +53,7 @@ deployment "source-ip-app" created ## Source IP for Services with Type=ClusterIP Packets sent to ClusterIP from within the cluster are never source NAT'd if -you're running kube-proxy in [iptables mode](/docs/user-guide/services/#proxy-mode-iptables), +you're running kube-proxy in [iptables mode](/docs/concepts/services-networking/service/#proxy-mode-iptables), which is the default since Kubernetes 1.2. Kube-proxy exposes its mode through a `proxyMode` endpoint: @@ -110,7 +110,7 @@ If the client pod and server pod are in the same node, the client_address is the ## Source IP for Services with Type=NodePort -As of Kubernetes 1.5, packets sent to Services with [Type=NodePort](/docs/user-guide/services/#type-nodeport) +As of Kubernetes 1.5, packets sent to Services with [Type=NodePort](/docs/concepts/services-networking/service/#type-nodeport) are source NAT'd by default. You can test this by creating a `NodePort` Service: ```console @@ -208,7 +208,7 @@ Visually: ## Source IP for Services with Type=LoadBalancer -As of Kubernetes 1.5, packets sent to Services with [Type=LoadBalancer](/docs/user-guide/services/#type-loadbalancer) are +As of Kubernetes 1.5, packets sent to Services with [Type=LoadBalancer](/docs/concepts/services-networking/service/#type-loadbalancer) are source NAT'd by default, because all schedulable Kubernetes nodes in the `Ready` state are eligible for loadbalanced traffic. So if packets arrive at a node without an endpoint, the system proxies it to a node *with* an diff --git a/docs/tutorials/stateful-application/basic-stateful-set.md b/docs/tutorials/stateful-application/basic-stateful-set.md index dc7cfca526655..8612eb322f117 100644 --- a/docs/tutorials/stateful-application/basic-stateful-set.md +++ b/docs/tutorials/stateful-application/basic-stateful-set.md @@ -55,7 +55,7 @@ After this tutorial, you will be familiar with the following. Begin by creating a StatefulSet using the example below. It is similar to the example presented in the [StatefulSets](/docs/concepts/abstractions/controllers/statefulsets/) concept. -It creates a [Headless Service](/docs/user-guide/services/#headless-services), +It creates a [Headless Service](/docs/concepts/services-networking/service/#headless-services), `nginx`, to publish the IP addresses of Pods in the StatefulSet, `web`. {% include code.html language="yaml" file="web.yaml" ghlink="/docs/tutorials/stateful-application/web.yaml" %} @@ -457,7 +457,7 @@ reverse ordinal order, while respecting the StatefulSet guarantees. Patch the `web` StatefulSet to apply the `RollingUpdate` update strategy. 
```shell -kubectl patch statefulset web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate"}}} +kubectl patch statefulset web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate"}}}' statefulset "web" patched ``` @@ -563,7 +563,7 @@ pod "web-2" deleted Wait for the Pod to be Running and Ready. ```shell -kubectl get po -lapp=nginx -w +kubectl get po -l app=nginx -w NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 4m web-1 1/1 Running 0 4m @@ -598,7 +598,7 @@ statefulset "web" patched Wait for `web-2` to be Running and Ready. ```shell -kubectl get po -lapp=nginx -w +kubectl get po -l app=nginx -w NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 4m web-1 1/1 Running 0 4m @@ -628,7 +628,7 @@ pod "web-1" deleted Wait for the `web-1` Pod to be Running and Ready. ```shell -kubectl get po -lapp=nginx -w +kubectl get po -l app=nginx -w NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 6m web-1 0/1 Terminating 0 6m @@ -673,7 +673,7 @@ statefulset "web" patched Wait for all of the Pods in the StatefulSet to become Running and Ready. ```shell -kubectl get po -lapp=nginx -w +kubectl get po -l app=nginx -w NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 3m web-1 0/1 ContainerCreating 0 11s @@ -935,7 +935,7 @@ of the `web` StatefulSet is set to `Parallel`. In one terminal, watch the Pods in the StatefulSet. ```shell -kubectl get po -lapp=nginx -w +kubectl get po -l app=nginx -w ``` In another terminal, create the StatefulSet and Service in the manifest. @@ -949,7 +949,7 @@ statefulset "web" created Examine the output of the `kubectl get` command that you executed in the first terminal. ```shell -kubectl get po -lapp=nginx -w +kubectl get po -l app=nginx -w NAME READY STATUS RESTARTS AGE web-0 0/1 Pending 0 0s web-0 0/1 Pending 0 0s diff --git a/docs/tutorials/stateful-application/zookeeper.md b/docs/tutorials/stateful-application/zookeeper.md index 732e5ac3dc918..cbbd3a96c13e3 100644 --- a/docs/tutorials/stateful-application/zookeeper.md +++ b/docs/tutorials/stateful-application/zookeeper.md @@ -13,7 +13,7 @@ title: Running ZooKeeper, A CP Distributed System {% capture overview %} This tutorial demonstrates [Apache Zookeeper](https://zookeeper.apache.org) on Kubernetes using [StatefulSets](/docs/concepts/abstractions/controllers/statefulsets/), -[PodDisruptionBudgets](/docs/admin/disruptions/#specifying-a-poddisruptionbudget), +[PodDisruptionBudgets](/docs/concepts/workloads/pods/disruptions/#specifying-a-poddisruptionbudget), and [PodAntiAffinity](/docs/user-guide/node-selection/#inter-pod-affinity-and-anti-affinity-beta-feature). {% endcapture %} @@ -28,7 +28,7 @@ Kubernetes concepts. * [PersistentVolumes](/docs/concepts/storage/volumes/) * [PersistentVolume Provisioning](https://github.com/kubernetes/examples/tree/{{page.githubbranch}}/staging/persistent-volume-provisioning/) * [StatefulSets](/docs/concepts/abstractions/controllers/statefulsets/) -* [PodDisruptionBudgets](/docs/admin/disruptions/#specifying-a-poddisruptionbudget) +* [PodDisruptionBudgets](/docs/concepts/workloads/pods/disruptions/#specifying-a-poddisruptionbudget) * [PodAntiAffinity](/docs/user-guide/node-selection/#inter-pod-affinity-and-anti-affinity-beta-feature) * [kubectl CLI](/docs/user-guide/kubectl) @@ -88,9 +88,9 @@ safely discarded. 
## Creating a ZooKeeper Ensemble The manifest below contains a -[Headless Service](/docs/user-guide/services/#headless-services), +[Headless Service](/docs/concepts/services-networking/service/#headless-services), a [Service](/docs/concepts/services-networking/service), -a [PodDisruptionBudget](/docs/admin/disruptions/#specifying-a-poddisruptionbudget), +a [PodDisruptionBudget](/docs/concepts/workloads/pods/disruptions//#specifying-a-poddisruptionbudget), and a [StatefulSet](/docs/concepts/abstractions/controllers/statefulsets/). {% include code.html language="yaml" file="zookeeper.yaml" ghlink="/docs/tutorials/stateful-application/zookeeper.yaml" %} @@ -935,7 +935,7 @@ This is because the Pods in the `zk` StatefulSet have a PodAntiAffinity specifie topologyKey: "kubernetes.io/hostname" ``` -The `requiredDuringSchedulingRequiredDuringExecution` field tells the +The `requiredDuringSchedulingIgnoredDuringExecution` field tells the Kubernetes Scheduler that it should never co-locate two Pods from the `zk-headless` Service in the domain defined by the `topologyKey`. The `topologyKey` `kubernetes.io/hostname` indicates that the domain is an individual node. Using diff --git a/docs/user-guide/walkthrough/k8s201.md b/docs/user-guide/walkthrough/k8s201.md index f5f42d7120473..b9d659c05f9a5 100644 --- a/docs/user-guide/walkthrough/k8s201.md +++ b/docs/user-guide/walkthrough/k8s201.md @@ -46,7 +46,7 @@ List all Pods with the label `app=nginx`: kubectl get pods -l app=nginx ``` -For more information, see [Labels](/docs/user-guide/labels/). +For more information, see [Labels](/docs/concepts/overview/working-with-objects/labels/). They are a core concept used by two additional Kubernetes building blocks: Deployments and Services. diff --git a/images/case_studies/Video-Clip-Box.png b/images/case_studies/Video-Clip-Box.png new file mode 100644 index 0000000000000..4c61e7440fc48 Binary files /dev/null and b/images/case_studies/Video-Clip-Box.png differ diff --git a/images/case_studies/box-small.png b/images/case_studies/box-small.png new file mode 100644 index 0000000000000..105b66a5832bb Binary files /dev/null and b/images/case_studies/box-small.png differ diff --git a/images/case_studies/golfnow_logo.png b/images/case_studies/golfnow_logo.png new file mode 100644 index 0000000000000..dbeb127b02a27 Binary files /dev/null and b/images/case_studies/golfnow_logo.png differ diff --git a/images/case_studies/peardeck_logo.png b/images/case_studies/peardeck_logo.png new file mode 100644 index 0000000000000..c1b9772ec45a0 Binary files /dev/null and b/images/case_studies/peardeck_logo.png differ diff --git a/images/case_studies/wink.png b/images/case_studies/wink.png new file mode 100644 index 0000000000000..ef2ee30bf3a46 Binary files /dev/null and b/images/case_studies/wink.png differ