diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 4ada85622e93d..79422a2183246 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -104,11 +104,9 @@ aliases: - atoato88 - bells17 - kakts - - makocchi-git - ptux - t-inu sig-docs-ko-owners: # Admins for Korean content - - ClaudiaJKang - gochist - ianychoi - jihoon-seo @@ -116,7 +114,6 @@ aliases: - yoonian - ysyukr sig-docs-ko-reviews: # PR reviews for Korean content - - ClaudiaJKang - gochist - ianychoi - jihoon-seo @@ -160,9 +157,7 @@ aliases: - devlware - edsoncelio - femrtnz - - jailton - jcjesus - - jhonmike - rikatz - stormqueen1990 - yagonobre @@ -170,9 +165,7 @@ aliases: - devlware - edsoncelio - femrtnz - - jailton - jcjesus - - jhonmike - rikatz - stormqueen1990 - yagonobre @@ -196,9 +189,7 @@ aliases: - mfilocha - nvtkaszpir sig-docs-uk-owners: # Admins for Ukrainian content - - anastyakulyk - Arhell - - butuzov - MaxymVlasov sig-docs-uk-reviews: # PR reviews for Ukrainian content - Arhell diff --git a/README-pt.md b/README-pt.md index d856bf7b427e6..4e67d820f4a4f 100644 --- a/README-pt.md +++ b/README-pt.md @@ -13,7 +13,7 @@ Você pode executar o website localmente utilizando o Hugo (versão Extended), o Para usar este repositório, você precisa instalar: - [npm](https://www.npmjs.com/) -- [Go](https://golang.org/) +- [Go](https://go.dev/) - [Hugo (versão Extended)](https://gohugo.io/) - Um container runtime, por exemplo [Docker](https://www.docker.com/). diff --git a/content/en/blog/_posts/2019-04-04-local-persistent-volumes-ga.md b/content/en/blog/_posts/2019-04-04-local-persistent-volumes-ga.md index c3681bd1d68da..61a37e3f123e0 100644 --- a/content/en/blog/_posts/2019-04-04-local-persistent-volumes-ga.md +++ b/content/en/blog/_posts/2019-04-04-local-persistent-volumes-ga.md @@ -129,7 +129,7 @@ spec: spec: containers: - name: test-container - image: k8s.gcr.io/busybox + image: registry.k8s.io/busybox # updated after publication (previously used k8s.gcr.io/busybox) command: - "/bin/sh" args: diff --git a/content/en/blog/_posts/2020-05-27-An-Introduction-to-the-K8s-Infrastructure-Working-Group.md b/content/en/blog/_posts/2020-05-27-An-Introduction-to-the-K8s-Infrastructure-Working-Group.md index f16fad7105d98..3bab474a4b70b 100644 --- a/content/en/blog/_posts/2020-05-27-An-Introduction-to-the-K8s-Infrastructure-Working-Group.md +++ b/content/en/blog/_posts/2020-05-27-An-Introduction-to-the-K8s-Infrastructure-Working-Group.md @@ -55,7 +55,7 @@ The team has made progress in the last few months that is well worth celebrating - The K8s-Infrastructure Working Group released an automated billing report that they start every meeting off by reviewing as a group. - DNS for k8s.io and kubernetes.io are also fully [community-owned](https://groups.google.com/g/kubernetes-dev/c/LZTYJorGh7c/m/u-ydk-yNEgAJ), with community members able to [file issues](https://github.com/kubernetes/k8s.io/issues/new?assignees=&labels=wg%2Fk8s-infra&template=dns-request.md&title=DNS+REQUEST%3A+%3Cyour-dns-record%3E) to manage records. -- The container registry [k8s.gcr.io](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io) is also fully community-owned and available for all Kubernetes subprojects to use. +- The container registry [registry.k8s.io](https://github.com/kubernetes/k8s.io/tree/main/registry.k8s.io) is also fully community-owned and available for all Kubernetes subprojects to use. _Note:_ The container registry has changed to registry.k8s.io. Updated on August 25, 2022. 
- The Kubernetes [publishing-bot](https://github.com/kubernetes/publishing-bot) responsible for keeping k8s.io/kubernetes/staging repositories published to their own top-level repos (For example: [kubernetes/api](https://github.com/kubernetes/api)) runs on a community-owned cluster. - The gcsweb.k8s.io service used to provide anonymous access to GCS buckets for kubernetes artifacts runs on a community-owned cluster. diff --git a/content/en/blog/_posts/2022-05-13-grpc-probes-in-beta.md b/content/en/blog/_posts/2022-05-13-grpc-probes-in-beta.md index 619f82e021b8f..e4a9ab0092cb3 100644 --- a/content/en/blog/_posts/2022-05-13-grpc-probes-in-beta.md +++ b/content/en/blog/_posts/2022-05-13-grpc-probes-in-beta.md @@ -115,7 +115,8 @@ metadata: spec: containers: - name: agnhost - image: k8s.gcr.io/e2e-test-images/agnhost:2.35 + # image changed since publication (previously used registry "k8s.gcr.io") + image: registry.k8s.io/e2e-test-images/agnhost:2.35 command: ["/agnhost", "grpc-health-checking"] ports: - containerPort: 5000 diff --git a/content/en/blog/_posts/2022-05-27-maxunavailable-for-statefulset.md b/content/en/blog/_posts/2022-05-27-maxunavailable-for-statefulset.md index 5dd786d9ad741..5d34a7196cdec 100644 --- a/content/en/blog/_posts/2022-05-27-maxunavailable-for-statefulset.md +++ b/content/en/blog/_posts/2022-05-27-maxunavailable-for-statefulset.md @@ -50,7 +50,8 @@ spec: app: nginx spec: containers: - - image: k8s.gcr.io/nginx-slim:0.8 + # image changed since publication (previously used registry "k8s.gcr.io") + - image: registry.k8s.io/nginx-slim:0.8 imagePullPolicy: IfNotPresent name: nginx updateStrategy: @@ -66,7 +67,7 @@ If you enable the new feature and you don't specify a value for `maxUnavailable` I'll run through a scenario based on that example manifest to demonstrate how this feature works. I will deploy a StatefulSet that has 5 replicas, with `maxUnavailable` set to 2 and `partition` set to 0. -I can trigger a rolling update by changing the image to `k8s.gcr.io/nginx-slim:0.9`. Once I initiate the rolling update, I can +I can trigger a rolling update by changing the image to `registry.k8s.io/nginx-slim:0.9`. Once I initiate the rolling update, I can watch the pods update 2 at a time as the current value of maxUnavailable is 2. The below output shows a span of time and is not complete. The maxUnavailable can be an absolute number (for example, 2) or a percentage of desired Pods (for example, 10%). The absolute number is calculated from percentage by rounding up to the nearest integer. 
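To make the rounding rule concrete, here is a minimal sketch of the relevant portion of the StatefulSet spec, assuming the same 5-replica example above but with `maxUnavailable` expressed as a percentage; 30% of 5 desired Pods is 1.5, which rounds up to 2 Pods allowed to be unavailable at a time:

```yaml
spec:
  replicas: 5
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      # sketch: 30% of 5 desired Pods = 1.5, rounded up to 2 unavailable Pods at a time
      maxUnavailable: 30%
      partition: 0
```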
diff --git a/content/en/blog/_posts/2023-01-12-protect-mission-critical-pods-priorityclass/index.md b/content/en/blog/_posts/2023-01-12-protect-mission-critical-pods-priorityclass/index.md index 0a295d1d06f41..7e1ec725c607f 100644 --- a/content/en/blog/_posts/2023-01-12-protect-mission-critical-pods-priorityclass/index.md +++ b/content/en/blog/_posts/2023-01-12-protect-mission-critical-pods-priorityclass/index.md @@ -103,7 +103,7 @@ spec: resources: requests: memory: "256Mi" - cpu: "0.2" + cpu: "0.2" limits: memory: ".5Gi" cpu: "0.5" diff --git a/content/en/blog/_posts/2023-01-20-Security-Bahavior-Analysis/Example.png b/content/en/blog/_posts/2023-01-20-Security-Bahavior-Analysis/Example.png new file mode 100644 index 0000000000000..175c21a889626 Binary files /dev/null and b/content/en/blog/_posts/2023-01-20-Security-Bahavior-Analysis/Example.png differ diff --git a/content/en/blog/_posts/2023-01-20-Security-Bahavior-Analysis/Microservices.png b/content/en/blog/_posts/2023-01-20-Security-Bahavior-Analysis/Microservices.png new file mode 100644 index 0000000000000..da0f60a5054a4 Binary files /dev/null and b/content/en/blog/_posts/2023-01-20-Security-Bahavior-Analysis/Microservices.png differ diff --git a/content/en/blog/_posts/2023-01-20-Security-Bahavior-Analysis/index.md b/content/en/blog/_posts/2023-01-20-Security-Bahavior-Analysis/index.md new file mode 100644 index 0000000000000..8ecad96975dd3 --- /dev/null +++ b/content/en/blog/_posts/2023-01-20-Security-Bahavior-Analysis/index.md @@ -0,0 +1,84 @@ +--- +layout: blog +title: Consider All Microservices Vulnerable — And Monitor Their Behavior +date: 2023-01-20 +slug: security-behavior-analysis +--- + +**Author:** +David Hadas (IBM Research Labs) + +_This post warns DevOps against a false sense of security. Following security best practices when developing and configuring microservices does not result in non-vulnerable microservices. The post shows that although all deployed microservices are vulnerable, there is much that can be done to ensure microservices are not exploited. It explains how analyzing the behavior of clients and services from a security standpoint, named here **"Security-Behavior Analysis"**, can protect the deployed vulnerable microservices. It points to [Guard](http://knative.dev/security-guard), an open source project offering security-behavior monitoring and control of Kubernetes microservices presumed vulnerable._ + +As cyber attacks continue to intensify in sophistication, organizations deploying cloud services continue to grow their cyber investments aiming to produce safe and non-vulnerable services. However, the year-by-year growth in cyber investments does not result in a parallel reduction in cyber incidents. Instead, the number of cyber incidents continues to grow annually. Evidently, organizations are doomed to fail in this struggle - no matter how much effort is made to detect and remove cyber weaknesses from deployed services, it seems offenders always have the upper hand. + +Considering the current spread of offensive tools, sophistication of offensive players, and ever-growing cyber financial gains to offenders, any cyber strategy that relies on constructing a non-vulnerable, weakness-free service in 2023 is clearly too naïve. It seems the only viable strategy is to: + +➥ **Admit that your services are vulnerable!** + +In other words, consciously accept that you will never create completely invulnerable services. If your opponents find even a single weakness as an entry-point, you lose!
Admitting that in spite of your best efforts, all your services are still vulnerable is an important first step. Next, this post discusses what you can do about it... + +## How to protect microservices from being exploited + +Being vulnerable does not necessarily mean that your service will be exploited. Though your services are vulnerable in some ways unknown to you, offenders still need to identify these vulnerabilities and then exploit them. If offenders fail to exploit your service vulnerabilities, you win! In other words, having a vulnerability that can’t be exploited, represents a risk that can’t be realized. + +{{< figure src="Example.png" alt="Image of an example of offender gaining foothold in a service" class="diagram-large" caption="Figure 1. An Offender gaining foothold in a vulnerable service" >}} + +The above diagram shows an example in which the offender does not yet have a foothold in the service; that is, it is assumed that your service does not run code controlled by the offender on day 1. In our example the service has vulnerabilities in the API exposed to clients. To gain an initial foothold the offender uses a malicious client to try and exploit one of the service API vulnerabilities. The malicious client sends an exploit that triggers some unplanned behavior of the service. + +More specifically, let’s assume the service is vulnerable to an SQL injection. The developer failed to sanitize the user input properly, thereby allowing clients to send values that would change the intended behavior. In our example, if a client sends a query string with key “username” and value of _“tom or 1=1”_, the client will receive the data of all users. Exploiting this vulnerability requires the client to send an irregular string as the value. Note that benign users will not be sending a string with spaces or with the equal sign character as a username, instead they will normally send legal usernames which for example may be defined as a short sequence of characters a-z. No legal username can trigger service unplanned behavior. + +In this simple example, one can already identify several opportunities to detect and block an attempt to exploit the vulnerability (un)intentionally left behind by the developer, making the vulnerability unexploitable. First, the malicious client behavior differs from the behavior of benign clients, as it sends irregular requests. If such a change in behavior is detected and blocked, the exploit will never reach the service. Second, the service behavior in response to the exploit differs from the service behavior in response to a regular request. Such behavior may include making subsequent irregular calls to other services such as a data store, taking irregular time to respond, and/or responding to the malicious client with an irregular response (for example, containing much more data than normally sent in case of benign clients making regular requests). Service behavioral changes, if detected, will also allow blocking the exploit in different stages of the exploitation attempt. + +More generally: + +- Monitoring the behavior of clients can help detect and block exploits against service API vulnerabilities. In fact, deploying efficient client behavior monitoring makes many vulnerabilities unexploitable and others very hard to achieve. To succeed, the offender needs to create an exploit undetectable from regular requests. + +- Monitoring the behavior of services can help detect services as they are being exploited regardless of the attack vector used. 
Efficient service behavior monitoring limits what an attacker may be able to achieve as the offender needs to ensure the service behavior is undetectable from regular service behavior. + +Combining both approaches may add a protection layer to the deployed vulnerable services, drastically decreasing the probability for anyone to successfully exploit any of the deployed vulnerable services. Next, let us identify four use cases where you need to use security-behavior monitoring. + +## Use cases + +One can identify the following four different stages in the life of any service from a security standpoint. In each stage, security-behavior monitoring is required to meet different challenges: + +Service State | Use case | What do you need in order to cope with this use case? +------------- | ------------- | ----------------------------------------- +**Normal** | **No known vulnerabilities:** The service owner is normally not aware of any known vulnerabilities in the service image or configuration. Yet, it is reasonable to assume that the service has weaknesses. | **Provide generic protection against any unknown, zero-day, service vulnerabilities** - Detect/block irregular patterns sent as part of incoming client requests that may be used as exploits. +**Vulnerable** | **An applicable CVE is published:** The service owner is required to release a new non-vulnerable revision of the service. Research shows that in practice this process of removing a known vulnerability may take many weeks to accomplish (2 months on average). | **Add protection based on the CVE analysis** - Detect/block incoming requests that include specific patterns that may be used to exploit the discovered vulnerability. Continue to offer services, although the service has a known vulnerability. +**Exploitable** | **A known exploit is published:** The service owner needs a way to filter incoming requests that contain the known exploit. | **Add protection based on a known exploit signature** - Detect/block incoming client requests that carry signatures identifying the exploit. Continue to offer services, although the presence of an exploit. +**Misused** | **An offender misuses pods backing the service:** The offender can follow an attack pattern enabling him/her to misuse pods. The service owner needs to restart any compromised pods while using non compromised pods to continue offering the service. Note that once a pod is restarted, the offender needs to repeat the attack pattern before he/she may again misuse it. | **Identify and restart instances of the component that is being misused** - At any given time, some backing pods may be compromised and misused, while others behave as designed. Detect/remove the misused pods while allowing other pods to continue servicing client requests. + +Fortunately, microservice architecture is well suited to security-behavior monitoring as discussed next. + +## Security-Behavior of microservices versus monoliths {#microservices-vs-monoliths} + +Kubernetes is often used to support workloads designed with microservice architecture. By design, microservices aim to follow the UNIX philosophy of "Do One Thing And Do It Well". Each microservice has a bounded context and a clear interface. In other words, you can expect the microservice clients to send relatively regular requests and the microservice to present a relatively regular behavior as a response to these requests. Consequently, a microservice architecture is an excellent candidate for security-behavior monitoring. 
+ +{{< figure src="Microservices.png" alt="Image showing why microservices are well suited for security-behavior monitoring" class="diagram-large" caption="Figure 2. Microservices are well suited for security-behavior monitoring" >}} + +The diagram above clarifies how dividing a monolithic service to a set of microservices improves our ability to perform security-behavior monitoring and control. In a monolithic service approach, different client requests are intertwined, resulting in a diminished ability to identify irregular client behaviors. Without prior knowledge, an observer of the intertwined client requests will find it hard to distinguish between types of requests and their related characteristics. Further, internal client requests are not exposed to the observer. Lastly, the aggregated behavior of the monolithic service is a compound of the many different internal behaviors of its components, making it hard to identify irregular service behavior. + +In a microservice environment, each microservice is expected by design to offer a more well-defined service and serve better defined type of requests. This makes it easier for an observer to identify irregular client behavior and irregular service behavior. Further, a microservice design exposes the internal requests and internal services which offer more security-behavior data to identify irregularities by an observer. Overall, this makes the microservice design pattern better suited for security-behavior monitoring and control. + +## Security-Behavior monitoring on Kubernetes + +Kubernetes deployments seeking to add Security-Behavior may use [Guard](http://knative.dev/security-guard), developed under the CNCF project Knative. Guard is integrated into the full Knative automation suite that runs on top of Kubernetes. Alternatively, **you can deploy Guard as a standalone tool** to protect any HTTP-based workload on Kubernetes. + +See: + +- [Guard](https://github.com/knative-sandbox/security-guard) on Github, for using Guard as a standalone tool. +- The Knative automation suite - Read about Knative, in the blog post [Opinionated Kubernetes](https://davidhadas.wordpress.com/2022/08/29/knative-an-opinionated-kubernetes) which describes how Knative simplifies and unifies the way web services are deployed on Kubernetes. +- You may contact Guard maintainers on the [SIG Security](https://kubernetes.slack.com/archives/C019LFTGNQ3) Slack channel or on the Knative community [security](https://knative.slack.com/archives/CBYV1E0TG) Slack channel. The Knative community channel will move soon to the [CNCF Slack](https://communityinviter.com/apps/cloud-native/cncf) under the name `#knative-security`. + +The goal of this post is to invite the Kubernetes community to action and introduce Security-Behavior monitoring and control to help secure Kubernetes based deployments. Hopefully, the community as a follow up will: + +1. Analyze the cyber challenges presented for different Kubernetes use cases +1. Add appropriate security documentation for users on how to introduce Security-Behavior monitoring and control. +1. Consider how to integrate with tools that can help users monitor and control their vulnerable services. + +## Getting involved + +You are welcome to get involved and join the effort to develop security behavior monitoring +and control for Kubernetes; to share feedback and contribute to code or documentation; +and to make or suggest improvements of any kind. 
diff --git a/content/en/docs/concepts/architecture/garbage-collection.md b/content/en/docs/concepts/architecture/garbage-collection.md index 70fd8423de086..a6e4290710563 100644 --- a/content/en/docs/concepts/architecture/garbage-collection.md +++ b/content/en/docs/concepts/architecture/garbage-collection.md @@ -144,7 +144,7 @@ which you can define: * `MinAge`: the minimum age at which the kubelet can garbage collect a container. Disable by setting to `0`. - * `MaxPerPodContainer`: the maximum number of dead containers each Pod pair + * `MaxPerPodContainer`: the maximum number of dead containers each Pod can have. Disable by setting to less than `0`. * `MaxContainers`: the maximum number of dead containers the cluster can have. Disable by setting to less than `0`. diff --git a/content/en/docs/concepts/cluster-administration/manage-deployment.md b/content/en/docs/concepts/cluster-administration/manage-deployment.md index 1da6bcf87a879..0dbbe9b6deb67 100644 --- a/content/en/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/en/docs/concepts/cluster-administration/manage-deployment.md @@ -1,20 +1,26 @@ --- -reviewers: -- janetkuo title: Managing Resources content_type: concept +reviewers: +- janetkuo weight: 40 --- -You've deployed your application and exposed it via a service. Now what? Kubernetes provides a number of tools to help you manage your application deployment, including scaling and updating. Among the features that we will discuss in more depth are [configuration files](/docs/concepts/configuration/overview/) and [labels](/docs/concepts/overview/working-with-objects/labels/). +You've deployed your application and exposed it via a service. Now what? Kubernetes provides a +number of tools to help you manage your application deployment, including scaling and updating. +Among the features that we will discuss in more depth are +[configuration files](/docs/concepts/configuration/overview/) and +[labels](/docs/concepts/overview/working-with-objects/labels/). ## Organizing resource configurations -Many applications require multiple resources to be created, such as a Deployment and a Service. Management of multiple resources can be simplified by grouping them together in the same file (separated by `---` in YAML). For example: +Many applications require multiple resources to be created, such as a Deployment and a Service. +Management of multiple resources can be simplified by grouping them together in the same file +(separated by `---` in YAML). For example: {{< codenew file="application/nginx-app.yaml" >}} @@ -24,81 +30,99 @@ Multiple resources can be created the same way as a single resource: kubectl apply -f https://k8s.io/examples/application/nginx-app.yaml ``` -```shell +```none service/my-nginx-svc created deployment.apps/my-nginx created ``` -The resources will be created in the order they appear in the file. Therefore, it's best to specify the service first, since that will ensure the scheduler can spread the pods associated with the service as they are created by the controller(s), such as Deployment. +The resources will be created in the order they appear in the file. Therefore, it's best to +specify the service first, since that will ensure the scheduler can spread the pods associated +with the service as they are created by the controller(s), such as Deployment. 
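As a minimal sketch of what such a combined file can look like (this approximates the `application/nginx-app.yaml` example referenced above; the names and image tag are illustrative), the Service is listed first and the two objects are separated by `---`:

```yaml
# Sketch of a combined manifest: the Service first, then the Deployment it fronts
apiVersion: v1
kind: Service
metadata:
  name: my-nginx-svc
  labels:
    app: nginx
spec:
  type: LoadBalancer
  ports:
  - port: 80
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-nginx
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
```

Applying this single file yields both objects, matching the `service/my-nginx-svc created` and `deployment.apps/my-nginx created` output shown above.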
`kubectl apply` also accepts multiple `-f` arguments: ```shell -kubectl apply -f https://k8s.io/examples/application/nginx/nginx-svc.yaml -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml +kubectl apply -f https://k8s.io/examples/application/nginx/nginx-svc.yaml \ + -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml ``` -It is a recommended practice to put resources related to the same microservice or application tier into the same file, and to group all of the files associated with your application in the same directory. If the tiers of your application bind to each other using DNS, you can deploy all of the components of your stack together. -A URL can also be specified as a configuration source, which is handy for deploying directly from configuration files checked into GitHub: +It is a recommended practice to put resources related to the same microservice or application tier +into the same file, and to group all of the files associated with your application in the same +directory. If the tiers of your application bind to each other using DNS, you can deploy all of +the components of your stack together. + +A URL can also be specified as a configuration source, which is handy for deploying directly from +configuration files checked into GitHub: ```shell -kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/application/nginx/nginx-deployment.yaml +kubectl apply -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml ``` -```shell +```none deployment.apps/my-nginx created ``` ## Bulk operations in kubectl -Resource creation isn't the only operation that `kubectl` can perform in bulk. It can also extract resource names from configuration files in order to perform other operations, in particular to delete the same resources you created: +Resource creation isn't the only operation that `kubectl` can perform in bulk. 
It can also extract +resource names from configuration files in order to perform other operations, in particular to +delete the same resources you created: ```shell kubectl delete -f https://k8s.io/examples/application/nginx-app.yaml ``` -```shell +```none deployment.apps "my-nginx" deleted service "my-nginx-svc" deleted ``` -In the case of two resources, you can specify both resources on the command line using the resource/name syntax: +In the case of two resources, you can specify both resources on the command line using the +resource/name syntax: ```shell kubectl delete deployments/my-nginx services/my-nginx-svc ``` -For larger numbers of resources, you'll find it easier to specify the selector (label query) specified using `-l` or `--selector`, to filter resources by their labels: +For larger numbers of resources, you'll find it easier to specify the selector (label query) +specified using `-l` or `--selector`, to filter resources by their labels: ```shell kubectl delete deployment,services -l app=nginx ``` -```shell +```none deployment.apps "my-nginx" deleted service "my-nginx-svc" deleted ``` -Because `kubectl` outputs resource names in the same syntax it accepts, you can chain operations using `$()` or `xargs`: +Because `kubectl` outputs resource names in the same syntax it accepts, you can chain operations +using `$()` or `xargs`: ```shell kubectl get $(kubectl create -f docs/concepts/cluster-administration/nginx/ -o name | grep service) kubectl create -f docs/concepts/cluster-administration/nginx/ -o name | grep service | xargs -i kubectl get {} ``` -```shell +```none NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE my-nginx-svc LoadBalancer 10.0.0.208 80/TCP 0s ``` -With the above commands, we first create resources under `examples/application/nginx/` and print the resources created with `-o name` output format -(print each resource as resource/name). Then we `grep` only the "service", and then print it with `kubectl get`. +With the above commands, we first create resources under `examples/application/nginx/` and print +the resources created with `-o name` output format (print each resource as resource/name). +Then we `grep` only the "service", and then print it with `kubectl get`. -If you happen to organize your resources across several subdirectories within a particular directory, you can recursively perform the operations on the subdirectories also, by specifying `--recursive` or `-R` alongside the `--filename,-f` flag. +If you happen to organize your resources across several subdirectories within a particular +directory, you can recursively perform the operations on the subdirectories also, by specifying +`--recursive` or `-R` alongside the `--filename,-f` flag. -For instance, assume there is a directory `project/k8s/development` that holds all of the {{< glossary_tooltip text="manifests" term_id="manifest" >}} needed for the development environment, organized by resource type: +For instance, assume there is a directory `project/k8s/development` that holds all of the +{{< glossary_tooltip text="manifests" term_id="manifest" >}} needed for the development environment, +organized by resource type: -``` +```none project/k8s/development ├── configmap │   └── my-configmap.yaml @@ -108,13 +132,15 @@ project/k8s/development └── my-pvc.yaml ``` -By default, performing a bulk operation on `project/k8s/development` will stop at the first level of the directory, not processing any subdirectories. 
If we had tried to create the resources in this directory using the following command, we would have encountered an error: +By default, performing a bulk operation on `project/k8s/development` will stop at the first level +of the directory, not processing any subdirectories. If we had tried to create the resources in +this directory using the following command, we would have encountered an error: ```shell kubectl apply -f project/k8s/development ``` -```shell +```none error: you must provide one or more resources by argument or filename (.json|.yaml|.yml|stdin) ``` @@ -124,13 +150,14 @@ Instead, specify the `--recursive` or `-R` flag with the `--filename,-f` flag as kubectl apply -f project/k8s/development --recursive ``` -```shell +```none configmap/my-config created deployment.apps/my-deployment created persistentvolumeclaim/my-pvc created ``` -The `--recursive` flag works with any operation that accepts the `--filename,-f` flag such as: `kubectl {create,get,delete,describe,rollout}` etc. +The `--recursive` flag works with any operation that accepts the `--filename,-f` flag such as: +`kubectl {create,get,delete,describe,rollout}` etc. The `--recursive` flag also works when multiple `-f` arguments are provided: @@ -138,7 +165,7 @@ The `--recursive` flag also works when multiple `-f` arguments are provided: kubectl apply -f project/k8s/namespaces -f project/k8s/development --recursive ``` -```shell +```none namespace/development created namespace/staging created configmap/my-config created @@ -146,36 +173,41 @@ deployment.apps/my-deployment created persistentvolumeclaim/my-pvc created ``` -If you're interested in learning more about `kubectl`, go ahead and read [Command line tool (kubectl)](/docs/reference/kubectl/). +If you're interested in learning more about `kubectl`, go ahead and read +[Command line tool (kubectl)](/docs/reference/kubectl/). ## Using labels effectively -The examples we've used so far apply at most a single label to any resource. There are many scenarios where multiple labels should be used to distinguish sets from one another. +The examples we've used so far apply at most a single label to any resource. There are many +scenarios where multiple labels should be used to distinguish sets from one another. -For instance, different applications would use different values for the `app` label, but a multi-tier application, such as the [guestbook example](https://github.com/kubernetes/examples/tree/master/guestbook/), would additionally need to distinguish each tier. The frontend could carry the following labels: +For instance, different applications would use different values for the `app` label, but a +multi-tier application, such as the [guestbook example](https://github.com/kubernetes/examples/tree/master/guestbook/), +would additionally need to distinguish each tier. 
The frontend could carry the following labels: ```yaml - labels: - app: guestbook - tier: frontend +labels: + app: guestbook + tier: frontend ``` -while the Redis master and slave would have different `tier` labels, and perhaps even an additional `role` label: +while the Redis master and slave would have different `tier` labels, and perhaps even an +additional `role` label: ```yaml - labels: - app: guestbook - tier: backend - role: master +labels: + app: guestbook + tier: backend + role: master ``` and ```yaml - labels: - app: guestbook - tier: backend - role: slave +labels: + app: guestbook + tier: backend + role: slave ``` The labels allow us to slice and dice our resources along any dimension specified by a label: @@ -185,7 +217,7 @@ kubectl apply -f examples/guestbook/all-in-one/guestbook-all-in-one.yaml kubectl get pods -Lapp -Ltier -Lrole ``` -```shell +```none NAME READY STATUS RESTARTS AGE APP TIER ROLE guestbook-fe-4nlpb 1/1 Running 0 1m guestbook frontend guestbook-fe-ght6d 1/1 Running 0 1m guestbook frontend @@ -200,7 +232,8 @@ my-nginx-o0ef1 1/1 Running 0 29m nginx ```shell kubectl get pods -lapp=guestbook,role=slave ``` -```shell + +```none NAME READY STATUS RESTARTS AGE guestbook-redis-slave-2q2yf 1/1 Running 0 3m guestbook-redis-slave-qgazl 1/1 Running 0 3m @@ -208,62 +241,72 @@ guestbook-redis-slave-qgazl 1/1 Running 0 3m ## Canary deployments -Another scenario where multiple labels are needed is to distinguish deployments of different releases or configurations of the same component. It is common practice to deploy a *canary* of a new application release (specified via image tag in the pod template) side by side with the previous release so that the new release can receive live production traffic before fully rolling it out. +Another scenario where multiple labels are needed is to distinguish deployments of different +releases or configurations of the same component. It is common practice to deploy a *canary* of a +new application release (specified via image tag in the pod template) side by side with the +previous release so that the new release can receive live production traffic before fully rolling +it out. For instance, you can use a `track` label to differentiate different releases. The primary, stable release would have a `track` label with value as `stable`: -```yaml - name: frontend - replicas: 3 - ... - labels: - app: guestbook - tier: frontend - track: stable - ... - image: gb-frontend:v3 +```none +name: frontend +replicas: 3 +... +labels: + app: guestbook + tier: frontend + track: stable +... +image: gb-frontend:v3 ``` -and then you can create a new release of the guestbook frontend that carries the `track` label with different value (i.e. `canary`), so that two sets of pods would not overlap: +and then you can create a new release of the guestbook frontend that carries the `track` label +with different value (i.e. `canary`), so that two sets of pods would not overlap: -```yaml - name: frontend-canary - replicas: 1 - ... - labels: - app: guestbook - tier: frontend - track: canary - ... - image: gb-frontend:v4 +```none +name: frontend-canary +replicas: 1 +... +labels: + app: guestbook + tier: frontend + track: canary +... +image: gb-frontend:v4 ``` - -The frontend service would span both sets of replicas by selecting the common subset of their labels (i.e. omitting the `track` label), so that the traffic will be redirected to both applications: +The frontend service would span both sets of replicas by selecting the common subset of their +labels (i.e. 
omitting the `track` label), so that the traffic will be redirected to both +applications: ```yaml - selector: - app: guestbook - tier: frontend +selector: + app: guestbook + tier: frontend ``` -You can tweak the number of replicas of the stable and canary releases to determine the ratio of each release that will receive live production traffic (in this case, 3:1). -Once you're confident, you can update the stable track to the new application release and remove the canary one. +You can tweak the number of replicas of the stable and canary releases to determine the ratio of +each release that will receive live production traffic (in this case, 3:1). +Once you're confident, you can update the stable track to the new application release and remove +the canary one. -For a more concrete example, check the [tutorial of deploying Ghost](https://github.com/kelseyhightower/talks/tree/master/kubecon-eu-2016/demo#deploy-a-canary). +For a more concrete example, check the +[tutorial of deploying Ghost](https://github.com/kelseyhightower/talks/tree/master/kubecon-eu-2016/demo#deploy-a-canary). ## Updating labels -Sometimes existing pods and other resources need to be relabeled before creating new resources. This can be done with `kubectl label`. +Sometimes existing pods and other resources need to be relabeled before creating new resources. +This can be done with `kubectl label`. For example, if you want to label all your nginx pods as frontend tier, run: ```shell kubectl label pods -l app=nginx tier=fe ``` -```shell +```none pod/my-nginx-2035384211-j5fhi labeled pod/my-nginx-2035384211-u2c7e labeled pod/my-nginx-2035384211-u3t6x labeled @@ -275,20 +318,25 @@ To see the pods you labeled, run: ```shell kubectl get pods -l app=nginx -L tier ``` -```shell + +```none NAME READY STATUS RESTARTS AGE TIER my-nginx-2035384211-j5fhi 1/1 Running 0 23m fe my-nginx-2035384211-u2c7e 1/1 Running 0 23m fe my-nginx-2035384211-u3t6x 1/1 Running 0 23m fe ``` -This outputs all "app=nginx" pods, with an additional label column of pods' tier (specified with `-L` or `--label-columns`). +This outputs all "app=nginx" pods, with an additional label column of pods' tier (specified with +`-L` or `--label-columns`). -For more information, please see [labels](/docs/concepts/overview/working-with-objects/labels/) and [kubectl label](/docs/reference/generated/kubectl/kubectl-commands/#label). +For more information, please see [labels](/docs/concepts/overview/working-with-objects/labels/) +and [kubectl label](/docs/reference/generated/kubectl/kubectl-commands/#label). ## Updating annotations -Sometimes you would want to attach annotations to resources. Annotations are arbitrary non-identifying metadata for retrieval by API clients such as tools, libraries, etc. This can be done with `kubectl annotate`. For example: +Sometimes you would want to attach annotations to resources. Annotations are arbitrary +non-identifying metadata for retrieval by API clients such as tools, libraries, etc. +This can be done with `kubectl annotate`. For example: ```shell kubectl annotate pods my-nginx-v4-9gw19 description='my frontend running nginx' @@ -304,17 +352,19 @@ metadata: ... ``` -For more information, please see [annotations](/docs/concepts/overview/working-with-objects/annotations/) and [kubectl annotate](/docs/reference/generated/kubectl/kubectl-commands/#annotate) document. 
+For more information, see [annotations](/docs/concepts/overview/working-with-objects/annotations/) +and [kubectl annotate](/docs/reference/generated/kubectl/kubectl-commands/#annotate) document. ## Scaling your application -When load on your application grows or shrinks, use `kubectl` to scale your application. For instance, to decrease the number of nginx replicas from 3 to 1, do: +When load on your application grows or shrinks, use `kubectl` to scale your application. +For instance, to decrease the number of nginx replicas from 3 to 1, do: ```shell kubectl scale deployment/my-nginx --replicas=1 ``` -```shell +```none deployment.apps/my-nginx scaled ``` @@ -324,25 +374,27 @@ Now you only have one pod managed by the deployment. kubectl get pods -l app=nginx ``` -```shell +```none NAME READY STATUS RESTARTS AGE my-nginx-2035384211-j5fhi 1/1 Running 0 30m ``` -To have the system automatically choose the number of nginx replicas as needed, ranging from 1 to 3, do: +To have the system automatically choose the number of nginx replicas as needed, +ranging from 1 to 3, do: ```shell kubectl autoscale deployment/my-nginx --min=1 --max=3 ``` -```shell +```none horizontalpodautoscaler.autoscaling/my-nginx autoscaled ``` Now your nginx replicas will be scaled up and down as needed, automatically. -For more information, please see [kubectl scale](/docs/reference/generated/kubectl/kubectl-commands/#scale), [kubectl autoscale](/docs/reference/generated/kubectl/kubectl-commands/#autoscale) and [horizontal pod autoscaler](/docs/tasks/run-application/horizontal-pod-autoscale/) document. - +For more information, please see [kubectl scale](/docs/reference/generated/kubectl/kubectl-commands/#scale), +[kubectl autoscale](/docs/reference/generated/kubectl/kubectl-commands/#autoscale) and +[horizontal pod autoscaler](/docs/tasks/run-application/horizontal-pod-autoscale/) document. ## In-place updates of resources @@ -353,20 +405,34 @@ Sometimes it's necessary to make narrow, non-disruptive updates to resources you It is suggested to maintain a set of configuration files in source control (see [configuration as code](https://martinfowler.com/bliki/InfrastructureAsCode.html)), so that they can be maintained and versioned along with the code for the resources they configure. -Then, you can use [`kubectl apply`](/docs/reference/generated/kubectl/kubectl-commands/#apply) to push your configuration changes to the cluster. +Then, you can use [`kubectl apply`](/docs/reference/generated/kubectl/kubectl-commands/#apply) +to push your configuration changes to the cluster. -This command will compare the version of the configuration that you're pushing with the previous version and apply the changes you've made, without overwriting any automated changes to properties you haven't specified. +This command will compare the version of the configuration that you're pushing with the previous +version and apply the changes you've made, without overwriting any automated changes to properties +you haven't specified. ```shell kubectl apply -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml +``` + +```none deployment.apps/my-nginx configured ``` -Note that `kubectl apply` attaches an annotation to the resource in order to determine the changes to the configuration since the previous invocation. When it's invoked, `kubectl apply` does a three-way diff between the previous configuration, the provided input and the current configuration of the resource, in order to determine how to modify the resource. 
+Note that `kubectl apply` attaches an annotation to the resource in order to determine the changes +to the configuration since the previous invocation. When it's invoked, `kubectl apply` does a +three-way diff between the previous configuration, the provided input and the current +configuration of the resource, in order to determine how to modify the resource. -Currently, resources are created without this annotation, so the first invocation of `kubectl apply` will fall back to a two-way diff between the provided input and the current configuration of the resource. During this first invocation, it cannot detect the deletion of properties set when the resource was created. For this reason, it will not remove them. +Currently, resources are created without this annotation, so the first invocation of `kubectl +apply` will fall back to a two-way diff between the provided input and the current configuration +of the resource. During this first invocation, it cannot detect the deletion of properties set +when the resource was created. For this reason, it will not remove them. -All subsequent calls to `kubectl apply`, and other commands that modify the configuration, such as `kubectl replace` and `kubectl edit`, will update the annotation, allowing subsequent calls to `kubectl apply` to detect and perform deletions using a three-way diff. +All subsequent calls to `kubectl apply`, and other commands that modify the configuration, such as +`kubectl replace` and `kubectl edit`, will update the annotation, allowing subsequent calls to +`kubectl apply` to detect and perform deletions using a three-way diff. ### kubectl edit @@ -376,7 +442,8 @@ Alternatively, you may also update resources with `kubectl edit`: kubectl edit deployment/my-nginx ``` -This is equivalent to first `get` the resource, edit it in text editor, and then `apply` the resource with the updated version: +This is equivalent to first `get` the resource, edit it in text editor, and then `apply` the +resource with the updated version: ```shell kubectl get deployment my-nginx -o yaml > /tmp/nginx.yaml @@ -389,7 +456,8 @@ deployment.apps/my-nginx configured rm /tmp/nginx.yaml ``` -This allows you to do more significant changes more easily. Note that you can specify the editor with your `EDITOR` or `KUBE_EDITOR` environment variables. +This allows you to do more significant changes more easily. Note that you can specify the editor +with your `EDITOR` or `KUBE_EDITOR` environment variables. For more information, please see [kubectl edit](/docs/reference/generated/kubectl/kubectl-commands/#edit) document. @@ -403,20 +471,25 @@ and ## Disruptive updates -In some cases, you may need to update resource fields that cannot be updated once initialized, or you may want to make a recursive change immediately, such as to fix broken pods created by a Deployment. To change such fields, use `replace --force`, which deletes and re-creates the resource. In this case, you can modify your original configuration file: +In some cases, you may need to update resource fields that cannot be updated once initialized, or +you may want to make a recursive change immediately, such as to fix broken pods created by a +Deployment. To change such fields, use `replace --force`, which deletes and re-creates the +resource. 
In this case, you can modify your original configuration file: ```shell kubectl replace -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml --force ``` -```shell +```none deployment.apps/my-nginx deleted deployment.apps/my-nginx replaced ``` ## Updating your application without a service outage -At some point, you'll eventually need to update your deployed application, typically by specifying a new image or image tag, as in the canary deployment scenario above. `kubectl` supports several update operations, each of which is applicable to different scenarios. +At some point, you'll eventually need to update your deployed application, typically by specifying +a new image or image tag, as in the canary deployment scenario above. `kubectl` supports several +update operations, each of which is applicable to different scenarios. We'll guide you through how to create and update applications with Deployments. @@ -426,7 +499,7 @@ Let's say you were running version 1.14.2 of nginx: kubectl create deployment my-nginx --image=nginx:1.14.2 ``` -```shell +```none deployment.apps/my-nginx created ``` @@ -436,24 +509,24 @@ with 3 replicas (so the old and new revisions can coexist): kubectl scale deployment my-nginx --current-replicas=1 --replicas=3 ``` -``` +```none deployment.apps/my-nginx scaled ``` -To update to version 1.16.1, change `.spec.template.spec.containers[0].image` from `nginx:1.14.2` to `nginx:1.16.1` using the previous kubectl commands. +To update to version 1.16.1, change `.spec.template.spec.containers[0].image` from `nginx:1.14.2` +to `nginx:1.16.1` using the previous kubectl commands. ```shell kubectl edit deployment/my-nginx ``` -That's it! The Deployment will declaratively update the deployed nginx application progressively behind the scene. It ensures that only a certain number of old replicas may be down while they are being updated, and only a certain number of new replicas may be created above the desired number of pods. To learn more details about it, visit [Deployment page](/docs/concepts/workloads/controllers/deployment/). - - +That's it! The Deployment will declaratively update the deployed nginx application progressively +behind the scene. It ensures that only a certain number of old replicas may be down while they are +being updated, and only a certain number of new replicas may be created above the desired number +of pods. To learn more details about it, visit [Deployment page](/docs/concepts/workloads/controllers/deployment/). ## {{% heading "whatsnext" %}} - - Learn about [how to use `kubectl` for application introspection and debugging](/docs/tasks/debug/debug-application/debug-running-pod/). - See [Configuration Best Practices and Tips](/docs/concepts/configuration/overview/). - diff --git a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md index c8cd33fe149ee..b26a25af04d9e 100644 --- a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md +++ b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md @@ -87,60 +87,65 @@ spec: The general workflow of a device plugin includes the following steps: -* Initialization. During this phase, the device plugin performs vendor specific +1. Initialization. During this phase, the device plugin performs vendor-specific initialization and setup to make sure the devices are in a ready state. 
-* The plugin starts a gRPC service, with a Unix socket under host path +1. The plugin starts a gRPC service, with a Unix socket under the host path `/var/lib/kubelet/device-plugins/`, that implements the following interfaces: - ```gRPC - service DevicePlugin { - // GetDevicePluginOptions returns options to be communicated with Device Manager. - rpc GetDevicePluginOptions(Empty) returns (DevicePluginOptions) {} - - // ListAndWatch returns a stream of List of Devices - // Whenever a Device state change or a Device disappears, ListAndWatch - // returns the new list - rpc ListAndWatch(Empty) returns (stream ListAndWatchResponse) {} - - // Allocate is called during container creation so that the Device - // Plugin can run device specific operations and instruct Kubelet - // of the steps to make the Device available in the container - rpc Allocate(AllocateRequest) returns (AllocateResponse) {} - - // GetPreferredAllocation returns a preferred set of devices to allocate - // from a list of available ones. The resulting preferred allocation is not - // guaranteed to be the allocation ultimately performed by the - // devicemanager. It is only designed to help the devicemanager make a more - // informed allocation decision when possible. - rpc GetPreferredAllocation(PreferredAllocationRequest) returns (PreferredAllocationResponse) {} - - // PreStartContainer is called, if indicated by Device Plugin during registeration phase, - // before each container start. Device plugin can run device specific operations - // such as resetting the device before making devices available to the container. - rpc PreStartContainer(PreStartContainerRequest) returns (PreStartContainerResponse) {} - } - ``` - - {{< note >}} - Plugins are not required to provide useful implementations for - `GetPreferredAllocation()` or `PreStartContainer()`. Flags indicating which - (if any) of these calls are available should be set in the `DevicePluginOptions` - message sent back by a call to `GetDevicePluginOptions()`. The `kubelet` will - always call `GetDevicePluginOptions()` to see which optional functions are - available, before calling any of them directly. - {{< /note >}} - -* The plugin registers itself with the kubelet through the Unix socket at host + ```gRPC + service DevicePlugin { + // GetDevicePluginOptions returns options to be communicated with Device Manager. + rpc GetDevicePluginOptions(Empty) returns (DevicePluginOptions) {} + + // ListAndWatch returns a stream of List of Devices + // Whenever a Device state change or a Device disappears, ListAndWatch + // returns the new list + rpc ListAndWatch(Empty) returns (stream ListAndWatchResponse) {} + + // Allocate is called during container creation so that the Device + // Plugin can run device specific operations and instruct Kubelet + // of the steps to make the Device available in the container + rpc Allocate(AllocateRequest) returns (AllocateResponse) {} + + // GetPreferredAllocation returns a preferred set of devices to allocate + // from a list of available ones. The resulting preferred allocation is not + // guaranteed to be the allocation ultimately performed by the + // devicemanager. It is only designed to help the devicemanager make a more + // informed allocation decision when possible. + rpc GetPreferredAllocation(PreferredAllocationRequest) returns (PreferredAllocationResponse) {} + + // PreStartContainer is called, if indicated by Device Plugin during registeration phase, + // before each container start. 
Device plugin can run device specific operations + // such as resetting the device before making devices available to the container. + rpc PreStartContainer(PreStartContainerRequest) returns (PreStartContainerResponse) {} + } + ``` + + {{< note >}} + Plugins are not required to provide useful implementations for + `GetPreferredAllocation()` or `PreStartContainer()`. Flags indicating + the availability of these calls, if any, should be set in the `DevicePluginOptions` + message sent back by a call to `GetDevicePluginOptions()`. The `kubelet` will + always call `GetDevicePluginOptions()` to see which optional functions are + available, before calling any of them directly. + {{< /note >}} + +1. The plugin registers itself with the kubelet through the Unix socket at host path `/var/lib/kubelet/device-plugins/kubelet.sock`. -* After successfully registering itself, the device plugin runs in serving mode, during which it keeps - monitoring device health and reports back to the kubelet upon any device state changes. - It is also responsible for serving `Allocate` gRPC requests. During `Allocate`, the device plugin may - do device-specific preparation; for example, GPU cleanup or QRNG initialization. - If the operations succeed, the device plugin returns an `AllocateResponse` that contains container - runtime configurations for accessing the allocated devices. The kubelet passes this information - to the container runtime. + {{< note >}} + The ordering of the workflow is important. A plugin MUST start serving gRPC + service before registering itself with kubelet for successful registration. + {{< /note >}} + +1. After successfully registering itself, the device plugin runs in serving mode, during which it keeps + monitoring device health and reports back to the kubelet upon any device state changes. + It is also responsible for serving `Allocate` gRPC requests. During `Allocate`, the device plugin may + do device-specific preparation; for example, GPU cleanup or QRNG initialization. + If the operations succeed, the device plugin returns an `AllocateResponse` that contains container + runtime configurations for accessing the allocated devices. The kubelet passes this information + to the container runtime. ### Handling kubelet restarts diff --git a/content/en/docs/concepts/overview/working-with-objects/labels.md b/content/en/docs/concepts/overview/working-with-objects/labels.md index d21ed8b37b5d2..477ce6f2f5ca5 100644 --- a/content/en/docs/concepts/overview/working-with-objects/labels.md +++ b/content/en/docs/concepts/overview/working-with-objects/labels.md @@ -9,9 +9,12 @@ weight: 40 _Labels_ are key/value pairs that are attached to objects, such as pods. -Labels are intended to be used to specify identifying attributes of objects that are meaningful and relevant to users, but do not directly imply semantics to the core system. -Labels can be used to organize and to select subsets of objects. Labels can be attached to objects at creation time and subsequently added and modified at any time. -Each object can have a set of key/value labels defined. Each Key must be unique for a given object. +Labels are intended to be used to specify identifying attributes of objects +that are meaningful and relevant to users, but do not directly imply semantics +to the core system. Labels can be used to organize and to select subsets of +objects. Labels can be attached to objects at creation time and subsequently +added and modified at any time. Each object can have a set of key/value labels +defined. 
Each Key must be unique for a given object. ```json "metadata": { @@ -30,37 +33,56 @@ and CLIs. Non-identifying information should be recorded using ## Motivation -Labels enable users to map their own organizational structures onto system objects in a loosely coupled fashion, without requiring clients to store these mappings. +Labels enable users to map their own organizational structures onto system objects +in a loosely coupled fashion, without requiring clients to store these mappings. -Service deployments and batch processing pipelines are often multi-dimensional entities (e.g., multiple partitions or deployments, multiple release tracks, multiple tiers, multiple micro-services per tier). Management often requires cross-cutting operations, which breaks encapsulation of strictly hierarchical representations, especially rigid hierarchies determined by the infrastructure rather than by users. +Service deployments and batch processing pipelines are often multi-dimensional entities +(e.g., multiple partitions or deployments, multiple release tracks, multiple tiers, +multiple micro-services per tier). Management often requires cross-cutting operations, +which breaks encapsulation of strictly hierarchical representations, especially rigid +hierarchies determined by the infrastructure rather than by users. Example labels: - * `"release" : "stable"`, `"release" : "canary"` - * `"environment" : "dev"`, `"environment" : "qa"`, `"environment" : "production"` - * `"tier" : "frontend"`, `"tier" : "backend"`, `"tier" : "cache"` - * `"partition" : "customerA"`, `"partition" : "customerB"` - * `"track" : "daily"`, `"track" : "weekly"` +* `"release" : "stable"`, `"release" : "canary"` +* `"environment" : "dev"`, `"environment" : "qa"`, `"environment" : "production"` +* `"tier" : "frontend"`, `"tier" : "backend"`, `"tier" : "cache"` +* `"partition" : "customerA"`, `"partition" : "customerB"` +* `"track" : "daily"`, `"track" : "weekly"` -These are examples of [commonly used labels](/docs/concepts/overview/working-with-objects/common-labels/); you are free to develop your own conventions. Keep in mind that label Key must be unique for a given object. +These are examples of +[commonly used labels](/docs/concepts/overview/working-with-objects/common-labels/); +you are free to develop your own conventions. +Keep in mind that label Key must be unique for a given object. ## Syntax and character set -_Labels_ are key/value pairs. Valid label keys have two segments: an optional prefix and name, separated by a slash (`/`). The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character (`[a-z0-9A-Z]`) with dashes (`-`), underscores (`_`), dots (`.`), and alphanumerics between. The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots (`.`), not longer than 253 characters in total, followed by a slash (`/`). +_Labels_ are key/value pairs. Valid label keys have two segments: an optional +prefix and name, separated by a slash (`/`). The name segment is required and +must be 63 characters or less, beginning and ending with an alphanumeric +character (`[a-z0-9A-Z]`) with dashes (`-`), underscores (`_`), dots (`.`), +and alphanumerics between. The prefix is optional. If specified, the prefix +must be a DNS subdomain: a series of DNS labels separated by dots (`.`), +not longer than 253 characters in total, followed by a slash (`/`). -If the prefix is omitted, the label Key is presumed to be private to the user. 
Automated system components (e.g. `kube-scheduler`, `kube-controller-manager`, `kube-apiserver`, `kubectl`, or other third-party automation) which add labels to end-user objects must specify a prefix. +If the prefix is omitted, the label Key is presumed to be private to the user. +Automated system components (e.g. `kube-scheduler`, `kube-controller-manager`, +`kube-apiserver`, `kubectl`, or other third-party automation) which add labels +to end-user objects must specify a prefix. -The `kubernetes.io/` and `k8s.io/` prefixes are [reserved](/docs/reference/labels-annotations-taints/) for Kubernetes core components. +The `kubernetes.io/` and `k8s.io/` prefixes are +[reserved](/docs/reference/labels-annotations-taints/) for Kubernetes core components. Valid label value: + * must be 63 characters or less (can be empty), * unless empty, must begin and end with an alphanumeric character (`[a-z0-9A-Z]`), * could contain dashes (`-`), underscores (`_`), dots (`.`), and alphanumerics between. -For example, here's the configuration file for a Pod that has two labels `environment: production` and `app: nginx` : +For example, here's the configuration file for a Pod that has two labels +`environment: production` and `app: nginx`: ```yaml - apiVersion: v1 kind: Pod metadata: @@ -74,34 +96,43 @@ spec: image: nginx:1.14.2 ports: - containerPort: 80 - ``` ## Label selectors -Unlike [names and UIDs](/docs/concepts/overview/working-with-objects/names/), labels do not provide uniqueness. In general, we expect many objects to carry the same label(s). +Unlike [names and UIDs](/docs/concepts/overview/working-with-objects/names/), labels +do not provide uniqueness. In general, we expect many objects to carry the same label(s). -Via a _label selector_, the client/user can identify a set of objects. The label selector is the core grouping primitive in Kubernetes. +Via a _label selector_, the client/user can identify a set of objects. +The label selector is the core grouping primitive in Kubernetes. The API currently supports two types of selectors: _equality-based_ and _set-based_. -A label selector can be made of multiple _requirements_ which are comma-separated. In the case of multiple requirements, all must be satisfied so the comma separator acts as a logical _AND_ (`&&`) operator. +A label selector can be made of multiple _requirements_ which are comma-separated. +In the case of multiple requirements, all must be satisfied so the comma separator +acts as a logical _AND_ (`&&`) operator. The semantics of empty or non-specified selectors are dependent on the context, and API types that use selectors should document the validity and meaning of them. {{< note >}} -For some API types, such as ReplicaSets, the label selectors of two instances must not overlap within a namespace, or the controller can see that as conflicting instructions and fail to determine how many replicas should be present. +For some API types, such as ReplicaSets, the label selectors of two instances must +not overlap within a namespace, or the controller can see that as conflicting +instructions and fail to determine how many replicas should be present. {{< /note >}} {{< caution >}} -For both equality-based and set-based conditions there is no logical _OR_ (`||`) operator. Ensure your filter statements are structured accordingly. +For both equality-based and set-based conditions there is no logical _OR_ (`||`) operator. +Ensure your filter statements are structured accordingly. 
{{< /caution >}} ### _Equality-based_ requirement -_Equality-_ or _inequality-based_ requirements allow filtering by label keys and values. Matching objects must satisfy all of the specified label constraints, though they may have additional labels as well. -Three kinds of operators are admitted `=`,`==`,`!=`. The first two represent _equality_ (and are synonyms), while the latter represents _inequality_. For example: +_Equality-_ or _inequality-based_ requirements allow filtering by label keys and values. +Matching objects must satisfy all of the specified label constraints, though they may +have additional labels as well. Three kinds of operators are admitted `=`,`==`,`!=`. +The first two represent _equality_ (and are synonyms), while the latter represents _inequality_. +For example: ``` environment = production @@ -109,8 +140,9 @@ tier != frontend ``` The former selects all resources with key equal to `environment` and value equal to `production`. -The latter selects all resources with key equal to `tier` and value distinct from `frontend`, and all resources with no labels with the `tier` key. -One could filter for resources in `production` excluding `frontend` using the comma operator: `environment=production,tier!=frontend` +The latter selects all resources with key equal to `tier` and value distinct from `frontend`, +and all resources with no labels with the `tier` key. One could filter for resources in `production` +excluding `frontend` using the comma operator: `environment=production,tier!=frontend` One usage scenario for equality-based label requirement is for Pods to specify node selection criteria. For example, the sample Pod below selects nodes with @@ -134,7 +166,9 @@ spec: ### _Set-based_ requirement -_Set-based_ label requirements allow filtering keys according to a set of values. Three kinds of operators are supported: `in`,`notin` and `exists` (only the key identifier). For example: +_Set-based_ label requirements allow filtering keys according to a set of values. +Three kinds of operators are supported: `in`,`notin` and `exists` (only the key identifier). +For example: ``` environment in (production, qa) @@ -143,27 +177,38 @@ partition !partition ``` -* The first example selects all resources with key equal to `environment` and value equal to `production` or `qa`. -* The second example selects all resources with key equal to `tier` and values other than `frontend` and `backend`, and all resources with no labels with the `tier` key. -* The third example selects all resources including a label with key `partition`; no values are checked. -* The fourth example selects all resources without a label with key `partition`; no values are checked. - -Similarly the comma separator acts as an _AND_ operator. So filtering resources with a `partition` key (no matter the value) and with `environment` different than  `qa` can be achieved using `partition,environment notin (qa)`. -The _set-based_ label selector is a general form of equality since `environment=production` is equivalent to `environment in (production)`; similarly for `!=` and `notin`. - -_Set-based_ requirements can be mixed with _equality-based_ requirements. For example: `partition in (customerA, customerB),environment!=qa`. - +- The first example selects all resources with key equal to `environment` and value + equal to `production` or `qa`. +- The second example selects all resources with key equal to `tier` and values other + than `frontend` and `backend`, and all resources with no labels with the `tier` key. 
+- The third example selects all resources including a label with key `partition`; + no values are checked. +- The fourth example selects all resources without a label with key `partition`; + no values are checked. + +Similarly the comma separator acts as an _AND_ operator. So filtering resources +with a `partition` key (no matter the value) and with `environment` different +than `qa` can be achieved using `partition,environment notin (qa)`. +The _set-based_ label selector is a general form of equality since +`environment=production` is equivalent to `environment in (production)`; +similarly for `!=` and `notin`. + +_Set-based_ requirements can be mixed with _equality-based_ requirements. +For example: `partition in (customerA, customerB),environment!=qa`. ## API ### LIST and WATCH filtering -LIST and WATCH operations may specify label selectors to filter the sets of objects returned using a query parameter. Both requirements are permitted (presented here as they would appear in a URL query string): +LIST and WATCH operations may specify label selectors to filter the sets of objects +returned using a query parameter. Both requirements are permitted +(presented here as they would appear in a URL query string): - * _equality-based_ requirements: `?labelSelector=environment%3Dproduction,tier%3Dfrontend` - * _set-based_ requirements: `?labelSelector=environment+in+%28production%2Cqa%29%2Ctier+in+%28frontend%29` +* _equality-based_ requirements: `?labelSelector=environment%3Dproduction,tier%3Dfrontend` +* _set-based_ requirements: `?labelSelector=environment+in+%28production%2Cqa%29%2Ctier+in+%28frontend%29` -Both label selector styles can be used to list or watch resources via a REST client. For example, targeting `apiserver` with `kubectl` and using _equality-based_ one may write: +Both label selector styles can be used to list or watch resources via a REST client. +For example, targeting `apiserver` with `kubectl` and using _equality-based_ one may write: ```shell kubectl get pods -l environment=production,tier=frontend @@ -175,7 +220,8 @@ or using _set-based_ requirements: kubectl get pods -l 'environment in (production),tier in (frontend)' ``` -As already mentioned _set-based_ requirements are more expressive.  For instance, they can implement the _OR_ operator on values: +As already mentioned _set-based_ requirements are more expressive. +For instance, they can implement the _OR_ operator on values: ```shell kubectl get pods -l 'environment in (production, qa)' @@ -196,15 +242,19 @@ also use label selectors to specify sets of other resources, such as #### Service and ReplicationController -The set of pods that a `service` targets is defined with a label selector. Similarly, the population of pods that a `replicationcontroller` should manage is also defined with a label selector. +The set of pods that a `service` targets is defined with a label selector. +Similarly, the population of pods that a `replicationcontroller` should +manage is also defined with a label selector. 
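To make this concrete, here is a minimal sketch of a Service that targets Pods via a label selector; the Service name and port numbers are illustrative (not taken from this page), and the selector mirrors the `component: redis` map shown just below.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: redis-service        # illustrative name
spec:
  selector:
    component: redis         # only Pods labelled component=redis receive traffic
  ports:
    - protocol: TCP
      port: 6379             # port exposed by the Service (illustrative)
      targetPort: 6379       # port the selected Pods listen on (illustrative)
```

Only Pods in the same namespace that carry the `component: redis` label become endpoints of this Service.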
-Labels selectors for both objects are defined in `json` or `yaml` files using maps, and only _equality-based_ requirement selectors are supported: +Labels selectors for both objects are defined in `json` or `yaml` files using maps, +and only _equality-based_ requirement selectors are supported: ```json "selector": { "component" : "redis", } ``` + or ```yaml @@ -212,7 +262,8 @@ selector: component: redis ``` -this selector (respectively in `json` or `yaml` format) is equivalent to `component=redis` or `component in (redis)`. +This selector (respectively in `json` or `yaml` format) is equivalent to +`component=redis` or `component in (redis)`. #### Resources that support set-based requirements @@ -231,12 +282,19 @@ selector: - {key: environment, operator: NotIn, values: [dev]} ``` -`matchLabels` is a map of `{key,value}` pairs. A single `{key,value}` in the `matchLabels` map is equivalent to an element of `matchExpressions`, whose `key` field is "key", the `operator` is "In", and the `values` array contains only "value". `matchExpressions` is a list of pod selector requirements. Valid operators include In, NotIn, Exists, and DoesNotExist. The values set must be non-empty in the case of In and NotIn. All of the requirements, from both `matchLabels` and `matchExpressions` are ANDed together -- they must all be satisfied in order to match. +`matchLabels` is a map of `{key,value}` pairs. A single `{key,value}` in the +`matchLabels` map is equivalent to an element of `matchExpressions`, whose `key` +field is "key", the `operator` is "In", and the `values` array contains only "value". +`matchExpressions` is a list of pod selector requirements. Valid operators include +In, NotIn, Exists, and DoesNotExist. The values set must be non-empty in the case of +In and NotIn. All of the requirements, from both `matchLabels` and `matchExpressions` +are ANDed together -- they must all be satisfied in order to match. #### Selecting sets of nodes -One use case for selecting over labels is to constrain the set of nodes onto which a pod can schedule. -See the documentation on [node selection](/docs/concepts/scheduling-eviction/assign-pod-node/) for more information. +One use case for selecting over labels is to constrain the set of nodes onto which +a pod can schedule. See the documentation on +[node selection](/docs/concepts/scheduling-eviction/assign-pod-node/) for more information. ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/concepts/security/rbac-good-practices.md b/content/en/docs/concepts/security/rbac-good-practices.md index 8b883bba9a3db..b6abde0d7494e 100644 --- a/content/en/docs/concepts/security/rbac-good-practices.md +++ b/content/en/docs/concepts/security/rbac-good-practices.md @@ -121,8 +121,20 @@ considered weak. ### Persistent volume creation -As noted in the [PodSecurityPolicy](/docs/concepts/security/pod-security-policy/#volumes-and-file-systems) -documentation, access to create PersistentVolumes can allow for escalation of access to the underlying host. +If someone - or some application - is allowed to create arbitrary PersistentVolumes, that access +includes the creation of `hostPath` volumes, which then means that a Pod would get access +to the underlying host filesystem(s) on the associated node. Granting that ability is a security risk. + +There are many ways a container with unrestricted access to the host filesystem can escalate privileges, including +reading data from other containers, and abusing the credentials of system services, such as Kubelet. 
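To illustrate why this is risky (a sketch only, not something to apply to a real cluster), a PersistentVolume along the following lines exposes part of a node's filesystem; the object name, capacity, and path are hypothetical.

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: host-root-pv          # hypothetical name
spec:
  capacity:
    storage: 10Gi             # arbitrary size for illustration
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /                   # a Pod that mounts a claim bound to this volume can read the node's filesystem
```

Any Pod whose PersistentVolumeClaim binds to such a volume can read, and potentially write, files on the node, which is why the ability to create PersistentVolumes should be limited as described next.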
+
+You should only allow access to create PersistentVolume objects for:
+
+- users (cluster operators) that need this access for their work, and who you trust,
+- the Kubernetes control plane components that create PersistentVolumes based on PersistentVolumeClaims
+  that are configured for automatic provisioning.
+  This is usually set up by the Kubernetes provider or by the operator when installing a CSI driver.
+
 Where access to persistent storage is required trusted administrators should create
 PersistentVolumes, and constrained users should use PersistentVolumeClaims to access that storage.
 
diff --git a/content/en/docs/concepts/services-networking/service.md b/content/en/docs/concepts/services-networking/service.md
index 5e39405779aa1..b761e056018da 100644
--- a/content/en/docs/concepts/services-networking/service.md
+++ b/content/en/docs/concepts/services-networking/service.md
@@ -483,6 +483,8 @@ Kubernetes `ServiceTypes` allow you to specify what kind of Service you want.
 
* `ClusterIP`: Exposes the Service on a cluster-internal IP. Choosing this value
  makes the Service only reachable from within the cluster. This is the default
  that is used if you don't explicitly specify a `type` for a Service.
+  You can expose the Service to the public with an [Ingress](/docs/reference/kubernetes-api/service-resources/ingress-v1/) or the
+  [Gateway API](https://gateway-api.sigs.k8s.io/).
* [`NodePort`](#type-nodeport): Exposes the Service on each Node's IP at a static port
  (the `NodePort`).
  To make the node port available, Kubernetes sets up a cluster IP address,
diff --git a/content/en/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/en/docs/concepts/workloads/controllers/ttlafterfinished.md
index a51c88602fcf6..aca3c090ebcd0 100644
--- a/content/en/docs/concepts/workloads/controllers/ttlafterfinished.md
+++ b/content/en/docs/concepts/workloads/controllers/ttlafterfinished.md
@@ -1,75 +1,87 @@
 ---
 reviewers:
 - janetkuo
-title: Automatic Clean-up for Finished Jobs
+title: Automatic Cleanup for Finished Jobs
 content_type: concept
 weight: 70
+description: >-
+  A time-to-live mechanism to clean up old Jobs that have finished execution.
 ---
 
 {{< feature-state for_k8s_version="v1.23" state="stable" >}}
 
-TTL-after-finished {{< glossary_tooltip text="controller" term_id="controller" >}} provides a
-TTL (time to live) mechanism to limit the lifetime of resource objects that
-have finished execution. TTL controller only handles
-{{< glossary_tooltip text="Jobs" term_id="job" >}}.
+When your Job has finished, it's useful to keep that Job in the API (and not immediately delete the Job)
+so that you can tell whether the Job succeeded or failed.
+
+Kubernetes' TTL-after-finished {{< glossary_tooltip text="controller" term_id="controller" >}} provides a
+TTL (time to live) mechanism to limit the lifetime of Job objects that
+have finished execution.
 
-## TTL-after-finished Controller
+## Cleanup for finished Jobs
 
-The TTL-after-finished controller is only supported for Jobs. A cluster operator can use this feature to clean
+The TTL-after-finished controller is only supported for Jobs. You can use this mechanism to clean
 up finished Jobs (either `Complete` or `Failed`) automatically by specifying the
 `.spec.ttlSecondsAfterFinished` field of a Job, as in this
 [example](/docs/concepts/workloads/controllers/job/#clean-up-finished-jobs-automatically).
 
-The TTL-after-finished controller will assume that a job is eligible to be cleaned up
-TTL seconds after the job has finished, in other words, when the TTL has expired.
When the
+
+The TTL-after-finished controller assumes that a Job is eligible to be cleaned up
+TTL seconds after the Job has finished. The timer starts once the
+status condition of the Job changes to show that the Job is either `Complete` or `Failed`; once the TTL has
+expired, that Job becomes eligible for
+[cascading](/docs/concepts/architecture/garbage-collection/#cascading-deletion) removal. When the
 TTL-after-finished controller cleans up a job, it will delete it cascadingly, that is to say it will delete
-its dependent objects together with it. Note that when the job is deleted,
-its lifecycle guarantees, such as finalizers, will be honored.
+its dependent objects together with it.
+
+Kubernetes honors object lifecycle guarantees on the Job, such as waiting for
+[finalizers](/docs/concepts/overview/working-with-objects/finalizers/).
 
-The TTL seconds can be set at any time. Here are some examples for setting the
+You can set the TTL seconds at any time. Here are some examples for setting the
 `.spec.ttlSecondsAfterFinished` field of a Job:
 
-* Specify this field in the job manifest, so that a Job can be cleaned up
+* Specify this field in the Job manifest, so that a Job can be cleaned up
   automatically some time after it finishes.
-* Set this field of existing, already finished jobs, to adopt this new
-  feature.
+* Manually set this field of existing, already finished Jobs, so that they become eligible
+  for cleanup.
 * Use a
-  [mutating admission webhook](/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks)
-  to set this field dynamically at job creation time. Cluster administrators can
+  [mutating admission webhook](/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook)
+  to set this field dynamically at Job creation time. Cluster administrators can
   use this to enforce a TTL policy for finished jobs.
 * Use a
-  [mutating admission webhook](/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks)
-  to set this field dynamically after the job has finished, and choose
-  different TTL values based on job status, labels, etc.
+  [mutating admission webhook](/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook)
+  to set this field dynamically after the Job has finished, and choose
+  different TTL values based on Job status, labels, and so on. For this case, the webhook needs
+  to detect changes to the `.status` of the Job and only set a TTL when the Job
+  is being marked as completed.
+* Write your own controller to manage the cleanup TTL for Jobs that match a particular
+  {{< glossary_tooltip term_id="selector" text="selector" >}}.
 
-## Caveat
+## Caveats
 
-### Updating TTL Seconds
+### Updating TTL for finished Jobs
 
-Note that the TTL period, e.g. `.spec.ttlSecondsAfterFinished` field of Jobs,
-can be modified after the job is created or has finished. However, once the
-Job becomes eligible to be deleted (when the TTL has expired), the system won't
-guarantee that the Jobs will be kept, even if an update to extend the TTL
-returns a successful API response.
+You can modify the TTL period, for example the `.spec.ttlSecondsAfterFinished` field of a Job,
+after the Job is created or has finished. If you extend the TTL period after the
+existing `ttlSecondsAfterFinished` period has expired, Kubernetes doesn't guarantee
+to retain that Job, even if an update to extend the TTL returns a successful API
+response.
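As a minimal sketch (the Job name, image, and TTL values here are placeholders rather than values from this page), a Job opts in to automatic cleanup by setting the field in its manifest, and the TTL on an existing Job can later be adjusted with a merge patch, subject to the caveat above:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: cleanup-demo               # placeholder name
spec:
  ttlSecondsAfterFinished: 100     # delete this Job (and its dependent Pods) ~100s after it finishes
  template:
    spec:
      containers:
      - name: hello
        image: busybox:1.36        # placeholder image
        command: ["sh", "-c", "echo done"]
      restartPolicy: Never
```

```shell
# Extend (or shorten) the TTL on an existing Job
kubectl patch job cleanup-demo --type=merge -p '{"spec":{"ttlSecondsAfterFinished":300}}'
```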
-### Time Skew +### Time skew -Because TTL-after-finished controller uses timestamps stored in the Kubernetes jobs to +Because the TTL-after-finished controller uses timestamps stored in the Kubernetes jobs to determine whether the TTL has expired or not, this feature is sensitive to time -skew in the cluster, which may cause TTL-after-finish controller to clean up job objects +skew in your cluster, which may cause the control plane to clean up Job objects at the wrong time. Clocks aren't always correct, but the difference should be very small. Please be aware of this risk when setting a non-zero TTL. - - ## {{% heading "whatsnext" %}} -* [Clean up Jobs automatically](/docs/concepts/workloads/controllers/job/#clean-up-finished-jobs-automatically) - -* [Design doc](https://github.com/kubernetes/enhancements/blob/master/keps/sig-apps/592-ttl-after-finish/README.md) +* Read [Clean up Jobs automatically](/docs/concepts/workloads/controllers/job/#clean-up-finished-jobs-automatically) +* Refer to the [Kubernetes Enhancement Proposal](https://github.com/kubernetes/enhancements/blob/master/keps/sig-apps/592-ttl-after-finish/README.md) + (KEP) for adding this mechanism. diff --git a/content/en/docs/reference/access-authn-authz/admission-controllers.md b/content/en/docs/reference/access-authn-authz/admission-controllers.md index 1e7022fa772e4..734a0a333b4d9 100644 --- a/content/en/docs/reference/access-authn-authz/admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/admission-controllers.md @@ -373,21 +373,21 @@ An example request body: ```json { - "apiVersion":"imagepolicy.k8s.io/v1alpha1", - "kind":"ImageReview", - "spec":{ - "containers":[ + "apiVersion": "imagepolicy.k8s.io/v1alpha1", + "kind": "ImageReview", + "spec": { + "containers": [ { - "image":"myrepo/myimage:v1" + "image": "myrepo/myimage:v1" }, { - "image":"myrepo/myimage@sha256:beb6bd6a68f114c1dc2ea4b28db81bdf91de202a9014972bec5e4d9171d90ed" + "image": "myrepo/myimage@sha256:beb6bd6a68f114c1dc2ea4b28db81bdf91de202a9014972bec5e4d9171d90ed" } ], - "annotations":{ + "annotations": { "mycluster.image-policy.k8s.io/ticket-1234": "break-glass" }, - "namespace":"mynamespace" + "namespace": "mynamespace" } } ``` @@ -610,9 +610,9 @@ This file may be json or yaml and has the following format: ```yaml podNodeSelectorPluginConfig: - clusterDefaultNodeSelector: name-of-node-selector - namespace1: name-of-node-selector - namespace2: name-of-node-selector + clusterDefaultNodeSelector: name-of-node-selector + namespace1: name-of-node-selector + namespace2: name-of-node-selector ``` Reference the `PodNodeSelector` configuration file from the file provided to the API server's @@ -744,17 +744,37 @@ for more information. ### SecurityContextDeny {#securitycontextdeny} -This admission controller will deny any Pod that attempts to set certain escalating -[SecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#securitycontext-v1-core) -fields, as shown in the -[Configure a Security Context for a Pod or Container](/docs/tasks/configure-pod-container/security-context/) -task. -If you don't use [Pod Security admission](/docs/concepts/security/pod-security-admission/), -[PodSecurityPolicies](/docs/concepts/security/pod-security-policy/), nor any external enforcement mechanism, -then you could use this admission controller to restrict the set of values a security context can take. 
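For illustration only, here is a sketch of a Pod that this admission controller would reject because it sets `runAsUser`, one of the fields the plugin checks; the Pod name and image are placeholders.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: scd-denied-demo                # placeholder name
spec:
  securityContext:
    runAsUser: 1000                    # setting this field causes SecurityContextDeny to reject the Pod
  containers:
  - name: app
    image: registry.k8s.io/pause:3.9   # placeholder image
```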
-
 
-See [Pod Security Standards](/docs/concepts/security/pod-security-standards/) for more context on restricting
-pod privileges.
+{{< feature-state for_k8s_version="v1.0" state="alpha" >}}
+
+{{< caution >}}
+This admission controller plugin is **outdated** and **incomplete**; it may be
+unusable or may not do what you would expect. It was originally designed to prevent
+the use of some, but not all, security-sensitive fields. Indeed, fields like
+`privileged` were not filtered at creation, and the plugin was never updated to
+cover more recent fields and APIs, such as the `ephemeralContainers` field for a
+Pod.
+
+The [Pod Security Admission](/docs/concepts/security/pod-security-admission/)
+plugin enforcing the [Pod Security Standards](/docs/concepts/security/pod-security-standards/)
+`Restricted` profile captures what this plugin was trying to achieve in a better,
+more up-to-date way.
+{{< /caution >}}
+
+This admission controller will deny any Pod that attempts to set the following
+[SecurityContext](/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context)
+fields:
+- `.spec.securityContext.supplementalGroups`
+- `.spec.securityContext.seLinuxOptions`
+- `.spec.securityContext.runAsUser`
+- `.spec.securityContext.fsGroup`
+- `.spec.(init)Containers[*].securityContext.seLinuxOptions`
+- `.spec.(init)Containers[*].securityContext.runAsUser`
+
+For more historical context on this plugin, see
+[The birth of PodSecurityPolicy](/blog/2022/08/23/podsecuritypolicy-the-historical-context/#the-birth-of-podsecuritypolicy)
+in the Kubernetes blog article about PodSecurityPolicy and its removal; that
+article also covers the origin of the `securityContext` field for Pods.
 
 ### ServiceAccount {#serviceaccount}
 
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates.md b/content/en/docs/reference/command-line-tools-reference/feature-gates.md
index 17ecd414758b2..a7b1058b10b6d 100644
--- a/content/en/docs/reference/command-line-tools-reference/feature-gates.md
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates.md
@@ -697,15 +697,12 @@ Each feature gate is designed for enabling/disabling a specific feature:
 
- `RotateKubeletServerCertificate`: Enable the rotation of the server TLS certificate on the kubelet.
  See [kubelet configuration](/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#kubelet-configuration)
  for more details.
-- `SELinuxMountReadWriteOncePod`: Speed up container startup by mounting volumes with the correct
-  SELinux label instead of changing each file on the volumes recursively. The initial implementation
-  focused on ReadWriteOncePod volumes.
+- `SELinuxMountReadWriteOncePod`: Speeds up container startup by allowing kubelet to mount volumes
+  for a Pod directly with the correct SELinux label instead of changing each file on the volumes
+  recursively. The initial implementation focused on ReadWriteOncePod volumes.
- `SeccompDefault`: Enables the use of `RuntimeDefault` as the default seccomp profile for all workloads.
  The seccomp profile is specified in the `securityContext` of a Pod and/or a Container.
-- `SELinuxMountReadWriteOncePod`: Allows kubelet to mount volumes for a Pod directly with the
-  right SELinux label instead of applying the SELinux label recursively on every file on the
-  volume.
- `ServerSideApply`: Enables the [Sever Side Apply (SSA)](/docs/reference/using-api/server-side-apply/)
  feature on the API Server.
- `ServerSideFieldValidation`: Enables server-side field validation. This means the validation diff --git a/content/en/docs/reference/labels-annotations-taints/_index.md b/content/en/docs/reference/labels-annotations-taints/_index.md index 27203e08d8401..46cf104bf9753 100644 --- a/content/en/docs/reference/labels-annotations-taints/_index.md +++ b/content/en/docs/reference/labels-annotations-taints/_index.md @@ -171,6 +171,16 @@ There are two possible values: - `onstart`: The APIService should be reconciled when an API server starts up, but not otherwise. - `true`: The API server should reconcile this APIService continuously. +### service.alpha.kubernetes.io/tolerate-unready-endpoints (deprecated) + +Used on: StatefulSet + +This annotation on a Service denotes if the Endpoints controller should go ahead and create Endpoints for unready Pods. +Endpoints of these Services retain their DNS records and continue receiving +traffic for the Service from the moment the kubelet starts all containers in the pod +and marks it _Running_, til the kubelet stops all containers and deletes the pod from +the API server. + ### kubernetes.io/hostname {#kubernetesiohostname} Example: `kubernetes.io/hostname: "ip-172-20-114-199.ec2.internal"` @@ -437,6 +447,12 @@ Used on: PersistentVolumeClaim This annotation will be added to dynamic provisioning required PVC. +### volume.kubernetes.io/selected-node + +Used on: PersistentVolumeClaim + +This annotation is added to a PVC that is triggered by a scheduler to be dynamically provisioned. Its value is the name of the selected node. + ### volumes.kubernetes.io/controller-managed-attach-detach Used on: Node diff --git a/content/en/docs/reference/networking/virtual-ips.md b/content/en/docs/reference/networking/virtual-ips.md index 63b0f48619bc9..af3899a703d2e 100644 --- a/content/en/docs/reference/networking/virtual-ips.md +++ b/content/en/docs/reference/networking/virtual-ips.md @@ -84,7 +84,7 @@ to verify that backend Pods are working OK, so that kube-proxy in iptables mode only sees backends that test out as healthy. Doing this means you avoid having traffic sent via kube-proxy to a Pod that's known to have failed. -{{< figure src="/images/docs/services-iptables-overview.svg" title="Services overview diagram for iptables proxy" class="diagram-medium" >}} +{{< figure src="/images/docs/services-iptables-overview.svg" title="Virtual IP mechanism for Services, using iptables mode" class="diagram-medium" >}} #### Example {#packet-processing-iptables} @@ -229,7 +229,7 @@ kernel modules are available. If the IPVS kernel modules are not detected, then falls back to running in iptables proxy mode. {{< /note >}} -{{< figure src="/images/docs/services-ipvs-overview.svg" title="Services overview diagram for IPVS proxy" class="diagram-medium" >}} +{{< figure src="/images/docs/services-ipvs-overview.svg" title="Virtual IP address mechanism for Services, using IPVS mode" class="diagram-medium" >}} ## Session affinity diff --git a/content/en/docs/reference/using-api/server-side-apply.md b/content/en/docs/reference/using-api/server-side-apply.md index c40b168c94f5f..980ad7020fb69 100644 --- a/content/en/docs/reference/using-api/server-side-apply.md +++ b/content/en/docs/reference/using-api/server-side-apply.md @@ -366,12 +366,26 @@ There are two solutions: First, the user defines a new configuration containing only the `replicas` field: -{{< codenew file="application/ssa/nginx-deployment-replicas-only.yaml" >}} +```yaml +# Save this file as 'nginx-deployment-replicas-only.yaml'. 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + replicas: 3 +``` + +{{< note >}} +The YAML file for SSA in this case only contains the fields you want to change. +You are not supposed to provide a fully compliant Deployment manifest if you only +want to modify the `spec.replicas` field using SSA. +{{< /note >}} The user applies that configuration using the field manager name `handover-to-hpa`: ```shell -kubectl apply -f https://k8s.io/examples/application/ssa/nginx-deployment-replicas-only.yaml \ +kubectl apply -f nginx-deployment-replicas-only.yaml \ --server-side --field-manager=handover-to-hpa \ --validate=false ``` diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md index 1baa12b3b7a0e..9509989daf62e 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md @@ -26,15 +26,15 @@ etcd cluster of three members that can be used by kubeadm during cluster creatio ## {{% heading "prerequisites" %}} -* Three hosts that can talk to each other over TCP ports 2379 and 2380. This +- Three hosts that can talk to each other over TCP ports 2379 and 2380. This document assumes these default ports. However, they are configurable through the kubeadm config file. -* Each host must have systemd and a bash compatible shell installed. -* Each host must [have a container runtime, kubelet, and kubeadm installed](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/). -* Each host should have access to the Kubernetes container image registry (`registry.k8s.io`) or list/pull the required etcd image using -`kubeadm config images list/pull`. This guide will set up etcd instances as -[static pods](/docs/tasks/configure-pod-container/static-pod/) managed by a kubelet. -* Some infrastructure to copy files between hosts. For example `ssh` and `scp` +- Each host must have systemd and a bash compatible shell installed. +- Each host must [have a container runtime, kubelet, and kubeadm installed](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/). +- Each host should have access to the Kubernetes container image registry (`registry.k8s.io`) or list/pull the required etcd image using + `kubeadm config images list/pull`. This guide will set up etcd instances as + [static pods](/docs/tasks/configure-pod-container/static-pod/) managed by a kubelet. +- Some infrastructure to copy files between hosts. For example `ssh` and `scp` can satisfy this requirement. @@ -42,7 +42,7 @@ etcd cluster of three members that can be used by kubeadm during cluster creatio ## Setting up the cluster The general approach is to generate all certs on one node and only distribute -the *necessary* files to the other nodes. +the _necessary_ files to the other nodes. {{< note >}} kubeadm contains all the necessary cryptographic machinery to generate @@ -59,242 +59,239 @@ on Kubernetes dual-stack support see [Dual-stack support with kubeadm](/docs/set 1. Configure the kubelet to be a service manager for etcd. {{< note >}}You must do this on every host where etcd should be running.{{< /note >}} - Since etcd was created first, you must override the service priority by creating a new unit file - that has higher precedence than the kubeadm-provided kubelet unit file. 
+ Since etcd was created first, you must override the service priority by creating a new unit file + that has higher precedence than the kubeadm-provided kubelet unit file. - ```sh - cat << EOF > /etc/systemd/system/kubelet.service.d/20-etcd-service-manager.conf - [Service] - ExecStart= - # Replace "systemd" with the cgroup driver of your container runtime. The default value in the kubelet is "cgroupfs". - # Replace the value of "--container-runtime-endpoint" for a different container runtime if needed. - ExecStart=/usr/bin/kubelet --address=127.0.0.1 --pod-manifest-path=/etc/kubernetes/manifests --cgroup-driver=systemd --container-runtime=remote --container-runtime-endpoint=unix:///var/run/containerd/containerd.sock - Restart=always - EOF + ```sh + cat << EOF > /etc/systemd/system/kubelet.service.d/20-etcd-service-manager.conf + [Service] + ExecStart= + # Replace "systemd" with the cgroup driver of your container runtime. The default value in the kubelet is "cgroupfs". + # Replace the value of "--container-runtime-endpoint" for a different container runtime if needed. + ExecStart=/usr/bin/kubelet --address=127.0.0.1 --pod-manifest-path=/etc/kubernetes/manifests --cgroup-driver=systemd --container-runtime=remote --container-runtime-endpoint=unix:///var/run/containerd/containerd.sock + Restart=always + EOF - systemctl daemon-reload - systemctl restart kubelet - ``` + systemctl daemon-reload + systemctl restart kubelet + ``` - Check the kubelet status to ensure it is running. + Check the kubelet status to ensure it is running. - ```sh - systemctl status kubelet - ``` + ```sh + systemctl status kubelet + ``` 1. Create configuration files for kubeadm. - Generate one kubeadm configuration file for each host that will have an etcd - member running on it using the following script. - - ```sh - # Update HOST0, HOST1 and HOST2 with the IPs of your hosts - export HOST0=10.0.0.6 - export HOST1=10.0.0.7 - export HOST2=10.0.0.8 - - # Update NAME0, NAME1 and NAME2 with the hostnames of your hosts - export NAME0="infra0" - export NAME1="infra1" - export NAME2="infra2" - - # Create temp directories to store files that will end up on other hosts - mkdir -p /tmp/${HOST0}/ /tmp/${HOST1}/ /tmp/${HOST2}/ - - HOSTS=(${HOST0} ${HOST1} ${HOST2}) - NAMES=(${NAME0} ${NAME1} ${NAME2}) - - for i in "${!HOSTS[@]}"; do - HOST=${HOSTS[$i]} - NAME=${NAMES[$i]} - cat << EOF > /tmp/${HOST}/kubeadmcfg.yaml - --- - apiVersion: "kubeadm.k8s.io/v1beta3" - kind: InitConfiguration - nodeRegistration: - name: ${NAME} - localAPIEndpoint: - advertiseAddress: ${HOST} - --- - apiVersion: "kubeadm.k8s.io/v1beta3" - kind: ClusterConfiguration - etcd: - local: - serverCertSANs: - - "${HOST}" - peerCertSANs: - - "${HOST}" - extraArgs: - initial-cluster: ${NAMES[0]}=https://${HOSTS[0]}:2380,${NAMES[1]}=https://${HOSTS[1]}:2380,${NAMES[2]}=https://${HOSTS[2]}:2380 - initial-cluster-state: new - name: ${NAME} - listen-peer-urls: https://${HOST}:2380 - listen-client-urls: https://${HOST}:2379 - advertise-client-urls: https://${HOST}:2379 - initial-advertise-peer-urls: https://${HOST}:2380 - EOF - done - ``` + Generate one kubeadm configuration file for each host that will have an etcd + member running on it using the following script. 
+ + ```sh + # Update HOST0, HOST1 and HOST2 with the IPs of your hosts + export HOST0=10.0.0.6 + export HOST1=10.0.0.7 + export HOST2=10.0.0.8 + + # Update NAME0, NAME1 and NAME2 with the hostnames of your hosts + export NAME0="infra0" + export NAME1="infra1" + export NAME2="infra2" + + # Create temp directories to store files that will end up on other hosts + mkdir -p /tmp/${HOST0}/ /tmp/${HOST1}/ /tmp/${HOST2}/ + + HOSTS=(${HOST0} ${HOST1} ${HOST2}) + NAMES=(${NAME0} ${NAME1} ${NAME2}) + + for i in "${!HOSTS[@]}"; do + HOST=${HOSTS[$i]} + NAME=${NAMES[$i]} + cat << EOF > /tmp/${HOST}/kubeadmcfg.yaml + --- + apiVersion: "kubeadm.k8s.io/v1beta3" + kind: InitConfiguration + nodeRegistration: + name: ${NAME} + localAPIEndpoint: + advertiseAddress: ${HOST} + --- + apiVersion: "kubeadm.k8s.io/v1beta3" + kind: ClusterConfiguration + etcd: + local: + serverCertSANs: + - "${HOST}" + peerCertSANs: + - "${HOST}" + extraArgs: + initial-cluster: ${NAMES[0]}=https://${HOSTS[0]}:2380,${NAMES[1]}=https://${HOSTS[1]}:2380,${NAMES[2]}=https://${HOSTS[2]}:2380 + initial-cluster-state: new + name: ${NAME} + listen-peer-urls: https://${HOST}:2380 + listen-client-urls: https://${HOST}:2379 + advertise-client-urls: https://${HOST}:2379 + initial-advertise-peer-urls: https://${HOST}:2380 + EOF + done + ``` 1. Generate the certificate authority. - If you already have a CA then the only action that is copying the CA's `crt` and - `key` file to `/etc/kubernetes/pki/etcd/ca.crt` and - `/etc/kubernetes/pki/etcd/ca.key`. After those files have been copied, - proceed to the next step, "Create certificates for each member". + If you already have a CA then the only action that is copying the CA's `crt` and + `key` file to `/etc/kubernetes/pki/etcd/ca.crt` and + `/etc/kubernetes/pki/etcd/ca.key`. After those files have been copied, + proceed to the next step, "Create certificates for each member". - If you do not already have a CA then run this command on `$HOST0` (where you - generated the configuration files for kubeadm). + If you do not already have a CA then run this command on `$HOST0` (where you + generated the configuration files for kubeadm). - ``` - kubeadm init phase certs etcd-ca - ``` + ``` + kubeadm init phase certs etcd-ca + ``` - This creates two files: + This creates two files: - - `/etc/kubernetes/pki/etcd/ca.crt` - - `/etc/kubernetes/pki/etcd/ca.key` + - `/etc/kubernetes/pki/etcd/ca.crt` + - `/etc/kubernetes/pki/etcd/ca.key` 1. Create certificates for each member. 
- ```sh - kubeadm init phase certs etcd-server --config=/tmp/${HOST2}/kubeadmcfg.yaml - kubeadm init phase certs etcd-peer --config=/tmp/${HOST2}/kubeadmcfg.yaml - kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST2}/kubeadmcfg.yaml - kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST2}/kubeadmcfg.yaml - cp -R /etc/kubernetes/pki /tmp/${HOST2}/ - # cleanup non-reusable certificates - find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete - - kubeadm init phase certs etcd-server --config=/tmp/${HOST1}/kubeadmcfg.yaml - kubeadm init phase certs etcd-peer --config=/tmp/${HOST1}/kubeadmcfg.yaml - kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST1}/kubeadmcfg.yaml - kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST1}/kubeadmcfg.yaml - cp -R /etc/kubernetes/pki /tmp/${HOST1}/ - find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete - - kubeadm init phase certs etcd-server --config=/tmp/${HOST0}/kubeadmcfg.yaml - kubeadm init phase certs etcd-peer --config=/tmp/${HOST0}/kubeadmcfg.yaml - kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST0}/kubeadmcfg.yaml - kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST0}/kubeadmcfg.yaml - # No need to move the certs because they are for HOST0 - - # clean up certs that should not be copied off this host - find /tmp/${HOST2} -name ca.key -type f -delete - find /tmp/${HOST1} -name ca.key -type f -delete - ``` + ```sh + kubeadm init phase certs etcd-server --config=/tmp/${HOST2}/kubeadmcfg.yaml + kubeadm init phase certs etcd-peer --config=/tmp/${HOST2}/kubeadmcfg.yaml + kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST2}/kubeadmcfg.yaml + kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST2}/kubeadmcfg.yaml + cp -R /etc/kubernetes/pki /tmp/${HOST2}/ + # cleanup non-reusable certificates + find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete + + kubeadm init phase certs etcd-server --config=/tmp/${HOST1}/kubeadmcfg.yaml + kubeadm init phase certs etcd-peer --config=/tmp/${HOST1}/kubeadmcfg.yaml + kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST1}/kubeadmcfg.yaml + kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST1}/kubeadmcfg.yaml + cp -R /etc/kubernetes/pki /tmp/${HOST1}/ + find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete + + kubeadm init phase certs etcd-server --config=/tmp/${HOST0}/kubeadmcfg.yaml + kubeadm init phase certs etcd-peer --config=/tmp/${HOST0}/kubeadmcfg.yaml + kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST0}/kubeadmcfg.yaml + kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST0}/kubeadmcfg.yaml + # No need to move the certs because they are for HOST0 + + # clean up certs that should not be copied off this host + find /tmp/${HOST2} -name ca.key -type f -delete + find /tmp/${HOST1} -name ca.key -type f -delete + ``` 1. Copy certificates and kubeadm configs. - The certificates have been generated and now they must be moved to their - respective hosts. + The certificates have been generated and now they must be moved to their + respective hosts. 
- ```sh - USER=ubuntu - HOST=${HOST1} - scp -r /tmp/${HOST}/* ${USER}@${HOST}: - ssh ${USER}@${HOST} - USER@HOST $ sudo -Es - root@HOST $ chown -R root:root pki - root@HOST $ mv pki /etc/kubernetes/ - ``` + ```sh + USER=ubuntu + HOST=${HOST1} + scp -r /tmp/${HOST}/* ${USER}@${HOST}: + ssh ${USER}@${HOST} + USER@HOST $ sudo -Es + root@HOST $ chown -R root:root pki + root@HOST $ mv pki /etc/kubernetes/ + ``` 1. Ensure all expected files exist. - The complete list of required files on `$HOST0` is: - - ``` - /tmp/${HOST0} - └── kubeadmcfg.yaml - --- - /etc/kubernetes/pki - ├── apiserver-etcd-client.crt - ├── apiserver-etcd-client.key - └── etcd - ├── ca.crt - ├── ca.key - ├── healthcheck-client.crt - ├── healthcheck-client.key - ├── peer.crt - ├── peer.key - ├── server.crt - └── server.key - ``` - - On `$HOST1`: - - ``` - $HOME - └── kubeadmcfg.yaml - --- - /etc/kubernetes/pki - ├── apiserver-etcd-client.crt - ├── apiserver-etcd-client.key - └── etcd - ├── ca.crt - ├── healthcheck-client.crt - ├── healthcheck-client.key - ├── peer.crt - ├── peer.key - ├── server.crt - └── server.key - ``` - - On `$HOST2`: - - ``` - $HOME - └── kubeadmcfg.yaml - --- - /etc/kubernetes/pki - ├── apiserver-etcd-client.crt - ├── apiserver-etcd-client.key - └── etcd - ├── ca.crt - ├── healthcheck-client.crt - ├── healthcheck-client.key - ├── peer.crt - ├── peer.key - ├── server.crt - └── server.key - ``` + The complete list of required files on `$HOST0` is: + + ``` + /tmp/${HOST0} + └── kubeadmcfg.yaml + --- + /etc/kubernetes/pki + ├── apiserver-etcd-client.crt + ├── apiserver-etcd-client.key + └── etcd + ├── ca.crt + ├── ca.key + ├── healthcheck-client.crt + ├── healthcheck-client.key + ├── peer.crt + ├── peer.key + ├── server.crt + └── server.key + ``` + + On `$HOST1`: + + ``` + $HOME + └── kubeadmcfg.yaml + --- + /etc/kubernetes/pki + ├── apiserver-etcd-client.crt + ├── apiserver-etcd-client.key + └── etcd + ├── ca.crt + ├── healthcheck-client.crt + ├── healthcheck-client.key + ├── peer.crt + ├── peer.key + ├── server.crt + └── server.key + ``` + + On `$HOST2`: + + ``` + $HOME + └── kubeadmcfg.yaml + --- + /etc/kubernetes/pki + ├── apiserver-etcd-client.crt + ├── apiserver-etcd-client.key + └── etcd + ├── ca.crt + ├── healthcheck-client.crt + ├── healthcheck-client.key + ├── peer.crt + ├── peer.key + ├── server.crt + └── server.key + ``` 1. Create the static pod manifests. - Now that the certificates and configs are in place it's time to create the - manifests. On each host run the `kubeadm` command to generate a static manifest - for etcd. + Now that the certificates and configs are in place it's time to create the + manifests. On each host run the `kubeadm` command to generate a static manifest + for etcd. - ```sh - root@HOST0 $ kubeadm init phase etcd local --config=/tmp/${HOST0}/kubeadmcfg.yaml - root@HOST1 $ kubeadm init phase etcd local --config=$HOME/kubeadmcfg.yaml - root@HOST2 $ kubeadm init phase etcd local --config=$HOME/kubeadmcfg.yaml - ``` + ```sh + root@HOST0 $ kubeadm init phase etcd local --config=/tmp/${HOST0}/kubeadmcfg.yaml + root@HOST1 $ kubeadm init phase etcd local --config=$HOME/kubeadmcfg.yaml + root@HOST2 $ kubeadm init phase etcd local --config=$HOME/kubeadmcfg.yaml + ``` 1. Optional: Check the cluster health. 
- ```sh - docker run --rm -it \ - --net host \ - -v /etc/kubernetes:/etc/kubernetes registry.k8s.io/etcd:${ETCD_TAG} etcdctl \ - --cert /etc/kubernetes/pki/etcd/peer.crt \ - --key /etc/kubernetes/pki/etcd/peer.key \ - --cacert /etc/kubernetes/pki/etcd/ca.crt \ - --endpoints https://${HOST0}:2379 endpoint health --cluster - ... - https://[HOST0 IP]:2379 is healthy: successfully committed proposal: took = 16.283339ms - https://[HOST1 IP]:2379 is healthy: successfully committed proposal: took = 19.44402ms - https://[HOST2 IP]:2379 is healthy: successfully committed proposal: took = 35.926451ms - ``` - - Set `${ETCD_TAG}` to the version tag of your etcd image. For example `3.4.3-0`. To see the etcd image and tag that kubeadm uses execute `kubeadm config images list --kubernetes-version ${K8S_VERSION}`, where `${K8S_VERSION}` is for example `v1.17.0`. - - Set `${HOST0}`to the IP address of the host you are testing. - - + ```sh + docker run --rm -it \ + --net host \ + -v /etc/kubernetes:/etc/kubernetes registry.k8s.io/etcd:${ETCD_TAG} etcdctl \ + --cert /etc/kubernetes/pki/etcd/peer.crt \ + --key /etc/kubernetes/pki/etcd/peer.key \ + --cacert /etc/kubernetes/pki/etcd/ca.crt \ + --endpoints https://${HOST0}:2379 endpoint health --cluster + ... + https://[HOST0 IP]:2379 is healthy: successfully committed proposal: took = 16.283339ms + https://[HOST1 IP]:2379 is healthy: successfully committed proposal: took = 19.44402ms + https://[HOST2 IP]:2379 is healthy: successfully committed proposal: took = 35.926451ms + ``` + + - Set `${ETCD_TAG}` to the version tag of your etcd image. For example `3.4.3-0`. To see the etcd image and tag that kubeadm uses execute `kubeadm config images list --kubernetes-version ${K8S_VERSION}`, where `${K8S_VERSION}` is for example `v1.17.0`. + - Set `${HOST0}`to the IP address of the host you are testing. ## {{% heading "whatsnext" %}} - Once you have an etcd cluster with 3 working members, you can continue setting up a highly available control plane using the [external etcd method with kubeadm](/docs/setup/production-environment/tools/kubeadm/high-availability/). - diff --git a/content/en/docs/tasks/administer-cluster/extended-resource-node.md b/content/en/docs/tasks/administer-cluster/extended-resource-node.md index 3cfb6d4f1951a..3e9aae76d6918 100644 --- a/content/en/docs/tasks/administer-cluster/extended-resource-node.md +++ b/content/en/docs/tasks/administer-cluster/extended-resource-node.md @@ -4,24 +4,16 @@ content_type: task weight: 70 --- - This page shows how to specify extended resources for a Node. Extended resources allow cluster administrators to advertise node-level resources that would otherwise be unknown to Kubernetes. - - - ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} - - - ## Get the names of your Nodes @@ -39,7 +31,7 @@ the Kubernetes API server. For example, suppose one of your Nodes has four dongl attached. Here's an example of a PATCH request that advertises four dongle resources for your Node. 
-```shell +``` PATCH /api/v1/nodes//status HTTP/1.1 Accept: application/json Content-Type: application/json-patch+json @@ -69,9 +61,9 @@ Replace `` with the name of your Node: ```shell curl --header "Content-Type: application/json-patch+json" \ ---request PATCH \ ---data '[{"op": "add", "path": "/status/capacity/example.com~1dongle", "value": "4"}]' \ -http://localhost:8001/api/v1/nodes//status + --request PATCH \ + --data '[{"op": "add", "path": "/status/capacity/example.com~1dongle", "value": "4"}]' \ + http://localhost:8001/api/v1/nodes//status ``` {{< note >}} @@ -100,9 +92,9 @@ Once again, the output shows the dongle resource: ```yaml Capacity: - cpu: 2 - memory: 2049008Ki - example.com/dongle: 4 + cpu: 2 + memory: 2049008Ki + example.com/dongle: 4 ``` Now, application developers can create Pods that request a certain @@ -178,9 +170,9 @@ Replace `` with the name of your Node: ```shell curl --header "Content-Type: application/json-patch+json" \ ---request PATCH \ ---data '[{"op": "remove", "path": "/status/capacity/example.com~1dongle"}]' \ -http://localhost:8001/api/v1/nodes//status + --request PATCH \ + --data '[{"op": "remove", "path": "/status/capacity/example.com~1dongle"}]' \ + http://localhost:8001/api/v1/nodes//status ``` Verify that the dongle advertisement has been removed: @@ -191,20 +183,13 @@ kubectl describe node | grep dongle (you should not see any output) - - - ## {{% heading "whatsnext" %}} - ### For application developers -* [Assign Extended Resources to a Container](/docs/tasks/configure-pod-container/extended-resource/) +- [Assign Extended Resources to a Container](/docs/tasks/configure-pod-container/extended-resource/) ### For cluster administrators -* [Configure Minimum and Maximum Memory Constraints for a Namespace](/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace/) -* [Configure Minimum and Maximum CPU Constraints for a Namespace](/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace/) - - - +- [Configure Minimum and Maximum Memory Constraints for a Namespace](/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace/) +- [Configure Minimum and Maximum CPU Constraints for a Namespace](/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace/) diff --git a/content/en/docs/tasks/administer-cluster/kubelet-credential-provider.md b/content/en/docs/tasks/administer-cluster/kubelet-credential-provider.md index 7e711fe0bb77b..ae3d381a86e2c 100644 --- a/content/en/docs/tasks/administer-cluster/kubelet-credential-provider.md +++ b/content/en/docs/tasks/administer-cluster/kubelet-credential-provider.md @@ -83,8 +83,8 @@ providers: # # A match exists between an image and a matchImage when all of the below are true: # - Both contain the same number of domain parts and each part matches. - # - The URL path of an imageMatch must be a prefix of the target image URL path. - # - If the imageMatch contains a port, then the port must match in the image as well. + # - The URL path of an matchImages must be a prefix of the target image URL path. + # - If the matchImages contains a port, then the port must match in the image as well. # # Example values of matchImages: # - 123456789.dkr.ecr.us-east-1.amazonaws.com @@ -143,7 +143,7 @@ A match exists between an image name and a `matchImage` entry when all of the be * Both contain the same number of domain parts and each part matches. * The URL path of match image must be a prefix of the target image URL path. 
-* If the imageMatch contains a port, then the port must match in the image as well. +* If the matchImages contains a port, then the port must match in the image as well. Some example values of `matchImages` patterns are: diff --git a/content/en/docs/tasks/administer-cluster/safely-drain-node.md b/content/en/docs/tasks/administer-cluster/safely-drain-node.md index 8168e057b9c2d..456fd02c7d452 100644 --- a/content/en/docs/tasks/administer-cluster/safely-drain-node.md +++ b/content/en/docs/tasks/administer-cluster/safely-drain-node.md @@ -66,9 +66,16 @@ kubectl get nodes Next, tell Kubernetes to drain the node: ```shell -kubectl drain +kubectl drain --ignore-daemonsets ``` +If there are pods managed by a DaemonSet, you will need to specify +`--ignore-daemonsets` with `kubectl` to successfully drain the node. The `kubectl drain` subcommand on its own does not actually drain +a node of its DaemonSet pods: +the DaemonSet controller (part of the control plane) immediately replaces missing Pods with +new equivalent Pods. The DaemonSet controller also creates Pods that ignore unschedulable +taints, which allows the new Pods to launch onto a node that you are draining. + Once it returns (without giving an error), you can power down the node (or equivalently, if on a cloud platform, delete the virtual machine backing the node). If you leave the node in the cluster during the maintenance operation, you need to run diff --git a/content/en/docs/tasks/configure-pod-container/enforce-standards-admission-controller.md b/content/en/docs/tasks/configure-pod-container/enforce-standards-admission-controller.md index 393d546623857..802f38a651a87 100644 --- a/content/en/docs/tasks/configure-pod-container/enforce-standards-admission-controller.md +++ b/content/en/docs/tasks/configure-pod-container/enforce-standards-admission-controller.md @@ -52,6 +52,9 @@ plugins: # Array of namespaces to exempt. namespaces: [] ``` +{{< note >}} +The above manifest needs to be specified via the `--admission-control-config-file` to kube-apiserver. +{{< /note >}} {{< note >}} `pod-security.admission.config.k8s.io/v1` configuration requires v1.25+. diff --git a/content/en/docs/tasks/run-application/access-api-from-pod.md b/content/en/docs/tasks/run-application/access-api-from-pod.md index d56f624cd561b..41d6ea478e579 100644 --- a/content/en/docs/tasks/run-application/access-api-from-pod.md +++ b/content/en/docs/tasks/run-application/access-api-from-pod.md @@ -42,10 +42,18 @@ securely with the API server. ### Directly accessing the REST API -While running in a Pod, the Kubernetes apiserver is accessible via a Service named -`kubernetes` in the `default` namespace. Therefore, Pods can use the -`kubernetes.default.svc` hostname to query the API server. Official client libraries -do this automatically. +While running in a Pod, your container can create an HTTPS URL for the Kubernetes API +server by fetching the `KUBERNETES_SERVICE_HOST` and `KUBERNETES_SERVICE_PORT_HTTPS` +environment variables. The API server's in-cluster address is also published to a +Service named `kubernetes` in the `default` namespace so that pods may reference +`kubernetes.default.svc` as a DNS name for the local API server. + +{{< note >}} +Kubernetes does not guarantee that the API server has a valid certificate for +the hostname `kubernetes.default.svc`; +however, the control plane **is** expected to present a valid certificate for the +hostname or IP address that `$KUBERNETES_SERVICE_HOST` represents. 
+{{< /note >}} The recommended way to authenticate to the API server is with a [service account](/docs/tasks/configure-pod-container/configure-service-account/) diff --git a/content/en/docs/tutorials/security/cluster-level-pss.md b/content/en/docs/tutorials/security/cluster-level-pss.md index 1748ebb19c754..07273c3be8ee9 100644 --- a/content/en/docs/tutorials/security/cluster-level-pss.md +++ b/content/en/docs/tutorials/security/cluster-level-pss.md @@ -41,56 +41,55 @@ that are most appropriate for your configuration, do the following: 1. Create a cluster with no Pod Security Standards applied: - ```shell - kind create cluster --name psa-wo-cluster-pss --image kindest/node:v1.24.0 - ``` + ```shell + kind create cluster --name psa-wo-cluster-pss --image kindest/node:v1.24.0 + ``` The output is similar to this: - ``` - Creating cluster "psa-wo-cluster-pss" ... - ✓ Ensuring node image (kindest/node:v1.24.0) 🖼 - ✓ Preparing nodes 📦 - ✓ Writing configuration 📜 - ✓ Starting control-plane 🕹️ - ✓ Installing CNI 🔌 - ✓ Installing StorageClass 💾 - Set kubectl context to "kind-psa-wo-cluster-pss" - You can now use your cluster with: - - kubectl cluster-info --context kind-psa-wo-cluster-pss - - Thanks for using kind! 😊 - - ``` + ``` + Creating cluster "psa-wo-cluster-pss" ... + ✓ Ensuring node image (kindest/node:v1.24.0) 🖼 + ✓ Preparing nodes 📦 + ✓ Writing configuration 📜 + ✓ Starting control-plane 🕹️ + ✓ Installing CNI 🔌 + ✓ Installing StorageClass 💾 + Set kubectl context to "kind-psa-wo-cluster-pss" + You can now use your cluster with: + + kubectl cluster-info --context kind-psa-wo-cluster-pss + + Thanks for using kind! 😊 + ``` 1. Set the kubectl context to the new cluster: - ```shell - kubectl cluster-info --context kind-psa-wo-cluster-pss - ``` + ```shell + kubectl cluster-info --context kind-psa-wo-cluster-pss + ``` The output is similar to this: - ``` - Kubernetes control plane is running at https://127.0.0.1:61350 + ``` + Kubernetes control plane is running at https://127.0.0.1:61350 - CoreDNS is running at https://127.0.0.1:61350/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy - - To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. - ``` - -1. Get a list of namespaces in the cluster: - - ```shell - kubectl get ns - ``` - The output is similar to this: - ``` - NAME STATUS AGE - default Active 9m30s - kube-node-lease Active 9m32s - kube-public Active 9m32s - kube-system Active 9m32s - local-path-storage Active 9m26s - ``` + CoreDNS is running at https://127.0.0.1:61350/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy + + To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. + ``` + +1. Get a list of namespaces in the cluster: + + ```shell + kubectl get ns + ``` + The output is similar to this: + ``` + NAME STATUS AGE + default Active 9m30s + kube-node-lease Active 9m32s + kube-public Active 9m32s + kube-system Active 9m32s + local-path-storage Active 9m26s + ``` 1. 
Use `--dry-run=server` to understand what happens when different Pod Security Standards are applied: @@ -100,7 +99,7 @@ that are most appropriate for your configuration, do the following: kubectl label --dry-run=server --overwrite ns --all \ pod-security.kubernetes.io/enforce=privileged ``` - The output is similar to this: + The output is similar to this: ``` namespace/default labeled namespace/kube-node-lease labeled @@ -113,7 +112,7 @@ that are most appropriate for your configuration, do the following: kubectl label --dry-run=server --overwrite ns --all \ pod-security.kubernetes.io/enforce=baseline ``` - The output is similar to this: + The output is similar to this: ``` namespace/default labeled namespace/kube-node-lease labeled @@ -127,11 +126,11 @@ that are most appropriate for your configuration, do the following: ``` 3. Restricted - ```shell + ```shell kubectl label --dry-run=server --overwrite ns --all \ pod-security.kubernetes.io/enforce=restricted ``` - The output is similar to this: + The output is similar to this: ``` namespace/default labeled namespace/kube-node-lease labeled @@ -179,72 +178,72 @@ following: 1. Create a configuration file that can be consumed by the Pod Security Admission Controller to implement these Pod Security Standards: - ``` - mkdir -p /tmp/pss - cat < /tmp/pss/cluster-level-pss.yaml - apiVersion: apiserver.config.k8s.io/v1 - kind: AdmissionConfiguration - plugins: - - name: PodSecurity - configuration: - apiVersion: pod-security.admission.config.k8s.io/v1 - kind: PodSecurityConfiguration - defaults: - enforce: "baseline" - enforce-version: "latest" - audit: "restricted" - audit-version: "latest" - warn: "restricted" - warn-version: "latest" - exemptions: - usernames: [] - runtimeClasses: [] - namespaces: [kube-system] - EOF - ``` - - {{< note >}} - `pod-security.admission.config.k8s.io/v1` configuration requires v1.25+. - For v1.23 and v1.24, use [v1beta1](https://v1-24.docs.kubernetes.io/docs/tasks/configure-pod-container/enforce-standards-admission-controller/). - For v1.22, use [v1alpha1](https://v1-22.docs.kubernetes.io/docs/tasks/configure-pod-container/enforce-standards-admission-controller/). - {{< /note >}} + ``` + mkdir -p /tmp/pss + cat < /tmp/pss/cluster-level-pss.yaml + apiVersion: apiserver.config.k8s.io/v1 + kind: AdmissionConfiguration + plugins: + - name: PodSecurity + configuration: + apiVersion: pod-security.admission.config.k8s.io/v1 + kind: PodSecurityConfiguration + defaults: + enforce: "baseline" + enforce-version: "latest" + audit: "restricted" + audit-version: "latest" + warn: "restricted" + warn-version: "latest" + exemptions: + usernames: [] + runtimeClasses: [] + namespaces: [kube-system] + EOF + ``` + + {{< note >}} + `pod-security.admission.config.k8s.io/v1` configuration requires v1.25+. + For v1.23 and v1.24, use [v1beta1](https://v1-24.docs.kubernetes.io/docs/tasks/configure-pod-container/enforce-standards-admission-controller/). + For v1.22, use [v1alpha1](https://v1-22.docs.kubernetes.io/docs/tasks/configure-pod-container/enforce-standards-admission-controller/). + {{< /note >}} 1. 
Configure the API server to consume this file during cluster creation: - ``` - cat < /tmp/pss/cluster-config.yaml - kind: Cluster - apiVersion: kind.x-k8s.io/v1alpha4 - nodes: - - role: control-plane - kubeadmConfigPatches: - - | - kind: ClusterConfiguration - apiServer: - extraArgs: - admission-control-config-file: /etc/config/cluster-level-pss.yaml - extraVolumes: - - name: accf - hostPath: /etc/config - mountPath: /etc/config - readOnly: false - pathType: "DirectoryOrCreate" - extraMounts: - - hostPath: /tmp/pss - containerPath: /etc/config - # optional: if set, the mount is read-only. - # default false - readOnly: false - # optional: if set, the mount needs SELinux relabeling. - # default false - selinuxRelabel: false - # optional: set propagation mode (None, HostToContainer or Bidirectional) - # see https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation - # default None - propagation: None - EOF - ``` + ``` + cat < /tmp/pss/cluster-config.yaml + kind: Cluster + apiVersion: kind.x-k8s.io/v1alpha4 + nodes: + - role: control-plane + kubeadmConfigPatches: + - | + kind: ClusterConfiguration + apiServer: + extraArgs: + admission-control-config-file: /etc/config/cluster-level-pss.yaml + extraVolumes: + - name: accf + hostPath: /etc/config + mountPath: /etc/config + readOnly: false + pathType: "DirectoryOrCreate" + extraMounts: + - hostPath: /tmp/pss + containerPath: /etc/config + # optional: if set, the mount is read-only. + # default false + readOnly: false + # optional: if set, the mount needs SELinux relabeling. + # default false + selinuxRelabel: false + # optional: set propagation mode (None, HostToContainer or Bidirectional) + # see https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation + # default None + propagation: None + EOF + ``` {{}} If you use Docker Desktop with KinD on macOS, you can @@ -256,56 +255,57 @@ following: these Pod Security Standards: ```shell - kind create cluster --name psa-with-cluster-pss --image kindest/node:v1.24.0 --config /tmp/pss/cluster-config.yaml + kind create cluster --name psa-with-cluster-pss --image kindest/node:v1.24.0 --config /tmp/pss/cluster-config.yaml ``` The output is similar to this: ``` - Creating cluster "psa-with-cluster-pss" ... - ✓ Ensuring node image (kindest/node:v1.24.0) 🖼 - ✓ Preparing nodes 📦 - ✓ Writing configuration 📜 - ✓ Starting control-plane 🕹️ - ✓ Installing CNI 🔌 - ✓ Installing StorageClass 💾 - Set kubectl context to "kind-psa-with-cluster-pss" - You can now use your cluster with: + Creating cluster "psa-with-cluster-pss" ... + ✓ Ensuring node image (kindest/node:v1.24.0) 🖼 + ✓ Preparing nodes 📦 + ✓ Writing configuration 📜 + ✓ Starting control-plane 🕹️ + ✓ Installing CNI 🔌 + ✓ Installing StorageClass 💾 + Set kubectl context to "kind-psa-with-cluster-pss" + You can now use your cluster with: - kubectl cluster-info --context kind-psa-with-cluster-pss + kubectl cluster-info --context kind-psa-with-cluster-pss - Have a question, bug, or feature request? Let us know! https://kind.sigs.k8s.io/#community 🙂 - ``` + Have a question, bug, or feature request? Let us know! https://kind.sigs.k8s.io/#community 🙂 + ``` -1. Point kubectl to the cluster +1. 
Point kubectl to the cluster: ```shell - kubectl cluster-info --context kind-psa-with-cluster-pss - ``` + kubectl cluster-info --context kind-psa-with-cluster-pss + ``` The output is similar to this: - ``` - Kubernetes control plane is running at https://127.0.0.1:63855 - CoreDNS is running at https://127.0.0.1:63855/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy + ``` + Kubernetes control plane is running at https://127.0.0.1:63855 + + CoreDNS is running at https://127.0.0.1:63855/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy - To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. - ``` + To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. + ``` 1. Create the following Pod specification for a minimal configuration in the default namespace: - ``` - cat < /tmp/pss/nginx-pod.yaml - apiVersion: v1 - kind: Pod - metadata: - name: nginx - spec: - containers: - - image: nginx - name: nginx - ports: - - containerPort: 80 - EOF - ``` + ``` + cat < /tmp/pss/nginx-pod.yaml + apiVersion: v1 + kind: Pod + metadata: + name: nginx + spec: + containers: + - image: nginx + name: nginx + ports: + - containerPort: 80 + EOF + ``` 1. Create the Pod in the cluster: ```shell - kubectl apply -f /tmp/pss/nginx-pod.yaml + kubectl apply -f /tmp/pss/nginx-pod.yaml ``` The output is similar to this: ``` @@ -315,9 +315,14 @@ following: ## Clean up -Run `kind delete cluster --name psa-with-cluster-pss` and -`kind delete cluster --name psa-wo-cluster-pss` to delete the clusters you -created. +Now delete the clusters which you created above by running the following command: + +```shell +kind delete cluster --name psa-with-cluster-pss +``` +```shell +kind delete cluster --name psa-wo-cluster-pss +``` ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/tutorials/security/ns-level-pss.md b/content/en/docs/tutorials/security/ns-level-pss.md index d9042596a53d7..64aaf64832a56 100644 --- a/content/en/docs/tutorials/security/ns-level-pss.md +++ b/content/en/docs/tutorials/security/ns-level-pss.md @@ -155,7 +155,11 @@ with no warnings. ## Clean up -Run `kind delete cluster --name psa-ns-level` to delete the cluster created. +Now delete the cluster which you created above by running the following command: + +```shell +kind delete cluster --name psa-ns-level +``` ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/tutorials/security/seccomp.md b/content/en/docs/tutorials/security/seccomp.md index 9e99e7da66f40..3a445afacfe41 100644 --- a/content/en/docs/tutorials/security/seccomp.md +++ b/content/en/docs/tutorials/security/seccomp.md @@ -265,6 +265,44 @@ docker exec -it kind-worker bash -c \ } ``` +## Create Pod that uses the container runtime default seccomp profile + +Most container runtimes provide a sane set of default syscalls that are allowed +or not. You can adopt these defaults for your workload by setting the seccomp +type in the security context of a pod or container to `RuntimeDefault`. + +{{< note >}} +If you have the `SeccompDefault` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +enabled, then Pods use the `RuntimeDefault` seccomp profile whenever +no other seccomp profile is specified. Otherwise, the default is `Unconfined`. 
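To see which seccomp mode a container actually received, one rough check is to read the `Seccomp:` field from `/proc/1/status` inside the container. This is only an illustrative probe: it assumes a running Pod named `default-pod` (as created further down) whose image ships a shell and `grep`.

```shell
# Hypothetical check; assumes a Pod named "default-pod" whose image includes grep.
kubectl exec default-pod -- grep Seccomp /proc/1/status

# Expected output when a seccomp filter (for example, RuntimeDefault) is in effect:
#   Seccomp: 2
# A value of 0 means the container is running unconfined.
```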
+{{< /note >}} + +Here's a manifest for a Pod that requests the `RuntimeDefault` seccomp profile +for all its containers: + +{{< codenew file="pods/security/seccomp/ga/default-pod.yaml" >}} + +Create that Pod: +```shell +kubectl apply -f https://k8s.io/examples/pods/security/seccomp/ga/default-pod.yaml +``` + +```shell +kubectl get pod default-pod +``` + +The Pod should be showing as having started successfully: +``` +NAME READY STATUS RESTARTS AGE +default-pod 1/1 Running 0 20s +``` + +Finally, now that you saw that work OK, clean up: + +```shell +kubectl delete pod default-pod --wait --now +``` + ## Create a Pod with a seccomp profile for syscall auditing To start off, apply the `audit.json` profile, which will log all syscalls of the @@ -493,43 +531,6 @@ kubectl delete service fine-pod --wait kubectl delete pod fine-pod --wait --now ``` -## Create Pod that uses the container runtime default seccomp profile - -Most container runtimes provide a sane set of default syscalls that are allowed -or not. You can adopt these defaults for your workload by setting the seccomp -type in the security context of a pod or container to `RuntimeDefault`. - -{{< note >}} -If you have the `SeccompDefault` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) enabled, then Pods use the `RuntimeDefault` seccomp profile whenever -no other seccomp profile is specified. Otherwise, the default is `Unconfined`. -{{< /note >}} - -Here's a manifest for a Pod that requests the `RuntimeDefault` seccomp profile -for all its containers: - -{{< codenew file="pods/security/seccomp/ga/default-pod.yaml" >}} - -Create that Pod: -```shell -kubectl apply -f https://k8s.io/examples/pods/security/seccomp/ga/default-pod.yaml -``` - -```shell -kubectl get pod default-pod -``` - -The Pod should be showing as having started successfully: -``` -NAME READY STATUS RESTARTS AGE -default-pod 1/1 Running 0 20s -``` - -Finally, now that you saw that work OK, clean up: - -```shell -kubectl delete pod default-pod --wait --now -``` - ## {{% heading "whatsnext" %}} You can learn more about Linux seccomp: diff --git a/content/en/docs/tutorials/services/connect-applications-service.md b/content/en/docs/tutorials/services/connect-applications-service.md index dfa3023063920..be8202cd97891 100644 --- a/content/en/docs/tutorials/services/connect-applications-service.md +++ b/content/en/docs/tutorials/services/connect-applications-service.md @@ -15,7 +15,12 @@ weight: 20 Now that you have a continuously running, replicated application you can expose it on a network. -Kubernetes assumes that pods can communicate with other pods, regardless of which host they land on. Kubernetes gives every pod its own cluster-private IP address, so you do not need to explicitly create links between pods or map container ports to host ports. This means that containers within a Pod can all reach each other's ports on localhost, and all pods in a cluster can see each other without NAT. The rest of this document elaborates on how you can run reliable services on such a networking model. +Kubernetes assumes that pods can communicate with other pods, regardless of which host they land on. +Kubernetes gives every pod its own cluster-private IP address, so you do not need to explicitly +create links between pods or map container ports to host ports. This means that containers within +a Pod can all reach each other's ports on localhost, and all pods in a cluster can see each other +without NAT. 
The rest of this document elaborates on how you can run reliable services on such a +networking model. This tutorial uses a simple nginx web server to demonstrate the concept. @@ -49,16 +54,32 @@ kubectl get pods -l run=my-nginx -o custom-columns=POD_IP:.status.podIPs [map[ip:10.244.2.5]] ``` -You should be able to ssh into any node in your cluster and use a tool such as `curl` to make queries against both IPs. Note that the containers are *not* using port 80 on the node, nor are there any special NAT rules to route traffic to the pod. This means you can run multiple nginx pods on the same node all using the same `containerPort`, and access them from any other pod or node in your cluster using the assigned IP address for the Service. If you want to arrange for a specific port on the host Node to be forwarded to backing Pods, you can - but the networking model should mean that you do not need to do so. +You should be able to ssh into any node in your cluster and use a tool such as `curl` +to make queries against both IPs. Note that the containers are *not* using port 80 on +the node, nor are there any special NAT rules to route traffic to the pod. This means +you can run multiple nginx pods on the same node all using the same `containerPort`, +and access them from any other pod or node in your cluster using the assigned IP +address for the Service. If you want to arrange for a specific port on the host +Node to be forwarded to backing Pods, you can - but the networking model should +mean that you do not need to do so. - -You can read more about the [Kubernetes Networking Model](/docs/concepts/cluster-administration/networking/#the-kubernetes-network-model) if you're curious. +You can read more about the +[Kubernetes Networking Model](/docs/concepts/cluster-administration/networking/#the-kubernetes-network-model) +if you're curious. ## Creating a Service -So we have pods running nginx in a flat, cluster wide, address space. In theory, you could talk to these pods directly, but what happens when a node dies? The pods die with it, and the Deployment will create new ones, with different IPs. This is the problem a Service solves. +So we have pods running nginx in a flat, cluster wide, address space. In theory, +you could talk to these pods directly, but what happens when a node dies? The pods +die with it, and the Deployment will create new ones, with different IPs. This is +the problem a Service solves. -A Kubernetes Service is an abstraction which defines a logical set of Pods running somewhere in your cluster, that all provide the same functionality. When created, each Service is assigned a unique IP address (also called clusterIP). This address is tied to the lifespan of the Service, and will not change while the Service is alive. Pods can be configured to talk to the Service, and know that communication to the Service will be automatically load-balanced out to some pod that is a member of the Service. +A Kubernetes Service is an abstraction which defines a logical set of Pods running +somewhere in your cluster, that all provide the same functionality. When created, +each Service is assigned a unique IP address (also called clusterIP). This address +is tied to the lifespan of the Service, and will not change while the Service is alive. +Pods can be configured to talk to the Service, and know that communication to the +Service will be automatically load-balanced out to some pod that is a member of the Service. 
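One rough way to observe that stable, load-balanced behavior, once the `my-nginx` Service created below exists, is to look up its cluster IP and send a few requests to it from a short-lived client Pod. The `busybox` image and the `tmp-client` name here are illustrative assumptions, not part of this tutorial.

```shell
# Illustrative sketch; assumes the my-nginx Deployment and Service from this page exist.
CLUSTER_IP="$(kubectl get svc my-nginx -o jsonpath='{.spec.clusterIP}')"

# Run a throwaway client Pod and hit the Service's stable IP a few times;
# the Service load-balances the requests across the member nginx Pods.
kubectl run tmp-client --rm -it --restart=Never --image=busybox:1.36 -- \
  sh -c "for i in 1 2 3; do wget -qO- http://${CLUSTER_IP}:80 | grep title; done"
```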
You can create a Service for your 2 nginx replicas with `kubectl expose`: @@ -112,8 +133,12 @@ Labels: run=my-nginx Annotations: Selector: run=my-nginx Type: ClusterIP +IP Family Policy: SingleStack +IP Families: IPv4 IP: 10.0.162.149 +IPs: 10.0.162.149 Port: 80/TCP +TargetPort: 80/TCP Endpoints: 10.244.2.5:80,10.244.3.4:80 Session Affinity: None Events: @@ -136,10 +161,12 @@ about the [service proxy](/docs/concepts/services-networking/service/#virtual-ip Kubernetes supports 2 primary modes of finding a Service - environment variables and DNS. The former works out of the box while the latter requires the [CoreDNS cluster addon](https://releases.k8s.io/{{< param "fullversion" >}}/cluster/addons/dns/coredns). + {{< note >}} -If the service environment variables are not desired (because possible clashing with expected program ones, -too many variables to process, only using DNS, etc) you can disable this mode by setting the `enableServiceLinks` -flag to `false` on the [pod spec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core). +If the service environment variables are not desired (because possible clashing +with expected program ones, too many variables to process, only using DNS, etc) +you can disable this mode by setting the `enableServiceLinks` flag to `false` on +the [pod spec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core). {{< /note >}} @@ -193,7 +220,8 @@ KUBERNETES_SERVICE_PORT_HTTPS=443 ### DNS -Kubernetes offers a DNS cluster addon Service that automatically assigns dns names to other Services. You can check if it's running on your cluster: +Kubernetes offers a DNS cluster addon Service that automatically assigns dns names +to other Services. You can check if it's running on your cluster: ```shell kubectl get services kube-dns --namespace=kube-system @@ -204,7 +232,13 @@ kube-dns ClusterIP 10.0.0.10 53/UDP,53/TCP 8m ``` The rest of this section will assume you have a Service with a long lived IP -(my-nginx), and a DNS server that has assigned a name to that IP. Here we use the CoreDNS cluster addon (application name `kube-dns`), so you can talk to the Service from any pod in your cluster using standard methods (e.g. `gethostbyname()`). If CoreDNS isn't running, you can enable it referring to the [CoreDNS README](https://github.com/coredns/deployment/tree/master/kubernetes) or [Installing CoreDNS](/docs/tasks/administer-cluster/coredns/#installing-coredns). Let's run another curl application to test this: +(my-nginx), and a DNS server that has assigned a name to that IP. Here we use +the CoreDNS cluster addon (application name `kube-dns`), so you can talk to the +Service from any pod in your cluster using standard methods (e.g. `gethostbyname()`). +If CoreDNS isn't running, you can enable it referring to the +[CoreDNS README](https://github.com/coredns/deployment/tree/master/kubernetes) +or [Installing CoreDNS](/docs/tasks/administer-cluster/coredns/#installing-coredns). +Let's run another curl application to test this: ```shell kubectl run curl --image=radial/busyboxplus:curl -i --tty @@ -227,13 +261,18 @@ Address 1: 10.0.162.149 ## Securing the Service -Till now we have only accessed the nginx server from within the cluster. Before exposing the Service to the internet, you want to make sure the communication channel is secure. For this, you will need: +Till now we have only accessed the nginx server from within the cluster. 
Before +exposing the Service to the internet, you want to make sure the communication +channel is secure. For this, you will need: * Self signed certificates for https (unless you already have an identity certificate) * An nginx server configured to use the certificates * A [secret](/docs/concepts/configuration/secret/) that makes the certificates accessible to pods -You can acquire all these from the [nginx https example](https://github.com/kubernetes/examples/tree/master/staging/https-nginx/). This requires having go and make tools installed. If you don't want to install those, then follow the manual steps later. In short: +You can acquire all these from the +[nginx https example](https://github.com/kubernetes/examples/tree/master/staging/https-nginx/). +This requires having go and make tools installed. If you don't want to install those, +then follow the manual steps later. In short: ```shell make keys KEY=/tmp/nginx.key CERT=/tmp/nginx.crt @@ -272,7 +311,9 @@ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /d/tmp/nginx.key -ou cat /d/tmp/nginx.crt | base64 cat /d/tmp/nginx.key | base64 ``` -Use the output from the previous commands to create a yaml file as follows. The base64 encoded value should all be on a single line. + +Use the output from the previous commands to create a yaml file as follows. +The base64 encoded value should all be on a single line. ```yaml apiVersion: "v1" @@ -296,7 +337,8 @@ NAME TYPE DATA AGE nginxsecret kubernetes.io/tls 2 1m ``` -Now modify your nginx replicas to start an https server using the certificate in the secret, and the Service, to expose both ports (80 and 443): +Now modify your nginx replicas to start an https server using the certificate +in the secret, and the Service, to expose both ports (80 and 443): {{< codenew file="service/networking/nginx-secure-app.yaml" >}} @@ -327,9 +369,12 @@ node $ curl -k https://10.244.3.5

Welcome to nginx!

``` -Note how we supplied the `-k` parameter to curl in the last step, this is because we don't know anything about the pods running nginx at certificate generation time, -so we have to tell curl to ignore the CName mismatch. By creating a Service we linked the CName used in the certificate with the actual DNS name used by pods during Service lookup. -Let's test this from a pod (the same secret is being reused for simplicity, the pod only needs nginx.crt to access the Service): +Note how we supplied the `-k` parameter to curl in the last step, this is because +we don't know anything about the pods running nginx at certificate generation time, +so we have to tell curl to ignore the CName mismatch. By creating a Service we +linked the CName used in the certificate with the actual DNS name used by pods +during Service lookup. Let's test this from a pod (the same secret is being reused +for simplicity, the pod only needs nginx.crt to access the Service): {{< codenew file="service/networking/curlpod.yaml" >}} @@ -391,7 +436,8 @@ $ curl https://: -k

Welcome to nginx!

``` -Let's now recreate the Service to use a cloud load balancer. Change the `Type` of `my-nginx` Service from `NodePort` to `LoadBalancer`: +Let's now recreate the Service to use a cloud load balancer. +Change the `Type` of `my-nginx` Service from `NodePort` to `LoadBalancer`: ```shell kubectl edit svc my-nginx @@ -407,8 +453,8 @@ curl https:// -k Welcome to nginx! ``` -The IP address in the `EXTERNAL-IP` column is the one that is available on the public internet. The `CLUSTER-IP` is only available inside your -cluster/private cloud network. +The IP address in the `EXTERNAL-IP` column is the one that is available on the public internet. +The `CLUSTER-IP` is only available inside your cluster/private cloud network. Note that on AWS, type `LoadBalancer` creates an ELB, which uses a (long) hostname, not an IP. It's too long to fit in the standard `kubectl get svc` diff --git a/content/en/examples/admin/sched/my-scheduler.yaml b/content/en/examples/admin/sched/my-scheduler.yaml index 5addf9e0e6ad3..fa1c65bf9a462 100644 --- a/content/en/examples/admin/sched/my-scheduler.yaml +++ b/content/en/examples/admin/sched/my-scheduler.yaml @@ -30,6 +30,20 @@ roleRef: name: system:volume-scheduler apiGroup: rbac.authorization.k8s.io --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: my-scheduler-extension-apiserver-authentication-reader + namespace: kube-system +roleRef: + kind: Role + name: extension-apiserver-authentication-reader + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: my-scheduler + namespace: kube-system +--- apiVersion: v1 kind: ConfigMap metadata: diff --git a/content/en/examples/application/ssa/nginx-deployment-replicas-only.yaml b/content/en/examples/application/ssa/nginx-deployment-replicas-only.yaml deleted file mode 100644 index 5544a14dcc4f3..0000000000000 --- a/content/en/examples/application/ssa/nginx-deployment-replicas-only.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx-deployment - labels: - app: nginx -spec: - selector: - matchLabels: - app: nginx - replicas: 3 - template: - metadata: - labels: - app: nginx - spec: - containers: - - name: nginx - image: nginx:1.14.2 diff --git a/content/en/examples/examples_test.go b/content/en/examples/examples_test.go index a087fd220250d..670131237dad9 100644 --- a/content/en/examples/examples_test.go +++ b/content/en/examples/examples_test.go @@ -517,9 +517,8 @@ func TestExampleObjectSchemas(t *testing.T) { "nginx-svc": {&api.Service{}}, }, "application/ssa": { - "nginx-deployment": {&apps.Deployment{}}, - "nginx-deployment-no-replicas": {&apps.Deployment{}}, - "nginx-deployment-replicas-only": {&apps.Deployment{}}, + "nginx-deployment": {&apps.Deployment{}}, + "nginx-deployment-no-replicas": {&apps.Deployment{}}, }, "application/web": { "web": {&api.Service{}, &apps.StatefulSet{}}, diff --git a/content/en/releases/patch-releases.md b/content/en/releases/patch-releases.md index 3b34935738664..dc80e19baf225 100644 --- a/content/en/releases/patch-releases.md +++ b/content/en/releases/patch-releases.md @@ -78,7 +78,6 @@ releases may also occur in between these. 
| Monthly Patch Release | Cherry Pick Deadline | Target date | | --------------------- | -------------------- | ----------- | -| January 2023 | 2023-01-13 | 2023-01-18 | | February 2023 | 2023-02-10 | 2023-02-15 | | March 2023 | 2023-03-10 | 2023-03-15 | | April 2023 | 2023-04-07 | 2023-04-12 | diff --git a/content/es/docs/concepts/configuration/configmap.md b/content/es/docs/concepts/configuration/configmap.md index ce16f99aca605..d3fb00cf9037c 100644 --- a/content/es/docs/concepts/configuration/configmap.md +++ b/content/es/docs/concepts/configuration/configmap.md @@ -204,7 +204,7 @@ Cuando un ConfigMap está siendo utilizado en un {{< glossary_tooltip text="volu El {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} comprueba si el ConfigMap montado está actualizado cada periodo de sincronización. Sin embargo, el {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} utiliza su caché local para obtener el valor actual del ConfigMap. El tipo de caché es configurable usando el campo `ConfigMapAndSecretChangeDetectionStrategy` en el -[KubeletConfiguration struct](https://github.com/kubernetes/kubernetes/blob/{{< param "docsbranch" >}}/staging/src/k8s.io/kubelet/config/v1beta1/types.go). +[KubeletConfiguration struct](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go). Un ConfigMap puede ser propagado por vista (default), ttl-based, o simplemente redirigiendo todas las consultas directamente a la API. Como resultado, el retraso total desde el momento que el ConfigMap es actualizado hasta el momento diff --git a/content/es/docs/concepts/configuration/secret.md b/content/es/docs/concepts/configuration/secret.md index 969078a67a4e6..1025ebd78519d 100644 --- a/content/es/docs/concepts/configuration/secret.md +++ b/content/es/docs/concepts/configuration/secret.md @@ -520,7 +520,7 @@ Cuando se actualiza un Secret que ya se está consumiendo en un volumen, las cla Kubelet está verificando si el Secret montado esta actualizado en cada sincronización periódica. Sin embargo, está usando su caché local para obtener el valor actual del Secret. El tipo de caché es configurable usando el (campo `ConfigMapAndSecretChangeDetectionStrategy` en -[KubeletConfiguration struct](https://github.com/kubernetes/kubernetes/blob/{{< param "docsbranch" >}}/staging/src/k8s.io/kubelet/config/v1beta1/types.go)). +[KubeletConfiguration struct](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go)). Puede ser propagado por el reloj (default), ttl-based, o simplemente redirigiendo todas las solicitudes a kube-apiserver directamente. 
Como resultado, el retraso total desde el momento en que se actualiza el Secret hasta el momento en que se proyectan las nuevas claves en el Pod puede ser tan largo como el periodo de sincronización de kubelet + retraso de diff --git a/content/fr/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/fr/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index b7b1639f80372..19149f43a4e5e 100644 --- a/content/fr/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/fr/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -1,6 +1,6 @@ --- -title: Création d'un Cluster a master unique avec kubeadm -description: Création d'un Cluster a master unique avec kubeadm +title: Création d'un Cluster à master unique avec kubeadm +description: Création d'un Cluster à master unique avec kubeadm content_type: task weight: 30 --- @@ -9,7 +9,7 @@ weight: 30 **kubeadm** vous aide à démarrer un cluster Kubernetes minimum, viable et conforme aux meilleures pratiques. Avec kubeadm, votre cluster -doit passer les [tests de Conformance Kubernetes](https://kubernetes.io/blog/2017/10/software-conformance-certification). +doit passer les [tests de Conformité Kubernetes](https://kubernetes.io/blog/2017/10/software-conformance-certification). Kubeadm prend également en charge d'autres fonctions du cycle de vie, telles que les mises à niveau, la rétrogradation et la gestion des [bootstrap tokens](/docs/reference/access-authn-authz/bootstrap-tokens/). diff --git a/content/hi/docs/reference/glossary/cidr.md b/content/hi/docs/reference/glossary/cidr.md new file mode 100644 index 0000000000000..4d03824ddcd30 --- /dev/null +++ b/content/hi/docs/reference/glossary/cidr.md @@ -0,0 +1,17 @@ +--- +title: सीआईडीआर (CIDR) +id: cidr +date: 2019-11-12 +full_link: +short_description: > + सीआईडीआर IP पतों के ब्लॉक का वर्णन करने के लिए एक संकेतन है और विभिन्न नेटवर्किंग कॉन्फ़िगरेशन में इसका भारी उपयोग किया जाता है। +aka: +tags: + - networking +--- + +सीआईडीआर (क्लासलेस इंटर-डोमेन रौटिंग) IP पतों के ब्लॉक का वर्णन करने के लिए एक संकेतन है और विभिन्न नेटवर्किंग कॉन्फ़िगरेशन में इसका भारी उपयोग किया जाता है। + + + +कुबेरनेट्स के संदर्भ में, प्रत्येक {{}} को आरंभिक पते के माध्यम से IP पतों की एक श्रृंखला और सीआईडीआर का उपयोग करके एक सबनेट मास्क सौंपा गया है। यह प्रत्येक {{}} को एक अद्वितीय IP पता निर्दिष्ट करने की अनुमति नोड्स को देता है। हालाँकि यह मूल रूप से IPv4 के लिए एक अवधारणा थी, IPv6 को शामिल करने के लिए सीआईडीआर का विस्तार किया गया है | diff --git a/content/hi/docs/reference/glossary/pod-disruption.md b/content/hi/docs/reference/glossary/pod-disruption.md new file mode 100644 index 0000000000000..ca08d1dc78b0a --- /dev/null +++ b/content/hi/docs/reference/glossary/pod-disruption.md @@ -0,0 +1,22 @@ +--- +title: पॉड विघटन (Pod Disruption) +id: pod-disruption +date: 2021-05-12 +full_link: /docs/concepts/workloads/pods/disruptions/ +short_description: > + पॉड विघटन वह प्रक्रिया है जिसके द्वारा नोड्स पर पॉड्स को स्वेच्छा से या अनैच्छिक रूप से समाप्त कर दिया जाता है। + +aka: +related: + - pod + - container +tags: + - operation +--- + +[पॉड विघटन](/docs/concepts/workloads/pods/disruptions/) वह प्रक्रिया है जिसके द्वारा नोड्स पर पॉड्स को स्वेच्छा से या अनैच्छिक रूप से समाप्त कर दिया जाता है। + + + +स्वैच्छिक विघटन एप्लीकेशन मालिक या फिर क्लस्टर प्रशासक अभिप्रायपूर्वक चालू करते है। +अनैच्छिक विघटन अनजाने में होते है और वो अपरिहार्य वजह से उत्पन्न हो सकते हैं जैसे कि नोड्स के पास संसाधन ख़तम हो जाना या आकस्मिक विलोपन। diff --git 
a/content/id/docs/concepts/cluster-administration/manage-deployment.md b/content/id/docs/concepts/cluster-administration/manage-deployment.md index d67da9c13eb1b..4bdc7f790e6dc 100644 --- a/content/id/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/id/docs/concepts/cluster-administration/manage-deployment.md @@ -319,7 +319,7 @@ Saat beban aplikasi naik maupun turun, mudah untuk mengubah kapasitas dengan `ku kubectl scale deployment/my-nginx --replicas=1 ``` ```shell -deployment.extensions/my-nginx scaled +deployment.apps/my-nginx scaled ``` Sekarang kamu hanya memiliki satu _pod_ yang dikelola oleh deployment. diff --git a/content/id/docs/concepts/configuration/secret.md b/content/id/docs/concepts/configuration/secret.md index c0a1e750bfcee..4a30e2b28ce04 100644 --- a/content/id/docs/concepts/configuration/secret.md +++ b/content/id/docs/concepts/configuration/secret.md @@ -559,7 +559,7 @@ apakah terdapat perubahan pada Secret yang telah di-_mount_. Meskipun demikian, proses pengecekan ini dilakukan dengan menggunakan _cache_ lokal untuk mendapatkan _value_ saat ini dari sebuah Secret. Tipe _cache_ yang ada dapat diatur dengan menggunakan (_field_ `ConfigMapAndSecretChangeDetectionStrategy` pada -[_struct_ KubeletConfiguration](https://github.com/kubernetes/kubernetes/blob/{{< param "docsbranch" >}}/staging/src/k8s.io/kubelet/config/v1beta1/types.go)). +[KubeletConfiguration](/docs/reference/config-api/kubelet-config.v1beta1/)). Mekanisme ini kemudian dapat diteruskan dengan mekanisme _watch_(_default_), ttl, atau melakukan pengalihan semua _request_ secara langsung pada kube-apiserver. Sebagai hasilnya, _delay_ total dari pertama kali Secret diubah hingga dilakukannya mekanisme diff --git a/content/ja/docs/concepts/architecture/nodes.md b/content/ja/docs/concepts/architecture/nodes.md index 6affe7ccb7096..101e4da14b5d5 100644 --- a/content/ja/docs/concepts/architecture/nodes.md +++ b/content/ja/docs/concepts/architecture/nodes.md @@ -92,7 +92,7 @@ kubectl cordon $ノード名 これは、再起動の準備中にアプリケーションからアプリケーションが削除されている場合でも、DaemonSetがマシンに属していることを前提としているためです。 {{< /note >}} -## ノードのステータス +## ノードのステータス {#node-status} ノードのステータスは以下の情報を含みます: @@ -176,7 +176,7 @@ CapacityとAllocatableについて深く知りたい場合は、ノード上で この情報はノードからkubeletを通じて取得され、Kubernetes APIに公開されます。 -## ハートビート +## ハートビート {#heartbeats} ハートビートは、Kubernetesノードから送信され、ノードが利用可能か判断するのに役立ちます。 以下の2つのハートビートがあります: * Nodeの`.status`の更新 @@ -191,7 +191,7 @@ kubeletが`NodeStatus`とLeaseオブジェクトの作成および更新を担 -## ノードコントローラー +## ノードコントローラー {#node-controller} ノード{{< glossary_tooltip text="コントローラー" term_id="controller" >}}は、ノードのさまざまな側面を管理するKubernetesのコントロールプレーンコンポーネントです。 @@ -206,7 +206,7 @@ kubeletが`NodeStatus`とLeaseオブジェクトの作成および更新を担 ノードコントローラーは、`--node-monitor-period`に設定された秒数ごとに各ノードの状態をチェックします。 -#### 信頼性 +#### 信頼性 {#rate-limits-on-eviction} ほとんどの場合、排除の速度は1秒あたり`--node-eviction-rate`に設定された数値(デフォルトは秒間0.1)です。つまり、10秒間に1つ以上のPodをノードから追い出すことはありません。 @@ -228,7 +228,7 @@ kubeletが`NodeStatus`とLeaseオブジェクトの作成および更新を担 サービスコントローラーの副次的な効果をもたらします。これにより、ロードバランサトラフィックの流入をcordonされたノードから効率的に除去する事ができます。 {{< /caution >}} -### ノードのキャパシティ +### ノードのキャパシティ {#node-capacity} Nodeオブジェクトはノードのリソースキャパシティ(CPUの数とメモリの量)を監視します。 [自己登録](#self-registration-of-nodes)したノードは、Nodeオブジェクトを作成するときにキャパシティを報告します。 @@ -241,7 +241,7 @@ Kubernetes{{< glossary_tooltip text="スケジューラー" term_id="kube-schedu Pod以外のプロセス用にリソースを明示的に予約したい場合は、[Systemデーモン用にリソースを予約](/docs/tasks/administer-cluster/reserve-compute-resources/#system-reserved)を参照してください。 {{< /note >}} -## ノードのトポロジー +## ノードのトポロジー {#node-topology} {{< 
feature-state state="alpha" for_k8s_version="v1.16" >}} `TopologyManager`の[フィーチャーゲート](/ja/docs/reference/command-line-tools-reference/feature-gates/)を有効にすると、 diff --git a/content/ja/docs/concepts/configuration/configmap.md b/content/ja/docs/concepts/configuration/configmap.md index f7d9ea7aa01d9..4956632a5d089 100644 --- a/content/ja/docs/concepts/configuration/configmap.md +++ b/content/ja/docs/concepts/configuration/configmap.md @@ -164,7 +164,7 @@ Pod内に複数のコンテナが存在する場合、各コンテナにそれ #### マウントしたConfigMapの自動的な更新 -ボリューム内で現在使用中のConfigMapが更新されると、射影されたキーも最終的に(eventually)更新されます。kubeletは定期的な同期のたびにマウントされたConfigMapが新しいかどうか確認します。しかし、kubeletが現在のConfigMapの値を取得するときにはローカルキャッシュを使用します。キャッシュの種類は、[KubeletConfiguration構造体](https://github.com/kubernetes/kubernetes/blob/{{< param "docsbranch" >}}/staging/src/k8s.io/kubelet/config/v1beta1/types.go)の中の`ConfigMapAndSecretChangeDetectionStrategy`フィールドで設定可能です。ConfigMapは、監視(デフォルト)、ttlベース、またはすべてのリクエストを直接APIサーバーへ単純にリダイレクトする方法のいずれかによって伝搬されます。その結果、ConfigMapが更新された瞬間から、新しいキーがPodに射影されるまでの遅延の合計は、最長でkubeletの同期期間+キャッシュの伝搬遅延になります。ここで、キャッシュの伝搬遅延は選択したキャッシュの種類に依存します(監視の伝搬遅延、キャッシュのttl、または0に等しくなります)。 +ボリューム内で現在使用中のConfigMapが更新されると、射影されたキーも最終的に(eventually)更新されます。kubeletは定期的な同期のたびにマウントされたConfigMapが新しいかどうか確認します。しかし、kubeletが現在のConfigMapの値を取得するときにはローカルキャッシュを使用します。キャッシュの種類は、[KubeletConfiguration構造体](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go)の中の`ConfigMapAndSecretChangeDetectionStrategy`フィールドで設定可能です。ConfigMapは、監視(デフォルト)、ttlベース、またはすべてのリクエストを直接APIサーバーへ単純にリダイレクトする方法のいずれかによって伝搬されます。その結果、ConfigMapが更新された瞬間から、新しいキーがPodに射影されるまでの遅延の合計は、最長でkubeletの同期期間+キャッシュの伝搬遅延になります。ここで、キャッシュの伝搬遅延は選択したキャッシュの種類に依存します(監視の伝搬遅延、キャッシュのttl、または0に等しくなります)。 環境変数として使用されるConfigMapは自動的に更新されないため、ポッドを再起動する必要があります。 ## イミュータブルなConfigMap {#configmap-immutable} diff --git a/content/ja/docs/concepts/configuration/secret.md b/content/ja/docs/concepts/configuration/secret.md index f06bb7edf812a..f0e9b248d117c 100644 --- a/content/ja/docs/concepts/configuration/secret.md +++ b/content/ja/docs/concepts/configuration/secret.md @@ -582,7 +582,7 @@ cat /etc/foo/password ボリュームとして使用されているSecretが更新されると、やがて割り当てられたキーも同様に更新されます。 kubeletは定期的な同期のたびにマウントされたSecretが新しいかどうかを確認します。 しかしながら、kubeletはSecretの現在の値の取得にローカルキャッシュを使用します。 -このキャッシュは[KubeletConfiguration struct](https://github.com/kubernetes/kubernetes/blob/{{< param "docsbranch" >}}/staging/src/k8s.io/kubelet/config/v1beta1/types.go)内の`ConfigMapAndSecretChangeDetectionStrategy`フィールドによって設定可能です。 +このキャッシュは[KubeletConfiguration struct](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go)内の`ConfigMapAndSecretChangeDetectionStrategy`フィールドによって設定可能です。 Secretはwatch(デフォルト)、TTLベース、単に全てのリクエストをAPIサーバーへリダイレクトすることのいずれかによって伝搬します。 結果として、Secretが更新された時点からPodに新しいキーが反映されるまでの遅延時間の合計は、kubeletの同期間隔 + キャッシュの伝搬遅延となります。 キャッシュの遅延は、キャッシュの種別により、それぞれwatchの伝搬遅延、キャッシュのTTL、0になります。 diff --git a/content/ja/docs/reference/_index.md b/content/ja/docs/reference/_index.md index 5888dd45110ad..cafca8fe448a6 100644 --- a/content/ja/docs/reference/_index.md +++ b/content/ja/docs/reference/_index.md @@ -32,7 +32,7 @@ content_type: concept * [kubectl](/ja/docs/reference/kubectl/overview/) - コマンドの実行やKubernetesクラスターの管理に使う主要なCLIツールです。 * [JSONPath](/ja/docs/reference/kubectl/jsonpath/) - kubectlで[JSONPath記法](https://goessner.net/articles/JsonPath/)を使うための構文ガイドです。 -* [kubeadm](ja/docs/reference/setup-tools/kubeadm/) - セキュアなKubernetesクラスターを簡単にプロビジョニングするためのCLIツールです。 +* 
[kubeadm](/ja/docs/reference/setup-tools/kubeadm/) - セキュアなKubernetesクラスターを簡単にプロビジョニングするためのCLIツールです。 ## コンポーネントリファレンス diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index 56e0df68ea170..d1ab5ef49b6bf 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -430,7 +430,7 @@ kubeletとコントロールプレーンの間や、他のKubernetesコンポー 対処方法: -* 定期的に[etcdをバックアップ](https://coreos.com/etcd/docs/latest/admin_guide.html)する。kubeadmが設定するetcdのデータディレクトリは、コントロールプレーンノードの`/var/lib/etcd`にあります。 +* 定期的に[etcdをバックアップ](https://etcd.io/docs/v3.5/op-guide/recovery/)する。kubeadmが設定するetcdのデータディレクトリは、コントロールプレーンノードの`/var/lib/etcd`にあります。 * 複数のコントロールプレーンノードを使用する。[高可用性トポロジーのオプション](/ja/docs/setup/production-environment/tools/kubeadm/ha-topology/)では、[より高い可用性](/ja/docs/setup/production-environment/tools/kubeadm/high-availability/)を提供するクラスターのトポロジーの選択について説明してます。 diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/ja/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index 07cbdfa4a6443..a9f5a5571f9c4 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -64,32 +64,6 @@ sysctl --system 詳細は[ネットワークプラグインの要件](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#network-plugin-requirements)を参照してください。 -## iptablesがnftablesバックエンドを使用しないようにする - -Linuxでは、カーネルのiptablesサブシステムの最新の代替品としてnftablesが利用できます。`iptables`ツールは互換性レイヤーとして機能し、iptablesのように動作しますが、実際にはnftablesを設定します。このnftablesバックエンドは現在のkubeadmパッケージと互換性がありません。(ファイアウォールルールが重複し、`kube-proxy`を破壊するためです。) - -もしあなたのシステムの`iptables`ツールがnftablesバックエンドを使用している場合、これらの問題を避けるために`iptables`ツールをレガシーモードに切り替える必要があります。これは、少なくともDebian 10(Buster)、Ubuntu 19.04、Fedora 29、およびこれらのディストリビューションの新しいリリースでのデフォルトです。RHEL 8はレガシーモードへの切り替えをサポートしていないため、現在のkubeadmパッケージと互換性がありません。 - -{{< tabs name="iptables_legacy" >}} -{{% tab name="DebianまたはUbuntu" %}} -```bash -# レガシーバイナリがインストールされていることを確認してください -sudo apt-get install -y iptables arptables ebtables - -# レガシーバージョンに切り替えてください。 -sudo update-alternatives --set iptables /usr/sbin/iptables-legacy -sudo update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy -sudo update-alternatives --set arptables /usr/sbin/arptables-legacy -sudo update-alternatives --set ebtables /usr/sbin/ebtables-legacy -``` -{{% /tab %}} -{{% tab name="Fedora" %}} -```bash -update-alternatives --set iptables /usr/sbin/iptables-legacy -``` -{{% /tab %}} -{{< /tabs >}} - ## 必須ポートの確認 ### コントロールプレーンノード diff --git a/content/ja/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md b/content/ja/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md index 8266904358b1f..772a33ba228c7 100644 --- a/content/ja/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md +++ b/content/ja/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md @@ -178,7 +178,7 @@ kubectl delete namespace default-mem-example * [Configure Default CPU Requests and Limits for a Namespace](/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace/) -* [Namespaceに対する最小および最大メモリー制約の構成](ja/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace/) +* 
[Namespaceに対する最小および最大メモリー制約の構成](/ja/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace/) * [Configure Minimum and Maximum CPU Constraints for a Namespace](/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace/) @@ -190,8 +190,8 @@ kubectl delete namespace default-mem-example ### アプリケーション開発者向け -* [コンテナおよびPodへのメモリーリソースの割り当て](ja/docs/tasks/configure-pod-container/assign-memory-resource/) +* [コンテナおよびPodへのメモリーリソースの割り当て](/ja/docs/tasks/configure-pod-container/assign-memory-resource/) -* [コンテナおよびPodへのCPUリソースの割り当て](ja/docs/tasks/configure-pod-container/assign-cpu-resource/) +* [コンテナおよびPodへのCPUリソースの割り当て](/ja/docs/tasks/configure-pod-container/assign-cpu-resource/) -* [PodにQuality of Serviceを設定する](ja/docs/tasks/configure-pod-container/quality-service-pod/) +* [PodにQuality of Serviceを設定する](/ja/docs/tasks/configure-pod-container/quality-service-pod/) diff --git a/content/pl/docs/tutorials/hello-minikube.md b/content/pl/docs/tutorials/hello-minikube.md index 0f766c039fc99..b3ce7ee2b4b7c 100644 --- a/content/pl/docs/tutorials/hello-minikube.md +++ b/content/pl/docs/tutorials/hello-minikube.md @@ -91,7 +91,7 @@ Użycie Deploymentu to rekomendowana metoda zarządzania tworzeniem i skalowanie wykorzystując podany obraz Dockera. ```shell - kubectl create deployment hello-node --image=registry.k8s.io/echoserver:1.4 + kubectl create deployment hello-node --image=registry.k8s.io/e2e-test-images/agnhost:2.39 -- /agnhost netexec --http-port=808 ``` 2. Sprawdź stan Deploymentu: diff --git a/content/pt-br/docs/contribute/review/_index.md b/content/pt-br/docs/contribute/review/_index.md new file mode 100644 index 0000000000000..377d75cb2cf73 --- /dev/null +++ b/content/pt-br/docs/contribute/review/_index.md @@ -0,0 +1,10 @@ +--- +title: Revisando mudanças +weight: 30 +--- + + + +Esta seção descreve como revisar conteúdo. + + diff --git a/content/pt-br/docs/contribute/style/_index.md b/content/pt-br/docs/contribute/style/_index.md new file mode 100644 index 0000000000000..bd8b09a106c0f --- /dev/null +++ b/content/pt-br/docs/contribute/style/_index.md @@ -0,0 +1,9 @@ +--- +title: Visão geral do estilo da documentação +main_menu: true +weight: 80 +--- + +Os tópicos desta seção fornecem orientações gerais para o estilo de escrita, +formatação e organização do conteúdo, e como utilizar as customizações do Hugo +específicas para a documentação do Kubernetes. diff --git a/content/pt-br/docs/reference/glossary/event.md b/content/pt-br/docs/reference/glossary/event.md new file mode 100644 index 0000000000000..2aed4c3226214 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/event.md @@ -0,0 +1,24 @@ +--- +title: Evento +id: event +date: 2022-01-16 +full_link: /docs/reference/kubernetes-api/cluster-resources/event-v1/ +short_description: > + Um relatório de um evento em algum lugar do cluster. Geralmente denota alguma mudança de estado no sistema. + +aka: +tags: +- core-object +- fundamental +--- +Cada evento é uma informação de um acontecimento em algum lugar do {{< glossary_tooltip text="cluster" term_id="cluster" >}}. +Geralmente denota alguma mudança de estado no sistema. + + + +Os eventos tem um tempo limitado de retenção, e os gatilhos e as mensagens podem evoluir com o tempo. +Os consumidores de um evento não devem confiar que a temporalidade de um evento com um determinado motivo reflita um gatilho com uma causa consistente, ou na existência de eventos continuados com aquele motivo. 
+ +Os eventos devem ser tratados como dados informativos, de melhor esforço, suplementares. + +No Kubernetes, a [auditoria](/docs/tasks/debug/debug-cluster/audit/) gera um tipo diferente de registro de evento (grupo de API `audit.k8s.io`). diff --git a/content/pt-br/docs/reference/glossary/storage-class.md b/content/pt-br/docs/reference/glossary/storage-class.md new file mode 100644 index 0000000000000..479bc50320dea --- /dev/null +++ b/content/pt-br/docs/reference/glossary/storage-class.md @@ -0,0 +1,18 @@ +--- +title: Classe de Armazenamento +id: storageclass +date: 2018-04-12 +full_link: /docs/concepts/storage/storage-classes +short_description: > + Uma Classe de Armazenamento oferece uma maneira para os administradores descreverem diferentes tipos de armazenamento disponíveis. + +aka: +tags: +- core-object +- storage +--- + Uma classe de Armazenamento oferece uma maneira para os administradores descreverem diferentes tipos de armazenamento disponíveis. + + + +Classes de Armazenamento podem mapear para níveis de qualidade de serviço, políticas de backup, ou políticas arbitrárias determinadas por administradores do cluster. Cada objeto StorageClass contém os campos `provisioner`, `parameters`, e `reclaimPolicy`, que são usados quando um {{< glossary_tooltip text="Volume Persistente" term_id="persistent-volume" >}} pertencente à classe precisa ser provisionada dinamicamente. Usuários podem solicitar uma classe específica usando o nome de um objeto StorageClass. diff --git a/content/pt-br/docs/reference/issues-security/security.md b/content/pt-br/docs/reference/issues-security/security.md deleted file mode 100644 index ac4cd388a4d95..0000000000000 --- a/content/pt-br/docs/reference/issues-security/security.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Informações de Segurança e Divulgação do Kubernetes -aliases: [/security/] -content_type: concept -weight: 20 ---- - - - -Esta página descreve informações de segurança e divulgação do Kubernetes. - - - -## Anúncios de Segurança - -Junte-se ao grupo [kubernetes-security-announce](https://groups.google.com/forum/#!forum/kubernetes-security-announce) para receber e-mails sobre segurança e os principais anúncios de API. - - -## Relatar uma Vulnerabilidade - -Somos extremamente gratos pelos pesquisadores de segurança e usuários que relatam vulnerabilidades para a Comunidade Open Source do Kubernetes. Todos os relatos são minuciosamente investigados por um conjunto de voluntários da comunidade. - -Para relatar, envie sua vulnerabilidade para o [programa de recompensa de bug do Kubernetes](https://hackerone.com/kubernetes). Isso possibilita a triagem e o tratamento da vulnerabilidade com tempos de resposta padronizados. - -Você também pode enviar um e-mail para a lista privada [security@kubernetes.io](mailto:security@kubernetes.io) com os detalhes de segurança e os detalhes esperados para [todos os relatos de bugs do Kubernetes](https://github.com/kubernetes/kubernetes/blob/master/.github/ISSUE_TEMPLATE/bug-report.yaml). - -Você pode criptografar seu e-mail para esta lista usando as chaves GPG dos [membros do Comitê de Resposta de Segurança](https://git.k8s.io/security/README.md#product-security-committee-psc). A criptografia usando GPG NÃO é requerida para fazer a divulgação. - -### Quando Devo Relatar uma Vulnerabilidade? 
- -- Você acha que você descobriu uma potencial vulnerabilidade de segurança no Kubernetes -- Você não tem certeza como a vulnerabilidade afeta o Kubernetes -- Você acha que você descobriu uma vulnerabilidade em outro projeto que o Kubernetes depende - - Para projetos com seus próprios processos de relatar e divulgar vulnerabilidade, por favor informe o problema diretamente para os responsáveis deste projeto - - -### Quando Não Devo Relatar uma Vulnerabilidade? - -- Você precisa de ajuda para ajustar os componentes do Kubernetes para segurança -- Você precisa de ajuda para aplicar as atualizações relacionadas a segurança -- Seu problema não é relacionado a segurança - -## Resposta de Vulnerabilidade de Segurança - -Cada relato é confirmado e analisado pelos membros do Comitê de Resposta de Segurança com 3 dias úteis. Isso iniciará o [Processo de Release de Segurança](https://git.k8s.io/security/security-release-process.md#disclosures). - -Qualquer informação de vulnerabilidade compartilhada com o Comitê de Resposta de Segurança fica dentro do projeto Kubernetes e não vai ser disseminada para outro projeto, a menos que seja necessário para corrigir o problema. - -À medida que o problema de segurança passa da triagem, para correção identificada e para o planejamento da release, vamos manter o relator atualizado. - -## Momento de Divulgação Pública - -Uma data de divulgação pública é negociada pelo Comitê de Resposta de Segurança do Kubernetes e pelo relator do bug. Preferimos divulgar totalmente o bug mais rápido possível assim que uma mitigação estiver disponível para os usuários. É razoável adiar a divulgação quando o bug ou a correção não estiver completamente compreendida, a solução não estiver bem testada, ou para coordenação do fornecedor. O prazo para a divulgação é de imediato (especialmente se já for conhecido publicamente) para algumas semanas. Para uma vulnerabilidade com uma mitigação simples, esperamos que a data do relato até a data de divulgação seja da ordem de 7 dias. O Comitê de Resposta de Segurança do Kubernetes possui a palavra final ao estabelecer uma data de divulgação. - diff --git a/content/pt-br/docs/reference/node/_index.md b/content/pt-br/docs/reference/node/_index.md new file mode 100644 index 0000000000000..e03e1e36daf9d --- /dev/null +++ b/content/pt-br/docs/reference/node/_index.md @@ -0,0 +1,16 @@ +--- +title: Informações de referência do Node +weight: 80 +no_list: true +--- + +Esta seção contém os seguintes tópicos de referência sobre os nodes: + +* O [checkpoint API](/docs/reference/node/kubelet-checkpoint-api/) do kubelet + +* Uma lista de [Artigos sobre a remoção do dockershim e sobre o uso de tempos de execução compatíveis com o CRI](/docs/reference/node/topics-on-dockershim-and-cri-compatible-runtimes/) + +Você também pode ler os detalhes de referência do node em outros lugares na +documentação do Kubernetes, incluindo: + +* [Dados de métricas do Node](/docs/reference/instrumentation/node-metrics). 
\ No newline at end of file diff --git a/content/pt-br/docs/tasks/access-application-cluster/configure-dns-cluster.md b/content/pt-br/docs/tasks/access-application-cluster/configure-dns-cluster.md new file mode 100644 index 0000000000000..5707855689591 --- /dev/null +++ b/content/pt-br/docs/tasks/access-application-cluster/configure-dns-cluster.md @@ -0,0 +1,13 @@ +--- +title: Configurar DNS em um cluster +weight: 120 +content_type: concept +--- + + +O Kubernetes oferece um complemento de DNS para os clusters, que a maioria dos ambientes suportados habilitam por padrão. Na versão do Kubernetes 1.11 e posterior, o CoreDNS é recomendado e instalado por padrão com o kubeadm. + + +Para mais informações sobre como configurar o CoreDNS para um cluster Kubernetes, veja [Personalização do Serviço de DNS](/docs/tasks/administer-cluster/dns-custom-nameservers/). Para ver um exemplo que demonstra como usar o DNS do Kubernetes com o kube-dns, consulte [Plugin de exemplo para DNS](https://github.com/kubernetes/examples/tree/master/staging/cluster-dns). + + diff --git a/content/pt-br/docs/tutorials/hello-minikube.md b/content/pt-br/docs/tutorials/hello-minikube.md index 0db5d20ddcea5..55d8fc6c5c283 100644 --- a/content/pt-br/docs/tutorials/hello-minikube.md +++ b/content/pt-br/docs/tutorials/hello-minikube.md @@ -40,7 +40,7 @@ Este tutorial disponibiliza uma imagem de contêiner que utiliza o NGINX para re {{< kat-button >}} {{< note >}} -Se você instalou o Minikube localmente, execute: `minikube start`. +Se você instalou o Minikube localmente, execute: `minikube start`. Antes de executar `minikube dashboard`, abra um novo terminal, execute `minikube dashboard` nele, e retorne para o terminal anterior. {{< /note >}} 2. Abra o painel do Kubernetes em um navegador: @@ -49,7 +49,32 @@ Se você instalou o Minikube localmente, execute: `minikube start`. minikube dashboard ``` -3. Apenas no ambiente do Katacoda: Na parte superior do terminal, clique em **Preview Port 30000**. +3. Apenas no ambiente do Katacoda: Na parte superior to painel do terminal, clique no sinal de mais (+), e selecione **Select port to view on Host 1**. + +4. Apenas no ambiente do Katacoda: Digite `30000`, e clique em **Display + Port**. + +{{< note >}} +O comando `dashboard` habilita o complemento (_addon_) de dashboard e abre o proxy no navegador padrão. +Voce pode criar recursos no Kubernetes, como Deployment e Service, pela dashboard. + +Se você está executando em um ambiente como administrador (_root_), veja [Acessando a Dashboard via URL](#acessando-a-dashboard-via-url). + +Por padrão, a dashboard só é accesível internamente pela rede virtual do Kubernetes. +O comando `dashboard` cria um proxy temporário que permite que a dashboard seja acessada externamente à rede virtual do Kubernetes. + +Para parar o proxy, execute `Ctrl+C` para terminar o processo. +A dashboard permanece sendo executada no cluster Kubernetes depois do comando ter sido terminado. +Você pode executar o comando `dashboard` novamente para criar outro proxy para accessar a dashboard +{{< /note >}} + +## Acessando a Dashboard via URL + +Caso não queira abrir o navegador, execute o comando `dashboard` com a flag `--url` para ver a URL: + +```shell +minikube dashboard --url +``` ## Criando um Deployment @@ -144,7 +169,7 @@ Por padrão, um Pod só é acessível utilizando o seu endereço IP interno no c 5. (**Apenas no ambiente do Katacoda**) Observe o número da porta com 5 dígitos exibido ao lado de `8080` na saída do serviço. 
Este número de porta é gerado aleatoriamente e pode ser diferente para você. Digite seu número na caixa de texto do número da porta e clique em **Display Port**. Usando o exemplo anterior, você digitaria `30369`. -Isso abre uma janela do navegador, acessa o seu aplicativo e mostra o retorno da requisição. + Isso abre uma janela do navegador, acessa o seu aplicativo e mostra o retorno da requisição. ## Habilitando Complementos (addons) @@ -255,4 +280,3 @@ minikube delete * Aprender mais sobre [Deployment objects](/docs/concepts/workloads/controllers/deployment/). * Aprender mais sobre [Deploying applications](/docs/tasks/run-application/run-stateless-application-deployment/). * Aprender mais sobre [Service objects](/docs/concepts/services-networking/service/). - diff --git a/content/pt-br/includes/task-tutorial-prereqs.md b/content/pt-br/includes/task-tutorial-prereqs.md index 66b20b849f2f1..be00c38c8b0d0 100644 --- a/content/pt-br/includes/task-tutorial-prereqs.md +++ b/content/pt-br/includes/task-tutorial-prereqs.md @@ -1,6 +1,4 @@ -Você precisa de um cluster Kubernetes e a ferramenta de linha de comando kubectl -precisa estar configurada para acessar o seu cluster. Se você ainda não tem um -cluster, pode criar um usando o [minikube](/docs/tasks/tools/#minikube) -ou você pode usar um dos seguintes ambientes: +Você precisa ter um cluster do Kubernetes e a ferramenta de linha de comando kubectl deve estar configurada para se comunicar com seu cluster. É recomendado executar esse tutorial em um cluster com pelo menos dois nós que não estejam atuando como hosts de camada de gerenciamento. Se você ainda não possui um cluster, pode criar um usando o [minikube](/docs/tasks/tools/#minikube) ou pode usar um dos seguintes ambientes: + * [Killercoda](https://killercoda.com/playgrounds/scenario/kubernetes) * [Play with Kubernetes](http://labs.play-with-k8s.com/) diff --git a/content/ru/_index.html b/content/ru/_index.html index 8763c548ecaaa..305f652aea368 100644 --- a/content/ru/_index.html +++ b/content/ru/_index.html @@ -43,12 +43,12 @@

О сложности миграции 150+ микросервисов в Kubernetes

- Посетите KubeCon в Северной Америке, 24-28 октября 2022 года + Посетите KubeCon + CloudNativeCon в Европе, 18-21 апреля 2023 года



- Посетите KubeCon в Европе, 17-21 апреля 2023 года + Посетите KubeCon + CloudNativeCon в Северной Америке, 6-9 ноября 2023 года
diff --git a/content/ru/docs/concepts/cluster-administration/proxies.md b/content/ru/docs/concepts/cluster-administration/proxies.md new file mode 100644 index 0000000000000..a2aa66a4aae2a --- /dev/null +++ b/content/ru/docs/concepts/cluster-administration/proxies.md @@ -0,0 +1,62 @@ +--- +title: Типы прокси-серверов в Kubernetes +content_type: concept +weight: 90 +--- + + +На этой странице рассказывается о различных типах прокси-серверов, которые используются в Kubernetes. + + + + +## Прокси-серверы + +При работе с Kubernetes можно столкнуться со следующими типами прокси-серверов: + +1. [kubectl](/docs/tasks/access-application-cluster/access-cluster/#directly-accessing-the-rest-api): + + - работает на локальной машине или в Pod'е; + - поднимает канал связи от локальной машины к интерфейсу API-сервера Kubernetes; + - данные от клиента к прокси-серверу передаются по HTTP; + - данные от прокси к серверу API передаются по HTTPS; + - отвечает за обнаружение сервера API; + - добавляет заголовки аутентификации. + +1. [Прокси-сервер API](/docs/tasks/access-application-cluster/access-cluster-services/#discovering-builtin-services): + + - бастион, встроенный в API-сервер; + - подключает пользователя за пределами кластера к IP-адресам кластера, которые в ином случае могут оказаться недоступными; + - входит в процессы сервера API; + - данные от клиента к прокси-серверу передаются по HTTPS (или по HTTP, если сервер API настроен соответствующим образом); + - данные от прокси-сервера к цели передаются по HTTP или HTTPS в зависимости от настроек прокси; + - используется для доступа к узлам, Pod'ам или сервисам; + - при подключении к сервису выступает балансировщиком нагрузки. + +1. [kube proxy](/docs/concepts/services-networking/service/#ips-and-vips): + + - работает на каждом узле; + - обрабатывает трафик UDP, TCP и SCTP; + - "не понимает" HTTP; + - выполняет функции балансировщика нагрузки; + - используется только для доступа к сервисам. + +1. Прокси-сервер/балансировщик нагрузки перед API-сервером(-ами): + + - наличие и тип (например, nginx) определяется конфигурацией кластера; + - располагается между клиентами и одним или несколькими серверами API; + - балансирует запросы при наличии нескольких серверов API. + +1. Облачные балансировщики нагрузки на внешних сервисах: + + - предоставляются некоторыми облачными провайдерами (например, AWS ELB, Google Cloud Load Balancer); + - создаются автоматически для сервисов Kubernetes с типом `LoadBalancer`; + - как правило, поддерживают только UDP/TCP; + - наличие поддержки SCTP зависит от реализации балансировщика нагрузки облачного провайдера; + - реализация варьируется в зависимости от поставщика облачных услуг. + +Пользователи Kubernetes, как правило, в своей работе сталкиваются только с прокси-серверами первых двух типов. За настройку остальных типов обычно отвечает администратор кластера. + +## Запросы на перенаправления + +На смену функциям перенаправления (редиректам) пришли прокси-серверы. Перенаправления устарели. diff --git a/content/zh-cn/_index.html b/content/zh-cn/_index.html index 949301196f5a4..8958fae537ca6 100644 --- a/content/zh-cn/_index.html +++ b/content/zh-cn/_index.html @@ -66,14 +66,14 @@
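The new Russian page added above lists the proxy types but does not show how the first one is invoked. As a hedged illustration only, a typical `kubectl proxy` session could look roughly like this; the port number is an arbitrary choice and the `/api/` path is just one example of a proxied request:

```shell
# Start a local proxy to the Kubernetes API server (the first proxy type described above).
# The port is an arbitrary example.
kubectl proxy --port=8080 &

# Requests to localhost are forwarded to the API server with authentication headers added.
curl http://localhost:8080/api/
```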

将 150+ 微服务迁移到 Kubernetes 上的挑战



- - 参加 2022 年 10 月 24-28 日的北美 KubeCon + + 参加 2023 年 4 月 18-21 日的欧洲 KubeCon + CloudNativeCon



- - 参加 2023 年 4 月 17-21 日的欧洲 KubeCon + + 参加 2023 年 11 月 6-9 日的北美 KubeCon + CloudNativeCon
diff --git a/content/zh-cn/blog/_posts/2022-12-27-cpumanager-goes-GA.md b/content/zh-cn/blog/_posts/2022-12-27-cpumanager-goes-GA.md new file mode 100644 index 0000000000000..9e7b876d103b5 --- /dev/null +++ b/content/zh-cn/blog/_posts/2022-12-27-cpumanager-goes-GA.md @@ -0,0 +1,154 @@ +--- +layout: blog +title: 'Kubernetes v1.26:CPUManager 正式发布' +date: 2022-12-27 +slug: cpumanager-ga +--- + + + +**作者:** Francesco Romani (Red Hat) + +**译者:** Michael Yao (DaoCloud) + + +CPU 管理器是 kubelet 的一部分;kubelet 是 Kubernetes 的节点代理,能够让用户给容器分配独占 CPU。 +CPU 管理器自从 Kubernetes v1.10 [进阶至 Beta](/blog/2018/07/24/feature-highlight-cpu-manager/), +已证明了它本身的可靠性,能够充分胜任将独占 CPU 分配给容器,因此采用率稳步增长, +使其成为性能关键型和低延迟场景的基本组件。随着时间的推移,大多数变更均与错误修复或内部重构有关, +以下列出了几个值得关注、用户可见的变更: + + +- [支持显式保留 CPU](https://github.com/Kubernetes/Kubernetes/pull/83592): + 之前已经可以请求为系统资源(包括 kubelet 本身)保留给定数量的 CPU,这些 CPU 将不会被用于独占 CPU 分配。 + 现在还可以显式选择保留哪些 CPU,而不是让 kubelet 自动拣选 CPU。 +- 使用 kubelet 本地 + [PodResources API](/zh-cn/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#monitoring-device-plugin-resources) + [向容器报告独占分配的 CPU](https://github.com/Kubernetes/Kubernetes/pull/97415),就像已为设备所做的一样。 +- [优化系统资源的使用](https://github.com/Kubernetes/Kubernetes/pull/101771),消除不必要的 sysfs 变更。 + + +CPU 管理器达到了“能胜任”的水平,因此在 Kubernetes v1.26 中,它进阶至正式发布(GA)状态。 + + +## CPU 管理器的自定义选项 {#cpu-managed-customization} + +CPU 管理器支持两种操作模式,使用其**策略**进行配置。 +使用 `none` 策略,CPU 管理器将 CPU 分配给容器,除了 Pod 规约中设置的(可选)配额外,没有任何特定限制。 +使用 `static` 策略,假设 Pod 属于 Guaranteed QoS 类,并且该 Pod 中的每个容器都请求一个整数核数的 vCPU, +则 CPU 管理器将独占分配 CPU。独占分配意味着(无论是来自同一个 Pod 还是来自不同的 Pod)其他容器都不会被调度到该 CPU 上。 + + +这种简单的操作模型很好地服务了用户群体,但随着 CPU 管理器越来越成熟, +用户开始关注更复杂的使用场景以及如何更好地支持这些使用场景。 + +社区没有添加更多策略,而是意识到几乎所有新颖的用例都是 `static` CPU 管理器策略所赋予的一些行为变化。 +因此,决定添加[调整静态策略行为的选项](https://github.com/Kubernetes/enhancements/tree/master/keps/sig-node/2625-cpumanager-policies-thread-placement #proposed-change)。 +这些选项都达到了不同程度的成熟度,类似于其他的所有 Kubernetes 特性, +为了能够被接受,每个新选项在禁用时都能提供向后兼容的行为,并能在需要进行交互时记录彼此如何交互。 + + +这使得 Kubernetes 项目能够将 CPU 管理器核心组件和核心 CPU 分配算法进阶至 GA,同时也开启了该领域新的实验时代。 +在 Kubernetes v1.26 中,CPU +管理器支持[三个不同的策略选项](/zh-cn/docs/tasks/administer-cluster/cpu-management-policies.md#static-policy-options): + + +`full-pcpus-only` +: 将 CPU 管理器核心分配算法限制为仅支持完整的物理核心,从而减少允许共享核心的硬件技术带来的嘈杂邻居问题。 + +`distribute-cpus-across-numa` +: 驱动 CPU 管理器跨 NUMA 节点均匀分配 CPU,以应对需要多个 NUMA 节点来满足分配的情况。 + +`align-by-socket` +: 更改 CPU 管理器将 CPU 分配给容器的方式:考虑 CPU 按插槽而不是 NUMA 节点边界对齐。 + + +## 后续发展 {#further-development} + +在主要 CPU 管理器特性进阶后,每个现有的策略选项将遵循其进阶过程,独立于 CPU 管理器和其他选项。 +添加新选项的空间虽然存在,但随着对更高灵活性的需求不断增长,CPU 管理器及其策略选项当前所提供的灵活性也有不足。 + +社区中正在讨论如何将 CPU 管理器和当前属于 kubelet 可执行文件的其他资源管理器拆分为可插拔的独立 kubelet 插件。 +如果你对这项努力感兴趣,请加入 SIG Node 交流频道(Slack、邮件列表、每周会议)进行讨论。 + + +## 进一步阅读 {#further-reading} + +请查阅[控制节点上的 CPU 管理策略](/zh-cn/docs/tasks/administer-cluster/cpu-management-policies/)任务页面以了解有关 +CPU 管理器的更多信息及其如何适配其他节点级别资源管理器。 + + +## 参与其中 {#getting-involved} + +此特性由 [SIG Node](https://github.com/Kubernetes/community/blob/master/sig-node/README.md) 社区驱动。 +请加入我们与社区建立联系,就上述特性和更多内容分享你的想法和反馈。我们期待你的回音! 
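To make the policy options described in the post above more concrete, here is a minimal, hedged sketch of a kubelet configuration that enables the `static` policy together with the `full-pcpus-only` option; the reserved CPU list is an illustrative assumption and would have to match the actual node topology:

```yaml
# Sketch of a KubeletConfiguration fragment for the static CPU manager policy.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cpuManagerPolicy: static
cpuManagerPolicyOptions:
  full-pcpus-only: "true"   # restrict allocations to whole physical cores
# The static policy needs some CPUs reserved for system daemons;
# "0,1" is only an example value.
reservedSystemCPUs: "0,1"
```

Changing the CPU manager policy on an existing node generally also involves draining the node and removing the old CPU manager state file, as covered in the task page linked in the post.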
diff --git a/content/zh-cn/docs/concepts/architecture/nodes.md b/content/zh-cn/docs/concepts/architecture/nodes.md index c7b9c247a23be..3c85499a27d43 100644 --- a/content/zh-cn/docs/concepts/architecture/nodes.md +++ b/content/zh-cn/docs/concepts/architecture/nodes.md @@ -15,7 +15,7 @@ weight: 10 -Kubernetes 通过将容器放入在节点(Node)上运行的 Pod 中来执行你的工作负载。 +Kubernetes 通过将容器放入在节点(Node)上运行的 Pod +中来执行你的{{< glossary_tooltip text="工作负载" term_id="workload" >}}。 节点可以是一个虚拟机或者物理机器,取决于所在的集群配置。 每个节点包含运行 {{< glossary_tooltip text="Pod" term_id="pod" >}} 所需的服务; 这些节点由{{< glossary_tooltip text="控制面" term_id="control-plane" >}}负责管理。 @@ -372,7 +373,7 @@ Condition,被保护起来的节点在其规约中被标记为不可调度(Un In the Kubernetes API, a node's condition is represented as part of the `.status` of the Node resource. For example, the following JSON structure describes a healthy node: --> -在 Kubernetes API 中,节点的状况表示节点资源中`.status` 的一部分。 +在 Kubernetes API 中,节点的状况表示节点资源中 `.status` 的一部分。 例如,以下 JSON 结构描述了一个健康节点: ```json @@ -425,7 +426,7 @@ names. --> 节点控制器在确认 Pod 在集群中已经停止运行前,不会强制删除它们。 你可以看到可能在这些无法访问的节点上运行的 Pod 处于 `Terminating` 或者 `Unknown` 状态。 -如果 kubernetes 不能基于下层基础设施推断出某节点是否已经永久离开了集群, +如果 Kubernetes 不能基于下层基础设施推断出某节点是否已经永久离开了集群, 集群管理员可能需要手动删除该节点对象。 从 Kubernetes 删除节点对象将导致 API 服务器删除节点上所有运行的 Pod 对象并释放它们的名字。 @@ -1029,7 +1030,7 @@ section [Graceful Node Shutdown](#graceful-node-shutdown) for more details. 当某节点关闭但 kubelet 的节点关闭管理器未检测到这一事件时, -在那个已关闭节点上、属于 StatefulSet 的 Pod 将停滞于终止状态,并且不能移动到新的运行节点上。 +在那个已关闭节点上、属于 {{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}} +的 Pod 将停滞于终止状态,并且不能移动到新的运行节点上。 这是因为已关闭节点上的 kubelet 已不存在,亦无法删除 Pod, 因此 StatefulSet 无法创建同名的新 Pod。 如果 Pod 使用了卷,则 VolumeAttachments 不会从原来的已关闭节点上删除, @@ -1054,14 +1056,15 @@ these pods will be stuck in terminating status on the shutdown node forever. To mitigate the above situation, a user can manually add the taint `node.kubernetes.io/out-of-service` with either `NoExecute` or `NoSchedule` effect to a Node marking it out-of-service. If the `NodeOutOfServiceVolumeDetach`[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) -is enabled on `kube-controller-manager`, and a Node is marked out-of-service with this taint, the +is enabled on {{< glossary_tooltip text="kube-controller-manager" term_id="kube-controller-manager" >}}, and a Node is marked out-of-service with this taint, the pods on the node will be forcefully deleted if there are no matching tolerations on it and volume detach operations for the pods terminating on the node will happen immediately. This allows the Pods on the out-of-service node to recover quickly on a different node. 
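The English passage at the end of the hunk above describes a manual mitigation; as a hedged illustration, applying and later removing that taint with `kubectl` could look like this (the node name and the taint value are placeholders):

```shell
# Mark the shut-down node as out-of-service (placeholder node name).
kubectl taint nodes my-node node.kubernetes.io/out-of-service=nodeshutdown:NoExecute

# Remove the taint again once the node has been recovered or deleted.
kubectl taint nodes my-node node.kubernetes.io/out-of-service=nodeshutdown:NoExecute-
```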
--> 为了缓解上述情况,用户可以手动将具有 `NoExecute` 或 `NoSchedule` 效果的 `node.kubernetes.io/out-of-service` 污点添加到节点上,标记其无法提供服务。 -如果在 `kube-controller-manager` 上启用了 `NodeOutOfServiceVolumeDetach` +如果在 {{< glossary_tooltip text="kube-controller-manager" term_id="kube-controller-manager" >}} +上启用了 `NodeOutOfServiceVolumeDetach` [特性门控](/zh-cn/docs/reference/command-line-tools-reference/feature-gates/), 并且节点被通过污点标记为无法提供服务,如果节点 Pod 上没有设置对应的容忍度, 那么这样的 Pod 将被强制删除,并且该在节点上被终止的 Pod 将立即进行卷分离操作。 @@ -1186,15 +1189,21 @@ see [KEP-2400](https://github.com/kubernetes/enhancements/issues/2400) and its ## {{% heading "whatsnext" %}} -* 进一步了解节点[组件](/zh-cn/docs/concepts/overview/components/#node-components)。 -* 阅读 [Node 的 API 定义](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#node-v1-core)。 -* 阅读架构设计文档中有关 +进一步了解以下资料: + +* 构成节点的[组件](/zh-cn/docs/concepts/overview/components/#node-components)。 +* [Node 的 API 定义](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#node-v1-core)。 +* 架构设计文档中有关 [Node](https://git.k8s.io/design-proposals-archive/architecture/architecture.md#the-kubernetes-node) 的章节。 -* 了解[污点和容忍度](/zh-cn/docs/concepts/scheduling-eviction/taint-and-toleration/)。 +* [污点和容忍度](/zh-cn/docs/concepts/scheduling-eviction/taint-and-toleration/)。 +* [节点资源管理器](/zh-cn/docs/concepts/policy/node-resource-managers/)。 +* [Windows 节点的资源管理](/zh-cn/docs/concepts/configuration/windows-resource-management/)。 diff --git a/content/zh-cn/docs/concepts/cluster-administration/manage-deployment.md b/content/zh-cn/docs/concepts/cluster-administration/manage-deployment.md index 37ddd62048f9c..6d0681ace14cb 100644 --- a/content/zh-cn/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/zh-cn/docs/concepts/cluster-administration/manage-deployment.md @@ -14,7 +14,11 @@ weight: 40 你已经部署了应用并通过服务暴露它。然后呢? Kubernetes 提供了一些工具来帮助管理你的应用部署,包括扩缩容和更新。 @@ -27,7 +31,9 @@ Kubernetes 提供了一些工具来帮助管理你的应用部署,包括扩缩 ## 组织资源配置 {#organizing-resource-config} @@ -46,13 +52,15 @@ Multiple resources can be created the same way as a single resource: kubectl apply -f https://k8s.io/examples/application/nginx-app.yaml ``` -``` +```none service/my-nginx-svc created deployment.apps/my-nginx created ``` 资源将按照它们在文件中的顺序创建。 因此,最好先指定服务,这样在控制器(例如 Deployment)创建 Pod 时能够 @@ -64,13 +72,18 @@ The resources will be created in the order they appear in the file. Therefore, i `kubectl apply` 也接受多个 `-f` 参数: ```shell -kubectl apply -f https://k8s.io/examples/application/nginx/nginx-svc.yaml -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml +kubectl apply -f https://k8s.io/examples/application/nginx/nginx-svc.yaml \ + -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml ``` 建议的做法是,将同一个微服务或同一应用层相关的资源放到同一个文件中, 将同一个应用相关的所有文件按组存放到同一个目录中。 @@ -79,17 +92,19 @@ A URL can also be specified as a configuration source, which is handy for deploy 还可以使用 URL 作为配置源,便于直接使用已经提交到 GitHub 上的配置文件进行部署: ```shell -kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/main/content/zh-cn/examples/application/nginx/nginx-deployment.yaml +kubectl apply -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml ``` -``` +```none deployment.apps/my-nginx created ``` ## kubectl 中的批量操作 {#bulk-operations-in-kubectl} @@ -101,13 +116,14 @@ Resource creation isn't the only operation that `kubectl` can perform in bulk. 
I kubectl delete -f https://k8s.io/examples/application/nginx-app.yaml ``` -``` +```none deployment.apps "my-nginx" deleted service "my-nginx-svc" deleted ``` 在仅有两种资源的情况下,你可以使用"资源类型/资源名"的语法在命令行中 同时指定这两个资源: @@ -117,7 +133,8 @@ kubectl delete deployments/my-nginx services/my-nginx-svc ``` 对于资源数目较大的情况,你会发现使用 `-l` 或 `--selector` 指定筛选器(标签查询)能很容易根据标签筛选资源: @@ -126,13 +143,14 @@ For larger numbers of resources, you'll find it easier to specify the selector ( kubectl delete deployment,services -l app=nginx ``` -``` +```none deployment.apps "my-nginx" deleted service "my-nginx-svc" deleted ``` 由于 `kubectl` 用来输出资源名称的语法与其所接受的资源名称语法相同, 你可以使用 `$()` 或 `xargs` 进行链式操作: @@ -142,32 +160,37 @@ kubectl get $(kubectl create -f docs/concepts/cluster-administration/nginx/ -o n kubectl create -f docs/concepts/cluster-administration/nginx/ -o name | grep service | xargs -i kubectl get {} ``` -``` +```none NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE my-nginx-svc LoadBalancer 10.0.0.208 80/TCP 0s ``` 上面的命令中,我们首先使用 `examples/application/nginx/` 下的配置文件创建资源, 并使用 `-o name` 的输出格式(以"资源/名称"的形式打印每个资源)打印所创建的资源。 然后,我们通过 `grep` 来过滤 "service",最后再打印 `kubectl get` 的内容。 如果你碰巧在某个路径下的多个子路径中组织资源,那么也可以递归地在所有子路径上 执行操作,方法是在 `--filename,-f` 后面指定 `--recursive` 或者 `-R`。 例如,假设有一个目录路径为 `project/k8s/development`,它保存开发环境所需的 所有{{< glossary_tooltip text="清单" term_id="manifest" >}},并按资源类型组织: -``` +```none project/k8s/development ├── configmap │   └── my-configmap.yaml @@ -178,7 +201,9 @@ project/k8s/development ``` 默认情况下,对 `project/k8s/development` 执行的批量操作将停止在目录的第一级, 而不是处理所有子目录。 @@ -188,7 +213,7 @@ By default, performing a bulk operation on `project/k8s/development` will stop a kubectl apply -f project/k8s/development ``` -``` +```none error: you must provide one or more resources by argument or filename (.json|.yaml|.yml|stdin) ``` @@ -201,14 +226,15 @@ Instead, specify the `--recursive` or `-R` flag with the `--filename,-f` flag as kubectl apply -f project/k8s/development --recursive ``` -``` +```none configmap/my-config created deployment.apps/my-deployment created persistentvolumeclaim/my-pvc created ``` @@ -221,7 +247,7 @@ The `--recursive` flag also works when multiple `-f` arguments are provided: kubectl apply -f project/k8s/namespaces -f project/k8s/development --recursive ``` -``` +```none namespace/development created namespace/staging created configmap/my-config created @@ -230,15 +256,16 @@ persistentvolumeclaim/my-pvc created ``` -如果你有兴趣进一步学习关于 `kubectl` 的内容,请阅读 -[命令行工具(kubectl)](/zh-cn/docs/reference/kubectl/)。 +如果你有兴趣进一步学习关于 `kubectl` 的内容,请阅读[命令行工具(kubectl)](/zh-cn/docs/reference/kubectl/)。 ## 有效地使用标签 {#using-labels-effectively} @@ -246,7 +273,9 @@ The examples we've used so far apply at most a single label to any resource. 
The 在许多情况下,应使用多个标签来区分集合。 例如,不同的应用可能会为 `app` 标签设置不同的值。 但是,类似 [guestbook 示例](https://github.com/kubernetes/examples/tree/master/guestbook/) @@ -259,7 +288,8 @@ For instance, different applications would use different values for the `app` la ``` Redis 的主节点和从节点会有不同的 `tier` 标签,甚至还有一个额外的 `role` 标签: @@ -292,7 +322,7 @@ kubectl apply -f examples/guestbook/all-in-one/guestbook-all-in-one.yaml kubectl get pods -Lapp -Ltier -Lrole ``` -``` +```none NAME READY STATUS RESTARTS AGE APP TIER ROLE guestbook-fe-4nlpb 1/1 Running 0 1m guestbook frontend guestbook-fe-ght6d 1/1 Running 0 1m guestbook frontend @@ -308,7 +338,7 @@ my-nginx-o0ef1 1/1 Running 0 29m nginx kubectl get pods -lapp=guestbook,role=slave ``` -``` +```none NAME READY STATUS RESTARTS AGE guestbook-redis-slave-2q2yf 1/1 Running 0 3m guestbook-redis-slave-qgazl 1/1 Running 0 3m @@ -317,7 +347,11 @@ guestbook-redis-slave-qgazl 1/1 Running 0 3m ## 金丝雀部署(Canary Deployments) {#canary-deployments} @@ -335,51 +369,56 @@ The primary, stable release would have a `track` label with value as `stable`: 主要稳定的发行版将有一个 `track` 标签,其值为 `stable`: -```yaml - name: frontend - replicas: 3 - ... - labels: - app: guestbook - tier: frontend - track: stable - ... - image: gb-frontend:v3 +```none +name: frontend +replicas: 3 +... +labels: + app: guestbook + tier: frontend + track: stable +... +image: gb-frontend:v3 ``` 然后,你可以创建 guestbook 前端的新版本,让这些版本的 `track` 标签带有不同的值 (即 `canary`),以便两组 Pod 不会重叠: -```yaml - name: frontend-canary - replicas: 1 - ... - labels: - app: guestbook - tier: frontend - track: canary - ... - image: gb-frontend:v4 +```none +name: frontend-canary +replicas: 1 +... +labels: + app: guestbook + tier: frontend + track: canary +... +image: gb-frontend:v4 ``` 前端服务通过选择标签的公共子集(即忽略 `track` 标签)来覆盖两组副本, 以便流量可以转发到两个应用: ```yaml - selector: - app: guestbook - tier: frontend +selector: + app: guestbook + tier: frontend ``` 你可以调整 `stable` 和 `canary` 版本的副本数量,以确定每个版本将接收 实时生产流量的比例(在本例中为 3:1)。 @@ -387,7 +426,8 @@ Once you're confident, you can update the stable track to the new application re `canary` 替换为 `stable`,并且将老版本应用删除。 想要了解更具体的示例,请查看 [Ghost 部署教程](https://github.com/kelseyhightower/talks/tree/master/kubecon-eu-2016/demo#deploy-a-canary)。 @@ -395,7 +435,8 @@ For a more concrete example, check the [tutorial of deploying Ghost](https://git ## 更新标签 {#updating-labels} @@ -408,7 +449,7 @@ For example, if you want to label all your nginx pods as frontend tier, run: kubectl label pods -l app=nginx tier=fe ``` -``` +```none pod/my-nginx-2035384211-j5fhi labeled pod/my-nginx-2035384211-u2c7e labeled pod/my-nginx-2035384211-u3t6x labeled @@ -425,7 +466,7 @@ To see the pods you labeled, run: kubectl get pods -l app=nginx -L tier ``` -``` +```none NAME READY STATUS RESTARTS AGE TIER my-nginx-2035384211-j5fhi 1/1 Running 0 23m fe my-nginx-2035384211-u2c7e 1/1 Running 0 23m fe @@ -433,22 +474,25 @@ my-nginx-2035384211-u3t6x 1/1 Running 0 23m fe ``` 这将输出所有 "app=nginx" 的 Pod,并有一个额外的描述 Pod 的 tier 的标签列 (用参数 `-L` 或者 `--label-columns` 标明)。 -想要了解更多信息,请参考 -[标签](/zh-cn/docs/concepts/overview/working-with-objects/labels/) 和 +想要了解更多信息,请参考[标签](/zh-cn/docs/concepts/overview/working-with-objects/labels/)和 [`kubectl label`](/docs/reference/generated/kubectl/kubectl-commands/#label) 命令文档。 ## 更新注解 {#updating-annotations} @@ -460,7 +504,7 @@ kubectl annotate pods my-nginx-v4-9gw19 description='my frontend running nginx' kubectl get pods my-nginx-v4-9gw19 -o yaml ``` -``` +```shell apiVersion: v1 kind: pod metadata: @@ -470,17 +514,18 @@ metadata: ``` -想要了解更多信息,请参考 
-[注解](/zh-cn/docs/concepts/overview/working-with-objects/annotations/)和 +想要了解更多信息,请参考[注解](/zh-cn/docs/concepts/overview/working-with-objects/annotations/)和 [`kubectl annotate`](/docs/reference/generated/kubectl/kubectl-commands/#annotate) 命令文档。 ## 扩缩你的应用 {#scaling-your-app} @@ -491,7 +536,7 @@ When load on your application grows or shrinks, use `kubectl` to scale your appl kubectl scale deployment/my-nginx --replicas=1 ``` -``` +```none deployment.apps/my-nginx scaled ``` @@ -504,13 +549,14 @@ Now you only have one pod managed by the deployment. kubectl get pods -l app=nginx ``` -``` +```none NAME READY STATUS RESTARTS AGE my-nginx-2035384211-j5fhi 1/1 Running 0 30m ``` 想要让系统自动选择需要 nginx 副本的数量,范围从 1 到 3,请执行以下操作: @@ -518,21 +564,23 @@ To have the system automatically choose the number of nginx replicas as needed, kubectl autoscale deployment/my-nginx --min=1 --max=3 ``` -``` +```none horizontalpodautoscaler.autoscaling/my-nginx autoscaled ``` 现在,你的 nginx 副本将根据需要自动地增加或者减少。 想要了解更多信息,请参考 [kubectl scale](/docs/reference/generated/kubectl/kubectl-commands/#scale)命令文档、 -[kubectl autoscale](/docs/reference/generated/kubectl/kubectl-commands/#autoscale) 命令文档和 -[水平 Pod 自动伸缩](/zh-cn/docs/tasks/run-application/horizontal-pod-autoscale/) 文档。 +[kubectl autoscale](/docs/reference/generated/kubectl/kubectl-commands/#autoscale) +命令文档和[水平 Pod 自动伸缩](/zh-cn/docs/tasks/run-application/horizontal-pod-autoscale/)文档。 建议在源代码管理中维护一组配置文件 (参见[配置即代码](https://martinfowler.com/bliki/InfrastructureAsCode.html)), @@ -558,25 +607,36 @@ Then, you can use [`kubectl apply`](/docs/reference/generated/kubectl/kubectl-co 将配置变更应用到集群中。 这个命令将会把推送的版本与以前的版本进行比较,并应用你所做的更改, 但是不会自动覆盖任何你没有指定更改的属性。 ```shell kubectl apply -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml +``` + +```none deployment.apps/my-nginx configured ``` 注意,`kubectl apply` 将为资源增加一个额外的注解,以确定自上次调用以来对配置的更改。 执行时,`kubectl apply` 会在以前的配置、提供的输入和资源的当前配置之间 找出三方差异,以确定如何修改资源。 目前,新创建的资源是没有这个注解的,所以,第一次调用 `kubectl apply` 时 将使用提供的输入和资源的当前配置双方之间差异进行比较。 @@ -584,19 +644,14 @@ Currently, resources are created without this annotation, so the first invocatio 因此,kubectl 不会删除它们。 所有后续的 `kubectl apply` 操作以及其他修改配置的命令,如 `kubectl replace` 和 `kubectl edit`,都将更新注解,并允许随后调用的 `kubectl apply` 使用三方差异进行检查和执行删除。 -{{< note >}} - -想要使用 apply,请始终使用 `kubectl apply` 或 `kubectl create --save-config` 创建资源。 -{{< /note >}} - ### kubectl edit 这相当于首先 `get` 资源,在文本编辑器中编辑它,然后用更新的版本 `apply` 资源: @@ -625,7 +681,8 @@ rm /tmp/nginx.yaml ``` @@ -645,15 +702,17 @@ and [kubectl patch](/docs/reference/generated/kubectl/kubectl-commands/#patch). --> 你可以使用 `kubectl patch` 来更新 API 对象。此命令支持 JSON patch、 -JSON merge patch、以及 strategic merge patch。 请参考 -[使用 kubectl patch 更新 API 对象](/zh-cn/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/) -和 -[kubectl patch](/docs/reference/generated/kubectl/kubectl-commands/#patch). 
+JSON merge patch、以及 strategic merge patch。 +请参考[使用 kubectl patch 更新 API 对象](/zh-cn/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/)和 +[kubectl patch](/docs/reference/generated/kubectl/kubectl-commands/#patch)。 ## 破坏性的更新 {#disruptive-updates} @@ -665,7 +724,7 @@ In some cases, you may need to update resource fields that cannot be updated onc kubectl replace -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml --force ``` -``` +```none deployment.apps/my-nginx deleted deployment.apps/my-nginx replaced ``` @@ -676,7 +735,9 @@ deployment.apps/my-nginx replaced ## 在不中断服务的情况下更新应用 {#updating-your-app-without-a-service-outage} 在某些时候,你最终需要更新已部署的应用,通常都是通过指定新的镜像或镜像标签, 如上面的金丝雀发布的场景中所示。`kubectl` 支持几种更新操作, @@ -696,26 +757,26 @@ Let's say you were running version 1.14.2 of nginx: kubectl create deployment my-nginx --image=nginx:1.14.2 ``` -``` +```none deployment.apps/my-nginx created ``` - 运行 3 个副本(这样新旧版本可以同时存在) ```shell kubectl scale deployment my-nginx --current-replicas=1 --replicas=3 ``` -``` +```none deployment.apps/my-nginx scaled ``` 要更新到 1.16.1 版本,只需使用我们前面学到的 kubectl 命令将 `.spec.template.spec.containers[0].image` 从 `nginx:1.14.2` 修改为 `nginx:1.16.1`。 @@ -725,7 +786,10 @@ kubectl edit deployment/my-nginx ``` 没错,就是这样!Deployment 将在后台逐步更新已经部署的 nginx 应用。 它确保在更新过程中,只有一定数量的旧副本被开闭,并且只有一定基于所需 Pod 数量的新副本被创建。 diff --git a/content/zh-cn/docs/concepts/cluster-administration/system-traces.md b/content/zh-cn/docs/concepts/cluster-administration/system-traces.md index 8c5b3f5ff49cf..d313716a21004 100644 --- a/content/zh-cn/docs/concepts/cluster-administration/system-traces.md +++ b/content/zh-cn/docs/concepts/cluster-administration/system-traces.md @@ -143,7 +143,7 @@ The kubelet CRI interface and authenticated http servers are instrumented to gen trace spans. As with the apiserver, the endpoint and sampling rate are configurable. Trace context propagation is also configured. A parent span's sampling decision is always respected. A provided tracing configuration sampling rate will apply to spans without a parent. -Enabled without a configured endpoint, the default OpenTelemetry Collector reciever address of "localhost:4317" is set. +Enabled without a configured endpoint, the default OpenTelemetry Collector receiver address of "localhost:4317" is set. --> kubelet CRI 接口和实施身份验证的 HTTP 服务器被插桩以生成追踪 span。 与 API 服务器一样,端点和采样率是可配置的。 diff --git a/content/zh-cn/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/zh-cn/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md index 8e56823da2965..751560ef429db 100644 --- a/content/zh-cn/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md +++ b/content/zh-cn/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md @@ -140,111 +140,121 @@ spec: The general workflow of a device plugin includes the following steps: -* Initialization. During this phase, the device plugin performs vendor specific +1. Initialization. During this phase, the device plugin performs vendor-specific initialization and setup to make sure the devices are in a ready state. -* The plugin starts a gRPC service, with a Unix socket under host path +1. 
The plugin starts a gRPC service, with a Unix socket under the host path `/var/lib/kubelet/device-plugins/`, that implements the following interfaces: --> ## 设备插件的实现 {#device-plugin-implementation} 设备插件的常规工作流程包括以下几个步骤: -* 初始化。在这个阶段,设备插件将执行供应商特定的初始化和设置, - 以确保设备处于就绪状态。 -* 插件使用主机路径 `/var/lib/kubelet/device-plugins/` 下的 Unix 套接字启动一个 - gRPC 服务,该服务实现以下接口: - - - ```gRPC - service DevicePlugin { - // GetDevicePluginOptions 返回与设备管理器沟通的选项。 - rpc GetDevicePluginOptions(Empty) returns (DevicePluginOptions) {} - - // ListAndWatch 返回 Device 列表构成的数据流。 - // 当 Device 状态发生变化或者 Device 消失时,ListAndWatch - // 会返回新的列表。 - rpc ListAndWatch(Empty) returns (stream ListAndWatchResponse) {} - - // Allocate 在容器创建期间调用,这样设备插件可以运行一些特定于设备的操作, - // 并告诉 kubelet 如何令 Device 可在容器中访问的所需执行的具体步骤 - rpc Allocate(AllocateRequest) returns (AllocateResponse) {} - - // GetPreferredAllocation 从一组可用的设备中返回一些优选的设备用来分配, - // 所返回的优选分配结果不一定会是设备管理器的最终分配方案。 - // 此接口的设计仅是为了让设备管理器能够在可能的情况下做出更有意义的决定。 - rpc GetPreferredAllocation(PreferredAllocationRequest) returns (PreferredAllocationResponse) {} - - // PreStartContainer 在设备插件注册阶段根据需要被调用,调用发生在容器启动之前。 - // 在将设备提供给容器使用之前,设备插件可以运行一些诸如重置设备之类的特定于 - // 具体设备的操作, - rpc PreStartContainer(PreStartContainerRequest) returns (PreStartContainerResponse) {} - } - ``` - - {{< note >}} - - 插件并非必须为 `GetPreferredAllocation()` 或 `PreStartContainer()` 提供有用的实现逻辑, - 调用 `GetDevicePluginOptions()` 时所返回的 `DevicePluginOptions` - 消息中应该设置这些调用是否可用。`kubelet` 在真正调用这些函数之前,总会调用 - `GetDevicePluginOptions()` 来查看是否存在这些可选的函数。 - {{< /note >}} - - -* 插件通过 Unix socket 在主机路径 `/var/lib/kubelet/device-plugins/kubelet.sock` - 处向 kubelet 注册自身。 -* 成功注册自身后,设备插件将以服务模式运行,在此期间,它将持续监控设备运行状况, - 并在设备状态发生任何变化时向 kubelet 报告。它还负责响应 `Allocate` gRPC 请求。 - 在 `Allocate` 期间,设备插件可能还会做一些设备特定的准备;例如 GPU 清理或 QRNG 初始化。 - 如果操作成功,则设备插件将返回 `AllocateResponse`,其中包含用于访问被分配的设备容器运行时的配置。 - kubelet 将此信息传递到容器运行时。 +1. 初始化。在这个阶段,设备插件将执行特定于供应商的初始化和设置,以确保设备处于就绪状态。 + +2. 插件使用主机路径 `/var/lib/kubelet/device-plugins/` 下的 UNIX 套接字启动一个 + gRPC 服务,该服务实现以下接口: + + + ```gRPC + service DevicePlugin { + // GetDevicePluginOptions 返回与设备管理器沟通的选项。 + rpc GetDevicePluginOptions(Empty) returns (DevicePluginOptions) {} + + // ListAndWatch 返回 Device 列表构成的数据流。 + // 当 Device 状态发生变化或者 Device 消失时,ListAndWatch + // 会返回新的列表。 + rpc ListAndWatch(Empty) returns (stream ListAndWatchResponse) {} + + // Allocate 在容器创建期间调用,这样设备插件可以运行一些特定于设备的操作, + // 并告诉 kubelet 如何令 Device 可在容器中访问的所需执行的具体步骤 + rpc Allocate(AllocateRequest) returns (AllocateResponse) {} + + // GetPreferredAllocation 从一组可用的设备中返回一些优选的设备用来分配, + // 所返回的优选分配结果不一定会是设备管理器的最终分配方案。 + // 此接口的设计仅是为了让设备管理器能够在可能的情况下做出更有意义的决定。 + rpc GetPreferredAllocation(PreferredAllocationRequest) returns (PreferredAllocationResponse) {} + + // PreStartContainer 在设备插件注册阶段根据需要被调用,调用发生在容器启动之前。 + // 在将设备提供给容器使用之前,设备插件可以运行一些诸如重置设备之类的特定于 + // 具体设备的操作, + rpc PreStartContainer(PreStartContainerRequest) returns (PreStartContainerResponse) {} + } + ``` + + {{< note >}} + + 插件并非必须为 `GetPreferredAllocation()` 或 `PreStartContainer()` 提供有用的实现逻辑, + 调用 `GetDevicePluginOptions()` 时所返回的 `DevicePluginOptions` + 消息中应该设置一些标志,表明这些调用(如果有)是否可用。`kubelet` 在直接调用这些函数之前,总会调用 + `GetDevicePluginOptions()` 来查看哪些可选的函数可用。 + {{< /note >}} + + +3. 插件通过位于主机路径 `/var/lib/kubelet/device-plugins/kubelet.sock` 下的 UNIX 套接字 + 向 kubelet 注册自身。 + + {{< note >}} + + 工作流程的顺序很重要。插件必须在向 kubelet 注册自己之前开始提供 gRPC 服务,才能保证注册成功。 + {{< /note >}} + + +4. 
成功注册自身后,设备插件将以提供服务的模式运行,在此期间,它将持续监控设备运行状况, + 并在设备状态发生任何变化时向 kubelet 报告。它还负责响应 `Allocate` gRPC 请求。 + 在 `Allocate` 期间,设备插件可能还会做一些特定于设备的准备;例如 GPU 清理或 QRNG 初始化。 + 如果操作成功,则设备插件将返回 `AllocateResponse`,其中包含用于访问被分配的设备容器运行时的配置。 + kubelet 将此信息传递到容器运行时。 @@ -232,7 +233,8 @@ you implement yourself * [kubebuilder](https://book.kubebuilder.io/) * [KubeOps](https://buehler.github.io/dotnet-operator-sdk/) (.NET operator SDK) * [KUDO](https://kudo.dev/)(Kubernetes 通用声明式 Operator) -* [Metacontroller](https://metacontroller.github.io/metacontroller/intro.html),可与 Webhooks 结合使用,以实现自己的功能。 +* [Mast](https://docs.ansi.services/mast/user_guide/operator/) +* [Metacontroller](https://metacontroller.github.io/metacontroller/intro.html),可与 Webhook 结合使用,以实现自己的功能。 * [Operator Framework](https://operatorframework.io) * [shell-operator](https://github.com/flant/shell-operator) diff --git a/content/zh-cn/docs/concepts/overview/components.md b/content/zh-cn/docs/concepts/overview/components.md index 8d3af794bc806..97c4bbcc17b67 100644 --- a/content/zh-cn/docs/concepts/overview/components.md +++ b/content/zh-cn/docs/concepts/overview/components.md @@ -3,7 +3,7 @@ title: Kubernetes 组件 content_type: concept description: > Kubernetes 集群由控制平面的组件和一组称为节点的机器组成。 -weight: 20 +weight: 30 card: name: concepts weight: 20 @@ -16,7 +16,7 @@ content_type: concept description: > A Kubernetes cluster consists of the components that are a part of the control plane and a set of machines called nodes. -weight: 20 +weight: 30 card: name: concepts weight: 20 diff --git a/content/zh-cn/docs/concepts/overview/kubernetes-api.md b/content/zh-cn/docs/concepts/overview/kubernetes-api.md index be6515458f54a..b4272751b7ea8 100644 --- a/content/zh-cn/docs/concepts/overview/kubernetes-api.md +++ b/content/zh-cn/docs/concepts/overview/kubernetes-api.md @@ -1,7 +1,7 @@ --- title: Kubernetes API content_type: concept -weight: 30 +weight: 40 description: > Kubernetes API 使你可以查询和操纵 Kubernetes 中对象的状态。 Kubernetes 控制平面的核心是 API 服务器和它暴露的 HTTP API。 @@ -15,7 +15,7 @@ reviewers: - chenopis title: The Kubernetes API content_type: concept -weight: 30 +weight: 40 description: > The Kubernetes API lets you query and manipulate the state of objects in Kubernetes. The core of Kubernetes' control plane is the API server and the HTTP API that it exposes. Users, the different parts of your cluster, and external components all communicate with one another through the API server. diff --git a/content/zh-cn/docs/concepts/overview/working-with-objects/namespaces.md b/content/zh-cn/docs/concepts/overview/working-with-objects/namespaces.md index df549488184ad..a872a08ea9117 100644 --- a/content/zh-cn/docs/concepts/overview/working-with-objects/namespaces.md +++ b/content/zh-cn/docs/concepts/overview/working-with-objects/namespaces.md @@ -268,7 +268,7 @@ kubectl api-resources --namespaced=false --> ## 自动打标签 {#automatic-labelling} -{{< feature-state state="beta" for_k8s_version="1.21" >}} +{{< feature-state state="beta" for_k8s_version="stable" >}} diff --git a/content/zh-cn/docs/concepts/security/pod-security-admission.md b/content/zh-cn/docs/concepts/security/pod-security-admission.md index c7fea290ee23f..8c02233f3c7cf 100644 --- a/content/zh-cn/docs/concepts/security/pod-security-admission.md +++ b/content/zh-cn/docs/concepts/security/pod-security-admission.md @@ -167,10 +167,10 @@ applied to workload resources, only to the resulting pod objects. 
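For the Pod Security Admission passage this hunk touches, here is a hedged sketch of how the `enforce`, `audit` and `warn` modes are typically selected per namespace via the standard `pod-security.kubernetes.io` labels; the namespace name and the chosen levels are illustrative:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: my-namespace               # illustrative name
  labels:
    # enforce is applied only to Pods; audit and warn also evaluate workload resources
    pod-security.kubernetes.io/enforce: baseline
    pod-security.kubernetes.io/audit: restricted
    pod-security.kubernetes.io/warn: restricted
```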
Pod 通常是通过创建 {{< glossary_tooltip term_id="deployment" >}} 或 {{< glossary_tooltip term_id="job">}} 这类[工作负载对象](/zh-cn/docs/concepts/workloads/controllers/) -来间接创建的。工作负载对象为工作负载资源定义一个 **Pod 模板** -和一个对应的负责基于该模板来创建 Pod 的{{< glossary_tooltip term_id="controller" text="控制器" >}}。 +来间接创建的。工作负载对象为工作负载资源定义一个 **Pod 模板**和一个对应的负责基于该模板来创建 +Pod 的{{< glossary_tooltip term_id="controller" text="控制器" >}}。 为了尽早地捕获违例状况,`audit` 和 `warn` 模式都应用到负载资源。 -不过,`enforce` 模式并 **不** 应用到工作负载资源,仅应用到所生成的 Pod 对象上。 +不过,`enforce` 模式并**不**应用到工作负载资源,仅应用到所生成的 Pod 对象上。 ## 豁免 {#exemptions} -你可以为 Pod 安全性的实施设置 **豁免(Exemptions)** 规则, +你可以为 Pod 安全性的实施设置**豁免(Exemptions)**规则, 从而允许创建一些本来会被与给定名字空间相关的策略所禁止的 Pod。 豁免规则可以在[准入控制器配置](/zh-cn/docs/tasks/configure-pod-container/enforce-standards-admission-controller/#configure-the-admission-controller) 中静态配置。 @@ -191,7 +191,7 @@ Exemptions can be statically configured in the Exemptions must be explicitly enumerated. Requests meeting exemption criteria are _ignored_ by the Admission Controller (all `enforce`, `audit` and `warn` behaviors are skipped). Exemption dimensions include: --> -豁免规则可以显式枚举。满足豁免标准的请求会被准入控制器 **忽略** +豁免规则必须显式枚举。满足豁免标准的请求会被准入控制器**忽略** (所有 `enforce`、`audit` 和 `warn` 行为都会被略过)。 豁免的维度包括: diff --git a/content/zh-cn/docs/concepts/security/pod-security-standards.md b/content/zh-cn/docs/concepts/security/pod-security-standards.md index 6b0a461beaac4..f5e043d68dcd8 100644 --- a/content/zh-cn/docs/concepts/security/pod-security-standards.md +++ b/content/zh-cn/docs/concepts/security/pod-security-standards.md @@ -22,8 +22,8 @@ The Pod Security Standards define three different _policies_ to broadly cover th spectrum. These policies are _cumulative_ and range from highly-permissive to highly-restrictive. This guide outlines the requirements of each policy. --> -Pod 安全性标准定义了三种不同的 **策略(Policy)**,以广泛覆盖安全应用场景。 -这些策略是 **叠加式的(Cumulative)**,安全级别从高度宽松至高度受限。 +Pod 安全性标准定义了三种不同的**策略(Policy)**,以广泛覆盖安全应用场景。 +这些策略是**叠加式的(Cumulative)**,安全级别从高度宽松至高度受限。 本指南概述了每个策略的要求。 在下述表格中,通配符(`*`)意味着一个列表中的所有元素。 -例如 `spec.containers[*].securityContext` 表示 _所定义的所有容器_ 的安全性上下文对象。 +例如 `spec.containers[*].securityContext` 表示**所定义的所有容器**的安全性上下文对象。 如果所列出的任一容器不能满足要求,整个 Pod 将无法通过校验。 {{< /note >}} @@ -575,7 +575,7 @@ to a particular OS can be relaxed for the other OS. 
--> ### 限制性的 Pod Security Standard 变更 {#restricted-pod-security-standard-changes} -Kubernetes v1.25 中的另一个重要变化是 **限制性的(Restricted)** Pod 安全性已更新, +Kubernetes v1.25 中的另一个重要变化是**限制性的(Restricted)** Pod 安全性已更新, 能够处理 `pod.spec.os.name` 字段。根据 OS 名称,专用于特定 OS 的某些策略对其他 OS 可以放宽限制。 ### 持久卷的创建 {#persistent-volume-creation} -如 [PodSecurityPolicy](/zh-cn/docs/concepts/security/pod-security-policy/#volumes-and-file-systems) -文档中所述,创建 PersistentVolumes 的权限可以提权访问底层主机。 -如果需要访问 PersistentVolume,受信任的管理员应该创建 `PersistentVolume`, -受约束的用户应该使用 `PersistentVolumeClaim` 访问该存储。 +如果允许某人或某个应用创建任意的 PersistentVolume,则这种访问权限包括创建 `hostPath` 卷, +这意味着 Pod 将可以访问对应节点上的下层主机文件系统。授予该能力会带来安全风险。 + + +不受限制地访问主机文件系统的容器可以通过多种方式提升特权,包括从其他容器读取数据以及滥用系统服务(例如 Kubelet)的凭据。 + +你应该只允许以下实体具有创建 PersistentVolume 对象的访问权限: + + +- 需要此访问权限才能工作的用户(集群操作员)以及你信任的人, +- Kubernetes 控制平面组件,这些组件基于已配置为自动制备的 PersistentVolumeClaim 创建 PersistentVolume。 + 这通常由 Kubernetes 提供商或操作员在安装 CSI 驱动程序时进行设置。 + + +在需要访问持久存储的地方,受信任的管理员应创建 PersistentVolume,而受约束的用户应使用 +PersistentVolumeClaim 来访问该存储。 -Kubernetes DNS 除了在集群上调度 DNS Pod 和 Service, -还配置 kubelet 以告知各个容器使用 DNS Service 的 IP 来解析 DNS 名称。 +Kubernetes 发布有关 Pod 和 Service 的信息,这些信息被用来对 DNS 进行编程。 +Kubelet 配置 Pod 的 DNS,以便运行中的容器可以通过名称而不是 IP 来查找服务。 -集群中定义的每个 Service (包括 DNS 服务器自身)都被赋予一个 DNS 名称。 + +集群中定义的 Service 被赋予 DNS 名称。 默认情况下,客户端 Pod 的 DNS 搜索列表会包含 Pod 自身的名字空间和集群的默认域。 -DNS 查询可以使用 Pod 中的 `/etc/resolv.conf` 展开。kubelet 会为每个 Pod -生成此文件。例如,对 `data` 的查询可能被展开为 `data.test.svc.cluster.local`。 +DNS 查询可以使用 Pod 中的 `/etc/resolv.conf` 展开。 +Kubelet 为每个 Pod 配置此文件。 +例如,对 `data` 的查询可能被展开为 `data.test.svc.cluster.local`。 `search` 选项的取值会被用来展开查询。要进一步了解 DNS 查询,可参阅 [`resolv.conf` 手册页面](https://www.man7.org/linux/man-pages/man5/resolv.conf.5.html)。 @@ -91,10 +94,10 @@ options ndots:5 ``` -概括起来,名字空间 `test` 中的 Pod 可以成功地解析 `data.prod` 或者 +概括起来,名字空间 _test_ 中的 Pod 可以成功地解析 `data.prod` 或者 `data.prod.svc.cluster.local`。 @@ -143,40 +146,39 @@ selection from the set. #### A/AAAA 记录 {#a-aaaa-records} -“普通” Service(除了无头 Service)会以 `my-svc.my-namespace.svc.cluster-domain.example` -这种名字的形式被分配一个 DNS A 或 AAAA 记录,取决于 Service 的 IP 协议族。 +除了无头 Service 之外的 “普通” Service 会被赋予一个形如 `my-svc.my-namespace.svc.cluster-domain.example` +的 DNS A 和/或 AAAA 记录,取决于 Service 的 IP 协议族(可能有多个)设置。 该名称会解析成对应 Service 的集群 IP。 -“无头(Headless)” Service (没有集群 IP)也会以 -`my-svc.my-namespace.svc.cluster-domain.example` 这种名字的形式被指派一个 DNS A 或 AAAA 记录, -具体取决于 Service 的 IP 协议族。 +没有集群 IP 的[无头 Service](/zh-cn/docs/concepts/services-networking/service/#headless-services) +也会被赋予一个形如 `my-svc.my-namespace.svc.cluster-domain.example` 的 DNS A 和/或 AAAA 记录。 与普通 Service 不同,这一记录会被解析成对应 Service 所选择的 Pod IP 的集合。 客户端要能够使用这组 IP,或者使用标准的轮转策略从这组 IP 中进行选择。 #### SRV 记录 {#srv-records} -Kubernetes 根据普通 Service 或 -[Headless Service](/zh-cn/docs/concepts/services-networking/service/#headless-services) +Kubernetes 根据普通 Service 或无头 Service 中的命名端口创建 SRV 记录。每个命名端口, -SRV 记录格式为 `_my-port-name._my-port-protocol.my-svc.my-namespace.svc.cluster-domain.example`。 +SRV 记录格式为 `_port-name._port-protocol.my-svc.my-namespace.svc.cluster-domain.example`。 普通 Service,该记录会被解析成端口号和域名:`my-svc.my-namespace.svc.cluster-domain.example`。 无头 Service,该记录会被解析成多个结果,及该服务的每个后端 Pod 各一个 SRV 记录, -其中包含 Pod 端口号和格式为 `auto-generated-name.my-svc.my-namespace.svc.cluster-domain.example` +其中包含 Pod 端口号和格式为 `hostname.my-svc.my-namespace.svc.cluster-domain.example` 的域名。 + ## Pod +### Pod 的 hostname 和 subdomain 字段 {#pod-s-hostname-and-subdomain-fields} -The Pod spec also has an optional `subdomain` field which can be used to specify -its subdomain. 
For example, a Pod with `hostname` set to "`foo`", and `subdomain` -set to "`bar`", in namespace "`my-namespace`", will have the fully qualified -domain name (FQDN) "`foo.bar.my-namespace.svc.cluster-domain.example`". +当前,创建 Pod 时其主机名(从 Pod 内部观察)取自 Pod 的 `metadata.name` 值。 -Example: + -### Pod 的 hostname 和 subdomain 字段 {#pod-s-hostname-and-subdomain-fields} -当前,创建 Pod 时其主机名取自 Pod 的 `metadata.name` 值。 +Pod 规约中包含一个可选的 `hostname` 字段,可以用来指定一个不同的主机名。 +当这个字段被设置时,它将优先于 Pod 的名字成为该 Pod 的主机名(同样是从 Pod 内部观察)。 +举个例子,给定一个 `spec.hostname` 设置为 `“my-host”` 的 Pod, +该 Pod 的主机名将被设置为 `“my-host”`。 + + -Pod 规约中包含一个可选的 `hostname` 字段,可以用来指定 Pod 的主机名。 -当这个字段被设置时,它将优先于 Pod 的名字成为该 Pod 的主机名。 -举个例子,给定一个 `hostname` 设置为 "`my-host`" 的 Pod, -该 Pod 的主机名将被设置为 "`my-host`"。 +Pod 规约还有一个可选的 `subdomain` 字段,可以用来表明该 Pod 是名字空间的子组的一部分。 +举个例子,某 Pod 的 `spec.hostname` 设置为 `“foo”`,`spec.subdomain` 设置为 `“bar”`, +在名字空间 `“my-namespace”` 中,主机名称被设置成 `“foo”` 并且对应的完全限定域名(FQDN)为 +“`foo.bar.my-namespace.svc.cluster-domain.example`”(还是从 Pod 内部观察)。 -Pod 规约还有一个可选的 `subdomain` 字段,可以用来指定 Pod 的子域名。 -举个例子,某 Pod 的 `hostname` 设置为 “`foo`”,`subdomain` 设置为 “`bar`”, -在名字空间 “`my-namespace`” 中对应的完全限定域名(FQDN)为 -“`foo.bar.my-namespace.svc.cluster-domain.example`”。 + +如果 Pod 所在的名字空间中存在一个无头服务,其名称与子域相同, +则集群的 DNS 服务器还会为 Pod 的完全限定主机名返回 A 和/或 AAAA 记录。 示例: @@ -247,7 +264,7 @@ Pod 规约还有一个可选的 `subdomain` 字段,可以用来指定 Pod 的 apiVersion: v1 kind: Service metadata: - name: default-subdomain + name: busybox-subdomain spec: selector: name: busybox @@ -255,7 +272,6 @@ spec: ports: - name: foo # 实际上不需要指定端口号 port: 1234 - targetPort: 1234 --- apiVersion: v1 kind: Pod @@ -265,7 +281,7 @@ metadata: name: busybox spec: hostname: busybox-1 - subdomain: default-subdomain + subdomain: busybox-subdomain containers: - image: busybox:1.28 command: @@ -281,7 +297,7 @@ metadata: name: busybox spec: hostname: busybox-2 - subdomain: default-subdomain + subdomain: busybox-subdomain containers: - image: busybox:1.28 command: @@ -291,24 +307,16 @@ spec: ``` -如果某无头 Service 与某 Pod 在同一个名字空间中,且它们具有相同的子域名, -集群的 DNS 服务器也会为该 Pod 的全限定主机名返回 A 记录或 AAAA 记录。 -例如,在同一个名字空间中,给定一个主机名为 “busybox-1”、 -子域名设置为 “default-subdomain” 的 Pod,和一个名称为 “`default-subdomain`” -的无头 Service,Pod 将看到自己的 FQDN 为 -"`busybox-1.default-subdomain.my-namespace.svc.cluster-domain.example`"。 -DNS 会为此名字提供一个 A 记录或 AAAA 记录,指向该 Pod 的 IP。 -“`busybox1`” 和 “`busybox2`” 这两个 Pod 分别具有它们自己的 A 或 AAAA 记录。 +鉴于上述服务 `“busybox-subdomain”` 和将 `spec.subdomain` 设置为 `“busybox-subdomain”` 的 Pod, +第一个 Pod 将看到自己的 FQDN 为 `“busybox-1.busybox-subdomain.my-namespace.svc.cluster-domain.example”`。 +DNS 会为此名字提供一个 A 记录和/或 AAAA 记录,指向该 Pod 的 IP。 +Pod “`busybox1`” 和 “`busybox2`” 都将有自己的地址记录。 {{< note >}} -由于不是为 Pod 名称创建 A 或 AAAA 记录的,因此 Pod 的 A 或 AAAA 需要 `hostname`。 +由于 A 和 AAAA 记录不是基于 Pod 名称创建,因此需要设置了 `hostname` 才会生成 Pod 的 A 或 AAAA 记录。 没有设置 `hostname` 但设置了 `subdomain` 的 Pod 只会为 -无头 Service 创建 A 或 AAAA 记录(`default-subdomain.my-namespace.svc.cluster-domain.example`) +无头 Service 创建 A 或 AAAA 记录(`busybox-subdomain.my-namespace.svc.cluster-domain.example`) 指向 Pod 的 IP 地址。 -另外,除非在服务上设置了 `publishNotReadyAddresses=True`,否则只有 Pod 进入就绪状态 +另外,除非在服务上设置了 `publishNotReadyAddresses=True`,否则只有 Pod 准备就绪 才会有与之对应的记录。 {{< /note >}} @@ -341,12 +349,16 @@ record unless `publishNotReadyAddresses=True` is set on the Service. 
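To verify the records described above, a hedged example of resolving the first Pod's FQDN from inside the cluster, assuming the `busybox-subdomain` manifests above are applied in a namespace called `my-namespace` and the cluster uses the default `cluster.local` domain:

```shell
# busybox:1.28 ships a working nslookup; the namespace is an assumption taken from the text above.
kubectl exec -n my-namespace busybox1 -- nslookup busybox-1.busybox-subdomain.my-namespace.svc.cluster.local
```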
{{< feature-state for_k8s_version="v1.22" state="stable" >}} 当 Pod 配置为具有全限定域名 (FQDN) 时,其主机名是短主机名。 -例如,如果你有一个具有完全限定域名 `busybox-1.default-subdomain.my-namespace.svc.cluster-domain.example` 的 Pod, +例如,如果你有一个具有完全限定域名 `busybox-1.busybox-subdomain.my-namespace.svc.cluster-domain.example` 的 Pod, 则默认情况下,该 Pod 内的 `hostname` 命令返回 `busybox-1`,而 `hostname --fqdn` 命令返回 FQDN。 当你在 Pod 规约中设置了 `setHostnameAsFQDN: true` 时,kubelet 会将 Pod @@ -526,7 +538,7 @@ options ndots:2 edns0 ``` 对于 IPv6 设置,搜索路径和名称服务器应按以下方式设置: diff --git a/content/zh-cn/docs/concepts/services-networking/ingress-controllers.md b/content/zh-cn/docs/concepts/services-networking/ingress-controllers.md index 599190fec29fa..36ea9abd1182e 100644 --- a/content/zh-cn/docs/concepts/services-networking/ingress-controllers.md +++ b/content/zh-cn/docs/concepts/services-networking/ingress-controllers.md @@ -5,7 +5,7 @@ description: >- 必须有一个 Ingress 控制器正在运行。你需要选择至少一个 Ingress 控制器并确保其已被部署到你的集群中。 本页列出了你可以部署的常见 Ingress 控制器。 content_type: concept -weight: 30 +weight: 50 --- @@ -136,6 +136,7 @@ Kubernetes 作为一个项目,目前支持和维护 * [Tyk Operator](https://github.com/TykTechnologies/tyk-operator) extends Ingress with Custom Resources to bring API Management capabilities to Ingress. Tyk Operator works with the Open Source Tyk Gateway & Tyk Cloud control plane. * [Voyager](https://appscode.com/products/voyager) is an ingress controller for [HAProxy](https://www.haproxy.org/#desc). +* [Wallarm Ingress Controller](https://www.wallarm.com/solutions/waf-for-kubernetes) is an Ingress Controller that provides WAAP (WAF) and API Security capabilities. --> * [Traefik Kubernetes Ingress 提供程序](https://doc.traefik.io/traefik/providers/kubernetes-ingress/) 是一个用于 [Traefik](https://traefik.io/traefik/) 代理的 Ingress 控制器。 @@ -144,6 +145,8 @@ Kubernetes 作为一个项目,目前支持和维护 使用开源的 Tyk Gateway & Tyk Cloud 控制面。 * [Voyager](https://appscode.com/products/voyager) 是一个针对 [HAProxy](https://www.haproxy.org/#desc) 的 Ingress 控制器。 +* [Wallarm Ingress Controller](https://www.wallarm.com/solutions/waf-for-kubernetes) 是提供 WAAP(WAF) + 和 API 安全功能的 Ingress Controller。 你可以使用 [Ingress 类](/zh-cn/docs/concepts/services-networking/ingress/#ingress-class)在集群中部署任意数量的 diff --git a/content/zh-cn/docs/concepts/services-networking/windows-networking.md b/content/zh-cn/docs/concepts/services-networking/windows-networking.md index ea2df2e8154ec..9feb413991746 100644 --- a/content/zh-cn/docs/concepts/services-networking/windows-networking.md +++ b/content/zh-cn/docs/concepts/services-networking/windows-networking.md @@ -1,7 +1,7 @@ --- title: Windows 网络 content_type: concept -weight: 75 +weight: 110 --- ## 在你开始之前 {#before-you-begin} -* 创建一个 Kubernetes 集群,其中包含一个控制平面和一个[运行 Windows Server 的工作节点](/zh-cn/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes/) +* 创建一个 Kubernetes 集群,其中包含一个控制平面和一个运行 Windows Server 的工作节点。 * 务必请注意,在 Kubernetes 上创建和部署服务和工作负载的行为方式与 Linux 和 Windows 容器的行为方式大致相同。 与集群交互的 [kubectl 命令](/zh-cn/docs/reference/kubectl/)是一致的。 下一小节的示例旨在帮助你快速开始使用 Windows 容器。 @@ -164,7 +163,7 @@ port 80 of the container directly to the Service. 
命令进入容器,并在 Pod 之间(以及跨主机,如果你有多个 Windows 节点)相互进行 ping 操作。 * Service 到 Pod 的通信,在 Linux 控制平面所在的节点以及独立的 Pod 中执行 `curl` 命令来访问虚拟的服务 IP(在 `kubectl get services` 命令下查看)。 - * 服务发现,执行 `curl` 命令来访问带有 Kubernetes + * 服务发现,执行 `curl` 命令来访问带有 Kubernetes [默认 DNS 后缀](/zh-cn/docs/concepts/services-networking/dns-pod-service/#services)的服务名称。 * 入站连接,在 Linux 控制平面所在的节点上或集群外的机器上执行 `curl` 命令来访问 NodePort 服务。 * 出站连接,使用 `kubectl exec`,从 Pod 内部执行 `curl` 访问外部 IP。 @@ -242,7 +241,8 @@ Windows 容器工作负载可以配置为使用组托管服务帐户(Group Man 组托管服务帐户是一种特定类型的活动目录(Active Directory)帐户,可提供自动密码管理、 简化的服务主体名称(Service Principal Name,SPN)管理,以及将管理委派给多个服务器上的其他管理员的能力。 配置了 GMSA 的容器可以携带使用 GMSA 配置的身份访问外部活动目录域资源。 -在[此处](/zh-cn/docs/tasks/configure-pod-container/configure-gmsa/)了解有关为 Windows 容器配置和使用 GMSA 的更多信息。 +在[此处](/zh-cn/docs/tasks/configure-pod-container/configure-gmsa/)了解有关为 Windows +容器配置和使用 GMSA 的更多信息。 **CronJob** 创建基于时隔重复调度的 {{< glossary_tooltip term_id="job" text="Job" >}}。 -一个 CronJob 对象就像 **crontab** (cron table) 文件中的一行。 -它用 [Cron](https://en.wikipedia.org/wiki/Cron) 格式进行编写, +CronJob 用于执行排期操作,例如备份、生成报告等。 +一个 CronJob 对象就像 Unix 系统上的 **crontab**(cron table)文件中的一行。 +它用 [Cron](https://zh.wikipedia.org/wiki/Cron) 格式进行编写, 并周期性地在给定的调度时间执行 Job。 -{{< caution >}} - -所有 **CronJob** 的 `schedule:` 时间都是基于 -{{< glossary_tooltip term_id="kube-controller-manager" text="kube-controller-manager" >}} -的时区。 - -如果你的控制平面在 Pod 或是裸容器中运行了 kube-controller-manager, -那么为该容器所设置的时区将会决定 Cron Job 的控制器所使用的时区。 -{{< /caution >}} - -{{< caution >}} -如 [v1 CronJob API](/zh-cn/docs/reference/kubernetes-api/workload-resources/cron-job-v1/) 所述,官方并不支持设置时区。 - -Kubernetes 项目官方并不支持设置如 `CRON_TZ` 或者 `TZ` 等变量。 -`CRON_TZ` 或者 `TZ` 是用于解析和计算下一个 Job 创建时间所使用的内部库中一个实现细节。 -不建议在生产集群中使用它。 -{{< /caution>}} +CronJob 有所限制,也比较特殊。 +例如在某些情况下,单个 CronJob 可以创建多个并发任务。 +请参阅下面的[限制](#cron-job-limitations)。 -## CronJob {#cronjob} - -CronJob 用于执行周期性的动作,例如备份、报告生成等。 -这些任务中的每一个都应该配置为周期性重复的(例如:每天/每周/每月一次); -你可以定义任务开始执行的时间间隔。 - - -### 示例 {#example} +## 示例 {#example} 下面的 CronJob 示例清单会在每分钟打印出当前时间和问候消息: {{< codenew file="application/job/cronjob.yaml" >}} + [使用 CronJob 运行自动化任务](/zh-cn/docs/tasks/job/automated-tasks-with-cron-jobs/)一文会为你详细讲解此例。 +## 编写 CronJob 声明信息 {#writing-a-cronjob-spec} + ### Cron 时间表语法 {#cron-schedule-syntax} +`.spec.schedule` 字段是必需的。该字段的值遵循 [Cron](https://zh.wikipedia.org/wiki/Cron) 语法: + + ``` # ┌───────────── 分钟 (0 - 59) # │ ┌───────────── 小时 (0 - 23) @@ -129,6 +116,44 @@ This example CronJob manifest prints the current time and a hello message every # * * * * * ``` + +例如 `0 0 13 * 5` 表示此任务必须在每个星期五的午夜以及每个月的 13 日的午夜开始。 + + +该格式也包含了扩展的 “Vixie cron” 步长值。 +[FreeBSD 手册](https://www.freebsd.org/cgi/man.cgi?crontab%285%29)中解释如下: + + +> 步长可被用于范围组合。范围后面带有 `/<数字>` 可以声明范围内的步幅数值。 +> 例如,`0-23/2` 可被用在小时字段来声明命令在其他数值的小时数执行 +> (V7 标准中对应的方法是 `0,2,4,6,8,10,12,14,16,18,20,22`)。 +> 步长也可以放在通配符后面,因此如果你想表达 “每两小时”,就用 `*/2` 。 + +{{< note >}} + +时间表中的问号 (`?`) 和星号 `*` 含义相同,它们用来表示给定字段的任何可用值。 +{{< /note >}} + + +除了标准语法,还可以使用一些类似 `@monthly` 的宏: + -例如,下面这行指出必须在每个星期五的午夜以及每个月 13 号的午夜开始任务: +为了生成 CronJob 时间表的表达式,你还可以使用 [crontab.guru](https://crontab.guru/) 这类 Web 工具。 -`0 0 13 * 5` + +### 任务模板 {#job-template} - -要生成 CronJob 时间表表达式,你还可以使用 [crontab.guru](https://crontab.guru/) 之类的 Web 工具。 +### 任务延迟开始的最后期限 {#starting-deadline} + +`.spec.startingDeadlineSeconds` 字段是可选的。 +它表示任务如果由于某种原因错过了调度时间,开始该任务的截止时间的秒数。 + +过了截止时间,CronJob 就不会开始该任务的实例(未来的任务仍在调度之中)。 +例如,如果你有一个每天运行两次的备份任务,你可能会允许它最多延迟 8 小时开始,但不能更晚, +因为更晚进行的备份将变得没有意义:你宁愿等待下一次计划的运行。 + + +对于错过已配置的最后期限的 Job,Kubernetes 将其视为失败的任务。 +如果你没有为 CronJob 指定 `startingDeadlineSeconds`,那 Job 就没有最后期限。 + +如果 
`.spec.startingDeadlineSeconds` 字段被设置(非空), +CronJob 控制器将会计算从预期创建 Job 到当前时间的时间差。 +如果时间差大于该限制,则跳过此次执行。 + +例如,如果将其设置为 `200`,则 Job 控制器允许在实际调度之后最多 200 秒内创建 Job。 + + +### 并发性规则 {#concurrency-policy} + +`.spec.concurrencyPolicy` 也是可选的。它声明了 CronJob 创建的任务执行时发生重叠如何处理。 +spec 仅能声明下列规则中的一种: + +* `Allow`(默认):CronJob 允许并发任务执行。 +* `Forbid`: CronJob 不允许并发任务执行;如果新任务的执行时间到了而老任务没有执行完,CronJob 会忽略新任务的执行。 +* `Replace`:如果新任务的执行时间到了而老任务没有执行完,CronJob 会用新任务替换当前正在运行的任务。 + +请注意,并发性规则仅适用于相同 CronJob 创建的任务。如果有多个 CronJob,它们相应的任务总是允许并发执行的。 + + +### 调度挂起 {#schedule-suspension} + +通过将可选的 `.spec.suspend` 字段设置为 `true`,可以挂起针对 CronJob 执行的任务。 + +这个设置**不**会影响 CronJob 已经开始的任务。 + + +如果你将此字段设置为 `true`,后续发生的执行都会被挂起 +(这些任务仍然在调度中,但 CronJob 控制器不会启动这些 Job 来运行任务),直到你取消挂起 CronJob 为止。 + +{{< caution >}} + +在调度时间内挂起的执行都会被统计为错过的任务。当现有的 CronJob 将 `.spec.suspend` 从 `true` 改为 `false` 时, +且没有[开始的最后期限](#starting-deadline),错过的任务会被立即调度。 +{{< /caution >}} + + +### 任务历史限制 {#jobs-history-limits} + +`.spec.successfulJobsHistoryLimit` 和 `.spec.failedJobsHistoryLimit` 字段是可选的。 +这两个字段指定应保留多少已完成和失败的任务。 +默认设置分别为 3 和 1。将限制设置为 `0` 代表相应类型的任务完成后不会保留。 + +有关自动清理任务的其他方式, +请参见[自动清理完成的 Job](/zh-cn/docs/concepts/workloads/controllers/job/#clean-up-finished-jobs-automatically)。 ## 时区 {#time-zones} -对于没有指定时区的 CronJob,kube-controller-manager 基于本地时区解释排期表(Schedule)。 +对于没有指定时区的 CronJob, +{{< glossary_tooltip term_id="kube-controller-manager" text="kube-controller-manager" >}} +基于本地时区解释排期表(Schedule)。 {{< feature-state for_k8s_version="v1.25" state="beta" >}} @@ -174,10 +335,8 @@ you can specify a time zone for a CronJob (if you don't enable that feature gate Kubernetes that does not have experimental time zone support, all CronJobs in your cluster have an unspecified timezone). -When you have the feature enabled, you can set `spec.timeZone` to the name of a valid [time zone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones). For example, setting -`spec.timeZone: "Etc/UTC"` instructs Kubernetes to interpret the schedule relative to Coordinated Universal Time. - -A time zone database from the Go standard library is included in the binaries and used as a fallback in case an external database is not available on the system. +When you have the feature enabled, you can set `.spec.timeZone` to the name of a valid [time zone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones). For example, setting +`.spec.timeZone: "Etc/UTC"` instructs Kubernetes to interpret the schedule relative to Coordinated Universal Time. 
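Pulling the optional fields discussed above into one place, a hedged sketch of a CronJob manifest; the name, image and command are placeholders, and `timeZone` assumes the `CronJobTimeZone` feature gate mentioned above is enabled:

```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: nightly-report              # illustrative name
spec:
  schedule: "0 3 * * *"             # every day at 03:00
  timeZone: "Etc/UTC"               # interpret the schedule as UTC
  startingDeadlineSeconds: 200      # skip a run that is more than 200 seconds late
  concurrencyPolicy: Forbid         # never let runs overlap
  suspend: false
  successfulJobsHistoryLimit: 3
  failedJobsHistoryLimit: 1
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: Never
          containers:
          - name: report
            image: busybox:1.28     # placeholder image
            command: ["sh", "-c", "date; echo generating report"]
```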
--> 如果启用了 `CronJobTimeZone` [特性门控](/zh-cn/docs/reference/command-line-tools-reference/feature-gates/), 你可以为 CronJob 指定一个时区(如果你没有启用该特性门控,或者你使用的是不支持试验性时区功能的 @@ -187,17 +346,63 @@ Kubernetes 版本,集群中所有 CronJob 的时区都是未指定的)。 设置为有效[时区](https://zh.wikipedia.org/wiki/%E6%97%B6%E5%8C%BA%E4%BF%A1%E6%81%AF%E6%95%B0%E6%8D%AE%E5%BA%93)名称。 例如,设置 `spec.timeZone: "Etc/UTC"` 指示 Kubernetes 采用 UTC 来解释排期表。 +{{< caution >}} + +Kubernetes {{< skew currentVersion >}} 中 CronJob API 的实现允许你设置 +`.spec.schedule` 字段以包含时区;例如:`CRON_TZ=UTC * * * * *` 或 `TZ=UTC * * * * *`。 + +以这种方式指定时区是**未正式支持**(而且从来没有)。 + +如果你尝试设置包含 `TZ` 或 `CRON_TZ` 时区规范的排期表, +Kubernetes 会向客户端报告[警告](/zh-cn/blog/2020/09/03/warnings/)。 +Kubernetes 的未来版本可能根本不会实现这种非正式的时区机制。 +{{< /caution >}} + + Go 标准库中的时区数据库包含在二进制文件中,并用作备用数据库,以防系统上没有可用的外部数据库。 ## CronJob 限制 {#cronjob-limitations} +### 修改 CronJob {#modifying-a-cronjob} + +按照设计,CronJob 包含一个用于**新** Job 的模板。 +如果你修改现有的 CronJob,你所做的更改将应用于修改完成后开始运行的新任务。 +已经开始的任务(及其 Pod)将继续运行而不会发生任何变化。 +也就是说,CronJob **不** 会更新现有任务,即使这些任务仍在运行。 + + +### Job 创建 {#job-creation} + CronJob 根据其计划编排,在每次该执行任务的时候大约会创建一个 Job。 我们之所以说 "大约",是因为在某些情况下,可能会创建两个 Job,或者不会创建任何 Job。 我们试图使这些情况尽量少发生,但不能完全杜绝。因此,Job 应该是 **幂等的**。 @@ -271,50 +476,24 @@ the Job in turn is responsible for the management of the Pods it represents. --> CronJob 仅负责创建与其调度时间相匹配的 Job,而 Job 又负责管理其代表的 Pod。 - -## 控制器版本 {#new-controller} - -从 Kubernetes v1.21 版本开始,CronJob 控制器的第二个版本被用作默认实现。 -要禁用此默认 CronJob 控制器而使用原来的 CronJob 控制器,请在 -{{< glossary_tooltip term_id="kube-controller-manager" text="kube-controller-manager" >}} -中设置[特性门控](/zh-cn/docs/reference/command-line-tools-reference/feature-gates/) -`CronJobControllerV2`,将此标志设置为 `false`。例如: - -``` ---feature-gates="CronJobControllerV2=false" -``` - ## {{% heading "whatsnext" %}} * 了解 CronJob 所依赖的 [Pod](/zh-cn/docs/concepts/workloads/pods/) 与 [Job](/zh-cn/docs/concepts/workloads/controllers/job/) 的概念。 -* 阅读 CronJob `.spec.schedule` 字段的[格式](https://pkg.go.dev/github.com/robfig/cron/v3#hdr-CRON_Expression_Format)。 -* 有关创建和使用 CronJob 的说明及示例规约文件, +* 阅读 CronJob `.spec.schedule` 字段的详细[格式](https://pkg.go.dev/github.com/robfig/cron/v3#hdr-CRON_Expression_Format)。 +* 有关创建和使用 CronJob 的说明及 CronJob 清单的示例, 请参见[使用 CronJob 运行自动化任务](/zh-cn/docs/tasks/job/automated-tasks-with-cron-jobs/)。 -* 有关自动清理失败或完成作业的说明,请参阅[自动清理作业](/zh-cn/docs/concepts/workloads/controllers/job/#clean-up-finished-jobs-automatically) * `CronJob` 是 Kubernetes REST API 的一部分, - 阅读 {{< api-reference page="workload-resources/cron-job-v1" >}} - 对象定义以了解关于该资源的 API。 + 阅读 {{< api-reference page="workload-resources/cron-job-v1" >}} API 参考了解更多细节。 diff --git a/content/zh-cn/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/zh-cn/docs/concepts/workloads/controllers/ttlafterfinished.md index 7887151b9aa82..afea6daeccaf6 100644 --- a/content/zh-cn/docs/concepts/workloads/controllers/ttlafterfinished.md +++ b/content/zh-cn/docs/concepts/workloads/controllers/ttlafterfinished.md @@ -2,11 +2,17 @@ title: 已完成 Job 的自动清理 content_type: concept weight: 70 +description: >- + 一种用于清理已完成执行的旧 Job 的 TTL 机制。 --- @@ -14,101 +20,123 @@ weight: 70 {{< feature-state for_k8s_version="v1.23" state="stable" >}} -TTL-after-finished {{}} 提供了一种 TTL 机制来限制已完成执行的资源对象的生命周期。 -TTL 控制器目前只处理 {{< glossary_tooltip text="Job" term_id="job" >}}。 +当你的 Job 已结束时,将 Job 保留在 API 中(而不是立即删除 Job)很有用, +这样你就可以判断 Job 是成功还是失败。 +Kubernetes TTL-after-finished {{}}提供了一种 +TTL 机制来限制已完成执行的 Job 对象的生命期。 -## TTL-after-finished 控制器 +## 清理已完成的 Job {#cleanup-for-finished-jobs} -TTL-after-finished 控制器只支持 Job。集群操作员可以通过指定 Job 的 
`.spec.ttlSecondsAfterFinished` -字段来自动清理已结束的作业(`Complete` 或 `Failed`),如 -[示例](/zh-cn/docs/concepts/workloads/controllers/job/#clean-up-finished-jobs-automatically) -所示。 +TTL-after-finished 控制器只支持 Job。你可以通过指定 Job 的 `.spec.ttlSecondsAfterFinished` +字段来自动清理已结束的 Job(`Complete` 或 `Failed`), +如[示例](/zh-cn/docs/concepts/workloads/controllers/job/#clean-up-finished-jobs-automatically)所示。 -TTL-after-finished 控制器假设作业能在执行完成后的 TTL 秒内被清理,也就是当 TTL 过期后。 -当 TTL 控制器清理作业时,它将做级联删除操作,即删除资源对象的同时也删除其依赖对象。 -注意,当资源被删除时,由该资源的生命周期保证其终结器(Finalizers)等被执行。 +TTL-after-finished 控制器假设 Job 能在执行完成后的 TTL 秒内被清理。一旦 Job +的状态条件发生变化表明该 Job 是 `Complete` 或 `Failed`,计时器就会启动;一旦 TTL 已过期,该 Job +就能被[级联删除](/zh-cn/docs/concepts/architecture/garbage-collection/#cascading-deletion)。 +当 TTL 控制器清理作业时,它将做级联删除操作,即删除 Job 的同时也删除其依赖对象。 -可以随时设置 TTL 秒。以下是设置 Job 的 `.spec.ttlSecondsAfterFinished` 字段的一些示例: +Kubernetes 尊重 Job 对象的生命周期保证,例如等待 +[Finalizer](/zh-cn/docs/concepts/overview/working-with-objects/finalizers/)。 + +你可以随时设置 TTL 秒。以下是设置 Job 的 `.spec.ttlSecondsAfterFinished` 字段的一些示例: +* 在 Job 清单(manifest)中指定此字段,以便 Job 在完成后的某个时间被自动清理。 +* 手动设置现有的、已完成的 Job 的此字段,以便这些 Job 可被清理。 +* 在创建 Job 时使用[修改性质的准入 Webhook](/zh-cn/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks) + 动态设置该字段。集群管理员可以使用它对已完成的作业强制执行 TTL 策略。 + -* 在作业清单(manifest)中指定此字段,以便 Job 在完成后的某个时间被自动清除。 -* 将此字段设置为现有的、已完成的作业,以采用此新功能。 -* 在创建作业时使用 [mutating admission webhook](/zh-cn/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks) - 动态设置该字段。集群管理员可以使用它对完成的作业强制执行 TTL 策略。 -* 使用 [mutating admission webhook](/zh-cn/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks) - 在作业完成后动态设置该字段,并根据作业状态、标签等选择不同的 TTL 值。 +* 使用[修改性质的准入 Webhook](/zh-cn/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks) + 在 Job 完成后动态设置该字段,并根据 Job 状态、标签等选择不同的 TTL 值。 + 对于这种情况,Webhook 需要检测 Job 的 `.status` 变化,并且仅在 Job 被标记为已完成时设置 TTL。 +* 编写你自己的控制器来管理与特定{{< glossary_tooltip term_id="selector" text="选择算符" >}}匹配的 + Job 的清理 TTL。 -## 警告 +## 警告 {#caveats} -### 更新 TTL 秒数 +### 更新已完成 Job 的 TTL {#updating-ttl-for-finished-jobs} -请注意,在创建 Job 或已经执行结束后,仍可以修改其 TTL 周期,例如 Job 的 +在创建 Job 或已经执行结束后,你仍可以修改其 TTL 周期,例如 Job 的 `.spec.ttlSecondsAfterFinished` 字段。 -但是一旦 Job 变为可被删除状态(当其 TTL 已过期时),即使你通过 API 增加其 TTL -时长得到了成功的响应,系统也不保证 Job 将被保留。 +如果你在当前 `ttlSecondsAfterFinished` 时长已过期后延长 TTL 周期, +即使延长 TTL 的更新得到了成功的 API 响应,Kubernetes 也不保证保留此 Job, ### 时间偏差 {#time-skew} -由于 TTL-after-finished 控制器使用存储在 Kubernetes 资源中的时间戳来确定 TTL 是否已过期, -因此该功能对集群中的时间偏差很敏感,这可能导致 TTL-after-finished 控制器在错误的时间清理资源对象。 +由于 TTL-after-finished 控制器使用存储在 Kubernetes Job 中的时间戳来确定 TTL 是否已过期, +因此该功能对集群中的时间偏差很敏感,这可能导致控制平面在错误的时间清理 Job 对象。 -* [自动清理 Job](/zh-cn/docs/concepts/workloads/controllers/job/#clean-up-finished-jobs-automatically) -* [设计文档](https://github.com/kubernetes/enhancements/blob/master/keps/sig-apps/592-ttl-after-finish/README.md) +* 阅读[自动清理 Job](/zh-cn/docs/concepts/workloads/controllers/job/#clean-up-finished-jobs-automatically) + +* 参阅 [Kubernetes 增强提案](https://github.com/kubernetes/enhancements/blob/master/keps/sig-apps/592-ttl-after-finish/README.md) + (KEP) 了解此机制的演进过程。 diff --git a/content/zh-cn/docs/concepts/workloads/pods/user-namespaces.md b/content/zh-cn/docs/concepts/workloads/pods/user-namespaces.md new file mode 100644 index 0000000000000..3cda1c0bf9ab6 --- /dev/null +++ b/content/zh-cn/docs/concepts/workloads/pods/user-namespaces.md @@ -0,0 +1,297 @@ +--- +title: 用户命名空间 +content_type: concept +weight: 160 
+min-kubernetes-server-version: v1.25 +--- + + + +{{< feature-state for_k8s_version="v1.25" state="alpha" >}} + +本页解释了在 Kubernetes pods 中如何使用用户命名空间。 +用户命名空间允许将容器内运行的用户与主机内的用户隔离开来。 + +在容器中以 root 身份运行的进程可以在主机中以不同的(非 root)用户身份运行; +换句话说,该进程在用户命名空间内的操作具有完全的权限, +但在命名空间外的操作是无特权的。 + + +你可以使用这个功能来减少被破坏的容器对主机或同一节点中的其他 Pod 的破坏。 +有[几个安全漏洞][KEP-vulns]被评为 **高** 或 **重要**, +当用户命名空间处于激活状态时,这些漏洞是无法被利用的。 +预计用户命名空间也会减轻一些未来的漏洞。 + +[KEP-vulns]: https://github.com/kubernetes/enhancements/tree/217d790720c5aef09b8bd4d6ca96284a0affe6c2/keps/sig-node/127-user-namespaces#motivation + + +## {{% heading "prerequisites" %}} + +{{% thirdparty-content single="true" %}} + + + + +这是一个只对 Linux 有效的功能特性。此外,需要在{{< glossary_tooltip text="容器运行时" term_id="container-runtime" >}}提供支持, +才能在 Kubernetes 无状态 Pod 中使用这一功能: + +* CRI-O:v1.25 版已经支持用户命名空间。 +* containerd:计划在 1.7 版本中支持。更多细节请参见 containerd 问题 [#7063][containerd-userns-issue]。 + +目前 [cri-dockerd 没有计划][CRI-dockerd-issue]支持此功能。 + +[CRI-dockerd-issue]: https://github.com/Mirantis/cri-dockerd/issues/74 +[containerd-userns-issue]: https://github.com/containerd/containerd/issues/7063 + + +## 介绍 {#introduction} + + +用户命名空间是一个 Linux 功能,允许将容器中的用户映射到主机中的不同用户。 +此外,在某用户命名空间中授予 Pod 的权能只在该命名空间中有效,在该命名空间之外无效。 + +一个 Pod 可以通过将 `pod.spec.hostUsers` 字段设置为 `false` 来选择使用用户命名空间。 + + +kubelet 将挑选 Pod 所映射的主机 UID/GID, +并将以保证同一节点上没有两个无状态 Pod 使用相同的映射的方式进行。 + +`pod.spec` 中的 `runAsUser`、`runAsGroup`、`fsGroup` 等字段总是指的是容器内的用户。 +启用该功能时,有效的 UID/GID 在 0-65535 范围内。这以限制适用于文件和进程(`runAsUser`、`runAsGroup` 等)。 + + +使用这个范围之外的 UID/GID 的文件将被视为属于溢出 ID, +通常是 65534(配置在 `/proc/sys/kernel/overflowuid和/proc/sys/kernel/overflowgid`)。 +然而,即使以 65534 用户/组的身份运行,也不可能修改这些文件。 + +大多数需要以 root 身份运行但不访问其他主机命名空间或资源的应用程序, +在用户命名空间被启用时,应该可以继续正常运行,不需要做任何改变。 + + +## 了解无状态 Pod 的用户命名空间 {#understanding-user-namespaces-for-stateless-pods} + + +一些容器运行时的默认配置(如 Docker Engine、containerd、CRI-O)使用 Linux 命名空间进行隔离。 +其他技术也存在,也可以与这些运行时(例如,Kata Containers 使用虚拟机而不是 Linux 命名空间)结合使用。 +本页适用于使用 Linux 命名空间进行隔离的容器运行时。 + +在创建 Pod 时,默认情况下会使用几个新的命名空间进行隔离: +一个网络命名空间来隔离容器网络,一个 PID 命名空间来隔离进程视图等等。 +如果使用了一个用户命名空间,这将把容器中的用户与节点中的用户隔离开来。 + + +这意味着容器可以以 root 身份运行,并将该身份映射到主机上的一个非 root 用户。 +在容器内,进程会认为它是以 root 身份运行的(因此像 `apt`、`yum` 等工具可以正常工作), +而实际上该进程在主机上没有权限。 +你可以验证这一点,例如,如果你从主机上执行 `ps aux` 来检查容器进程是以哪个用户运行的。 +`ps` 显示的用户与你在容器内执行 `id` 命令时看到的用户是不一样的。 + +这种抽象限制了可能发生的情况,例如,容器设法逃逸到主机上时的后果。 +鉴于容器是作为主机上的一个非特权用户运行的,它能对主机做的事情是有限的。 + + +此外,由于每个 Pod 上的用户将被映射到主机中不同的非重叠用户, +他们对其他 Pod 可以执行的操作也是有限的。 + +授予一个 Pod 的权能也被限制在 Pod 的用户命名空间内, +并且在这一命名空间之外大多无效,有些甚至完全无效。这里有两个例子: + + - `CAP_SYS_MODULE` 若被授予一个使用用户命名空间的 Pod 则没有任何效果,这个 Pod 不能加载内核模块。 + - `CAP_SYS_ADMIN` 只限于 Pod 所在的用户命名空间,在该命名空间之外无效。 + + +在不使用用户命名空间的情况下,以 root 账号运行的容器,在容器逃逸时,在节点上有 root 权限。 +而且如果某些权能被授予了某容器,这些权能在宿主机上也是有效的。 +当我们使用用户命名空间时,这些都不再成立。 + +如果你想知道关于使用用户命名空间时的更多变化细节,请参见 `man 7 user_namespaces`。 + + +## 设置一个节点以支持用户命名空间 {#set-up-a-node-to-support-user-namespaces} + + +建议主机的文件和主机的进程使用 0-65535 范围内的 UID/GID。 + +kubelet 会把高于这个范围的 UID/GID 分配给 Pod。 +因此,为了保证尽可能多的隔离,主机的文件和主机的进程所使用的 UID/GID 应该在 0-65535 范围内。 + +请注意,这个建议对减轻 [CVE-2021-25741][CVE-2021-25741] 等 CVE 的影响很重要; +在这些 CVE 中,Pod 有可能读取主机中的任意文件。 +如果 Pod 和主机的 UID/GID 不重叠,Pod 能够做的事情就会受到限制: +Pod的 UID/GID 不会与主机的文件所有者/组相匹配。 + +[CVE-2021-25741]: https://github.com/kubernetes/kubernetes/issues/104980 + + +## 限制 {#limitations} + +当 Pod 使用用户命名空间时,不允许 Pod 使用其他主机命名空间。 +特别是,如果你设置了 `hostUsers: false`,那么你就不可以设置如下属性: + + * `hostNetwork: true` + * `hostIPC: true` + * `hostPID: true` + + +Pod 完全不使用卷是被允许的;如果使用卷,只允许使用以下卷类型: + + * configmap + * secret + * projected + * downwardAPI + * 
emptyDir + + +为了保证 Pod 可以读取这些卷中的文件,卷的创建操作就像你为 Pod 指定了 `.spec.securityContext.fsGroup` 为 `0` 一样。 +如果该属性被设定为不同值,那么这个不同值当然也会被使用。 + +作为一个副产品,这些卷的文件夹和文件将具有所给组的权限, +即使 `defaultMode` 或 volumes 的特定项目的 `mode` 被指定为没有组的权限。 +例如,不可以在挂载这些卷时使其文件只允许所有者访问。 \ No newline at end of file diff --git a/content/zh-cn/docs/reference/access-authn-authz/authentication.md b/content/zh-cn/docs/reference/access-authn-authz/authentication.md index 12b16a784a85e..7767df5846265 100644 --- a/content/zh-cn/docs/reference/access-authn-authz/authentication.md +++ b/content/zh-cn/docs/reference/access-authn-authz/authentication.md @@ -275,9 +275,9 @@ Authorization: Bearer 781292.db7bc3a58fc5f07e 你必须在 API 服务器上设置 `--enable-bootstrap-token-auth` 标志来启用基于启动引导令牌的身份认证组件。 @@ -495,26 +495,26 @@ sequenceDiagram {{< /mermaid >}} -1. 登录到你的身份服务(Identity Provider) -2. 你的身份服务将为你提供 `access_token`、`id_token` 和 `refresh_token` -3. 在使用 `kubectl` 时,将 `id_token` 设置为 `--token` 标志值,或者将其直接添加到 - `kubeconfig` 中 -4. `kubectl` 将你的 `id_token` 放到一个称作 `Authorization` 的头部,发送给 API 服务器 -5. API 服务器将负责通过检查配置中引用的证书来确认 JWT 的签名是合法的 -6. 检查确认 `id_token` 尚未过期 -7. 确认用户有权限执行操作 -8. 鉴权成功之后,API 服务器向 `kubectl` 返回响应 -9. `kubectl` 向用户提供反馈信息 +1. Login to your identity provider +2. Your identity provider will provide you with an `access_token`, `id_token` and a `refresh_token` +3. When using `kubectl`, use your `id_token` with the `--token` flag or add it directly to your `kubeconfig` +4. `kubectl` sends your `id_token` in a header called Authorization to the API server +5. The API server will make sure the JWT signature is valid by checking against the certificate named in the configuration +6. Check to make sure the `id_token` hasn't expired +7. Make sure the user is authorized +8. Once authorized the API server returns a response to `kubectl` +9. `kubectl` provides feedback to the user +--> +1. 登录到你的身份服务(Identity Provider) +2. 你的身份服务将为你提供 `access_token`、`id_token` 和 `refresh_token` +3. 在使用 `kubectl` 时,将 `id_token` 设置为 `--token` 标志值,或者将其直接添加到 + `kubeconfig` 中 +4. `kubectl` 将你的 `id_token` 放到一个称作 `Authorization` 的头部,发送给 API 服务器 +5. API 服务器将负责通过检查配置中引用的证书来确认 JWT 的签名是合法的 +6. 检查确认 `id_token` 尚未过期 +7. 确认用户有权限执行操作 +8. 鉴权成功之后,API 服务器向 `kubectl` 返回响应 +9. 
`kubectl` 向用户提供反馈信息 ### Webhook 令牌身份认证 {#webhook-token-authentication} @@ -744,6 +746,9 @@ Webhook 身份认证是一种用来验证持有者令牌的回调机制。 其中描述如何访问远程的 Webhook 服务。 * `--authentication-token-webhook-cache-ttl` 用来设定身份认证决定的缓存时间。 默认时长为 2 分钟。 +* `--authentication-token-webhook-version` 决定是使用 `authentication.k8s.io/v1beta1` 还是 + `authenticationk8s.io/v1` 版本的 `TokenReview` 对象从 webhook 发送/接收信息。 + 默认为“v1beta1”。 +## 为客户端提供的对身份验证信息的 API 访问 {#self-subject-review} + +{{< feature-state for_k8s_version="v1.26" state="alpha" >}} + + +如果集群启用了此 API,你可以使用 `SelfSubjectReview` API 来了解 Kubernetes +集群如何映射你的身份验证信息从而将你识别为某客户端。无论你是作为用户(通常代表一个真的人)还是作为 +ServiceAccount 进行身份验证,这一 API 都可以使用。 + +`SelfSubjectReview` 对象没有任何可配置的字段。 +Kubernetes API 服务器收到请求后,将使用用户属性填充 status 字段并将其返回给用户。 + +请求示例(主体将是 `SelfSubjectReview`): + +``` +POST /apis/authentication.k8s.io/v1alpha1/selfsubjectreviews +``` + +```json +{ + "apiVersion": "authentication.k8s.io/v1alpha1", + "kind": "SelfSubjectReview" +} +``` + + +响应示例: + +```json +{ + "apiVersion": "authentication.k8s.io/v1alpha1", + "kind": "SelfSubjectReview", + "status": { + "userInfo": { + "name": "jane.doe", + "uid": "b6c7cfd4-f166-11ec-8ea0-0242ac120002", + "groups": [ + "viewers", + "editors", + "system:authenticated" + ], + "extra": { + "provider_id": ["token.company.example"] + } + } + } +} +``` + + +为了方便,Kubernetes 提供了 `kubectl alpha auth whoami` 命令。 +执行此命令将产生以下输出(但将显示不同的用户属性): + +* 简单的输出示例 + + ``` + ATTRIBUTE VALUE + Username jane.doe + Groups [system:authenticated] + ``` + + +* 包括额外属性的复杂示例 + + ``` + ATTRIBUTE VALUE + Username jane.doe + UID b79dbf30-0c6a-11ed-861d-0242ac120002 + Groups [students teachers system:authenticated] + Extra: skills [reading learning] + Extra: subjects [math sports] + ``` + + +通过提供 output 标志,也可以打印结果的 JSON 或 YAML 表现形式: + +{{< tabs name="self_subject_attributes_review_Example_1" >}} +{{% tab name="JSON" %}} +```json +{ + "apiVersion": "authentication.k8s.io/v1alpha1", + "kind": "SelfSubjectReview", + "status": { + "userInfo": { + "username": "jane.doe", + "uid": "b79dbf30-0c6a-11ed-861d-0242ac120002", + "groups": [ + "students", + "teachers", + "system:authenticated" + ], + "extra": { + "skills": [ + "reading", + "learning" + ], + "subjects": [ + "math", + "sports" + ] + } + } + } +} +``` +{{% /tab %}} + +{{% tab name="YAML" %}} +```yaml +apiVersion: authentication.k8s.io/v1alpha1 +kind: SelfSubjectReview +status: + userInfo: + username: jane.doe + uid: b79dbf30-0c6a-11ed-861d-0242ac120002 + groups: + - students + - teachers + - system:authenticated + extra: + skills: + - reading + - learning + subjects: + - math + - sports +``` +{{% /tab %}} +{{< /tabs >}} + + +在 Kubernetes 集群中使用复杂的身份验证流程时,例如如果你使用 +[Webhook 令牌身份验证](/zh-cn/docs/reference/access-authn-authz/authentication/#webhook-token-authentication)或[身份验证代理](/zh-cn/docs/reference/access-authn-authz/authentication/#authenticating-proxy)时, +此特性极其有用。 + +{{< note >}} + +Kubernetes API 服务器在所有身份验证机制 +(包括[伪装](/zh-cn/docs/reference/access-authn-authz/authentication/#user-impersonation)), +被应用后填充 `userInfo`, +如果你或某个身份验证代理使用伪装进行 SelfSubjectReview,你会看到被伪装用户的用户详情和属性。 +{{< /note >}} + + +默认情况下,所有经过身份验证的用户都可以在 `APISelfSubjectReview` 特性被启用时创建 `SelfSubjectReview` 对象。 +这是 `system:basic-user` 集群角色允许的操作。 + +{{< note >}} + +你只能在以下情况下进行 `SelfSubjectReview` 请求: + +* 集群启用了 `APISelfSubjectReview` + [特性门控](/zh-cn/docs/reference/command-line-tools-reference/feature-gates/) +* 集群的 API 服务器已启用 `authentication.k8s.io/v1alpha1` + {{< glossary_tooltip term_id="api-group" text="API 组" >}}。。 +{{< /note >}} + ## {{% heading "whatsnext" %}} 
该清单片段定义了由三个数据源组成的投射卷。在当前场景中,每个数据源也代表该卷内的一条独立路径。这三个数据源是: @@ -315,7 +315,7 @@ it does the following when a Pod is created: `/var/run/secrets/kubernetes.io/serviceaccount`. For Linux containers, that volume is mounted at `/var/run/secrets/kubernetes.io/serviceaccount`; on Windows nodes, the mount is at the equivalent path. -1. If the spec of the incoming Pod does already contain any `imagePullSecrets`, then the +1. If the spec of the incoming Pod doesn't already contain any `imagePullSecrets`, then the admission controller adds `imagePullSecrets`, copying them from the `ServiceAccount`. --> 3. 如果服务账号的 `automountServiceAccountToken` 字段或 Pod 的 @@ -326,7 +326,7 @@ it does the following when a Pod is created: 忽略已为 `/var/run/secrets/kubernetes.io/serviceaccount` 路径定义的卷挂载的所有容器。 对于 Linux 容器,此卷挂载在 `/var/run/secrets/kubernetes.io/serviceaccount`; 在 Windows 节点上,此卷挂载在等价的路径上。 -4. 如果新来 Pod 的规约已包含任何 `imagePullSecrets`,则准入控制器添加 `imagePullSecrets`, +4. 如果新来 Pod 的规约不包含任何 `imagePullSecrets`,则准入控制器添加 `imagePullSecrets`, 并从 `ServiceAccount` 进行复制。 ### TokenRequest API @@ -392,14 +392,14 @@ kubelet 确保该卷包含允许容器作为正确 ServiceAccount 进行身份 该清单片段定义了由三个数据源信息组成的投射卷。 @@ -536,7 +536,7 @@ metadata: selfLink: /api/v1/namespaces/examplens/serviceaccounts/example-automated-thing uid: f23fd170-66f2-4697-b049-e1e266b7f835 secrets: -- name: example-automated-thing-token-zyxwv + - name: example-automated-thing-token-zyxwv ``` - `CSIVolumeHealth`:启用对节点上的 CSI volume 运行状况监控的支持。 +- `ComponentSLIs`: 在 kubelet、kube-scheduler、kube-proxy、kube-controller-manager、cloud-controller-manager + 等 Kubernetes 组件上启用 `/metrics/slis` 端点,从而允许你抓取健康检查指标。 - `ConsistentHTTPGetHandlers`:使用探测器为生命周期处理程序规范化 HTTP get URL 和标头传递。 - `ContextualLogging`:当你启用这个特性门控,支持日志上下文记录的 Kubernetes 组件会为日志输出添加额外的详细内容。 @@ -927,8 +933,7 @@ Each feature gate is designed for enabling/disabling a specific feature: 完成情况,而不是永远从集群剩余 Pod 来获取信息判断完成情况。Job 控制器使用 Pod finalizers 和 Job 状态中的一个字段来跟踪已完成的 Pod 以计算完成。 -- `SELinuxMountReadWriteOncePod`:通过使用正确的 SELinux - 标签挂载卷而不是以递归方式更改这些卷上的每个文件来加速容器启动。最初的实现侧重 ReadWriteOncePod 卷。 +- `SELinuxMountReadWriteOncePod`:通过允许 kubelet 直接用正确的 SELinux + 标签为 Pod 挂载卷而不是以递归方式更改这些卷上的每个文件来加速容器启动。最初的实现侧重 ReadWriteOncePod 卷。 - `SeccompDefault`: 允许将所有工作负载的默认 seccomp 配置文件为 `RuntimeDefault`。 seccomp 配置在 Pod 或者容器的 `securityContext` 字段中指定。 -- `SELinuxMountReadWriteOncePod`:允许 kubelet 直接用合适的 SELinux 标签为 Pod 挂载卷, - 而不是将 SELinux 标签以递归方式应用到卷上的每个文件。 +

TopologyManagerPolicyOptions 是一组 key=value 键值映射,容许设置额外的选项来微调拓扑管理器策略的行为。需要同时启用 "TopologyManager" 和 "TopologyManagerPolicyOptions" 特性门控。 +默认值:nil

+ + + qosReserved
map[string]string @@ -994,13 +1009,13 @@ Default: true

cpuCFSQuotaPeriod设置 CPU CFS 配额周期值,cpu.cfs_period_us。 -此值需要介于 1 微秒和 1 秒之间,包含 1 微秒和 1 秒。 -此功能要求启用CustomCPUCFSQuotaPeriod特性门控被启用。

+此值需要介于 1 毫秒和 1 秒之间,包含 1 毫秒和 1 秒。 +此功能要求启用 CustomCPUCFSQuotaPeriod 特性门控。

默认值:"100ms"

@@ -1794,19 +1809,19 @@ Default: false when setting the cgroupv2 memory.high value to enforce MemoryQoS. Decreasing this factor will set lower high limit for container cgroups and put heavier reclaim pressure while increasing will put less reclaim pressure. -See http://kep.k8s.io/2570 for more details. +See https://kep.k8s.io/2570 for more details. Default: 0.8 -->

当设置 cgroupv2 memory.high以实施MemoryQoS特性时, memoryThrottlingFactor用来作为内存限制或节点可分配内存的系数。

减小此系数会为容器控制组设置较低的 high 限制值,从而增大回收压力;反之, -增大此系数会降低回收压力。更多细节参见 http://kep.k8s.io/2570。

+增大此系数会降低回收压力。更多细节参见 https://kep.k8s.io/2570。

默认值:0.8

registerWithTaints
-[]core/v1.Taint +[]core/v1.Taint +See https://kep.k8s.io/2832 for more details. -->

tracing 为 OpenTelemetry 追踪客户端设置版本化的配置信息。 -参阅 http://kep.k8s.io/2832 了解更多细节。

+参阅 https://kep.k8s.io/2832 了解更多细节。

localStorageCapacityIsolation
@@ -1885,7 +1900,7 @@ SerializedNodeConfigSource 允许对 `v1.NodeConfigSource` 执行序列化操作 kind
stringSerializedNodeConfigSource source
-core/v1.NodeConfigSource +core/v1.NodeConfigSource diff --git a/content/zh-cn/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md b/content/zh-cn/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md index 62f936f2644b3..9ddbc12cc083a 100644 --- a/content/zh-cn/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md +++ b/content/zh-cn/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md @@ -24,6 +24,7 @@ weight: 5 `import "k8s.io/api/core/v1"` ## PersistentVolume {#PersistentVolume} + @@ -437,7 +438,7 @@ PersistentVolumeSpec 是持久卷的规约。 cephfs 表示在主机上挂载的 Ceph FS,该文件系统挂载与 Pod 的生命周期相同。 - + **表示在 Pod 的生命周期内持续的 Ceph Filesystem 挂载。cephfs 卷不支持所有权管理或 SELinux 重新打标签。** - -- **glusterfs** (GlusterfsPersistentVolumeSource) - - glusterfs 表示挂接到主机并暴露给 Pod 的 Glusterfs 卷。由管理员进行制备。更多信息: - https://examples.k8s.io/volumes/glusterfs/README.md - - - **表示与 Pod 生命周期相同的 Glusterfs 挂载。Glusterfs 卷不支持所有权管理或 SELinux 重新打标签。** - - - - - **glusterfs.endpoints** (string),必需 - - endpoints 是详细说明 Glusterfs 拓扑的端点名称。更多信息: - https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - - - **glusterfs.path** (string),必需 - - path 是 Glusterfs 卷路径。更多信息: - https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - - - **glusterfs.endpointsNamespace** (string) - - endpointsNamespace 是包含 Glusterfs 端点的名字空间。 - 如果此字段为空,则 EndpointNamespace 默认为与绑定的 PVC 相同的名字空间。 - 更多信息: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - - - **glusterfs.readOnly** (boolean) - - 此处 readOnly 将强制使用只读权限挂载 Glusterfs 卷。默认为 false。更多信息: - https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - + unhealthyPodEvictionPolicy 定义不健康的 Pod 应被考虑驱逐时的标准。 + 当前的实现将健康的 Pod 视为具有 status.conditions 项且 type="Ready"、status="True" 的 Pod。 + + 有效的策略是 IfHealthyBudget 和 AlwaysAllow。 + 如果没有策略被指定,则使用与 IfHealthyBudget 策略对应的默认行为。 + + + IfHealthyBudget 策略意味着正在运行(status.phase="Running")但还不健康的 Pod + 只有在被守护的应用未受干扰(status.currentHealthy 至少等于 status.desiredHealthy) + 时才能被驱逐。健康的 Pod 将受到 PDB 的驱逐。 + + AlwaysAllow 策略意味着无论是否满足 PDB 中的条件,所有正在运行(status.phase="Running")但还不健康的 + Pod 都被视为受干扰且可以被驱逐。这意味着受干扰应用的透视运行 Pod 可能没有机会变得健康。 + 健康的 Pod 将受到 PDB 的驱逐。 + + + 将来可能会添加其他策略。如果客户端在该字段遇到未识别的策略,则做出驱逐决定的客户端应禁止驱逐不健康的 Pod。 + + 该字段是 Alpha 级别的。当特性门控 PDBUnhealthyPodEvictionPolicy 被启用(默认禁用)时,驱逐 API 使用此字段。 + ## PodDisruptionBudgetStatus {#PodDisruptionBudgetStatus} +--> - - +--> Kubernetes 集群中的每个{{< glossary_tooltip term_id="node" text="节点" >}}会运行一个 [kube-proxy](/zh-cn/docs/reference/command-line-tools-reference/kube-proxy/) (除非你已经部署了自己的替换组件来替代 `kube-proxy`)。 - - +--> `kube-proxy` 组件负责除 `type` 为 [`ExternalName`](/zh-cn/docs/concepts/services-networking/service/#externalname) 以外的{{< glossary_tooltip term_id="service" text="服务">}},实现**虚拟 IP** 机制。 - - +--> 一个时不时出现的问题是,为什么 Kubernetes 依赖代理将入站流量转发到后端。 其他方案呢?例如,是否可以配置具有多个 A 值(或 IPv6 的 AAAA)的 DNS 记录, 使用轮询域名解析? - - +--> 使用代理转发方式实现 Service 的原因有以下几个: * DNS 的实现不遵守记录的 TTL 约定的历史由来已久,在记录过期后可能仍有结果缓存。 @@ -62,7 +58,7 @@ There are a few reasons for using proxying for Services: * 即使应用程序和库进行了适当的重新解析,TTL 取值较低或为零的 DNS 记录可能会给 DNS 带来很大的压力, 从而变得难以管理。 - +--> 在下文中,你可以了解到 kube-proxy 各种实现方式的工作原理。 总的来说,你应该注意到,在运行 `kube-proxy` 时, 可能会修改内核级别的规则(例如,可能会创建 iptables 规则), @@ -79,15 +75,14 @@ to use as-is. 
因此,运行 kube-proxy 这件事应该只由了解在计算机上使用低级别、特权网络代理服务会带来的后果的管理员执行。 尽管 `kube-proxy` 可执行文件支持 `cleanup` 功能,但这个功能并不是官方特性,因此只能根据具体情况使用。 - - +--> 本文中的一些细节会引用这样一个例子: 运行了 3 个 Pod 副本的无状态图像处理后端工作负载。 @@ -95,15 +90,14 @@ nor should they need to keep track of the set of backends themselves. 即使组成这一组后端程序的 Pod 实际上可能会发生变化, 前端客户端不应该也没必要知道,而且也不需要跟踪这一组后端的状态。 - - -## 代理模式{#proxy-modes} +--> +## 代理模式 {#proxy-modes} - - +--> 注意,kube-proxy 会根据不同配置以不同的模式启动。 - kube-proxy 的配置是通过 ConfigMap 完成的,kube-proxy 的 ConfigMap 实际上弃用了 kube-proxy 大部分标志的行为。 @@ -125,240 +118,270 @@ Note that the kube-proxy starts up in different modes, which are determined by i 例如,如果你的操作系统不允许你运行 iptables 命令,标准的 kube-proxy 内核实现将无法工作。 同样,如果你的操作系统不支持 `netsh`,它也无法在 Windows 用户空间模式下运行。 - -### 用户空间代理模式 {#proxy-mode-userspace} - -{{< feature-state for_k8s_version="v1.23" state="deprecated" >}} - - - -这种(遗留)模式使用 iptables 添加拦截规则,然后使用 kube-proxy 工具执行流量转发。 -kube-proxy 监视 Kubernetes 控制平面对 Service 和 EndpointSlice 对象的增加、修改和删除。 -对于每个 Service,kube-proxy 在本地节点上打开一个端口(随机选择)。 -任何对这个**代理端口**的连接都将代理到 Service 的一个后端 Pod(通过 EndpointSlices 报告)。 -kube-proxy 在决定使用哪个后端 Pod 时会考虑 Service 的 `sessionAffinity` 设置。 - - -用户空间代理添加 iptables 规则,这些规则捕获流向 Service 的 `clusterIP`(虚拟 IP)和 `port` 的流量。 -这些规则将这些流量重定向到代理后端 Pod 的代理端口。 - - -默认情况下,用户空间模式下的 kube-proxy 通过轮询算法选择后端。 - -{{< figure src="/images/docs/services-userspace-overview.svg" title="用户空间代理的 Service 概览" class="diagram-medium" >}} - - -#### 示例 {#packet-processing-userspace} - - -例如,考虑本文[前面](#example)描述的图像处理应用的例子。 -当创建后端 Service 时,Kubernetes 控制平面分配一个虚拟 IP 地址,例如 10.0.0.1。 -假设 Service 端口是 1234,那么集群中的所有 kube-proxy 实例都会观察到该 Service。 -当一个 kube-proxy 观察到新 Service 时,它会随机打开一个新端口, -建立从虚拟 IP 地址到这个新端口的 iptables 重定向,并开始在其上接受连接。 - - -当客户端连接到 Service 的虚拟 IP 地址时,iptables 规则会生效,将数据包重定向到代理自身的端口。 -“Service 代理” 选择一个后端,并开始代理客户端到后端的流量。 - - -这意味着 Service 所有者可以选择任何他们想要的端口而不会发生冲突。 -客户端可以连接到 IP 和端口,也不需要知道它们实际访问的是哪些 Pod。 - - -#### 扩缩容挑战 {#scaling-challenges-userspace} - - - -在中小型规模集群中使用用户空间代理的 VIP 是有效的,但是不能拓展到具有数千个 Service 的大型集群。 -[针对门户的初始设计提案](https://github.com/kubernetes/kubernetes/issues/1107) -中有更多的细节。 - - -使用用户空间代理会隐藏访问 Service 的数据包的源 IP 地址。 -这使得某些类型的网络过滤(防火墙)失效。 -iptables 代理模式不会隐藏集群内的源 IP 地址, -但仍会隐藏通过负载均衡器或节点端口进入的客户端数据包源 IP 地址。 - - - ### `iptables` 代理模式 {#proxy-mode-iptables} +--> +### `iptables` 代理模式 {#proxy-mode-iptables} - +--> 在这种模式下,kube-proxy 监视 Kubernetes 控制平面,获知对 Service 和 EndpointSlice 对象的添加和删除操作。 对于每个 Service,kube-proxy 会添加 iptables 规则,这些规则捕获流向 Service 的 `clusterIP` 和 `port` 的流量, 并将这些流量重定向到 Service 后端集合中的其中之一。 对于每个端点,它会添加指向一个特定后端 Pod 的 iptables 规则。 - +--> 默认情况下,iptables 模式下的 kube-proxy 会随机选择一个后端。 使用 iptables 处理流量的系统开销较低,因为流量由 Linux netfilter 处理, 无需在用户空间和内核空间之间切换。这种方案也更为可靠。 - - +--> 如果 kube-proxy 以 iptables 模式运行,并且它选择的第一个 Pod 没有响应, 那么连接会失败。这与用户空间模式不同: 在后者这种情况下,kube-proxy 会检测到与第一个 Pod 的连接失败, 并会自动用不同的后端 Pod 重试。 - - +--> 你可以使用 Pod [就绪探针](/zh-cn/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)来验证后端 Pod 是否健康。 这样可以避免 kube-proxy 将流量发送到已知失败的 Pod 上。 - -{{< figure src="/images/docs/services-iptables-overview.svg" title="iptables 代理的 Service 概览" class="diagram-medium" >}} + +{{< figure src="/images/docs/services-iptables-overview.svg" title="iptables 模式下 Service 的虚拟 IP 机制" class="diagram-medium" >}} - +--> #### 示例 {#packet-processing-iptables} - -仍以[前面](#example)描述的图像处理应用程序为例。 +--> +例如,考虑本页中[前面](#example)描述的图像处理应用程序。 当创建后端 Service 时,Kubernetes 控制平面会分配一个虚拟 IP 地址,例如 10.0.0.1。 对于这个例子而言,假设 Service 端口是 1234。 集群中的所有 kube-proxy 实例都会观察到新 Service 的创建。 - +--> 当节点上的 kube-proxy 观察到新 Service 时,它会添加一系列 iptables 规则, 这些规则从虚拟 IP 地址重定向到更多 iptables 规则,每个 Service 
都定义了这些规则。 每个 Service 规则链接到每个后端端点的更多规则, 并且每个端点规则将流量重定向(使用目标 NAT)到后端。 - +redirected to the backend without rewriting the client IP address. +--> 当客户端连接到 Service 的虚拟 IP 地址时,iptables 规则会生效。 -会选择一个后端(基于会话亲和性或随机选择),并将数据包重定向到后端。 -与用户空间代理不同,数据包不会被复制到用户空间, -不需要 kube-proxy 参与,虚拟 IP 地址就可以正常工作, -节点可以看到来自未更改的客户端 IP 地址的流量。 +会选择一个后端(基于会话亲和性或随机选择),并将数据包重定向到后端,无需重写客户端 IP 地址。 - +--> 当流量通过节点端口或负载均衡器进入时,也会执行相同的基本流程, 只是在这些情况下,客户端 IP 地址会被更改。 - +#### 优化 iptables 模式性能 {#optimizing-iptables-mode-performance} + +在大型集群(有数万个 Pod 和 Service)中,当 Service(或其 EndpointSlices)发生变化时 +iptables 模式的 kube-proxy 在更新内核中的规则时可能要用较长时间。 +你可以通过(`kube-proxy --config ` 指定的)kube-proxy +[配置文件](/zh-cn/docs/reference/config-api/kube-proxy-config.v1alpha1/)的 +[`iptables` 节](/zh-cn/docs/reference/config-api/kube-proxy-config.v1alpha1/#kubeproxy-config-k8s-io-v1alpha1-KubeProxyIPTablesConfiguration)中的选项来调整 +kube-proxy 的同步行为: + +```none +... +iptables: + minSyncPeriod: 1s + syncPeriod: 30s +... +``` + +##### `minSyncPeriod` + + +`minSyncPeriod` 参数设置尝试同步 iptables 规则与内核之间的最短时长。 +如果是 `0s`,那么每次有任一 Service 或 Endpoint 发生变更时,kube-proxy 都会立即同步这些规则。 +这种方式在较小的集群中可以工作得很好,但如果在很短的时间内很多东西发生变更时,它会导致大量冗余工作。 +例如,如果你有一个由 Deployment 支持的 Service,共有 100 个 Pod,你删除了这个 Deployment, +且设置了 `minSyncPeriod: 0s`,kube-proxy 最终会从 iptables 规则中逐个删除 Service 的 Endpoint, +总共更新 100 次。使用较大的 `minSyncPeriod` 值时,多个 Pod 删除事件将被聚合在一起, +因此 kube-proxy 最终可能会进行例如 5 次更新,每次移除 20 个端点, +这样在 CPU 利用率方面更有效率,能够更快地同步所有变更。 + + +`minSyncPeriod` 的值越大,可以聚合的工作越多, +但缺点是每个独立的变更可能最终要等待整个 `minSyncPeriod` 周期后才能被处理, +这意味着 iptables 规则要用更多时间才能与当前的 apiserver 状态同步。 + + +默认值 `1s` 对于中小型集群是一个很好的折衷方案。 +在大型集群中,可能需要将其设置为更大的值。 +(特别是,如果 kube-proxy 的 `sync_proxy_rules_duration_seconds` 指标表明平均时间远大于 1 秒, +那么提高 `minSyncPeriod` 可能会使更新更有效率。) + +##### `syncPeriod` + + +`syncPeriod` 参数控制与单次 Service 和 Endpoint 的变更没有直接关系的少数同步操作。 +特别是,它控制 kube-proxy 在外部组件已干涉 kube-proxy 的 iptables 规则时通知的速度。 +在大型集群中,kube-proxy 也仅在每隔 `syncPeriod` 时长执行某些清理操作,以避免不必要的工作。 + + +在大多数情况下,提高 `syncPeriod` 预计不会对性能产生太大影响, +但在过去,有时将其设置为非常大的值(例如 `1h`)很有用。 +现在不再推荐这种做法,因为它对功能的破坏可能会超过对性能的改进。 + + +##### 实验性的性能改进 {#minimize-iptables-restore} + +{{< feature-state for_k8s_version="v1.26" state="alpha" >}} + + +在 Kubernetes 1.26 中,社区对 iptables 代理模式进行了一些新的性能改进, +但默认未启用(并且可能还不应该在生产集群中启用)。要试用它们, +请使用 `--feature-gates=MinimizeIPTablesRestore=true,…` 为 kube-proxy 启用 `MinimizeIPTablesRestore` +[特性门控](/zh-cn/docs/reference/command-line-tools-reference/feature-gates/)。 + + +如果你启用该特性门控并且之前覆盖了 `minSyncPeriod`, +你应该尝试移除该覆盖并让 kube-proxy 使用默认值 (`1s`) 或至少使用比之前更小的值。 + + +如果你注意到 kube-proxy 的 `sync_proxy_rules_iptables_restore_failures_total` 或 +`sync_proxy_rules_iptables_partial_restore_failures_total` 指标在启用此特性后升高, +这可能表明你发现了该特性的错误,你应该提交错误报告。 + + +--> ### IPVS 代理模式 {#proxy-mode-ipvs} - +When accessing a Service, IPVS directs traffic to one of the backend Pods. 
+--> 在 `ipvs` 模式下,kube-proxy 监视 Kubernetes Service 和 EndpointSlice, 然后调用 `netlink` 接口创建 IPVS 规则, 并定期与 Kubernetes Service 和 EndpointSlice 同步 IPVS 规则。 该控制回路确保 IPVS 状态与期望的状态保持一致。 +访问 Service 时,IPVS 会将流量导向到某一个后端 Pod。 - - -访问 Service 时,IPVS 会将流量导向到某一个后端 Pod 。 - +--> IPVS 代理模式基于 netfilter 回调函数,类似于 iptables 模式, 但它使用哈希表作为底层数据结构,在内核空间中生效。 这意味着 IPVS 模式下的 kube-proxy 比 iptables 模式下的 kube-proxy 重定向流量的延迟更低,同步代理规则时性能也更好。 与其他代理模式相比,IPVS 模式还支持更高的网络流量吞吐量。 - - +--> IPVS 为将流量均衡到后端 Pod 提供了更多选择: * `rr`:轮询 @@ -397,209 +416,225 @@ IPVS 为将流量均衡到后端 Pod 提供了更多选择: * `sed`:最短预期延迟 * `nq`:最少队列 - {{< note >}} - - +--> 要在 IPVS 模式下运行 kube-proxy,必须在启动 kube-proxy 之前确保节点上的 IPVS 可用。 当 kube-proxy 以 IPVS 代理模式启动时,它会验证 IPVS 内核模块是否可用。 如果未检测到 IPVS 内核模块,则 kube-proxy 会退回到 iptables 代理模式运行。 {{< /note >}} - - -{{< figure src="/images/docs/services-ipvs-overview.svg" title="IPVS 代理的 Service 概览" class="diagram-medium" >}} + +{{< figure src="/images/docs/services-ipvs-overview.svg" title="IPVS 模式下 Service 的虚拟 IP 地址机制" class="diagram-medium" >}} - +--> ## 会话亲和性 {#session-affinity} - +--> 在这些代理模型中,绑定到 Service IP:Port 的流量被代理到合适的后端, 客户端不需要知道任何关于 Kubernetes、Service 或 Pod 的信息。 - +--> 如果要确保来自特定客户端的连接每次都传递给同一个 Pod, 你可以通过设置 Service 的 `.spec.sessionAffinity` 为 `ClientIP` 来设置基于客户端 IP 地址的会话亲和性(默认为 `None`)。 - +--> ### 会话粘性超时 {#session-stickiness-timeout} - +--> 你还可以通过设置 Service 的 `.spec.sessionAffinityConfig.clientIP.timeoutSeconds` 来设置最大会话粘性时间(默认值为 10800,即 3 小时)。 {{< note >}} - +--> 在 Windows 上不支持为 Service 设置最大会话粘性时间。 {{< /note >}} - +--> ## 将 IP 地址分配给 Service {#ip-address-assignment-to-services} - +--> 与实际路由到固定目标的 Pod IP 地址不同,Service IP 实际上不是由单个主机回答的。 相反,kube-proxy 使用数据包处理逻辑(例如 Linux 的 iptables) 来定义**虚拟** IP 地址,这些地址会按需被透明重定向。 - +--> 当客户端连接到 VIP 时,其流量会自动传输到适当的端点。 实际上,Service 的环境变量和 DNS 是根据 Service 的虚拟 IP 地址(和端口)填充的。 - +--> ### 避免冲突 {#avoiding-collisions} - +--> Kubernetes 的主要哲学之一是, 你不应需要在完全不是你的问题的情况下面对可能导致你的操作失败的情形。 对于 Service 资源的设计,也就是如果你选择的端口号可能与其他人的选择冲突, 就不应该让你自己选择端口号。这是一种失败隔离。 - +--> 为了允许你为 Service 选择端口号,我们必须确保没有任何两个 Service 会发生冲突。 Kubernetes 通过从为 API 服务器配置的 `service-cluster-ip-range` CIDR 范围内为每个 Service 分配自己的 IP 地址来实现这一点。 - +--> 为了确保每个 Service 都获得唯一的 IP,内部分配器在创建每个 Service 之前更新 {{< glossary_tooltip term_id="etcd" >}} 中的全局分配映射,这种更新操作具有原子性。 映射对象必须存在于数据库中,这样 Service 才能获得 IP 地址分配, 否则创建将失败,并显示无法分配 IP 地址。 - +--> 在控制平面中,后台控制器负责创建该映射(从使用内存锁定的旧版本的 Kubernetes 迁移时需要这一映射)。 Kubernetes 还使用控制器来检查无效的分配(例如,因管理员干预而导致无效分配) 以及清理已分配但没有 Service 使用的 IP 地址。 - +--> #### Service 虚拟 IP 地址的地址段 {#service-ip-static-sub-range} {{< feature-state for_k8s_version="v1.25" state="beta" >}} - +--> Kubernetes 根据配置的 `service-cluster-ip-range` 的大小使用公式 - `min(max(16, cidrSize / 16), 256)` 将 `ClusterIP` 范围分为两段。 +`min(max(16, cidrSize / 16), 256)` 将 `ClusterIP` 范围分为两段。 该公式可以解释为:介于 16 和 256 之间,并在上下界之间存在渐进阶梯函数的分配。 - +--> Kubernetes 优先通过从高段中选择来为 Service 分配动态 IP 地址, 这意味着如果要将特定 IP 地址分配给 `type: ClusterIP` Service, 则应手动从**低**段中分配 IP 地址。 该方法降低了分配导致冲突的风险。 - +--> 如果你禁用 `ServiceIPStaticSubrange` [特性门控](/zh-cn/docs/reference/command-line-tools-reference/feature-gates/), 则 Kubernetes 用于手动分配和动态分配的 IP 共享单个地址池,这适用于 `type: ClusterIP` 的 Service。 - +--> ## 流量策略 {#traffic-policies} - +--> 你可以设置 `.spec.internalTrafficPolicy` 和 `.spec.externalTrafficPolicy` 字段来控制 Kubernetes 如何将流量路由到健康(“就绪”)的后端。 - +### 内部流量策略 {#internal-traffic-policy} + +{{< feature-state for_k8s_version="v1.22" state="beta" >}} + + +你可以设置 `.spec.internalTrafficPolicy` 字段来控制来自内部源的流量如何被路由。 +有效值为 `Cluster` 和 `Local`。 +将字段设置为 `Cluster` 会将内部流量路由到所有准备就绪的端点, +将字段设置为 `Local` 仅会将流量路由到本地节点准备就绪的端点。 +如果流量策略为 `Local` 但没有本地节点端点,那么 kube-proxy 会丢弃该流量。 + + +--> 
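+<!--
+As a minimal, illustrative sketch (the Service name, labels and ports below are
+assumptions for demonstration only, not taken from the source material), a Service
+that routes in-cluster traffic only to node-local endpoints could look like this:
+-->
+下面是一个最小的示意性示例(其中的 Service 名称、标签和端口均为演示用的假设值,并非来自原文),
+展示一个仅将集群内部流量路由到本地节点端点的 Service 可能的写法:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-internal-service   # 假设的名称,仅用于演示
+spec:
+  selector:
+    app: my-app               # 假设的标签,仅用于演示
+  ports:
+    - port: 80
+      targetPort: 8080
+  # 将来自集群内部的流量仅路由到与客户端位于同一节点上的就绪端点
+  internalTrafficPolicy: Local
+```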
### 外部流量策略 {#external-traffic-policy} - - +--> 你可以设置 `.spec.externalTrafficPolicy` 字段来控制从外部源路由的流量。 有效值为 `Cluster` 和 `Local`。 将字段设置为 `Cluster` 会将外部流量路由到所有准备就绪的端点, @@ -607,79 +642,70 @@ relevant Service. 如果流量策略为 `Local` 并且没有本地节点端点, 那么 kube-proxy 不会转发与相关 Service 相关的任何流量。 -{{< note >}} -{{< feature-state for_k8s_version="v1.22" state="alpha" >}} - +### 流向正终止的端点的流量 {#traffic-to-terminating-endpoints} + +{{< feature-state for_k8s_version="v1.26" state="beta" >}} + + +If there are local endpoints and **all** of them are terminating, then kube-proxy +will forward traffic to those terminating endpoints. Otherwise, kube-proxy will always +prefer forwarding traffic to endpoints that are not terminating. +--> 如果为 kube-proxy 启用了 `ProxyTerminatingEndpoints` -[特性门控](/zh-cn/docs/reference/command-line-tools-reference/feature-gates/), -kube-proxy 会检查节点是否具有本地端点以及是否所有本地端点都标记为终止。 -如果有本地端点并且**所有**本地端点都被标记为终止,则 kube-proxy 忽略所有取值为 `Local` 的外部流量策略。 -相反,当所有本地节点端点均处于终止中时, -kube-proxy 将该 Service 的流量转发到其他健康端点, -就好像外部流量策略设置为 `Cluster` 一样。 - - - -这种对处于终止中的端点的转发行为使得外部负载均衡器能优雅地排空由 -`NodePort` 服务支持的连接,即使在健康检查节点端口开始出现故障时也是如此。 -否则,在节点仍然在负载均衡器的节点池情况下,在 Pod 终止期间,流量可能会丢失。 -{{< /note >}} +[特性门控](/zh-cn/docs/reference/command-line-tools-reference/feature-gates/)且流量策略为 `Local`, +则节点的 kube-proxy 将使用更复杂的算法为 Service 选择端点。 +启用此特性时,kube-proxy 会检查节点是否具有本地端点以及是否所有本地端点都标记为正在终止过程中。 +如果有本地端点并且**所有**本地端点都被标记为处于终止过程中, +则 kube-proxy 会将转发流量到这些正在终止过程中的端点。 +否则,kube-proxy 会始终选择将流量转发到并未处于终止过程中的端点。 - - -### 内部流量策略 {#internal-traffic-policy} + +这种对处于终止过程中的端点的转发行为使得 `NodePort` 和 `LoadBalancer` Service +能有条不紊地腾空设置了 `externalTrafficPolicy: Local` 时的连接。 -{{< feature-state for_k8s_version="v1.22" state="beta" >}} +当一个 Deployment 被滚动更新时,处于负载均衡器后端的节点可能会将该 Deployment 的 N 个副本缩减到 +0 个副本。在某些情况下,外部负载均衡器可能在两次执行健康检查探针之间将流量发送到具有 0 个副本的节点。 +将流量路由到处于终止过程中的端点可确保正在缩减 Pod 的节点能够正常接收流量, +并逐渐降低指向那些处于终止过程中的 Pod 的流量。 +到 Pod 完成终止时,外部负载均衡器应该已经发现节点的健康检查失败并从后端池中完全移除该节点。 - -你可以设置 `.spec.internalTrafficPolicy` 字段来控制从内部源路由的流量。 -有效值为 `Cluster` 和 `Local`。 -将字段设置为 `Cluster` 会将内部流量路由到所有准备就绪的端点, -将字段设置为 `Local` 仅会将流量路由到本地节点准备就绪的端点。 -如果流量策略为 `Local` 但没有本地节点端点,那么 kube-proxy 会丢弃该流量。 - - ## {{% heading "whatsnext" %}} - +read [Connecting Applications with Services](/docs/tutorials/services/connect-applications-service/). +--> 要了解有关 Service 的更多信息, 请阅读[使用 Service 连接应用](/zh-cn/docs/tutorials/services/connect-applications-service/)。 - - 也可以: -* 阅读 [Service](/zh-cn/docs/concepts/services-networking/service/) + +* 阅读 [Service](/zh-cn/docs/concepts/services-networking/service/) 了解其概念 +* 阅读 [Ingress](/zh-cn/docs/concepts/services-networking/ingress/) 了解其概念 * 阅读 [API 参考](/zh-cn/docs/reference/kubernetes-api/service-resources/service-v1/)进一步了解 Service API diff --git a/content/zh-cn/docs/reference/using-api/server-side-apply.md b/content/zh-cn/docs/reference/using-api/server-side-apply.md index 288b49ce43e9a..8fe599fba3150 100644 --- a/content/zh-cn/docs/reference/using-api/server-side-apply.md +++ b/content/zh-cn/docs/reference/using-api/server-side-apply.md @@ -645,7 +645,25 @@ First, the user defines a new configuration containing only the `replicas` field 首先,用户新定义一个只包含 `replicas` 字段的配置文件: -{{< codenew file="application/ssa/nginx-deployment-replicas-only.yaml" >}} +```yaml +# 将此文件另存为 'nginx-deployment-replicas-only.yaml' +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + replicas: 3 +``` + +{{< note >}} + +此场景中针对 SSA 的 YAML 文件仅包含你要更改的字段。 +如果只想使用 SSA 来修改 `spec.replicas` 字段,你无需提供完全兼容的 Deployment 清单。 +{{< /note >}} - - 输出类似于: + 输出类似于: - ```sh - ... 
- You can now join any number of control-plane node by running the following command on each as a root: - kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07 + ```sh + ... + You can now join any number of control-plane node by running the following command on each as a root: + kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07 - Please note that the certificate-key gives access to cluster sensitive data, keep it secret! - As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use kubeadm init phase upload-certs to reload certs afterward. + Please note that the certificate-key gives access to cluster sensitive data, keep it secret! + As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use kubeadm init phase upload-certs to reload certs afterward. - Then you can join any number of worker nodes by running the following on each as root: + Then you can join any number of worker nodes by running the following on each as root: kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 - ``` + ``` 2. 应用你所选择的 CNI 插件: [请遵循以下指示](/zh-cn/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#pod-network) @@ -438,6 +438,7 @@ For each additional control plane node you should: - The `--certificate-key ...` will cause the control plane certificates to be downloaded from the `kubeadm-certs` Secret in the cluster and be decrypted using the given key. +You can join multiple control-plane nodes in parallel. --> 对于每个其他控制平面节点,你应该: @@ -452,6 +453,7 @@ For each additional control plane node you should: - `--certificate-key ...` 将导致从集群中的 `kubeadm-certs` Secret 下载控制平面证书并使用给定的密钥进行解密。 +你可以并行地加入多个控制面节点。 - 在你的集群中,将配置模板中的以下变量替换为适当值: diff --git a/content/zh-cn/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md b/content/zh-cn/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md index 36def82c35c15..37de6ac9add45 100644 --- a/content/zh-cn/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md +++ b/content/zh-cn/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md @@ -523,7 +523,7 @@ For example: ### Linux ```shell -export KUBECONFIG="${KUBECONFIG}:$HOME/.kube/config" +export KUBECONFIG="${KUBECONFIG}:${HOME}/.kube/config" ``` ### Windows Powershell diff --git a/content/zh-cn/docs/tasks/access-application-cluster/list-all-running-container-images.md b/content/zh-cn/docs/tasks/access-application-cluster/list-all-running-container-images.md index e55d110fce226..aa7208b4724bd 100644 --- a/content/zh-cn/docs/tasks/access-application-cluster/list-all-running-container-images.md +++ b/content/zh-cn/docs/tasks/access-application-cluster/list-all-running-container-images.md @@ -32,20 +32,20 @@ of Containers for each. 
在本练习中,你将使用 kubectl 来获取集群中运行的所有 Pod,并格式化输出来提取每个 Pod 中的容器列表。 -## 列出所有命名空间下的所有容器 +## 列出所有命名空间下的所有容器镜像 - 使用 `kubectl get pods --all-namespaces` 获取所有命名空间下的所有 Pod - 使用 `-o jsonpath={.items[*].spec.containers[*].image}` 来格式化输出,以仅包含容器镜像名称。 @@ -80,7 +80,7 @@ jsonpath 解释如下: - `.image`: 获取镜像 @@ -105,12 +105,12 @@ sort ``` -## 列出以标签过滤后的 Pod 的所有容器 +## 列出以标签过滤后的 Pod 的所有容器镜像 要获取匹配特定标签的 Pod,请使用 -l 参数。以下匹配仅与标签 `app=nginx` 相符的 Pod。 @@ -119,12 +119,12 @@ kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].ima ``` -## 列出以命名空间过滤后的 Pod 的所有容器 +## 列出以命名空间过滤后的 Pod 的所有容器镜像 要获取匹配特定命名空间的 Pod,请使用 namespace 参数。以下仅匹配 `kube-system` 命名空间下的 Pod。 @@ -133,12 +133,12 @@ kubectl get pods --namespace kube-system -o jsonpath="{.items[*].spec.containers ``` -## 使用 go-template 代替 jsonpath 来获取容器 +## 使用 go-template 代替 jsonpath 来获取容器镜像 作为 jsonpath 的替代,Kubectl 支持使用 [go-templates](https://golang.org/pkg/text/template/) 来格式化输出: diff --git a/content/zh-cn/docs/tasks/access-application-cluster/web-ui-dashboard.md b/content/zh-cn/docs/tasks/access-application-cluster/web-ui-dashboard.md index b21a652db20f9..47375267f4627 100644 --- a/content/zh-cn/docs/tasks/access-application-cluster/web-ui-dashboard.md +++ b/content/zh-cn/docs/tasks/access-application-cluster/web-ui-dashboard.md @@ -6,6 +6,7 @@ card: name: tasks weight: 30 title: 使用 Web 界面 Dashboard + description: 部署并访问 Web 界面(Kubernetes 仪表板)。 --- ## 访问 Dashboard 用户界面 为了保护你的集群数据,默认情况下,Dashboard 会使用最少的 RBAC 配置进行部署。 当前,Dashboard 仅支持使用 Bearer 令牌登录。 要为此样本演示创建令牌,你可以按照 -[创建示例用户](https://github.com/kubernetes/dashboard/wiki/Creating-sample-user) +[创建示例用户](https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md) 上的指南进行操作。 - **CPU 需求(核数)** 和 **内存需求(MiB)**:你可以为容器定义最小的 [资源限制](/zh-cn/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/)。 默认情况下,Pod 没有 CPU 和内存限制。 - **运行命令**和**运行命令参数**:默认情况下,你的容器会运行 Docker 镜像的默认 [入口命令](/zh-cn/docs/tasks/inject-data-application/define-command-argument-container/)。 你可以使用 command 选项覆盖默认值。 - **以特权模式运行**:这个设置决定了在 [特权容器](/zh-cn/docs/concepts/workloads/pods/#privileged-mode-for-containers) @@ -344,7 +355,7 @@ If needed, you can expand the **Advanced options** section where you can specify Kubernetes supports declarative configuration. In this style, all configuration is stored in manifests (YAML or JSON configuration files). -The manifests use the Kubernetes [API](/docs/concepts/overview/kubernetes-api/) resource schemas. +The manifests use Kubernetes [API](/docs/concepts/overview/kubernetes-api/) resource schemas. 
--> ### 上传 YAML 或者 JSON 文件 @@ -354,7 +365,7 @@ Kubernetes 支持声明式配置。所有的配置都存储在清单文件 作为一种替代在部署向导中指定应用详情的方式,你可以在一个或多个清单文件中定义应用,并且使用 Dashboard 上传文件。 @@ -384,7 +395,7 @@ Dashboard shows most Kubernetes object kinds and groups them in a few menu categ Dashboard 展示大部分 Kubernetes 对象,并将它们分组放在几个菜单类别中。 工作负载的详情视图展示了对象的状态、详细信息和相互关系。 例如,ReplicaSet 所控制的 Pod,或者 Deployment 所关联的新 ReplicaSet 和 @@ -441,11 +452,11 @@ Storage view shows PersistentVolumeClaim resources which are used by application 存储视图展示持久卷申领(PVC)资源,这些资源被应用程序用来存储数据。 -#### ConfigMap 和 Secret +#### ConfigMap 和 Secret {#config-maps-and-secrets} 展示的所有 Kubernetes 资源是在集群中运行的应用程序的实时配置。 通过这个视图可以编辑和管理配置对象,并显示那些默认隐藏的 Secret。 @@ -453,7 +464,8 @@ Shows all Kubernetes resources that are used for live configuration of applicati #### 日志查看器 diff --git a/content/zh-cn/docs/tasks/administer-cluster/access-cluster-api.md b/content/zh-cn/docs/tasks/administer-cluster/access-cluster-api.md index 7813a5da9b95b..83e000a80b5e6 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/access-cluster-api.md +++ b/content/zh-cn/docs/tasks/administer-cluster/access-cluster-api.md @@ -1,10 +1,12 @@ --- title: 使用 Kubernetes API 访问集群 content_type: task +weight: 60 --- @@ -21,11 +23,11 @@ This page shows how to access clusters using the Kubernetes API. -## 访问集群 API +## 访问 Kubernetes API ### 使用 kubectl 进行首次访问 @@ -72,8 +74,8 @@ kubectl handles locating and authenticating to the API server. If you want to di kubectl 处理对 API 服务器的定位和身份验证。如果你想通过 http 客户端(如 `curl` 或 `wget`, 或浏览器)直接访问 REST API,你可以通过多种方式对 API 服务器进行定位和身份验证: - 1. 以代理模式运行 kubectl(推荐)。 @@ -84,7 +86,7 @@ kubectl 处理对 API 服务器的定位和身份验证。如果你想通过 htt 为防止中间人攻击,你需要将根证书导入浏览器。 使用 Go 或 Python 客户端库可以在代理模式下访问 kubectl。 @@ -98,7 +100,9 @@ locating the API server and authenticating. 下列命令使 kubectl 运行在反向代理模式下。它处理 API 服务器的定位和身份认证。 - + 像这样运行它: ```shell @@ -119,7 +123,9 @@ Then you can explore the API with curl, wget, or a browser, like so: curl http://localhost:8080/api/ ``` - + 输出类似如下: ```json @@ -184,7 +190,9 @@ TOKEN=$(kubectl get secret default-token -o jsonpath='{.data.token}' | base64 -- curl -X GET $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure ``` - + 输出类似如下: ```json @@ -239,7 +247,9 @@ Kubernetes 官方支持 [Go](#go-client)、[Python](#python-client)、[Java](#ja 参考[客户端库](/zh-cn/docs/reference/using-api/client-libraries/)了解如何使用其他语言来访问 API 以及如何执行身份认证。 - + #### Go 客户端 {#go-client} @@ -252,16 +262,16 @@ Kubernetes 官方支持 [Go](#go-client)、[Python](#python-client)、[Java](#ja 参见 [https://github.com/kubernetes/client-go/releases](https://github.com/kubernetes/client-go/releases) 查看受支持的版本。 * 基于 client-go 客户端编写应用程序。 +{{< note >}} -{{< note >}} -注意 client-go 定义了自己的 API 对象,因此如果需要,请从 client-go 而不是主仓库导入 +client-go 定义了自己的 API 对象,因此如果需要,从 client-go 而不是主仓库导入 API 定义,例如 `import "k8s.io/client-go/kubernetes"` 是正确做法。 {{< /note >}} Go 客户端可以使用与 kubectl 命令行工具相同的 @@ -273,11 +283,11 @@ Go 客户端可以使用与 kubectl 命令行工具相同的 package main import ( - "context" - "fmt" - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" + "context" + "fmt" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" ) func main() { @@ -298,7 +308,9 @@ If the application is deployed as a Pod in the cluster, see [Accessing the API f 如果该应用程序部署为集群中的一个 Pod,请参阅[从 Pod 内访问 API](/zh-cn/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod)。 - + #### Python 客户端 {#python-client} Python 客户端可以使用与 kubectl 命令行工具相同的 @@ -329,7 +341,9 @@ for i in 
ret.items: print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name)) ``` - + #### Java 客户端 {#java-client} - 你必须有一个集群。 -本页内容涉及从 Kubernetes {{< skew currentVersionAddMinor -1 >}} +本页内容涉及从 Kubernetes {{< skew currentVersionAddMinor -1 >}} 升级到 Kubernetes {{< skew currentVersion >}}。 如果你的集群未运行 Kubernetes {{< skew currentVersionAddMinor -1 >}}, 那请参考目标 Kubernetes 版本的文档。 - + ## 升级方法 {#upgrade-approaches} ### kubeadm {#upgrade-kubeadm} - 如果你的集群是使用 `kubeadm` 安装工具部署而来, -那么升级集群的详细信息,请参阅 -[升级 kubeadm 集群](/zh-cn/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)。 +那么升级集群的详细信息,请参阅[升级 kubeadm 集群](/zh-cn/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)。 -升级集群之后,要记得 -[安装最新版本的 `kubectl`](/zh-cn/docs/tasks/tools/). +升级集群之后,要记得[安装最新版本的 `kubectl`](/zh-cn/docs/tasks/tools/)。 - + ### 手动部署 {#manual-deployments} - -{{< caution >}} -这些步骤不考虑第三方扩展,例如网络和存储插件。 +这些步骤不考虑网络和存储插件等第三方扩展。 {{< /caution >}} -你应该跟随下面操作顺序,手动更新控制平面: + +你应该按照下面的操作顺序,手动更新控制平面: + - etcd (所有实例) - kube-apiserver (所有控制平面的宿主机) - kube-controller-manager - kube-scheduler - cloud controller manager (在你用到时) - -现在,你应该 -[安装最新版本的 `kubectl`](/zh-cn/docs/tasks/tools/). +现在,你应该[安装最新版本的 `kubectl`](/zh-cn/docs/tasks/tools/)。 对于集群中的每个节点, -首先需要[腾空](/zh-cn/docs/tasks/administer-cluster/safely-drain-node/) -节点,然后使用一个运行了 kubelet {{< skew currentVersion >}} 版本的新节点替换它; +首先需要[腾空](/zh-cn/docs/tasks/administer-cluster/safely-drain-node/)节点, +然后使用一个运行了 kubelet {{< skew currentVersion >}} 版本的新节点替换它; 或者升级此节点的 kubelet,并使节点恢复服务。 - @@ -187,13 +187,14 @@ release with a newer device plugin API version, device plugins must be upgraded both version before the node is upgraded in order to guarantee that device allocations continue to complete successfully during the upgrade. -Refer to [API compatiblity](docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md/#api-compatibility) and [Kubelet Device Manager API Versions](docs/reference/node/device-plugin-api-versions.md) for more details. +Refer to [API compatibility](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#api-compatibility) and [Kubelet Device Manager API Versions](/docs/reference/node/device-plugin-api-versions/) for more details. 
--> ### 设备插件 {#device-plugins} -如果你的集群正在运行设备插件(Device Plugins)并且节点需要升级到具有更新的设备插件(Device Plugins) +如果你的集群正在运行设备插件(Device Plugin)并且节点需要升级到具有更新的设备插件(Device Plugin) API 版本的 Kubernetes 版本,则必须在升级节点之前升级设备插件以同时支持这两个插件 API 版本, 以确保升级过程中设备分配能够继续成功完成。 -有关详细信息,请参阅 [API 兼容性](/zh-cn/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md/#api-compatibility)和 -[kubelet 设备管理器 API 版本](/zh-cn/docs/reference/node/device-plugin-api-versions.md)。 \ No newline at end of file +有关详细信息,请参阅 +[API 兼容性](/zh-cn/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#api-compatibility)和 +[kubelet 设备管理器 API 版本](/zh-cn/docs/reference/node/device-plugin-api-versions/)。 diff --git a/content/zh-cn/docs/tasks/administer-cluster/controller-manager-leader-migration.md b/content/zh-cn/docs/tasks/administer-cluster/controller-manager-leader-migration.md index f4e54653b14e7..0c548ab635c06 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/controller-manager-leader-migration.md +++ b/content/zh-cn/docs/tasks/administer-cluster/controller-manager-leader-migration.md @@ -2,6 +2,7 @@ title: 迁移多副本的控制面以使用云控制器管理器 linkTitle: 迁移多副本的控制面以使用云控制器管理器 content_type: task +weight: 250 --- @@ -22,7 +24,7 @@ content_type: task ## 背景 -作为[云驱动提取工作](https://kubernetes.io/blog/2019/04/17/the-future-of-cloud-providers-in-kubernetes/) +作为[云驱动提取工作](/blog/2019/04/17/the-future-of-cloud-providers-in-kubernetes/) 的一部分,所有特定于云的控制器都必须移出 `kube-controller-manager`。 所有在 `kube-controller-manager` 中运行云控制器的现有集群必须迁移到特定于云厂商的 `cloud-controller-manager` 中运行这些控制器。 diff --git a/content/zh-cn/docs/tasks/administer-cluster/coredns.md b/content/zh-cn/docs/tasks/administer-cluster/coredns.md index fedc1585107e8..d1962837d4a9e 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/coredns.md +++ b/content/zh-cn/docs/tasks/administer-cluster/coredns.md @@ -2,6 +2,7 @@ title: 使用 CoreDNS 进行服务发现 min-kubernetes-server-version: v1.9 content_type: task +weight: 380 --- @@ -119,9 +121,9 @@ can take care of retaining the existing CoreDNS configuration automatically. ## CoreDNS 调优 diff --git a/content/zh-cn/docs/tasks/administer-cluster/cpu-management-policies.md b/content/zh-cn/docs/tasks/administer-cluster/cpu-management-policies.md index 36e1ffa74fe0d..710dccef30d6d 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/cpu-management-policies.md +++ b/content/zh-cn/docs/tasks/administer-cluster/cpu-management-policies.md @@ -2,6 +2,7 @@ title: 控制节点上的 CPU 管理策略 content_type: task min-kubernetes-server-version: v1.26 +weight: 140 --- diff --git a/content/zh-cn/docs/tasks/administer-cluster/declare-network-policy.md b/content/zh-cn/docs/tasks/administer-cluster/declare-network-policy.md index aea649bc16979..7da9eb139c41e 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/declare-network-policy.md +++ b/content/zh-cn/docs/tasks/administer-cluster/declare-network-policy.md @@ -2,6 +2,7 @@ title: 声明网络策略 min-kubernetes-server-version: v1.8 content_type: task +weight: 180 --- @@ -90,11 +92,11 @@ kubectl get svc,pod ``` ```none NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -svc/kubernetes 10.100.0.1 443/TCP 46m -svc/nginx 10.100.0.16 80/TCP 33s +service/kubernetes 10.100.0.1 443/TCP 46m +service/nginx 10.100.0.16 80/TCP 33s NAME READY STATUS RESTARTS AGE -po/nginx-701339712-e0qfq 1/1 Running 0 35s +pod/nginx-701339712-e0qfq 1/1 Running 0 35s ``` @@ -48,19 +50,19 @@ Kubernetes 核心代码导入软件包来实现一个 cloud-controller-manager ### 树外(Out of Tree) 要为你的云环境构建一个树外(Out-of-Tree)云控制器管理器: 1. 
使用满足 [`cloudprovider.Interface`](https://github.com/kubernetes/cloud-provider/blob/master/cloud.go) 接口的实现来创建一个 Go 语言包。 2. 使用来自 Kubernetes 核心代码库的 - [cloud-controller-manager 中的 main.go](https://github.com/kubernetes/kubernetes/blob/master/cmd/cloud-controller-manager/main.go) + [cloud-controller-manager 中的 `main.go`](https://github.com/kubernetes/kubernetes/blob/master/cmd/cloud-controller-manager/main.go) 作为 `main.go` 的模板。如上所述,唯一的区别应该是将导入的云包不同。 3. 在 `main.go` 中导入你的云包,确保你的包有一个 `init` 块来运行 [`cloudprovider.RegisterCloudProvider`](https://github.com/kubernetes/cloud-provider/blob/master/plugins.go)。 diff --git a/content/zh-cn/docs/tasks/administer-cluster/dns-custom-nameservers.md b/content/zh-cn/docs/tasks/administer-cluster/dns-custom-nameservers.md index 22930e0a24494..cc2cd873a1429 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/dns-custom-nameservers.md +++ b/content/zh-cn/docs/tasks/administer-cluster/dns-custom-nameservers.md @@ -2,6 +2,7 @@ title: 自定义 DNS 服务 content_type: task min-kubernetes-server-version: v1.12 +weight: 160 --- @@ -169,7 +171,7 @@ Corefile 配置包括以下 CoreDNS [插件](https://coredns.io/plugins/): @@ -84,7 +85,7 @@ dnsutils 1/1 Running 0 ``` 一旦 Pod 处于运行状态,你就可以在该环境里执行 `nslookup`。 @@ -204,7 +205,7 @@ The value for label `k8s-app` is `kube-dns` for both CoreDNS and kube-dns deploy {{< /note >}} @@ -212,9 +213,9 @@ will have to deploy it manually. 那可能这个 DNS 插件在你当前的环境里并没有成功部署,你将需要手动去部署它。 ### 检查 DNS Pod 里的错误 {#check-for-errors-in-the-dns-pod} @@ -308,8 +309,8 @@ kube-dns 10.180.3.17:53,10.180.3.17:53 1h ``` 然后按下面的例子给 Corefile 添加 `log`。 @@ -377,7 +378,7 @@ CoreDNS 的 Pod 里。 接下来,发起一些查询并依照前文所述查看日志信息,如果 CoreDNS 的 Pod 接收到这些查询, 你将可以在日志信息里看到它们。 @@ -504,9 +505,9 @@ To learn more about name resolution, see @@ -542,9 +543,9 @@ Linux 的 libc(又名 glibc)默认将 DNS `nameserver` 记录限制为 3, 如果你使用 Alpine 3.3 或更早版本作为你的基础镜像,DNS 可能会由于 Alpine 中 一个已知的问题导致无法正常工作。 diff --git a/content/zh-cn/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md b/content/zh-cn/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md index d2f2b64bdf983..cff450b59e0e8 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md +++ b/content/zh-cn/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md @@ -1,18 +1,20 @@ --- title: 自动扩缩集群 DNS 服务 content_type: task +weight: 80 --- -本页展示了如何在集群中启用和配置 DNS 服务的自动扩缩功能。 +本页展示了如何在你的 Kubernetes 集群中启用和配置 DNS 服务的自动扩缩功能。 ## {{% heading "prerequisites" %}} @@ -21,78 +23,66 @@ Kubernetes cluster. * 本指南假设你的节点使用 AMD64 或 Intel 64 CPU 架构 -* 确保已启用 [DNS 功能](/zh-cn/docs/concepts/services-networking/dns-pod-service/)本身。 +* 确保 [Kubernetes DNS](/zh-cn/docs/concepts/services-networking/dns-pod-service/) 已启用。 -* 建议使用 Kubernetes 1.4.0 或更高版本。 ## 确定是否 DNS 水平自动扩缩特性已经启用 {#determining-whether-dns-horizontal-autoscaling-is-already-enabled} -在 kube-system 命名空间中列出集群中的 {{< glossary_tooltip text="Deployments" term_id="deployment" >}} : +在 kube-system {{< glossary_tooltip text="命名空间" term_id="namespace" >}}中列出集群中的 +{{< glossary_tooltip text="Deployment" term_id="deployment" >}}: ```shell kubectl get deployment --namespace=kube-system ``` + 输出类似如下这样: ``` -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +NAME READY UP-TO-DATE AVAILABLE AGE ... -dns-autoscaler 1 1 1 1 ... +dns-autoscaler 1/1 1 1 ... ... 
``` -如果在输出中看到 “dns-autoscaler”,说明 DNS 水平自动扩缩已经启用,可以跳到 -[调优自动扩缩参数](#tuning-autoscaling-parameters)。 - +如果在输出中看到 “dns-autoscaler”,说明 DNS 水平自动扩缩已经启用, +可以跳到[调优 DNS 自动扩缩参数](#tuning-autoscaling-parameters)。 -```shell -kubectl get deployment --namespace=kube-system -``` + ## 获取 DNS Deployment 的名称 {#find-scaling-target} -列出集群内 kube-system 名字空间中的 DNS Deployment: +列出集群内 kube-system 命名空间中的 DNS Deployment: ```shell kubectl get deployment -l k8s-app=kube-dns --namespace=kube-system ``` + 输出类似如下这样: ``` @@ -117,7 +107,7 @@ and look for a deployment named `coredns` or `kube-dns`. 并在输出中寻找名称为 `coredns` 或 `kube-dns` 的 Deployment。 你的扩缩目标为: @@ -127,7 +117,7 @@ Deployment/ 其中 `` 是 DNS Deployment 的名称。 例如,如果你的 DNS Deployment 名称是 `coredns`,则你的扩展目标是 Deployment/coredns。 @@ -143,16 +133,16 @@ CoreDNS 是 Kubernetes 的默认 DNS 服务。CoreDNS 设置标签 `k8s-app=kube {{< /note >}} ## 启用 DNS 水平自动扩缩 {#enablng-dns-horizontal-autoscaling} -在本节,我们创建一个 Deployment。Deployment 中的 Pod 运行一个基于 +在本节,我们创建一个新的 Deployment。Deployment 中的 Pod 运行一个基于 `cluster-proportional-autoscaler-amd64` 镜像的容器。 创建文件 `dns-horizontal-autoscaler.yaml`,内容如下所示: @@ -188,11 +178,11 @@ DNS horizontal autoscaling is now enabled. DNS 水平自动扩缩在已经启用了。 -## 调优自动扩缩参数 {#tuning-autoscaling-parameters} +## 调优 DNS 自动扩缩参数 {#tuning-autoscaling-parameters} 验证 dns-autoscaler {{< glossary_tooltip text="ConfigMap" term_id="configmap" >}} 是否存在: @@ -232,7 +222,7 @@ linear: '{"coresPerReplica":256,"min":1,"nodesPerReplica":16}' @@ -240,12 +230,12 @@ calculated using this equation: 实际后端的数量通过使用如下公式来计算: ``` -replicas = max( ceil( cores * 1/coresPerReplica ) , ceil( nodes * 1/nodesPerReplica ) ) +replicas = max( ceil( cores × 1/coresPerReplica ) , ceil( nodes × 1/nodesPerReplica ) ) ``` -注意 `coresPerReplica` 和 `nodesPerReplica` 的值都是整数。 +注意 `coresPerReplica` 和 `nodesPerReplica` 的值都是浮点数。 背后的思想是,当一个集群使用具有很多核心的节点时,由 `coresPerReplica` 来控制。 当一个集群使用具有较少核心的节点时,由 `nodesPerReplica` 来控制。 @@ -285,7 +275,9 @@ This option works for all situations. Enter this command: kubectl scale deployment --replicas=0 dns-autoscaler --namespace=kube-system ``` - + 输出如下所示: ``` @@ -327,7 +319,9 @@ no one will re-create it: kubectl delete deployment dns-autoscaler --namespace=kube-system ``` - + 输出内容如下所示: ``` @@ -341,6 +335,7 @@ This option works if dns-autoscaler is under control of the (deprecated) [Addon Manager](https://git.k8s.io/kubernetes/cluster/addons/README.md), and you have write access to the master node. --> + ### 选项 3:从主控节点删除 dns-autoscaler 清单文件 如果 dns-autoscaler 在[插件管理器](https://git.k8s.io/kubernetes/cluster/addons/README.md) diff --git a/content/zh-cn/docs/tasks/administer-cluster/enable-disable-api.md b/content/zh-cn/docs/tasks/administer-cluster/enable-disable-api.md index c1eb5ec970902..42e98b3ab6563 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/enable-disable-api.md +++ b/content/zh-cn/docs/tasks/administer-cluster/enable-disable-api.md @@ -1,11 +1,13 @@ --- title: 启用/禁用 Kubernetes API content_type: task +weight: 200 --- @@ -40,7 +42,7 @@ The `runtime-config` command line argument also supports 2 special keys: - `api/legacy`, representing only legacy APIs. Legacy APIs are any APIs that have been explicitly [deprecated](/zh-cn/docs/reference/using-api/deprecation-policy/). -For example, to turning off all API versions except v1, pass `--runtime-config=api/all=false,api/v1=true` +For example, to turn off all API versions except v1, pass `--runtime-config=api/all=false,api/v1=true` to the `kube-apiserver`. 
--> - `api/all`:指所有已知的 API diff --git a/content/zh-cn/docs/tasks/administer-cluster/encrypt-data.md b/content/zh-cn/docs/tasks/administer-cluster/encrypt-data.md index b7ffebc4ac299..4a387565ebee8 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/encrypt-data.md +++ b/content/zh-cn/docs/tasks/administer-cluster/encrypt-data.md @@ -2,6 +2,7 @@ title: 静态加密 Secret 数据 content_type: task min-kubernetes-server-version: 1.13 +weight: 210 --- @@ -61,7 +63,7 @@ decrypt data stored in the etcd. {{< /caution >}} ## 理解静态数据加密 {#understanding-the-encryption-at-rest-configuration} @@ -154,7 +156,7 @@ Name | Encryption | Strength | Speed | Key Length | Other Considerations `secretbox` | XSalsa20 and Poly1305 | Strong | Faster | 32-byte | A newer standard and may not be considered acceptable in environments that require high levels of review. `aesgcm` | AES-GCM with random nonce | Must be rotated every 200k writes | Fastest | 16, 24, or 32-byte | Is not recommended for use except when an automated key rotation scheme is implemented. `aescbc` | AES-CBC with [PKCS#7](https://datatracker.ietf.org/doc/html/rfc2315) padding | Weak | Fast | 32-byte | Not recommended due to CBC's vulnerability to padding oracle attacks. -`kms` | Uses envelope encryption scheme: Data is encrypted by data encryption keys (DEKs) using AES-CBC with [PKCS#7](https://datatracker.ietf.org/doc/html/rfc2315) padding, DEKs are encrypted by key encryption keys (KEKs) according to configuration in Key Management Service (KMS) | Strongest | Fast | 32-bytes | The recommended choice for using a third party tool for key management. Simplifies key rotation, with a new DEK generated for each encryption, and KEK rotation controlled by the user. [Configure the KMS provider](/docs/tasks/administer-cluster/kms-provider/) +`kms` | Uses envelope encryption scheme: Data is encrypted by data encryption keys (DEKs) using AES-CBC with [PKCS#7](https://datatracker.ietf.org/doc/html/rfc2315) padding (prior to v1.25), using AES-GCM starting from v1.25, DEKs are encrypted by key encryption keys (KEKs) according to configuration in Key Management Service (KMS) | Strongest | Fast | 32-bytes | The recommended choice for using a third party tool for key management. Simplifies key rotation, with a new DEK generated for each encryption, and KEK rotation controlled by the user. [Configure the KMS provider](/docs/tasks/administer-cluster/kms-provider/) Each provider supports multiple keys - the keys are tried in order for decryption, and if the provider is the first provider, the first key is used for encryption. @@ -392,7 +394,7 @@ program to retrieve the contents of your secret data. 4. 通过 API 检索,验证 Secret 是否被正确解密: ```shell - kubectl describe secret secret1 -n default + kubectl get secret secret1 -n default -o yaml ``` 然后运行以下命令以强制解密所有 Secret: diff --git a/content/zh-cn/docs/tasks/administer-cluster/extended-resource-node.md b/content/zh-cn/docs/tasks/administer-cluster/extended-resource-node.md index 007c9021eb397..052f1b148cf45 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/extended-resource-node.md +++ b/content/zh-cn/docs/tasks/administer-cluster/extended-resource-node.md @@ -1,10 +1,12 @@ --- title: 为节点发布扩展资源 content_type: task +weight: 70 --- @@ -26,7 +28,6 @@ resources that would otherwise be unknown to Kubernetes. ## 获取你的节点名称 @@ -34,6 +35,9 @@ Choose one of your Nodes to use for this exercise. 
kubectl get nodes ``` + 选择一个节点用于此练习。 -{{< note >}} 在前面的请求中,`~1` 为 patch 路径中 “/” 符号的编码。 JSON-Patch 中的操作路径值被解析为 JSON 指针。 更多细节,请查看 [IETF RFC 6901](https://tools.ietf.org/html/rfc6901) 的第 3 节。 @@ -119,21 +123,25 @@ The output shows that the Node has a capacity of 4 dongles: "example.com/dongle": "4", ``` - + 描述你的节点: -```shell +``` kubectl describe node ``` - + 输出再次展示了 dongle 资源: ```yaml Capacity: - cpu: 2 - memory: 2049008Ki - example.com/dongle: 4 + cpu: 2 + memory: 2049008Ki + example.com/dongle: 4 ``` (你应该看不到任何输出) - ## {{% heading "whatsnext" %}} ### 针对应用开发人员 -* [将扩展资源分配给容器](/zh-cn/docs/tasks/configure-pod-container/extended-resource/) - -### 针对集群管理员 +- [将扩展资源分配给容器](/zh-cn/docs/tasks/configure-pod-container/extended-resource/) -* [为名字空间配置最小和最大内存约束](/zh-cn/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace/) -* [为名字空间配置最小和最大 CPU 约束](/zh-cn/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace/) +### 针对集群管理员 +- [为名字空间配置最小和最大内存约束](/zh-cn/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace/) +- [为名字空间配置最小和最大 CPU 约束](/zh-cn/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace/) diff --git a/content/zh-cn/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md b/content/zh-cn/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md index d5ce305d88add..5e9d7e6eafa79 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md +++ b/content/zh-cn/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md @@ -1,6 +1,7 @@ --- title: 关键插件 Pod 的调度保证 content_type: concept +weight: 220 --- diff --git a/content/zh-cn/docs/tasks/administer-cluster/ip-masq-agent.md b/content/zh-cn/docs/tasks/administer-cluster/ip-masq-agent.md index ce7835b23ea7c..89a68ea78c649 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/ip-masq-agent.md +++ b/content/zh-cn/docs/tasks/administer-cluster/ip-masq-agent.md @@ -1,10 +1,12 @@ --- title: IP Masquerade Agent 用户指南 content_type: task +weight: 230 --- @@ -171,7 +173,7 @@ You must also apply the appropriate node label to any nodes in your cluster that 你必须同时将适当的节点标签应用于集群中希望代理运行的任何节点。 ```shell -kubectl label nodes my-node beta.kubernetes.io/masq-agent-ds-ready=true +kubectl label nodes my-node node.kubernetes.io/masq-agent-ds-ready=true ``` + - 本页展示了如何配置密钥管理服务(Key Management Service,KMS)驱动和插件以启用 Secret 数据加密。 目前有两个 KMS API 版本。KMS v1 将继续工作,而 KMS v2 将开发得逐渐成熟。 如果你不确定要选用哪个 KMS API 版本,可选择 v1。 @@ -82,7 +83,7 @@ as the Kubernetes control plane, is responsible for all communication with the r KMS 加密驱动使用封套加密模型来加密 etcd 中的数据。 数据使用数据加密密钥(DEK)加密;每次加密都生成一个新的 DEK。 这些 DEK 经一个密钥加密密钥(KEK)加密后在一个远端的 KMS 中存储和管理。 -KMS 驱动使用 gRPC 与一个特定的 KMS 插件通信。这个 KMS 插件作为一个 gRPC +KMS 驱动使用 gRPC 与一个特定的 KMS 插件通信。这个 KMS 插件作为一个 gRPC 服务器被部署在 Kubernetes 控制平面的相同主机上,负责与远端 KMS 的通信。 1. 使用适合于 `kms` 驱动的属性创建一个新的 `EncryptionConfiguration` 文件,以加密 Secret 和 ConfigMap 等资源。 + 如果要加密使用 CustomResourceDefinition 定义的扩展 API,你的集群必须运行 Kubernetes v1.26 或更高版本。 2. 设置 kube-apiserver 的 `--encryption-provider-config` 参数指向配置文件的位置。 -3. 重启你的 API 服务器。 + +3. `--encryption-provider-config-automatic-reload` 布尔参数决定了磁盘内容发生变化时是否应自动重新加载 + 通过 `--encryption-provider-config` 设置的文件。这样可以在不重启 API 服务器的情况下进行密钥轮换。 + +4. 
重启你的 API 服务器。 ### KMS v1 {#encrypting-your-data-with-the-kms-provider-kms-v1} @@ -340,6 +350,8 @@ To encrypt the data: resources: - resources: - secrets + - configmaps + - pandas.awesome.bears.example providers: - kms: name: myKmsPluginFoo @@ -361,6 +373,8 @@ To encrypt the data: resources: - resources: - secrets + - configmaps + - pandas.awesome.bears.example providers: - kms: apiVersion: v2 @@ -375,6 +389,46 @@ To encrypt the data: timeout: 3s ``` + +`--encryption-provider-config-automatic-reload` 设置为 `true` 会将所有健康检查集中到同一个健康检查端点。 +只有 KMS v1 驱动正使用且加密配置未被自动重新加载时,才能进行独立的健康检查。 + +下表总结了每个 KMS 版本的健康检查端点: + + +| KMS 配置 | 没有自动重新加载 | 有自动重新加载 | +| ------------ | ----------------------- | ------------------ | +| 仅 KMS v1 | Individual Healthchecks | Single Healthcheck | +| 仅 KMS v2 | Single Healthcheck | Single Healthcheck | +| KMS v1 和 v2 | Individual Healthchecks | Single Healthcheck | +| 没有 KMS | 无 | Single Healthcheck | + + +`Single Healthcheck` 意味着唯一的健康检查端点是 `/healthz/kms-providers`。 + +`Individual Healthchecks` 意味着每个 KMS 插件都有一个对应的健康检查端点, +并且这一端点基于插件在加密配置中的位置确定,例如 `/healthz/kms-provider-0`、`/healthz/kms-provider-1` 等。 + +这些健康检查端点路径是由服务器硬编码、生成并控制的。 +`Individual Healthchecks` 的索引序号对应于 KMS 加密配置被处理的顺序。 + ## 验证数据已经加密 {#verifying-that-the-data-is-encrypted} -写入 etcd 时数据被加密。重启 `kube-apiserver` 后,任何新建或更新的 Secret 在存储时应该已被加密。 -要验证这点,你可以用 `etcdctl` 命令行程序获取 Secret 内容。 +写入 etcd 时数据被加密。重启 `kube-apiserver` 后,所有新建或更新的 Secret 或在 +`EncryptionConfiguration` 中配置的其他资源类型在存储时应该已被加密。 +要验证这点,你可以用 `etcdctl` 命令行程序获取私密数据的内容。 + Secret 应包含 `mykey: mydata`。 此页还详述了如何安装若干不同的容器运行时,并将 `systemd` 设为其默认驱动。 @@ -62,12 +62,12 @@ kubeadm 支持在执行 `kubeadm init` 时,传递一个 `KubeletConfiguration` {{< note >}} 在版本 1.22 中,如果用户没有在 `KubeletConfiguration` 中设置 `cgroupDriver` 字段, -`kubeadm init` 会将它设置为默认值 `systemd`。 +`kubeadm` 会将它设置为默认值 `systemd`。 {{< /note >}} @@ -107,7 +107,6 @@ in a ConfigMap called `kubeadm-config` in the `kube-system` namespace. To change a particular option in the `ClusterConfiguration` you can edit the ConfigMap with this command: -The configuration is located under the `data.ClusterConfiguration` key. --> ### 应用集群配置更改 @@ -123,6 +122,9 @@ The configuration is located under the `data.ClusterConfiguration` key. kubectl edit cm -n kube-system kubeadm-config ``` + 配置位于 `data.ClusterConfiguration` 键下。 {{< note >}} @@ -170,7 +172,6 @@ Before proceeding with these changes, make sure you have backed up the directory 要编写新证书,你可以使用: @@ -179,6 +180,9 @@ To write new manifest files in `/etc/kubernetes/manifests` you can use: kubeadm init phase certs --config ``` + 要在 `/etc/kubernetes/manifests` 中编写新的清单文件,你可以使用: ```shell @@ -212,7 +216,6 @@ in a ConfigMap called `kubelet-config` in the `kube-system` namespace. You can edit the ConfigMap with this command: -The configuration is located under the `data.kubelet` key. --> ### 应用 kubelet 配置更改 @@ -227,6 +230,9 @@ The configuration is located under the `data.kubelet` key. kubectl edit cm -n kube-system kubelet-config ``` + 配置位于 `data.kubelet` 键下。 ### 应用 kube-proxy 配置更改 @@ -302,6 +307,9 @@ The configuration is located under the `data.config.conf` key. kubectl edit cm -n kube-system kube-proxy ``` + 配置位于 `data.config.conf` 键下。 #### 反映 kube-proxy 的更改 @@ -325,12 +330,18 @@ New Pods that use the updated ConfigMap will be created. 
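As a rough sketch of the kube-proxy ConfigMap layout referred to above (the configuration lives under the `data.config.conf` key), the object has roughly the following shape; a real kubeadm-generated ConfigMap contains many more fields, and the mode shown is only an example.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-proxy
  namespace: kube-system
data:
  # kubeadm stores the KubeProxyConfiguration under this key.
  config.conf: |
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: "iptables"   # example only; a cluster may run in "ipvs" mode instead
```

Deleting the kube-proxy Pods, as shown next, is what makes an edit to this ConfigMap take effect.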
kubectl get po -n kube-system | grep kube-proxy ``` + 使用以下命令删除 Pod: ```shell kubectl delete po -n kube-system ``` + 将创建使用更新的 ConfigMap 的新 Pod。 {{< note >}} @@ -373,7 +384,6 @@ Once the CoreDNS changes are applied you can delete the CoreDNS Pods: Obtain the Pod names: -Delete a Pod with: --> #### 反映 CoreDNS 的更改 @@ -385,6 +395,9 @@ Delete a Pod with: kubectl get po -n kube-system | grep coredns ``` + 使用以下命令删除 Pod: ```shell @@ -400,6 +413,7 @@ New Pods with the updated CoreDNS configuration will be created. kubeadm 不允许在集群创建和升级期间配置 CoreDNS。 这意味着如果执行了 `kubeadm upgrade apply`,你对 diff --git a/content/zh-cn/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes.md b/content/zh-cn/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes.md index 8ffed02fa258f..a0f05a43cdebf 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes.md +++ b/content/zh-cn/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes.md @@ -2,13 +2,13 @@ title: 升级 Windows 节点 min-kubernetes-server-version: 1.17 content_type: task -weight: 40 +weight: 50 --- @@ -16,10 +16,9 @@ weight: 40 {{< feature-state for_k8s_version="v1.18" state="beta" >}} -本页解释如何升级[用 kubeadm 创建的](/zh-cn/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes) -Windows 节点。 +本页解释如何升级用 kubeadm 创建的 Windows 节点。 ## {{% heading "prerequisites" %}} @@ -150,7 +149,8 @@ upgrade the control plane nodes before upgrading your Windows nodes. {{< note >}} 如果你是在 Pod 内的 HostProcess 容器中运行 kube-proxy,而不是作为 Windows 服务, 你可以通过应用更新版本的 kube-proxy 清单文件来升级 kube-proxy。 diff --git a/content/zh-cn/docs/tasks/administer-cluster/kubelet-credential-provider.md b/content/zh-cn/docs/tasks/administer-cluster/kubelet-credential-provider.md index c2e74eb2810a4..ec64d044e1bbb 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/kubelet-credential-provider.md +++ b/content/zh-cn/docs/tasks/administer-cluster/kubelet-credential-provider.md @@ -3,6 +3,7 @@ title: 配置 kubelet 镜像凭据提供程序 description: 配置 kubelet 的镜像凭据提供程序插件 content_type: task min-kubernetes-server-version: v1.26 +weight: 120 --- {{< feature-state for_k8s_version="v1.26" state="stable" >}} @@ -133,8 +135,8 @@ providers: # 当以下所有条件都为真时,镜像和 matchImage 之间存在匹配: # # - 两者都包含相同数量的域部分并且每个部分都匹配。 - # - imageMatch 的 URL 路径必须是目标镜像 URL 路径的前缀。 - # - 如果 imageMatch 包含端口,则该端口也必须在镜像中匹配。 + # - matchImages 的 URL 路径必须是目标镜像 URL 路径的前缀。 + # - 如果 matchImages 包含端口,则该端口也必须在镜像中匹配。 # # matchImages 的示例值: # @@ -218,7 +220,7 @@ A match exists between an image name and a `matchImage` entry when all of the be * Both contain the same number of domain parts and each part matches. * The URL path of match image must be a prefix of the target image URL path. -* If the imageMatch contains a port, then the port must match in the image as well. +* If the matchImages contains a port, then the port must match in the image as well. 
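To make the port-matching rule above concrete, here is a hypothetical `CredentialProviderConfig` fragment; the plugin name and registry hostnames are invented for illustration only.

```yaml
apiVersion: kubelet.config.k8s.io/v1
kind: CredentialProviderConfig
providers:
  - name: example-credential-plugin            # hypothetical plugin binary name
    apiVersion: credentialprovider.kubelet.k8s.io/v1
    defaultCacheDuration: "12h"
    matchImages:
      # No port given: matches images from any subdomain of example.com.
      - "*.example.com"
      # Explicit port: only images pulled from this registry on port 5000 match.
      - "registry.example.com:5000"
```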
Some example values of `matchImages` patterns are: --> @@ -226,7 +228,7 @@ Some example values of `matchImages` patterns are: * 两者都包含相同数量的域部分并且每个部分都匹配。 * 匹配图片的 URL 路径必须是目标图片 URL 路径的前缀。 -* 如果 imageMatch 包含端口,则该端口也必须在镜像中匹配。 +* 如果 matchImages 包含端口,则该端口也必须在镜像中匹配。 `matchImages` 模式的一些示例值: diff --git a/content/zh-cn/docs/tasks/administer-cluster/kubelet-in-userns.md b/content/zh-cn/docs/tasks/administer-cluster/kubelet-in-userns.md index bc6d34c7426ea..4489013a94a18 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/kubelet-in-userns.md +++ b/content/zh-cn/docs/tasks/administer-cluster/kubelet-in-userns.md @@ -2,12 +2,14 @@ title: 以非 root 用户身份运行 Kubernetes 节点组件 content_type: task min-kubernetes-server-version: 1.22 +weight: 300 --- @@ -21,7 +23,7 @@ without root privileges, by using a {{< glossary_tooltip text="user namespace" t This technique is also known as _rootless mode_. {{< note >}} -This document describes how to run Kubernetes Node components (and hence pods) a non-root user. +This document describes how to run Kubernetes Node components (and hence pods) as a non-root user. If you are just looking for how to run a pod as a non-root user, see [SecurityContext](/docs/tasks/configure-pod-container/security-context/). {{< /note >}} @@ -318,6 +320,7 @@ the host with an external port forwarder, such as RootlessKit, slirp4netns, or You can use the port forwarder from K3s. See [Running K3s in Rootless Mode](https://rancher.com/docs/k3s/latest/en/advanced/#known-issues-with-rootless-mode) for more details. +The implementation can be found in [the `pkg/rootlessports` package](https://github.com/k3s-io/k3s/blob/v1.22.3+k3s1/pkg/rootlessports/controller.go) of k3s. ### Configuring CRI @@ -343,6 +346,7 @@ Pod 的网络命名空间可以使用常规的 CNI 插件配置。对于多节 你可以使用 K3s 的端口转发器。更多细节请参阅 [在 Rootless 模式下运行 K3s](https://rancher.com/docs/k3s/latest/en/advanced/#known-issues-with-rootless-mode)。 +该实现可以在 k3s 的 [`pkg/rootlessports` 包](https://github.com/k3s-io/k3s/blob/v1.22.3+k3s1/pkg/rootlessports/controller.go)中找到。 ### 配置 CRI @@ -355,8 +359,7 @@ kubelet 依赖于容器运行时。你需要部署一个容器运行时(例如 Running CRI plugin of containerd in a user namespace is supported since containerd 1.4. -Running containerd within a user namespace requires the following configurations -in `/etc/containerd/containerd-config.toml`. +Running containerd within a user namespace requires the following configurations. ```toml version = 2 @@ -379,6 +382,9 @@ version = 2 SystemdCgroup = false ``` +The default path of the configuration file is `/etc/containerd/config.toml`. +The path can be specified with `containerd -c /path/to/containerd/config.toml`. + {{% /tab %}} {{% tab name="CRI-O" %}} @@ -387,7 +393,7 @@ Running CRI-O in a user namespace is supported since CRI-O 1.22. CRI-O requires an environment variable `_CRIO_ROOTLESS=1` to be set. -The following configurations (in `/etc/crio/crio.conf`) are also recommended: +The following configurations are also recommended: ```toml [crio] @@ -401,6 +407,8 @@ The following configurations (in `/etc/crio/crio.conf`) are also recommended: cgroup_manager = "cgroupfs" ``` +The default path of the configuration file is `/etc/crio/crio.conf`. +The path can be specified with `crio --config /path/to/crio/crio.conf`. 
{{% /tab %}} {{< /tabs >}} --> @@ -410,7 +418,7 @@ The following configurations (in `/etc/crio/crio.conf`) are also recommended: containerd 1.4 开始支持在用户命名空间运行 containerd 的 CRI 插件。 -在用户命名空间运行 containerd 需要在 `/etc/containerd/containerd-config.toml` 文件包含以下配置: +在用户命名空间运行 containerd 必须进行如下配置: ```toml version = 2 @@ -432,7 +440,8 @@ version = 2 # (除非你在命名空间内运行了另一个 systemd) SystemdCgroup = false ``` - +配置文件的默认路径是 `/etc/containerd/config.toml`。 +可以用 `containerd -c /path/to/containerd/config.toml` 来指定该路径。 {{% /tab %}} {{% tab name="CRI-O" %}} @@ -441,7 +450,7 @@ CRI-O 1.22 开始支持在用户命名空间运行 CRI-O。 CRI-O 必须配置一个环境变量 `_CRIO_ROOTLESS=1`。 -也推荐使用 `/etc/crio/crio.conf` 文件内的以下配置: +也推荐使用以下配置: ```toml [crio] @@ -454,7 +463,8 @@ CRI-O 必须配置一个环境变量 `_CRIO_ROOTLESS=1`。 # (除非你在命名空间内运行了另一个 systemd) cgroup_manager = "cgroupfs" ``` - +配置文件的默认路径是 `/etc/containerd/config.toml`。 +可以用 `containerd -c /path/to/containerd/config.toml` 来指定该路径。 {{% /tab %}} {{< /tabs >}} diff --git a/content/zh-cn/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md b/content/zh-cn/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md index d0c84c6726b57..80df9a884bd6d 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md +++ b/content/zh-cn/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md @@ -211,12 +211,12 @@ kubectl delete pod constraints-cpu-demo --namespace=constraints-cpu-example ## 尝试创建一个超过最大 CPU 限制的 Pod -这里给出了包含一个容器的 Pod 的配置文件。容器声明了 500 millicpu 的 CPU +这里给出了包含一个容器的 Pod 清单。容器声明了 500 millicpu 的 CPU 请求和 1.5 CPU 的 CPU 限制。 {{< codenew file="admin/resource/cpu-constraints-pod-2.yaml" >}} @@ -273,7 +273,7 @@ enforced minimum: ``` Error from server (Forbidden): error when creating "examples/admin/resource/cpu-constraints-pod-3.yaml": -pods "constraints-cpu-demo-4" is forbidden: minimum cpu usage per Container is 200m, but request is 100m. +pods "constraints-cpu-demo-3" is forbidden: minimum cpu usage per Container is 200m, but request is 100m. 
``` @@ -424,8 +424,8 @@ kubectl delete namespace constraints-cpu-example ### 集群管理员参考: * [为命名空间配置默认内存请求和限制](/zh-cn/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/) +* [为命名空间配置默认 CPU 请求和限制](/zh-cn/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace/) * [为命名空间配置内存限制的最小值和最大值](/zh-cn/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace/) -* [为命名空间配置 CPU 限制的最小值和最大值](/zh-cn/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace/) * [为命名空间配置内存和 CPU 配额](/zh-cn/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace/) * [为命名空间配置 Pod 配额](/zh-cn/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace/) * [为 API 对象配置配额](/zh-cn/docs/tasks/administer-cluster/quota-api-object/) diff --git a/content/zh-cn/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md b/content/zh-cn/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md index fd50963ca109a..eb75914401c51 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md +++ b/content/zh-cn/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md @@ -256,19 +256,24 @@ kubectl delete namespace quota-mem-cpu-example ### 集群管理员参考 * [为命名空间配置默认内存请求和限制](/zh-cn/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/) +* [为命名空间配置默认 CPU 请求和限制](/zh-cn/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace/) * [为命名空间配置内存限制的最小值和最大值](/zh-cn/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace/) * [为命名空间配置 CPU 限制的最小值和最大值](/zh-cn/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace/) -* [为命名空间配置内存和 CPU 配额](/zh-cn/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace/) * [为命名空间配置 Pod 配额](/zh-cn/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace/) * [为 API 对象配置配额](/zh-cn/docs/tasks/administer-cluster/quota-api-object/) diff --git a/content/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/_index.md b/content/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/_index.md index d2ac31c881b56..d405ad22fc87c 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/_index.md +++ b/content/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/_index.md @@ -1,24 +1,25 @@ --- title: 从 dockershim 迁移 -weight: 10 -content_type: task +weight: 20 +content_type: task no_list: true --- - - 本节提供从 dockershim 迁移到其他容器运行时的必备知识。 - Dockershim 在 Kubernetes v1.24 版本已经被移除。 @@ -39,13 +40,12 @@ Dockershim 在 Kubernetes v1.24 版本已经被移除。 建议你迁移到其他容器运行时或使用其他方法以获得 Docker 引擎支持。 -建议从 dockershim 迁移到其他替代的容器运行时。 请参阅[容器运行时](/zh-cn/docs/setup/production-environment/container-runtimes/) 一节以了解可用的备选项。 当在迁移过程中遇到麻烦,请[上报问题](https://github.com/kubernetes/kubernetes/issues)。 @@ -57,7 +57,7 @@ configuration. 
These tasks will help you to migrate: -* [Check whether Dockershim deprecation affects you](/docs/tasks/administer-cluster/migrating-from-dockershim/check-if-dockershim-removal-affects-you/) +* [Check whether Dockershim removal affects you](/docs/tasks/administer-cluster/migrating-from-dockershim/check-if-dockershim-removal-affects-you/) * [Migrate Docker Engine nodes from dockershim to cri-dockerd](/docs/tasks/administer-cluster/migrating-from-dockershim/migrate-dockershim-dockerd/) * [Migrating telemetry and security agents from dockershim](/docs/tasks/administer-cluster/migrating-from-dockershim/migrating-telemetry-and-security-agents/) --> @@ -65,7 +65,7 @@ These tasks will help you to migrate: 下面这些任务可以帮助你完成迁移: -* [检查弃用 Dockershim 是否影响到你](/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/check-if-dockershim-removal-affects-you/) +* [检查移除 Dockershim 是否影响到你](/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/check-if-dockershim-removal-affects-you/) * [将 Docker Engine 节点从 dockershim 迁移到 cri-dockerd](/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/migrate-dockershim-dockerd/) * [从 dockershim 迁移遥测和安全代理](/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/migrating-telemetry-and-security-agents/) @@ -73,11 +73,11 @@ These tasks will help you to migrate: @@ -86,4 +86,3 @@ These tasks will help you to migrate: dockershim 的弃用和删除的讨论。 * 如果你发现与 dockershim 迁移相关的缺陷或其他技术问题, 可以在 Kubernetes 项目[报告问题](https://github.com/kubernetes/kubernetes/issues/new/choose)。 - diff --git a/content/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/check-if-dockershim-removal-affects-you.md b/content/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/check-if-dockershim-removal-affects-you.md index 09541e7033199..3a9b66d4963ea 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/check-if-dockershim-removal-affects-you.md +++ b/content/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/check-if-dockershim-removal-affects-you.md @@ -1,14 +1,14 @@ --- title: 检查移除 Dockershim 是否对你有影响 content_type: task -weight: 20 +weight: 50 --- @@ -131,7 +131,9 @@ You can read about it in [Kubernetes Containerd integration goes GA](/blog/2018/ 你可以阅读博文 [Kubernetes 正式支持集成 Containerd](/blog/2018/05/24/kubernetes-containerd-integration-goes-ga/)。 - + ![Dockershim 和 Containerd CRI 的实现对比图](/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/cri-containerd.png) @@ -171,7 +171,7 @@ nodes. 
如果你将节点上的容器运行时从 Docker Engine 改变为 containerd,可在 diff --git a/content/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/migrating-telemetry-and-security-agents.md b/content/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/migrating-telemetry-and-security-agents.md index 6a5bf88d52df8..8ebb3bdc69542 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/migrating-telemetry-and-security-agents.md +++ b/content/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/migrating-telemetry-and-security-agents.md @@ -1,14 +1,14 @@ --- title: 从 dockershim 迁移遥测和安全代理 content_type: task -weight: 70 +weight: 60 --- @@ -16,7 +16,13 @@ weight: 70 {{% thirdparty-content %}} Kubernetes 对与 Docker Engine 直接集成的支持已被弃用且已经被删除。 大多数应用程序不直接依赖于托管容器的运行时。但是,仍然有大量的遥测和监控代理依赖 @@ -65,8 +71,8 @@ might run a command such as [`docker ps`](https://docs.docker.com/engine/reference/commandline/ps/) or [`docker top`](https://docs.docker.com/engine/reference/commandline/top/) to list containers and processes or [`docker logs`](https://docs.docker.com/engine/reference/commandline/logs/) -+to receive streamed logs. If nodes in your existing cluster use -+Docker Engine, and you switch to a different container runtime, +to receive streamed logs. If nodes in your existing cluster use +Docker Engine, and you switch to a different container runtime, these commands will not work any longer. --> 一些代理和 Docker 工具紧密绑定。比如代理会用到 @@ -164,6 +170,9 @@ Please contact the vendor to get up to date instructions for migrating from dock 提供了为各类遥测和安全代理供应商准备的持续更新的迁移指导。 请与供应商联系,获取从 dockershim 迁移的最新说明。 + ## 从 dockershim 迁移 {#migration-from-dockershim} ### [Aqua](https://www.aquasec.com) diff --git a/content/zh-cn/docs/tasks/administer-cluster/network-policy-provider/romana-network-policy.md b/content/zh-cn/docs/tasks/administer-cluster/network-policy-provider/romana-network-policy.md index 6331be657247f..440f1a7f8829f 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/network-policy-provider/romana-network-policy.md +++ b/content/zh-cn/docs/tasks/administer-cluster/network-policy-provider/romana-network-policy.md @@ -1,7 +1,7 @@ --- title: 使用 Romana 提供 NetworkPolicy content_type: task -weight: 40 +weight: 50 --- @@ -22,7 +22,7 @@ This page shows how to use Romana for NetworkPolicy. 
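Because the pages that follow (Romana, Weave Net) describe providers that enforce NetworkPolicy, a minimal policy sketch may help show what such a provider acts on. This default-deny-ingress example uses only the standard `networking.k8s.io/v1` API and makes no assumptions about the provider; the namespace is left implicit.

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
spec:
  podSelector: {}     # an empty selector applies the policy to every Pod in the namespace
  policyTypes:
    - Ingress         # no ingress rules are listed, so all incoming traffic is denied
```

Once a provider is installed, applying a manifest like this with `kubectl apply -f` should block all ingress traffic to Pods in the target namespace.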
## {{% heading "prerequisites" %}} 完成 [kubeadm 入门指南](/zh-cn/docs/reference/setup-tools/kubeadm/)中的 1、2、3 步。 diff --git a/content/zh-cn/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy.md b/content/zh-cn/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy.md index d27f901ff1f34..283fe06305abc 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy.md +++ b/content/zh-cn/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy.md @@ -1,7 +1,7 @@ --- title: 使用 Weave Net 提供 NetworkPolicy content_type: task -weight: 50 +weight: 60 --- diff --git a/content/zh-cn/docs/tasks/administer-cluster/nodelocaldns.md b/content/zh-cn/docs/tasks/administer-cluster/nodelocaldns.md index 70a7f05ceaee5..7ee9264f35c73 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/nodelocaldns.md +++ b/content/zh-cn/docs/tasks/administer-cluster/nodelocaldns.md @@ -1,13 +1,16 @@ --- title: 在 Kubernetes 集群中使用 NodeLocal DNSCache content_type: task +weight: 390 --- @@ -185,7 +188,7 @@ This feature can be enabled using the following steps: * If kube-proxy is running in IPTABLES mode: ``` bash - sed -i "s/__PILLAR__LOCAL__DNS__/$localdns/g; s/__PILLAR__DNS__DOMAIN__/$domain/g; s/,__PILLAR__DNS__SERVER__//g; s/__PILLAR__CLUSTER__DNS__/$kubedns/g" nodelocaldns.yaml + sed -i "s/__PILLAR__LOCAL__DNS__/$localdns/g; s/__PILLAR__DNS__DOMAIN__/$domain/g; s/__PILLAR__DNS__SERVER__/$kubedns/g" nodelocaldns.yaml ``` `__PILLAR__CLUSTER__DNS__` and `__PILLAR__UPSTREAM__SERVERS__` will be populated by @@ -207,7 +210,7 @@ This feature can be enabled using the following steps: * If kube-proxy is running in IPVS mode: ``` bash - sed -i "s/__PILLAR__LOCAL__DNS__/$localdns/g; s/__PILLAR__DNS__DOMAIN__/$domain/g; s/__PILLAR__DNS__SERVER__//g; s/__PILLAR__CLUSTER__DNS__/$kubedns/g" nodelocaldns.yaml + sed -i "s/__PILLAR__LOCAL__DNS__/$localdns/g; s/__PILLAR__DNS__DOMAIN__/$domain/g; s/,__PILLAR__DNS__SERVER__//g; s/__PILLAR__CLUSTER__DNS__/$kubedns/g" nodelocaldns.yaml ``` In this mode, the `node-local-dns` pods listen only on ``. @@ -284,13 +287,12 @@ In those cases, the `kube-dns` ConfigMap can be updated. 
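Where the text above notes that the `kube-dns` ConfigMap can be updated, the sketch below shows the general shape of such an update for clusters that run kube-dns; the stub domain and the nameserver IPs are placeholders.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
data:
  # Placeholder stub domain routed to a placeholder internal DNS server.
  stubDomains: |
    {"corp.example.internal": ["10.150.0.1"]}
  # Placeholder upstream resolvers for all other external names.
  upstreamNameservers: |
    ["1.1.1.1", "8.8.8.8"]
```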
`node-local-dns` Pod 使用内存来保存缓存项并处理查询。 -由于它们并不监视 Kubernetes 对象变化,集群规模或者 Service/Endpoints +由于它们并不监视 Kubernetes 对象变化,集群规模或者 Service/EndpointSlices 的数量都不会直接影响内存用量。内存用量会受到 DNS 查询模式的影响。 根据 [CoreDNS 文档](https://github.com/coredns/deployment/blob/master/kubernetes/Scaling_CoreDNS.md), diff --git a/content/zh-cn/docs/tasks/administer-cluster/safely-drain-node.md b/content/zh-cn/docs/tasks/administer-cluster/safely-drain-node.md index 9dcebb4c349ff..13db6267c53d7 100644 --- a/content/zh-cn/docs/tasks/administer-cluster/safely-drain-node.md +++ b/content/zh-cn/docs/tasks/administer-cluster/safely-drain-node.md @@ -2,6 +2,7 @@ title: 安全地清空一个节点 content_type: task min-kubernetes-server-version: 1.5 +weight: 310 --- @@ -116,9 +118,22 @@ Next, tell Kubernetes to drain the node: 接下来,告诉 Kubernetes 清空节点: ```shell -kubectl drain +kubectl drain --ignore-daemonsets ``` + +如果存在由 DaemonSet 管理的 Pod,你需要使用 `kubectl` 指定 `--ignore-daemonsets` 才能成功腾空节点。 +`kubectl drain` 子命令本身并不会实际腾空节点上的 DaemonSet Pod: +DaemonSet 控制器(控制平面的一部分)立即创建新的等效 Pod 替换丢失的 Pod。 +DaemonSet 控制器还会创建忽略不可调度污点的 Pod,这允许新的 Pod 启动到你正在腾空的节点上。 + +上面的清单需要通过 `--admission-control-config-file` 指定给 kube-apiserver。 +{{< /note >}} + {{< note >}} - - ## 局限性 {#limitations} -* 节点问题检测器只支持基于文件类型的内核日志。 - 它不支持像 journald 这样的命令行日志工具。 * 节点问题检测器使用内核日志格式来报告内核问题。 要了解如何扩展内核日志格式,请参阅[添加对另一个日志格式的支持](#support-other-log-format)。 - ## 启用节点问题检测器 一些云供应商将节点问题检测器以{{< glossary_tooltip text="插件" term_id="addons" >}}形式启用。 -你还可以使用 `kubectl` 或创建插件 Pod 来启用节点问题探测器。 +你还可以使用 `kubectl` 或创建插件 DaemonSet 来启用节点问题探测器。 - -## 使用 kubectl 启用节点问题检测器 {#using-kubectl} +### 使用 kubectl 启用节点问题检测器 {#using-kubectl} `kubectl` 提供了节点问题探测器最灵活的管理。 你可以覆盖默认配置使其适合你的环境或检测自定义节点问题。例如: - -### 使用插件 pod 启用节点问题检测器 {#using-addon-pod} +### 使用插件 Pod 启用节点问题检测器 {#using-addon-pod} 如果你使用的是自定义集群引导解决方案,不需要覆盖默认配置, 可以利用插件 Pod 进一步自动化部署。 @@ -125,25 +120,25 @@ directory `/etc/kubernetes/addons/node-problem-detector` on a control plane node 创建 `node-strick-detector.yaml`,并在控制平面节点上保存配置到插件 Pod 的目录 `/etc/kubernetes/addons/node-problem-detector`。 - ## 覆盖配置文件 构建节点问题检测器的 docker 镜像时,会嵌入 -[默认配置](https://github.com/kubernetes/node-problem-detector/tree/v0.1/config)。 +[默认配置](https://github.com/kubernetes/node-problem-detector/tree/v0.8.12/config)。 - 不过,你可以像下面这样使用 [`ConfigMap`](/zh-cn/docs/tasks/configure-pod-container/configure-pod-configmap/) 将其覆盖: - 1. 更改 `config/` 中的配置文件 1. 创建 `ConfigMap` `node-strick-detector-config`: - + ```shell kubectl create configmap node-problem-detector-config --from-file=config/ ``` 1. 更改 `node-problem-detector.yaml` 以使用 ConfigMap: - + {{< codenew file="debug/node-problem-detector-configmap.yaml" >}} 1. 
使用新的配置文件重新创建节点问题检测器: - ```shell + ```shell # 如果你正在运行节点问题检测器,请先删除,然后再重新创建 kubectl delete -f https://k8s.io/examples/debug/node-problem-detector.yaml kubectl apply -f https://k8s.io/examples/debug/node-problem-detector-configmap.yaml ``` - -## 内核监视器 -*内核监视器(Kernel Monitor)* 是节点问题检测器中支持的系统日志监视器守护进程。 -内核监视器观察内核日志并根据预定义规则检测已知的内核问题。 +## 问题守护程序 - +- `SystemLogMonitor` 类型的守护程序根据预定义的规则监视系统日志并报告问题和指标。 + 你可以针对不同的日志源自定义配置如 +[filelog](https://github.com/kubernetes/node-problem-detector/blob/v0.8.12/config/kernel-monitor-filelog.json)、 +[kmsg](https://github.com/kubernetes/node-problem-detector/blob/v0.8.12/config/kernel-monitor.json)、 +[kernel](https://github.com/kubernetes/node-problem-detector/blob/v0.8.12/config/kernel-monitor-counter.json)、 +[abrt](https://github.com/kubernetes/node-problem-detector/blob/v0.8.12/config/abrt-adaptor.json) +和 [systemd](https://github.com/kubernetes/node-problem-detector/blob/v0.8.12/config/systemd-monitor-counter.json)。 + + -内核监视器根据 [`config/kernel-monitor.json`](https://github.com/kubernetes/node-problem-detector/blob/v0.1/config/kernel-monitor.json) -中的一组预定义规则列表匹配内核问题。 -规则列表是可扩展的,你始终可以通过覆盖配置来扩展它。 - -### 添加新的 NodeCondition -要支持新的 `NodeCondition`,请在 `config/kernel-monitor.json` 中的 -`conditions` 字段中创建一个条件定义: +- `CustomPluginMonitor` 类型的守护程序通过运行用户定义的脚本来调用和检查各种节点问题。 + 你可以使用不同的自定义插件监视器来监视不同的问题,并通过更新 + [配置文件](https://github.com/kubernetes/node-problem-detector/blob/v0.8.12/config/custom-plugin-monitor.json) + 来定制守护程序行为。 -```json -{ - "type": "NodeConditionType", - "reason": "CamelCaseDefaultNodeConditionReason", - "message": "arbitrary default node condition message" -} -``` + +- `HealthChecker` 类型的守护程序检查节点上的 kubelet 和容器运行时的健康状况。 - -### 检测新的问题 -你可以使用新的规则描述来扩展 `config/kernel-monitor.json` 中的 `rules` 字段以检测新问题: +### 增加对其他日志格式的支持 {#support-other-log-format} -```json -{ - "type": "temporary/permanent", - "condition": "NodeConditionOfPermanentIssue", - "reason": "CamelCaseShortReason", - "message": "regexp matching the issue in the kernel log" -} -``` +系统日志监视器目前支持基于文件的日志、journald 和 kmsg。 +可以通过实现一个新的 +[log watcher](https://github.com/kubernetes/node-problem-detector/blob/v0.8.12/pkg/systemlogmonitor/logwatchers/types/log_watcher.go) +来添加额外的日志源。 - -### 配置内核日志设备的路径 {#kernel-log-device-path} -检查你的操作系统(OS)发行版本中的内核日志路径位置。 -Linux 内核[日志设备](https://www.kernel.org/doc/documentation/abi/testing/dev-kmsg) -通常呈现为 `/dev/kmsg`。 -但是,日志路径位置因 OS 发行版本而异。 -`config/kernel-monitor.json` 中的 `log` 字段表示容器内的日志路径。 -你可以配置 `log` 字段以匹配节点问题检测器所示的设备路径。 +### 添加自定义插件监视器 - -### 添加对其它日志格式的支持 {#support-other-log-format} -内核监视器使用 -[`Translator`](https://github.com/kubernetes/node-problem-detector/blob/v0.1/pkg/kernelmonitor/translator.go) -插件转换内核日志的内部数据结构。 -你可以为新的日志格式实现新的转换器。 +## 导出器 + +导出器(Exporter)向特定后端报告节点问题和/或指标。 +支持下列导出器: + +- **Kubernetes exporter**: 此导出器向 Kubernetes API 服务器报告节点问题。 + 临时问题报告为事件,永久性问题报告为节点状况。 + +- **Prometheus exporter**: 此导出器在本地将节点问题和指标报告为 Prometheus(或 OpenMetrics)指标。 + 你可以使用命令行参数指定导出器的 IP 地址和端口。 + +- **Stackdriver exporter**: 此导出器向 Stackdriver Monitoring API 报告节点问题和指标。 + 可以使用[配置文件](https://github.com/kubernetes/node-problem-detector/blob/v0.8.12/config/exporter/stackdriver-exporter.json)自定义导出行为。 - ## 建议和限制 diff --git a/content/zh-cn/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md b/content/zh-cn/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md index 548dc88839054..ea2110b88322f 100644 --- 
a/content/zh-cn/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md +++ b/content/zh-cn/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md @@ -73,8 +73,7 @@ Adding a new version: 1. Pick a conversion strategy. Since custom resource objects need the ability to be served at both versions, that means they will sometimes be served in a - different version than the one stored. To make this possible, the custom - resource objects must sometimes be converted between the + different version than the one stored. To make this possible, the custom resource objects must sometimes be converted between the version they are stored at and the version they are served at. If the conversion involves schema changes and requires custom logic, a conversion webhook should be used. If there are no schema changes, the default `None` @@ -132,7 +131,7 @@ Removing an old version: 1. Set `served` to `false` for the old version in the `spec.versions` list. If any clients are still unexpectedly using the old version they may begin reporting errors attempting to access the custom resource objects at the old version. - If this occurs, switch back to using `served:true` on the old version, migrate the + If this occurs, switch back to using `served:true` on the old version, migrate the remaining clients to the new version and repeat this step. 1. Ensure the [upgrade of existing objects to the new stored version](#upgrade-existing-objects-to-a-new-stored-version) step has been completed. 1. Verify that the `storage` is set to `true` for the new version in the `spec.versions` list in the CustomResourceDefinition. @@ -532,9 +531,6 @@ spec: ## Webhook 转换 {#webhook-conversion} @@ -627,7 +623,7 @@ how to [authenticate API servers](/docs/reference/access-authn-authz/extensible- A conversion webhook must not mutate anything inside of `metadata` of the converted object other than `labels` and `annotations`. Attempted changes to `name`, `UID` and `namespace` are rejected and fail the request -which caused the conversion. All other changes are ignored. +which caused the conversion. All other changes are ignored. --> #### 被允许的变更 @@ -639,8 +635,10 @@ which caused the conversion. All other changes are ignored. 
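Before the deployment steps that follow, it may help to see roughly where webhook conversion is wired into a CustomResourceDefinition. In this sketch the group, resource names, Service name, and path are illustrative, and the per-version schemas are reduced to a bare minimum.

```yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: crontabs.example.com          # illustrative resource and group
spec:
  group: example.com
  names:
    kind: CronTab
    listKind: CronTabList
    plural: crontabs
    singular: crontab
  scope: Namespaced
  versions:
    - name: v1beta1
      served: true
      storage: false                  # old version, still served
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
    - name: v1
      served: true
      storage: true                   # new storage version
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
  conversion:
    strategy: Webhook                 # custom conversion logic is needed
    webhook:
      conversionReviewVersions: ["v1"]
      clientConfig:
        service:
          namespace: default          # illustrative; point this at your webhook Service
          name: example-conversion-webhook-server
          path: /crdconvert
```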
### 部署转换 Webhook 服务 {#deploy-the-conversion-webhook-service} @@ -842,7 +840,7 @@ API 服务器一旦确定请求应发送到转换 Webhook,它需要知道如 创建 apiextensions.k8s.io/v1beta1 定制资源定义时若未指定 @@ -1298,7 +1296,7 @@ If conversion fails, a webhook should return a `response` stanza containing the {{< warning >}} @@ -1383,7 +1381,7 @@ request depends on what is specified in the CRD's `spec.conversion`: -{{< feature-state for_k8s_version="v1.25" state="alpha" >}} +{{< feature-state for_k8s_version="v1.26" state="beta" >}} @@ -49,19 +49,6 @@ You should already be familiar with the basic use of [Job](/docs/concepts/worklo {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} - - -{{< note >}} - -因为这些特性还处于 Alpha 阶段,所以在准备 Kubernetes -集群时要启用两个[特性门控](/zh-cn/docs/reference/command-line-tools-reference/feature-gates/): -`JobPodFailurePolicy` 和 `PodDisruptionConditions`。 -{{< /note >}} - #### 直接访问 REST API {#directly-accessing-the-rest-api} -在运行在 Pod 中时,可以通过 `default` 命名空间中的名为 `kubernetes` 的服务访问 -Kubernetes API 服务器。也就是说,Pod 可以使用 `kubernetes.default.svc` 主机名 -来查询 API 服务器。官方客户端库自动完成这个工作。 +在运行在 Pod 中时,你的容器可以通过获取 `KUBERNETES_SERVICE_HOST` 和 +`KUBERNETES_SERVICE_PORT_HTTPS` 环境变量为 Kubernetes API +服务器生成一个 HTTPS URL。 +API 服务器的集群内地址也发布到 `default` 命名空间中名为 `kubernetes` 的 Service 中, +从而 Pod 可以引用 `kubernetes.default.svc` 作为本地 API 服务器的 DNS 名称。 + +{{< note >}} + +Kubernetes 不保证 API 服务器具有主机名 `kubernetes.default.svc` 的有效证书; +但是,控制平面应该为 `$KUBERNETES_SERVICE_HOST` 代表的主机名或 IP 地址提供有效证书。 +{{< /note >}} ## 清理 {#clean-up} -运行 `kind delete cluster --name psa-with-cluster-pss` 和 -`kind delete cluster --name psa-wo-cluster-pss` 来删除你创建的集群。 +现在通过运行以下命令删除你上面创建的集群: + +```shell +kind delete cluster --name psa-with-cluster-pss +``` +```shell +kind delete cluster --name psa-wo-cluster-pss +``` ## {{% heading "whatsnext" %}} diff --git a/content/zh-cn/docs/tutorials/security/ns-level-pss.md b/content/zh-cn/docs/tutorials/security/ns-level-pss.md index 1487e87fb1093..faf20f35993bf 100644 --- a/content/zh-cn/docs/tutorials/security/ns-level-pss.md +++ b/content/zh-cn/docs/tutorials/security/ns-level-pss.md @@ -1,13 +1,13 @@ --- title: 在名字空间级别应用 Pod 安全标准 content_type: tutorial -weight: 10 +weight: 20 --- {{% alert title="Note" %}} @@ -224,11 +224,15 @@ with no warnings. 
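For reference alongside this tutorial, namespace-level Pod Security Standards are applied as labels on the Namespace object, roughly as in the sketch below; the namespace name and the levels chosen here are placeholders rather than the tutorial's exact values.

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: example-ns                                  # placeholder namespace name
  labels:
    # Reject Pods that violate the baseline level.
    pod-security.kubernetes.io/enforce: baseline
    # Warn (but still admit) when a Pod violates the restricted level.
    pod-security.kubernetes.io/warn: restricted
    # Add an audit annotation when a Pod violates the restricted level.
    pod-security.kubernetes.io/audit: restricted
```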
## 清理 {#clean-up} -运行 `kind delete cluster --name psa-ns-level` 删除创建的集群。 +现在通过运行以下命令删除你上面创建的集群: + +```shell +kind delete cluster --name psa-ns-level +``` ## {{% heading "whatsnext" %}} diff --git a/content/zh-cn/docs/tutorials/security/seccomp.md b/content/zh-cn/docs/tutorials/security/seccomp.md index 5db921b96cff1..6d4694378537b 100644 --- a/content/zh-cn/docs/tutorials/security/seccomp.md +++ b/content/zh-cn/docs/tutorials/security/seccomp.md @@ -1,7 +1,7 @@ --- title: 使用 seccomp 限制容器的系统调用 content_type: tutorial -weight: 20 +weight: 40 min-kubernetes-server-version: v1.22 --- @@ -424,6 +424,70 @@ docker exec -it kind-worker bash -c \ } ``` + +## 创建使用容器运行时默认 seccomp 配置文件的 Pod {#create-pod-that-uses-the-container-runtime-default-seccomp-profile} + +大多数容器运行时都提供了一组合理的、默认被允许或默认被禁止的系统调用。 +你可以通过将 Pod 或容器的安全上下文中的 seccomp 类型设置为 `RuntimeDefault` +来为你的工作负载采用这些默认值。 + +{{< note >}} + +如果你已经启用了 `SeccompDefault` [特性门控](/zh-cn/docs/reference/command-line-tools-reference/feature-gates/), +只要没有指定其他 seccomp 配置文件,那么 Pod 就会使用 `RuntimeDefault` seccomp 配置文件。 +否则,默认值为 `Unconfined`。 +{{< /note >}} + + +这是一个 Pod 的清单,它要求其所有容器使用 `RuntimeDefault` seccomp 配置文件: + +{{< codenew file="pods/security/seccomp/ga/default-pod.yaml" >}} + + +创建此 Pod: + +```shell +kubectl apply -f https://k8s.io/examples/pods/security/seccomp/ga/default-pod.yaml +``` + +```shell +kubectl get pod default-pod +``` + + +此 Pod 应该显示为已成功启动: + +``` +NAME READY STATUS RESTARTS AGE +default-pod 1/1 Running 0 20s +``` + + +最后,你看到一切正常之后,请清理: + +```shell +kubectl delete pod default-pod --wait --now +``` + -## 创建使用容器运行时默认 seccomp 配置文件的 Pod {#create-pod-that-uses-the-container-runtime-default-seccomp-profile} - -大多数容器运行时都提供了一组合理的默认系统调用,以及是否允许执行这些系统调用。 -你可以通过将 Pod 或容器的安全上下文中的 seccomp 类型设置为 `RuntimeDefault` -来为你的工作负载采用这些默认值。 - -{{< note >}} - -如果你已经启用了 `SeccompDefault` [特性门控](/zh-cn/docs/reference/command-line-tools-reference/feature-gates/), -只要没有指定其他 seccomp 配置文件,那么 Pod 就会使用 `SeccompDefault` 的 seccomp 配置文件。 -否则,默认值为 `Unconfined`。 -{{< /note >}} - - -这是一个 Pod 的清单,它要求其所有容器使用 `RuntimeDefault` seccomp 配置文件: - -{{< codenew file="pods/security/seccomp/ga/default-pod.yaml" >}} - - -创建此 Pod: - -```shell -kubectl apply -f https://k8s.io/examples/pods/security/seccomp/ga/default-pod.yaml -``` - -```shell -kubectl get pod default-pod -``` - - -此 Pod 应该显示为成功启动: - -``` -NAME READY STATUS RESTARTS AGE -default-pod 1/1 Running 0 20s -``` - - -最后,你看到一切正常之后,请清理: - -```shell -kubectl delete pod default-pod --wait --now -``` - ## {{% heading "whatsnext" %}} +要配置分配给 StatefulSet 中每个 Pod 的整数序号, +请参阅[起始序号](/zh-cn/docs/concepts/workloads/controllers/statefulset/#start-ordinal)。 +{{< /note >}} + @@ -1798,56 +1807,55 @@ Service: ```shell kubectl delete svc nginx ``` + 删除本教程中用到的 PersistentVolume 卷的持久化存储介质。 +```shell +kubectl get pvc +``` +``` +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +www-web-0 Bound pvc-2bf00408-d366-4a12-bad0-1869c65d0bee 1Gi RWO standard 25m +www-web-1 Bound pvc-ba3bfe9c-413e-4b95-a2c0-3ea8a54dbab4 1Gi RWO standard 24m +www-web-2 Bound pvc-cba6cfa6-3a47-486b-a138-db5930207eaf 1Gi RWO standard 15m +www-web-3 Bound pvc-0c04d7f0-787a-4977-8da3-d9d3a6d8d752 1Gi RWO standard 15m +www-web-4 Bound pvc-b2c73489-e70b-4a4e-9ec1-9eab439aa43e 1Gi RWO standard 14m +``` -+```shell -+kubectl get pvc -+``` -+``` -+NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE -+www-web-0 Bound pvc-2bf00408-d366-4a12-bad0-1869c65d0bee 1Gi RWO standard 25m -+www-web-1 Bound pvc-ba3bfe9c-413e-4b95-a2c0-3ea8a54dbab4 1Gi RWO standard 24m -+www-web-2 Bound 
pvc-cba6cfa6-3a47-486b-a138-db5930207eaf 1Gi RWO standard 15m -+www-web-3 Bound pvc-0c04d7f0-787a-4977-8da3-d9d3a6d8d752 1Gi RWO standard 15m -+www-web-4 Bound pvc-b2c73489-e70b-4a4e-9ec1-9eab439aa43e 1Gi RWO standard 14m -+``` -+ -+```shell -+kubectl get pv -+``` -+``` -+NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE -+pvc-0c04d7f0-787a-4977-8da3-d9d3a6d8d752 1Gi RWO Delete Bound default/www-web-3 standard 15m -+pvc-2bf00408-d366-4a12-bad0-1869c65d0bee 1Gi RWO Delete Bound default/www-web-0 standard 25m -+pvc-b2c73489-e70b-4a4e-9ec1-9eab439aa43e 1Gi RWO Delete Bound default/www-web-4 standard 14m -+pvc-ba3bfe9c-413e-4b95-a2c0-3ea8a54dbab4 1Gi RWO Delete Bound default/www-web-1 standard 24m -+pvc-cba6cfa6-3a47-486b-a138-db5930207eaf 1Gi RWO Delete Bound default/www-web-2 standard 15m -+``` -+ -+```shell -+kubectl delete pvc www-web-0 www-web-1 www-web-2 www-web-3 www-web-4 -+``` -+ -+``` -+persistentvolumeclaim "www-web-0" deleted -+persistentvolumeclaim "www-web-1" deleted -+persistentvolumeclaim "www-web-2" deleted -+persistentvolumeclaim "www-web-3" deleted -+persistentvolumeclaim "www-web-4" deleted -+``` -+ -+```shell -+kubectl get pvc -+``` -+ -+``` -+No resources found in default namespace. -+``` +```shell +kubectl get pv +``` +``` +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +pvc-0c04d7f0-787a-4977-8da3-d9d3a6d8d752 1Gi RWO Delete Bound default/www-web-3 standard 15m +pvc-2bf00408-d366-4a12-bad0-1869c65d0bee 1Gi RWO Delete Bound default/www-web-0 standard 25m +pvc-b2c73489-e70b-4a4e-9ec1-9eab439aa43e 1Gi RWO Delete Bound default/www-web-4 standard 14m +pvc-ba3bfe9c-413e-4b95-a2c0-3ea8a54dbab4 1Gi RWO Delete Bound default/www-web-1 standard 24m +pvc-cba6cfa6-3a47-486b-a138-db5930207eaf 1Gi RWO Delete Bound default/www-web-2 standard 15m +``` + +```shell +kubectl delete pvc www-web-0 www-web-1 www-web-2 www-web-3 www-web-4 +``` + +``` +persistentvolumeclaim "www-web-0" deleted +persistentvolumeclaim "www-web-1" deleted +persistentvolumeclaim "www-web-2" deleted +persistentvolumeclaim "www-web-3" deleted +persistentvolumeclaim "www-web-4" deleted +``` + +```shell +kubectl get pvc +``` +``` +No resources found in default namespace. +``` {{< note >}} -## Cherry Picks +## Cherry Pick 请遵循 [Cherry Pick 流程](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-release/cherry-picks.md)。 @@ -143,13 +143,12 @@ releases may also occur in between these. 
--> ## 未来发布的月度版本 {#upcoming-monthly-releases} -时间表可能会因错误修复的严重程度而有所不同,但为了便于规划,我们将针对以下每月发布点。 -计划外的关键版本也可能发生在这些版本之间。 +时间表可能会因错误修复的严重程度而有所不同,但为了便于规划,我们每月将按照以下时间点进行发布。 +中间可能会发布一些计划外的关键版本。 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Backend Pod 1 - labels: app=MyApp - port: 9376 - - - - - - Backend Pod 2 - labels: app=MyApp - port: 9376 - - - - - - Backend Pod 3 - labels: app=MyApp - port: 9376 - - - - - - - - - - - - Client - - - - - - kube-proxy - - - - - - - apiserver - - - - - - clusterIP - (iptables) - - Node - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/images/docs/services-ipvs-overview.svg b/static/images/docs/services-ipvs-overview.svg index de745a764066e..d2c2f702d4611 100644 --- a/static/images/docs/services-ipvs-overview.svg +++ b/static/images/docs/services-ipvs-overview.svg @@ -1,121 +1,592 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Backend Pod 1 - - - - - - Backend Pod 2 - - - - - - Backend Pod 3 - - - - - - - - - - - - Client - - - - - - kube-proxy - - - - - - - apiserver - - - - - - clusterIP - (Virtual Server) - - Node - (Real Server) - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +