diff --git a/Makefile b/Makefile index 12b6f67d5ab5b..a6f987547be7a 100644 --- a/Makefile +++ b/Makefile @@ -68,7 +68,7 @@ container-build: module-check $(CONTAINER_RUN) --read-only --mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 $(CONTAINER_IMAGE) sh -c "npm ci && hugo --minify" container-serve: module-check ## Boot the development server using container. Run `make container-image` before this. - $(CONTAINER_RUN) --read-only --mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 -p 1313:1313 $(CONTAINER_IMAGE) hugo server --buildFuture --bind 0.0.0.0 --destination /tmp/hugo --cleanDestinationDir + $(CONTAINER_RUN) --cap-drop=ALL --cap-add=AUDIT_WRITE --read-only --mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 -p 1313:1313 $(CONTAINER_IMAGE) hugo server --buildFuture --bind 0.0.0.0 --destination /tmp/hugo --cleanDestinationDir test-examples: scripts/test_examples.sh install @@ -91,4 +91,4 @@ clean-api-reference: ## Clean all directories in API reference directory, preser api-reference: clean-api-reference ## Build the API reference pages. go needed cd api-ref-generator/gen-resourcesdocs && \ - go run cmd/main.go kwebsite --config-dir config/v1.20/ --file api/v1.20/swagger.json --output-dir ../../content/en/docs/reference/kubernetes-api --templates templates + go run cmd/main.go kwebsite --config-dir config/v1.21/ --file api/v1.21/swagger.json --output-dir ../../content/en/docs/reference/kubernetes-api --templates templates diff --git a/OWNERS b/OWNERS index f352793ec1bd7..9b12305b4b026 100644 --- a/OWNERS +++ b/OWNERS @@ -11,7 +11,7 @@ emeritus_approvers: # - jaredbhatti, commented out to disable PR assignments # - steveperry-53, commented out to disable PR assignments - stewart-yu -- zacharysarah +# - zacharysarah, commented out to disable PR assignments labels: - sig/docs diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 978be7ca3312e..b958f84bac368 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -3,13 +3,13 @@ aliases: - castrojo - kbarnard10 - onlydole - - zacharysarah - mrbobbytables sig-docs-blog-reviewers: # Reviewers for blog content - castrojo - kbarnard10 - mrbobbytables - onlydole + - sftim sig-docs-de-owners: # Admins for German content - bene2k1 - mkorbi @@ -32,7 +32,6 @@ aliases: - sftim - steveperry-53 - tengqm - - zacharysarah - zparnold sig-docs-en-reviews: # PR reviews for English content - bradtopol @@ -49,11 +48,9 @@ aliases: - zparnold sig-docs-es-owners: # Admins for Spanish content - raelga - - alexbrand + - electrocucaracha sig-docs-es-reviews: # PR reviews for Spanish content - raelga - - alexbrand - # glo-pena - electrocucaracha sig-docs-fr-owners: # Admins for French content - remyleone @@ -92,15 +89,21 @@ aliases: - daminisatya - mittalyashu sig-docs-id-owners: # Admins for Indonesian content + - ariscahyadi + - danninov - girikuncoro + - habibrosyad - irvifa + - phanama + - wahyuoi sig-docs-id-reviews: # PR reviews for Indonesian content + - ariscahyadi + - danninov - girikuncoro - habibrosyad - irvifa - - wahyuoi - phanama - - danninov + - wahyuoi sig-docs-it-owners: # Admins for Italian content - fabriziopandini - Fale @@ -169,14 +172,20 @@ aliases: # zhangxiaoyu-zidif sig-docs-pt-owners: # Admins for Portuguese content - femrtnz + - jailton - jcjesus - devlware - jhonmike + - rikatz + - yagonobre sig-docs-pt-reviews: # PR reviews for Portugese content - femrtnz + - jailton - jcjesus - devlware - jhonmike + - rikatz + - yagonobre sig-docs-vi-owners: # Admins for Vietnamese content - huynguyennovem - ngtuna diff --git a/README-de.md b/README-de.md 
index b570f43671591..b6f4491e70c9b 100644 --- a/README-de.md +++ b/README-de.md @@ -9,7 +9,7 @@ Herzlich willkommen! Dieses Repository enthält alle Assets, die zur Erstellung Sie können auf die Schaltfläche **Fork** im oberen rechten Bereich des Bildschirms klicken, um eine Kopie dieses Repositorys in Ihrem GitHub-Konto zu erstellen. Diese Kopie wird als *Fork* bezeichnet. Nehmen Sie die gewünschten Änderungen an Ihrem Fork vor. Wenn Sie bereit sind, diese Änderungen an uns zu senden, gehen Sie zu Ihrem Fork und erstellen Sie eine neue Pull-Anforderung, um uns darüber zu informieren. -Sobald Ihre Pull-Anfrage erstellt wurde, übernimmt ein Rezensent von Kubernetes die Verantwortung für klares, umsetzbares Feedback. Als Eigentümer des Pull-Request **liegt es in Ihrer Verantwortung Ihren Pull-Reqest enstsprechend des Feedbacks, dass Sie vom Kubernetes-Reviewer erhalten haben abzuändern.** Beachten Sie auch, dass Sie am Ende mehr als einen Rezensenten von Kubernetes erhalten, der Ihnen Feedback gibt, oder dass Sie Rückmeldungen von einem Rezensenten von Kubernetes erhalten, der sich von demjenigen unterscheidet, der ursprünglich für das Feedback zugewiesen wurde. In einigen Fällen kann es vorkommen, dass einer Ihrer Prüfer bei Bedarf eine technische Überprüfung von einem [Kubernetes Tech-Reviewer](https://github.com/kubernetes/website/wiki/tech-reviewers) anfordert. Reviewer geben ihr Bestes, um zeitnah Feedback zu geben, die Antwortzeiten können jedoch je nach den Umständen variieren. +Sobald Ihre Pull-Anfrage erstellt wurde, übernimmt ein Rezensent von Kubernetes die Verantwortung für klares, umsetzbares Feedback. Als Eigentümer des Pull-Requests **liegt es in Ihrer Verantwortung, Ihren Pull-Request entsprechend dem Feedback, das Sie vom Kubernetes-Reviewer erhalten haben, abzuändern.** Beachten Sie auch, dass Sie am Ende mehr als einen Rezensenten von Kubernetes erhalten, der Ihnen Feedback gibt, oder dass Sie Rückmeldungen von einem Rezensenten von Kubernetes erhalten, der sich von demjenigen unterscheidet, der ursprünglich für das Feedback zugewiesen wurde. In einigen Fällen kann es vorkommen, dass einer Ihrer Prüfer bei Bedarf eine technische Überprüfung von einem [Kubernetes Tech-Reviewer](https://github.com/kubernetes/website/wiki/tech-reviewers) anfordert. Reviewer geben ihr Bestes, um zeitnah Feedback zu geben, die Antwortzeiten können jedoch je nach den Umständen variieren. Weitere Informationen zum Beitrag zur Kubernetes-Dokumentation finden Sie unter: diff --git a/README-es.md b/README-es.md index f5b1e870dddb3..3f007461259c7 100644 --- a/README-es.md +++ b/README-es.md @@ -30,6 +30,17 @@ El método recomendado para levantar una copia local del sitio web kubernetes.io > Si prefiere levantar el sitio web sin utilizar **Docker**, puede seguir las instrucciones disponibles en la sección [Levantando kubernetes.io en local con Hugo](#levantando-kubernetesio-en-local-con-hugo). +**`Nota`: Esto aplica al procedimiento de construir una imagen de Docker e iniciar el servidor.** +El sitio web de Kubernetes utiliza el tema Docsy para Hugo. Si aún no lo ha hecho, se sugiere instalar los **submódulos** y otras dependencias de las herramientas de desarrollo ejecutando el siguiente comando de `git`: + +```bash +# pull de los submódulos del repositorio +git submodule update --init --recursive --depth 1 + +``` + +Si identifica que `git` reconoce una gran cantidad de cambios nuevos en el proyecto, la forma más simple de solucionarlo es cerrar y volver a abrir el proyecto en el editor.
Los submódulos son automáticamente detectados por `git`, pero los plugins usados por los editores pueden tener dificultades para ser cargados. + Una vez tenga Docker [configurado en su máquina](https://www.docker.com/get-started), puede construir la imagen de Docker `kubernetes-hugo` localmente ejecutando el siguiente comando en la raíz del repositorio: ```bash @@ -73,4 +84,4 @@ La participación en la comunidad de Kubernetes está regulada por el [Código d Kubernetes es posible gracias a la participación de la comunidad y la documentación es vital para facilitar el acceso al proyecto. -Agradecemos muchísimo sus contribuciones a nuestro sitio web y nuestra documentación. \ No newline at end of file +Agradecemos muchísimo sus contribuciones a nuestro sitio web y nuestra documentación. diff --git a/README-pl.md b/README-pl.md index 7d89d518cb5f4..5426aef445ca8 100644 --- a/README-pl.md +++ b/README-pl.md @@ -1,60 +1,45 @@ # Dokumentacja projektu Kubernetes -[![Build Status](https://api.travis-ci.org/kubernetes/website.svg?branch=master)](https://travis-ci.org/kubernetes/website) -[![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) - -Witamy! +[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) W tym repozytorium znajdziesz wszystko, czego potrzebujesz do zbudowania [strony internetowej Kubernetesa wraz z dokumentacją](https://kubernetes.io/). Bardzo nam miło, że chcesz wziąć udział w jej współtworzeniu! -## Twój wkład w dokumentację ++ [Twój wkład w dokumentację](#twój-wkład-w-dokumentację) ++ [Informacje o wersjach językowych](#informacje-o-wersjach-językowych) -Możesz kliknąć w przycisk **Fork** w prawym górnym rogu ekranu, aby stworzyć kopię tego repozytorium na swoim koncie GitHub. Taki rodzaj kopii (odgałęzienia) nazywa się *fork*. Zmieniaj w nim, co chcesz, a kiedy będziesz już gotowy/a przesłać te zmiany do nas, przejdź do swojej kopii i stwórz nowy *pull request*, abyśmy zostali o tym poinformowani. +# Jak używać tego repozytorium -Po stworzeniu *pull request*, jeden z recenzentów projektu Kubernetes podejmie się przekazania jasnych wskazówek pozwalających podjąć następne działania. Na Tobie, jako właścicielu *pull requesta*, **spoczywa odpowiedzialność za wprowadzenie poprawek zgodnie z uwagami recenzenta.** Może też się zdarzyć, że swoje uwagi zgłosi więcej niż jeden recenzent, lub że recenzję będzie robił ktoś inny, niż ten, kto został przydzielony na początku. W niektórych przypadkach, jeśli zajdzie taka potrzeba, recenzent może poprosić dodatkowo o recenzję jednego z [recenzentów technicznych](https://github.com/kubernetes/website/wiki/Tech-reviewers). Recenzenci zrobią wszystko, aby odpowiedzieć sprawnie, ale konkretny czas odpowiedzi zależy od wielu czynników. +Możesz uruchomić serwis lokalnie poprzez Hugo (Extended version) lub ze środowiska kontenerowego. Zdecydowanie zalecamy korzystanie z kontenerów, bo dzięki temu lokalna wersja będzie spójna z tym, co jest na oficjalnej stronie. 
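Both localization hunks above flag the same two local-setup pitfalls: uninitialized submodules and a non-extended Hugo build. A minimal pre-flight check, assuming `git` and `hugo` are on your `PATH` (the `grep` pattern is illustrative, not from the repo):

```bash
# a leading '-' in the output means a submodule has not been initialized yet
git submodule status

# the site only builds with the extended Hugo flavor; check for the keyword
hugo version | grep -i extended || echo "warning: this Hugo build is not 'extended'"
```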
-Więcej informacji na temat współpracy przy tworzeniu dokumentacji znajdziesz na stronach: +## Wymagania wstępne -* [Jak rozpocząć współpracę](https://kubernetes.io/docs/contribute/start/) -* [Podgląd wprowadzanych zmian w dokumentacji](http://kubernetes.io/docs/contribute/intermediate#view-your-changes-locally) -* [Szablony stron](https://kubernetes.io/docs/contribute/style/page-content-types/) -* [Styl pisania dokumentacji](http://kubernetes.io/docs/contribute/style/style-guide/) -* [Lokalizacja dokumentacji Kubernetes](https://kubernetes.io/docs/contribute/localization/) - -## Różne wersje językowe `README.md` +Aby móc skorzystać z tego repozytorium, musisz lokalnie zainstalować: -| | | -|----------------------------------------|----------------------------------------| -| [README po angielsku](README.md) | [README po francusku](README-fr.md) | -| [README po koreańsku](README-ko.md) | [README po niemiecku](README-de.md) | -| [README po portugalsku](README-pt.md) | [README w hindi](README-hi.md) | -| [README po hiszpańsku](README-es.md) | [README po indonezyjsku](README-id.md) | -| [README po chińsku](README-zh.md) | [README po japońsku](README-ja.md) | -| [README po wietnamsku](README-vi.md) | [README po rosyjsku](README-ru.md) | -| [README po włosku](README-it.md) | [README po ukraińsku](README-uk.md) | -| | | +- [npm](https://www.npmjs.com/) +- [Go](https://golang.org/) +- [Hugo (Extended version)](https://gohugo.io/) +- Środowisko obsługi kontenerów, np. [Docker-a](https://www.docker.com/). -## Jak uruchomić lokalną kopię strony przy pomocy Dockera? +Przed rozpoczęciem zainstaluj niezbędne zależności. Sklonuj repozytorium i przejdź do odpowiedniego katalogu: -Zalecaną metodą uruchomienia serwisu internetowego Kubernetesa lokalnie jest użycie specjalnego obrazu [Dockera](https://docker.com), który zawiera generator stron statycznych [Hugo](https://gohugo.io). +``` +git clone https://github.com/kubernetes/website.git +cd website +``` -> Użytkownicy Windows będą potrzebowali dodatkowych narzędzi, które mogą zainstalować przy pomocy [Chocolatey](https://chocolatey.org). +Strona Kubernetesa używa [Docsy Hugo theme](https://github.com/google/docsy#readme). Nawet jeśli planujesz uruchomić serwis w środowisku kontenerowym, zalecamy pobranie podmodułów i innych zależności za pomocą polecenia: -```bash -choco install make +``` +# pull in the Docsy submodule +git submodule update --init --recursive --depth 1 ``` -> Jeśli wolisz uruchomić serwis lokalnie bez Dockera, przeczytaj [jak uruchomić serwis lokalnie przy pomocy Hugo](#jak-uruchomić-lokalną-kopię-strony-przy-pomocy-hugo) poniżej. +## Uruchomienie serwisu w kontenerze -Jeśli [zainstalowałeś i uruchomiłeś](https://www.docker.com/get-started) już Dockera, zbuduj obraz `kubernetes-hugo` lokalnie: +Aby zbudować i uruchomić serwis wewnątrz środowiska kontenerowego, wykonaj następujące polecenia: -```bash -make container-image ``` - -Po zbudowaniu obrazu, możesz uruchomić serwis lokalnie: - -```bash +make container-image make container-serve ``` @@ -62,29 +47,106 @@ Aby obejrzeć zawartość serwisu otwórz w przeglądarce adres http://localhost ## Jak uruchomić lokalną kopię strony przy pomocy Hugo? -Zajrzyj do [oficjalnej dokumentacji Hugo](https://gohugo.io/getting-started/installing/) po instrukcję instalacji. Upewnij się, że instalujesz rozszerzoną wersję Hugo, określoną przez zmienną środowiskową `HUGO_VERSION` w pliku [`netlify.toml`](netlify.toml#L9). 
+Upewnij się, że zainstalowałeś odpowiednią wersję Hugo "extended", określoną przez zmienną środowiskową `HUGO_VERSION` w pliku [`netlify.toml`](netlify.toml#L10). -Aby uruchomić serwis lokalnie po instalacji Hugo, napisz: +Aby uruchomić i przetestować serwis lokalnie, wykonaj: ```bash +# install dependencies +npm ci make serve ``` Zostanie uruchomiony lokalny serwer Hugo na porcie 1313. Otwórz w przeglądarce adres http://localhost:1313, aby obejrzeć zawartość serwisu. Po każdej zmianie plików źródłowych, Hugo automatycznie aktualizuje stronę i odświeża jej widok w przeglądarce. -## Społeczność, listy dyskusyjne, uczestnictwo i wsparcie +## Budowanie dokumentacji źródłowej API + +Budowanie dokumentacji źródłowej API zostało opisane w [angielskiej wersji pliku README.md](README.md#building-the-api-reference-pages). + +## Rozwiązywanie problemów +### error: failed to transform resource: TOCSS: failed to transform "scss/main.scss" (text/x-scss): this feature is not available in your current Hugo version + +Z przyczyn technicznych, Hugo jest rozprowadzany w dwóch wersjach. Aktualny serwis używa tylko wersji **Hugo Extended**. Na stronie z [wydaniami](https://github.com/gohugoio/hugo/releases) poszukaj archiwum z `extended` w nazwie. Dla potwierdzenia, uruchom `hugo version` i poszukaj słowa `extended`. + +### Błąd w środowisku macOS: "too many open files" + +Jeśli po uruchomieniu `make serve` na macOS widzisz następujący błąd: + +``` +ERROR 2020/08/01 19:09:18 Error: listen tcp 127.0.0.1:1313: socket: too many open files +make: *** [serve] Error 1 +``` + +sprawdź aktualny limit otwartych plików: -Zajrzyj na stronę [społeczności](http://kubernetes.io/community/), aby dowiedzieć się, jak możesz zaangażować się w jej działania. +`launchctl limit maxfiles` + +Uruchom następujące polecenia (na podstawie https://gist.github.com/tombigel/d503800a282fcadbee14b537735d202c): + +```shell +#!/bin/sh + +# These are the original gist links, linking to my gists now. +# curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxfiles.plist +# curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxproc.plist + +curl -O https://gist.githubusercontent.com/tombigel/d503800a282fcadbee14b537735d202c/raw/ed73cacf82906fdde59976a0c8248cce8b44f906/limit.maxfiles.plist +curl -O https://gist.githubusercontent.com/tombigel/d503800a282fcadbee14b537735d202c/raw/ed73cacf82906fdde59976a0c8248cce8b44f906/limit.maxproc.plist + +sudo mv limit.maxfiles.plist /Library/LaunchDaemons +sudo mv limit.maxproc.plist /Library/LaunchDaemons + +sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist +sudo chown root:wheel /Library/LaunchDaemons/limit.maxproc.plist + +sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist +``` + +Przedstawiony sposób powinien działać dla MacOS w wersji Catalina i Mojave. + + +# Zaangażowanie w prace SIG Docs + +O społeczności SIG Docs i terminach spotkań dowiesz się z [jej strony](https://github.com/kubernetes/community/tree/master/sig-docs#meetings).
Możesz kontaktować się z gospodarzami projektu za pomocą: -* [Komunikatora Slack](https://kubernetes.slack.com/messages/sig-docs) -* [List dyskusyjnych](https://groups.google.com/forum/#!forum/kubernetes-sig-docs) +- [Komunikatora Slack](https://kubernetes.slack.com/messages/sig-docs) [Tutaj możesz dostać zaproszenie do tej grupy Slack-a](https://slack.k8s.io/) +- [List dyskusyjnych](https://groups.google.com/forum/#!forum/kubernetes-sig-docs) + +# Twój wkład w dokumentację + +Możesz kliknąć w przycisk **Fork** w prawym górnym rogu ekranu, aby stworzyć kopię tego repozytorium na swoim koncie GitHub. Taki rodzaj kopii (odgałęzienia) nazywa się *fork*. Zmieniaj w nim, co chcesz, a kiedy będziesz już gotowy/a przesłać te zmiany do nas, przejdź do swojej kopii i stwórz nowy *pull request*, abyśmy zostali o tym poinformowani. + +Po stworzeniu *pull request*, jeden z recenzentów projektu Kubernetes podejmie się przekazania jasnych wskazówek pozwalających podjąć następne działania. Na Tobie, jako właścicielu *pull requesta*, **spoczywa odpowiedzialność za wprowadzenie poprawek zgodnie z uwagami recenzenta.** + +Może też się zdarzyć, że swoje uwagi zgłosi więcej niż jeden recenzent, lub że recenzję będzie robił ktoś inny, niż ten, kto został przydzielony na początku. + +W niektórych przypadkach, jeśli zajdzie taka potrzeba, recenzent może poprosić dodatkowo o recenzję jednego z [recenzentów technicznych](https://github.com/kubernetes/website/wiki/Tech-reviewers). Recenzenci zrobią wszystko, aby odpowiedzieć sprawnie, ale konkretny czas odpowiedzi zależy od wielu czynników. + +Więcej informacji na temat współpracy przy tworzeniu dokumentacji znajdziesz na stronach: + +* [Udział w rozwijaniu dokumentacji](https://kubernetes.io/docs/contribute/) +* [Rodzaje stron](https://kubernetes.io/docs/contribute/style/page-content-types/) +* [Styl pisania dokumentacji](http://kubernetes.io/docs/contribute/style/style-guide/) +* [Lokalizacja dokumentacji Kubernetes](https://kubernetes.io/docs/contribute/localization/) + +# Różne wersje językowe `README.md` + +| Język | Język | +|---|---| +| [angielski](README.md) | [francuski](README-fr.md) | +| [koreański](README-ko.md) | [niemiecki](README-de.md) | +| [portugalski](README-pt.md) | [hindi](README-hi.md) | +| [hiszpański](README-es.md) | [indonezyjski](README-id.md) | +| [chiński](README-zh.md) | [japoński](README-ja.md) | +| [wietnamski](README-vi.md) | [rosyjski](README-ru.md) | +| [włoski](README-it.md) | [ukraiński](README-uk.md) | -### Zasady postępowania +# Zasady postępowania -Udział w działaniach społeczności Kubernetes jest regulowany przez [Kodeks postępowania](code-of-conduct.md). +Udział w działaniach społeczności Kubernetesa jest regulowany przez [Kodeks postępowania CNCF](https://github.com/cncf/foundation/blob/master/code-of-conduct-languages/pl.md). -## Dziękujemy! +# Dziękujemy! Kubernetes rozkwita dzięki zaangażowaniu społeczności — doceniamy twój wkład w tworzenie naszego serwisu i dokumentacji! 
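For reference, the `container-serve` recipe patched in the Makefile hunk at the top of this diff expands to roughly the invocation below. This is a sketch only: it assumes `$(CONTAINER_RUN)` wraps `docker run --rm --interactive --tty` and that `$(CONTAINER_IMAGE)` resolves to the image built by `make container-image` (tagged here as `kubernetes-hugo` for illustration); neither expansion is shown in this diff.

```bash
# Hypothetical expansion of `make container-serve` after this change:
# all capabilities are dropped except AUDIT_WRITE, the root filesystem is
# read-only, and /tmp is the only writable (tmpfs) scratch space.
docker run --rm --interactive --tty \
  --cap-drop=ALL --cap-add=AUDIT_WRITE \
  --read-only \
  --mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 \
  -p 1313:1313 \
  kubernetes-hugo \
  hugo server --buildFuture --bind 0.0.0.0 --destination /tmp/hugo --cleanDestinationDir
```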
diff --git a/README-pt.md b/README-pt.md index 3154b77bab6f2..e27bf544d158f 100644 --- a/README-pt.md +++ b/README-pt.md @@ -1,76 +1,193 @@ # A documentação do Kubernetes -[![Build Status](https://api.travis-ci.org/kubernetes/website.svg?branch=master)](https://travis-ci.org/kubernetes/website) -[![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) +[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) -Bem vindos! Este repositório abriga todos os recursos necessários para criar o [site e documentação do Kubernetes](https://kubernetes.io/). Estamos muito satisfeitos por você querer contribuir! +Bem-vindos! Este repositório contém todos os recursos necessários para criar o [website e documentação do Kubernetes](https://kubernetes.io/). Estamos muito satisfeitos por você querer contribuir! -## Contribuindo com os documentos +# Utilizando este repositório -Você pode clicar no botão **Fork** na área superior direita da tela para criar uma cópia desse repositório na sua conta do GitHub. Esta cópia é chamada de *fork*. Faça as alterações desejadas no seu fork e, quando estiver pronto para enviar as alterações para nós, vá até o fork e crie uma nova solicitação de pull para nos informar sobre isso. +Você pode executar o website localmente utilizando o Hugo (versão Extended), ou você pode executá-lo em um container runtime. É altamente recomendável utilizar um container runtime, pois garante a consistência na implantação do website real. -Depois que seu **pull request** for criado, um revisor do Kubernetes assumirá a responsabilidade de fornecer um feedback claro e objetivo. Como proprietário do pull request, **é sua responsabilidade modificar seu pull request para abordar o feedback que foi fornecido a você pelo revisor do Kubernetes.** Observe também que você pode acabar tendo mais de um revisor do Kubernetes para fornecer seu feedback ou você pode acabar obtendo feedback de um revisor do Kubernetes que é diferente daquele originalmente designado para lhe fornecer feedback. Além disso, em alguns casos, um de seus revisores pode solicitar uma revisão técnica de um [revisor de tecnologia Kubernetes](https://github.com/kubernetes/website/wiki/Tech-reviewers) quando necessário. Os revisores farão o melhor para fornecer feedback em tempo hábil, mas o tempo de resposta pode variar de acordo com as circunstâncias. +## Pré-requisitos -Para mais informações sobre como contribuir com a documentação do Kubernetes, consulte: +Para usar este repositório, você precisa instalar: -* [Comece a contribuir](https://kubernetes.io/docs/contribute/start/) -* [Preparando suas alterações na documentação](http://kubernetes.io/docs/contribute/intermediate#view-your-changes-locally) -* [Usando Modelos de Página](http://kubernetes.io/docs/contribute/style/page-templates/) -* [Guia de Estilo da Documentação](http://kubernetes.io/docs/contribute/style/style-guide/) -* [Localizando documentação do Kubernetes](https://kubernetes.io/docs/contribute/localization/) +- [npm](https://www.npmjs.com/) +- [Go](https://golang.org/) +- [Hugo (versão Extended)](https://gohugo.io/) +- Um container runtime, por exemplo [Docker](https://www.docker.com/).
-Você pode contactar os mantenedores da localização em Português em: - -* Felipe ([GitHub - @femrtnz](https://github.com/femrtnz)) -* [Slack channel](https://kubernetes.slack.com/messages/kubernetes-docs-pt) +Antes de você iniciar, instale as dependências, clone o repositório e navegue até o diretório: -## Executando o site localmente usando o Docker +``` +git clone https://github.com/kubernetes/website.git +cd website +``` -A maneira recomendada de executar o site do Kubernetes localmente é executar uma imagem especializada do [Docker](https://docker.com) que inclui o gerador de site estático [Hugo](https://gohugo.io). +O website do Kubernetes utiliza o [tema Docsy Hugo](https://github.com/google/docsy#readme). Mesmo que você planeje executar o website em um container, é altamente recomendado baixar os submódulos e outras dependências executando o seguinte comando: -> Se você está rodando no Windows, você precisará de mais algumas ferramentas que você pode instalar com o [Chocolatey](https://chocolatey.org). `choco install make` +``` +# Baixar o submódulo Docsy +git submodule update --init --recursive --depth 1 +``` -> Se você preferir executar o site localmente sem o Docker, consulte [Executando o site localmente usando o Hugo](#executando-o-site-localmente-usando-o-hugo) abaixo. +## Executando o website usando um container -Se você tiver o Docker [em funcionamento](https://www.docker.com/get-started), crie a imagem do Docker do `kubernetes-hugo` localmente: +Para executar o build do website em um container, execute o comando abaixo para criar a imagem do container e executá-la: -```bash +``` make container-image +make container-serve ``` -Depois que a imagem foi criada, você pode executar o site localmente: +Abra seu navegador em http://localhost:1313 para visualizar o website. Conforme você faz alterações nos arquivos fontes, o Hugo atualiza o website e força a atualização do navegador. + +## Executando o website localmente utilizando o Hugo + +Consulte a [documentação oficial do Hugo](https://gohugo.io/getting-started/installing/) para instruções de instalação do Hugo. Certifique-se de instalar a versão do Hugo especificada pela variável de ambiente `HUGO_VERSION` no arquivo [`netlify.toml`](netlify.toml#L9). + +Para executar o build e testar o website localmente, execute: ```bash +# instalar dependências +npm ci +make serve +``` + +Isso iniciará localmente o Hugo na porta 1313. Abra o seu navegador em http://localhost:1313 para visualizar o website. Conforme você faz alterações nos arquivos fontes, o Hugo atualiza o website e força uma atualização no navegador. + +## Construindo a página de referência da API + +A página de referência da API localizada em `content/en/docs/reference/kubernetes-api` é construída a partir da especificação do Swagger utilizando https://github.com/kubernetes-sigs/reference-docs/tree/master/gen-resourcesdocs. + +Siga os passos abaixo para atualizar a página de referência para uma nova versão do Kubernetes: + +OBS: substitua o "v1.20" no exemplo a seguir pela versão a ser atualizada + +1. Obter o submódulo `kubernetes-resources-reference`: + +``` +git submodule update --init --recursive --depth 1 +``` + +2. Criar a nova versão da API no submódulo e adicionar a especificação do Swagger: + +``` +mkdir api-ref-generator/gen-resourcesdocs/api/v1.20 +curl 'https://raw.githubusercontent.com/kubernetes/kubernetes/master/api/openapi-spec/swagger.json' > api-ref-generator/gen-resourcesdocs/api/v1.20/swagger.json +``` + +3. 
Copiar o sumário e os campos de configuração para a nova versão a partir da versão anterior: + +``` +mkdir api-ref-generator/gen-resourcesdocs/config/v1.20 +cp api-ref-generator/gen-resourcesdocs/config/v1.19/* api-ref-generator/gen-resourcesdocs/config/v1.20/ +``` + +4. Ajustar os arquivos `toc.yaml` e `fields.yaml` para refletir as mudanças entre as duas versões. + +5. Em seguida, gerar as páginas: + +``` +make api-reference +``` + +Você pode validar o resultado localmente gerando e disponibilizando o site a partir da imagem do container: + +``` +make container-image make container-serve ``` -Abra seu navegador para http://localhost:1313 para visualizar o site. Conforme você faz alterações nos arquivos de origem, Hugo atualiza o site e força a atualização do navegador. +Abra o seu navegador em http://localhost:1313/docs/reference/kubernetes-api/ para visualizar a página de referência da API. -## Executando o site localmente usando o Hugo +6. Quando todas as mudanças forem refletidas nos arquivos de configuração `toc.yaml` e `fields.yaml`, crie um pull request com a nova página de referência de API. -Veja a [documentação oficial do Hugo](https://gohugo.io/getting-started/installing/) para instruções de instalação do Hugo. Certifique-se de instalar a versão do Hugo especificada pela variável de ambiente `HUGO_VERSION` no arquivo [`netlify.toml`](netlify.toml#L9). +## Troubleshooting +### error: failed to transform resource: TOCSS: failed to transform "scss/main.scss" (text/x-scss): this feature is not available in your current Hugo version -Para executar o site localmente quando você tiver o Hugo instalado: +Por motivos técnicos, o Hugo é disponibilizado em dois conjuntos de binários. O website atual funciona apenas na versão **Hugo Extended**. Na [página de releases](https://github.com/gohugoio/hugo/releases) procure por arquivos com `extended` no nome. Para confirmar, execute `hugo version` e procure pela palavra `extended`. -```bash -make serve +### Troubleshooting macOS for too many open files + +Se você executar o comando `make serve` no macOS e retornar o seguinte erro: + +``` +ERROR 2020/08/01 19:09:18 Error: listen tcp 127.0.0.1:1313: socket: too many open files +make: *** [serve] Error 1 ``` -Isso iniciará o servidor Hugo local na porta 1313. Abra o navegador para http://localhost:1313 para visualizar o site. Conforme você faz alterações nos arquivos de origem, Hugo atualiza o site e força a atualização do navegador. +Verifique o limite atual para arquivos abertos: -## Comunidade, discussão, contribuição e apoio +`launchctl limit maxfiles` -Aprenda a se envolver com a comunidade do Kubernetes na [página da comunidade](http://kubernetes.io/community/). +Em seguida, execute os seguintes comandos (adaptado de https://gist.github.com/tombigel/d503800a282fcadbee14b537735d202c): -Você pode falar com os mantenedores deste projeto: +```shell +#!/bin/sh -- [Slack](https://kubernetes.slack.com/messages/sig-docs) +# Esses são os links do gist original, agora vinculados aos meus gists.
+# curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxfiles.plist +# curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxproc.plist + +curl -O https://gist.githubusercontent.com/tombigel/d503800a282fcadbee14b537735d202c/raw/ed73cacf82906fdde59976a0c8248cce8b44f906/limit.maxfiles.plist +curl -O https://gist.githubusercontent.com/tombigel/d503800a282fcadbee14b537735d202c/raw/ed73cacf82906fdde59976a0c8248cce8b44f906/limit.maxproc.plist + +sudo mv limit.maxfiles.plist /Library/LaunchDaemons +sudo mv limit.maxproc.plist /Library/LaunchDaemons + +sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist +sudo chown root:wheel /Library/LaunchDaemons/limit.maxproc.plist + +sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist +``` + +Esta solução funciona tanto para o MacOS Catalina quanto para o MacOS Mojave. + +### Erro de "Out of Memory" + +Se você executar o comando `make container-serve` e retornar o seguinte erro: +``` +make: *** [container-serve] Error 137 +``` + +Verifique a quantidade de memória disponível para o agente de execução de contêiner. No caso do Docker Desktop para macOS, abra o menu "Preferences..." -> "Resources..." e tente disponibilizar mais memória. + +# Comunidade, discussão, contribuição e apoio + +Saiba mais sobre a comunidade Kubernetes SIG Docs e reuniões na [página da comunidade](http://kubernetes.io/community/). + +Você também pode entrar em contato com os mantenedores deste projeto em: + +- [Slack](https://kubernetes.slack.com/messages/sig-docs) ([Obtenha o convite para este Slack](https://slack.k8s.io/)) - [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-docs) -### Código de conduta +# Contribuindo com os documentos + +Você pode clicar no botão **Fork** na área superior direita da tela para criar uma cópia desse repositório na sua conta do GitHub. Esta cópia é chamada de *fork*. Faça as alterações desejadas no seu fork e, quando estiver pronto para enviar as alterações para nós, vá até o fork e crie um novo **pull request** para nos informar sobre isso. + +Depois que seu **pull request** for criado, um revisor do Kubernetes assumirá a responsabilidade de fornecer um feedback claro e objetivo. Como proprietário do pull request, **é sua responsabilidade modificar seu pull request para atender ao feedback que foi fornecido a você pelo revisor do Kubernetes.** + +Observe também que você pode acabar tendo mais de um revisor do Kubernetes para fornecer seu feedback ou você pode acabar obtendo feedback de um outro revisor do Kubernetes diferente daquele originalmente designado para lhe fornecer o feedback. + +Além disso, em alguns casos, um de seus revisores pode solicitar uma revisão técnica de um [revisor técnico do Kubernetes](https://github.com/kubernetes/website/wiki/Tech-reviewers) quando necessário. Os revisores farão o melhor para fornecer feedbacks em tempo hábil, mas o tempo de resposta pode variar de acordo com as circunstâncias.
+ +Para mais informações sobre como contribuir com a documentação do Kubernetes, consulte: + +* [Contribua com a documentação do Kubernetes](https://kubernetes.io/docs/contribute/) +* [Tipos de conteúdo de página](https://kubernetes.io/docs/contribute/style/page-content-types/) +* [Guia de Estilo da Documentação](http://kubernetes.io/docs/contribute/style/style-guide/) +* [Localizando documentação do Kubernetes](https://kubernetes.io/docs/contribute/localization/) + +Você pode contatar os mantenedores da localização em Português em: + +* Felipe ([GitHub - @femrtnz](https://github.com/femrtnz)) +* [Slack channel](https://kubernetes.slack.com/messages/kubernetes-docs-pt) + +# Código de conduta A participação na comunidade Kubernetes é regida pelo [Código de Conduta da Kubernetes](code-of-conduct.md). -## Obrigado! +# Obrigado! -O Kubernetes conta com a participação da comunidade e nós realmente agradecemos suas contribuições para o nosso site e nossa documentação! +O Kubernetes prospera com a participação da comunidade e nós realmente agradecemos suas contribuições para o nosso website e nossa documentação! \ No newline at end of file diff --git a/README.md b/README.md index 44dcc7a7ca679..d548cd32aab7a 100644 --- a/README.md +++ b/README.md @@ -43,6 +43,8 @@ make container-image make container-serve ``` +If you see errors, it probably means that the hugo container did not have enough computing resources available. To solve it, increase the amount of allowed CPU and memory usage for Docker on your machine ([MacOSX](https://docs.docker.com/docker-for-mac/#resources) and [Windows](https://docs.docker.com/docker-for-windows/#resources)). + Open up your browser to http://localhost:1313 to view the website. As you make changes to the source files, Hugo updates the website and forces a browser refresh. ## Running the website locally using Hugo @@ -100,6 +102,8 @@ make container-image make container-serve ``` +In a web browser, go to http://localhost:1313/docs/reference/kubernetes-api/ to view the API reference. + 6. When all changes of the new contract are reflected into the configuration files `toc.yaml` and `fields.yaml`, create a Pull Request with the newly generated API reference pages. 
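Taken together, the numbered API-reference steps above boil down to a short sequence; this is a sketch in which `v1.21` stands for the new release and `v1.20` for the previous one (substitute the versions you are actually targeting):

```bash
# 1. fetch the generator submodule
git submodule update --init --recursive --depth 1

# 2. add the Swagger specification for the new release
mkdir api-ref-generator/gen-resourcesdocs/api/v1.21
curl 'https://raw.githubusercontent.com/kubernetes/kubernetes/master/api/openapi-spec/swagger.json' \
  > api-ref-generator/gen-resourcesdocs/api/v1.21/swagger.json

# 3. copy the table of contents and fields configuration from the previous release
mkdir api-ref-generator/gen-resourcesdocs/config/v1.21
cp api-ref-generator/gen-resourcesdocs/config/v1.20/* api-ref-generator/gen-resourcesdocs/config/v1.21/

# 4./5. adjust toc.yaml and fields.yaml, then generate and preview the pages
make api-reference
make container-image && make container-serve
```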
## Troubleshooting diff --git a/api-ref-generator b/api-ref-generator index ce97454e557b2..78e64febda1b5 160000 --- a/api-ref-generator +++ b/api-ref-generator @@ -1 +1 @@ -Subproject commit ce97454e557b2b164f77326cb06ef619ab623599 +Subproject commit 78e64febda1b53cafc79979c5978b42162cea276 diff --git a/assets/scss/_base.scss b/assets/scss/_base.scss index b1b112cb389c3..4113b49beed39 100644 --- a/assets/scss/_base.scss +++ b/assets/scss/_base.scss @@ -869,3 +869,22 @@ body.td-documentation { display: none; } } + +// nav-tabs and tab-content +.nav-tabs { + border-bottom: none !important; +} + +.td-content .tab-content .highlight { + margin: 0; +} + +.tab-pane { + border-radius: 0.25rem; + padding: 0 16px 16px; + + border: 1px solid #dee2e6; + &:first-of-type.active { + border-top-left-radius: 0; + } +} diff --git a/assets/scss/_custom.scss b/assets/scss/_custom.scss index 568a258a1c5e1..29a8e1ecf829a 100644 --- a/assets/scss/_custom.scss +++ b/assets/scss/_custom.scss @@ -639,3 +639,10 @@ body.td-documentation { } } +.td-content { + table code { + background-color: inherit !important; + color: inherit !important; + font-size: inherit !important; + } +} diff --git a/config.toml b/config.toml index d77c315331d32..e1ef9db0149b0 100644 --- a/config.toml +++ b/config.toml @@ -91,7 +91,7 @@ blog = "/:section/:year/:month/:day/:slug/" [outputs] home = [ "HTML", "RSS", "HEADERS" ] page = [ "HTML"] -section = [ "HTML"] +section = [ "HTML", "print" ] # Add a "text/netlify" media type for auto-generating the _headers file [mediaTypes] @@ -138,10 +138,10 @@ time_format_default = "January 02, 2006 at 3:04 PM PST" description = "Production-Grade Container Orchestration" showedit = true -latest = "v1.20" +latest = "v1.21" -fullversion = "v1.20.0" -version = "v1.20" +fullversion = "v1.21.0" +version = "v1.21" githubbranch = "master" docsbranch = "master" deprecated = false @@ -178,40 +178,40 @@ js = [ ] [[params.versions]] -fullversion = "v1.20.0" -version = "v1.20" -githubbranch = "v1.20.0" +fullversion = "v1.21.0" +version = "v1.21" +githubbranch = "v1.21.0" docsbranch = "master" url = "https://kubernetes.io" [[params.versions]] -fullversion = "v1.19.4" +fullversion = "v1.20.5" +version = "v1.20" +githubbranch = "v1.20.5" +docsbranch = "release-1.20" +url = "https://v1-20.docs.kubernetes.io" + +[[params.versions]] +fullversion = "v1.19.9" version = "v1.19" -githubbranch = "v1.19.4" +githubbranch = "v1.19.9" docsbranch = "release-1.19" url = "https://v1-19.docs.kubernetes.io" [[params.versions]] -fullversion = "v1.18.12" +fullversion = "v1.18.17" version = "v1.18" -githubbranch = "v1.18.12" +githubbranch = "v1.18.17" docsbranch = "release-1.18" url = "https://v1-18.docs.kubernetes.io" [[params.versions]] -fullversion = "v1.17.14" +fullversion = "v1.17.17" version = "v1.17" -githubbranch = "v1.17.14" +githubbranch = "v1.17.17" docsbranch = "release-1.17" url = "https://v1-17.docs.kubernetes.io" -[[params.versions]] -fullversion = "v1.16.15" -version = "v1.16" -githubbranch = "v1.16.15" -docsbranch = "release-1.16" -url = "https://v1-16.docs.kubernetes.io" - # User interface configuration [params.ui] @@ -399,15 +399,15 @@ time_format_blog = "02.01.2006" # A list of language codes to look for untranslated content, ordered from left to right. 
language_alternatives = ["en"] -[languages.pt] +[languages.pt-br] title = "Kubernetes" description = "Orquestração de contêineres em nível de produção" languageName ="Português" weight = 9 -contentDir = "content/pt" +contentDir = "content/pt-br" languagedirection = "ltr" -[languages.pt.params] +[languages.pt-br.params] time_format_blog = "02.01.2006" # A list of language codes to look for untranslated content, ordered from left to right. language_alternatives = ["en"] diff --git a/content/de/docs/concepts/architecture/nodes.md b/content/de/docs/concepts/architecture/nodes.md index b790e68035be7..933346a4d5dfb 100644 --- a/content/de/docs/concepts/architecture/nodes.md +++ b/content/de/docs/concepts/architecture/nodes.md @@ -147,7 +147,8 @@ Die zweite ist, die interne Node-Liste des Node Controllers mit der Liste der ve Wenn ein Node in einer Cloud-Umgebung ausgeführt wird und sich in einem schlechten Zustand befindet, fragt der Node Controller den Cloud-Anbieter, ob die virtuelle Maschine für diesen Node noch verfügbar ist. Wenn nicht, löscht der Node Controller den Node aus seiner Node-Liste. Der dritte ist die Überwachung des Zustands der Nodes. Der Node Controller ist dafür verantwortlich, -die NodeReady-Bedingung von NodeStatus auf ConditionUnknown zu aktualisieren, wenn ein wenn ein Node unerreichbar wird (der Node Controller empfängt aus irgendeinem Grund keine Herzschläge mehr, z.B. weil der Node heruntergefahren ist) und später alle Pods aus dem Node zu entfernen (und diese ordnungsgemäss zu beenden), wenn der Node weiterhin unzugänglich ist. (Die Standard-Timeouts sind 40s, um ConditionUnknown zu melden und 5 Minuten, um mit der Evakuierung der Pods zu beginnen). +die NodeReady-Bedingung von NodeStatus auf ConditionUnknown zu aktualisieren, wenn ein Node unerreichbar wird (der Node Controller empfängt aus irgendeinem Grund keine Herzschläge mehr, z.B. weil der Node heruntergefahren ist) und später alle Pods aus dem Node zu entfernen (und diese ordnungsgemäss zu beenden), wenn der Node weiterhin unzugänglich ist. (Die Standard-Timeouts sind 40s, um ConditionUnknown zu melden und 5 Minuten, um mit der Evakuierung der Pods zu beginnen). + Der Node Controller überprüft den Zustand jedes Nodes alle `--node-monitor-period` Sekunden. diff --git a/content/de/docs/concepts/workloads/pods/_index.md b/content/de/docs/concepts/workloads/pods/_index.md new file mode 100644 index 0000000000000..956190e6c760b --- /dev/null +++ b/content/de/docs/concepts/workloads/pods/_index.md @@ -0,0 +1,369 @@ +--- +title: Pods +content_type: concept +weight: 10 +no_list: true +card: + name: concepts + weight: 60 +--- + + + +_Pods_ sind die kleinsten einsetzbaren Einheiten, die in Kubernetes +erstellt und verwaltet werden können. + +Ein _Pod_ (übersetzt Gruppe/Schote, wie z. B. eine Gruppe von Walen oder eine +Erbsenschote) ist eine Gruppe von einem oder mehreren +{{< glossary_tooltip text="Containern" term_id="container" >}} mit gemeinsam +genutzten Speicher- und Netzwerkressourcen und einer Spezifikation für die +Ausführung der Container. Die Ressourcen eines Pods befinden sich immer auf dem +gleichen (virtuellen) Server, werden gemeinsam geplant und in einem +gemeinsamen Kontext ausgeführt. Ein Pod modelliert einen anwendungsspezifischen +"logischen Server": Er enthält eine oder mehrere containerisierte Anwendungen, +die relativ stark voneinander abhängen. 
In Nicht-Cloud-Kontexten sind Anwendungen, die auf +demselben physischen oder virtuellen Server ausgeführt werden, vergleichbar zu +Cloud-Anwendungen, die auf demselben logischen Server ausgeführt werden. + +Ein Pod kann neben Anwendungs-Containern auch sogenannte +[Initialisierungs-Container](/docs/concepts/workloads/pods/init-containers/) +enthalten, die beim Starten des Pods ausgeführt werden. +Es können auch +kurzlebige/[ephemere Container](/docs/concepts/workloads/pods/ephemeral-containers/) +zum Debuggen gestartet werden, wenn dies der Cluster anbietet. + + + +## Was ist ein Pod? + +{{< note >}} +Obwohl Kubernetes abgesehen von [Docker](https://www.docker.com/) auch andere +{{< glossary_tooltip text="Container-Runtimes" term_id="container-runtime" >}} unterstützt, ist Docker am bekanntesten und + es ist hilfreich, Pods mit der Terminologie von Docker zu beschreiben. +{{< /note >}} + +Der gemeinsame Kontext eines Pods besteht aus einer Reihe von Linux-Namespaces, +Cgroups und möglicherweise anderen Aspekten der Isolation, also die gleichen +Dinge, die einen Dockercontainer isolieren. Innerhalb des Kontexts eines Pods +können die einzelnen Anwendungen weitere Unterisolierungen haben. + +Im Sinne von Docker-Konzepten ähnelt ein Pod einer Gruppe von Docker-Containern, +die gemeinsame Namespaces und Dateisystem-Volumes nutzen. + +## Pods verwenden + +Normalerweise müssen keine Pods erzeugt werden, auch keine Singleton-Pods. +Stattdessen werden sie mit Workload-Ressourcen wie {{< glossary_tooltip text="Deployment" term_id="deployment" >}} oder {{< glossary_tooltip text="Job" term_id="job" >}} erzeugt. Für Pods, die von einem Systemzustand +abhängen, ist die Nutzung von {{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}}-Ressourcen zu erwägen. + +Pods in einem Kubernetes-Cluster werden hauptsächlich auf zwei Arten verwendet: + +* **Pods, die einen einzelnen Container ausführen**. Das +"Ein-Container-per-Pod"-Modell ist der häufigste Kubernetes-Anwendungsfall. In +diesem Fall kannst du dir einen Pod als einen Behälter vorstellen, der einen +einzelnen Container enthält; Kubernetes verwaltet die Pods anstatt die +Container direkt zu verwalten. +* **Pods, in denen mehrere Container ausgeführt werden, die zusammenarbeiten +müssen**. Wenn eine Softwareanwendung aus co-lokalisierten Containern besteht, +die sich gemeinsame Ressourcen teilen und stark voneinander abhängen, kann ein +Pod die Container verkapseln. +Diese Container bilden eine einzelne zusammenhängende +Serviceeinheit, z. B. ein Container, der Daten in einem gemeinsam genutzten +Volume öffentlich verfügbar macht, während ein separater _Sidecar_-Container +die Daten aktualisiert. Der Pod fasst die Container, die Speicherressourcen +und eine kurzlebige Netzwerk-Identität als eine Einheit zusammen. + +{{< note >}} +Das Gruppieren mehrerer gemeinsam lokalisierter und gemeinsam verwalteter +Container in einem einzigen Pod ist ein relativ fortgeschrittener +Anwendungsfall. Du solltest diese Architektur nur in bestimmten Fällen +verwenden, wenn deine Container stark voneinander abhängen. +{{< /note >}} + +Jeder Pod sollte eine einzelne Instanz einer gegebenen Anwendung ausführen. Wenn +du deine Anwendung horizontal skalieren willst (um mehr Instanzen auszuführen +und dadurch mehr Gesamtressourcen bereitzustellen), solltest du mehrere Pods +verwenden, einen für jede Instanz. +In Kubernetes wird dies typischerweise als Replikation bezeichnet. +Replizierte Pods werden normalerweise als eine Gruppe durch eine +Workload-Ressource und deren +{{< glossary_tooltip text="Controller" term_id="controller" >}} erstellt +und verwaltet. + +Der Abschnitt [Pods und Controller](#pods-und-controller) beschreibt, wie +Kubernetes Workload-Ressourcen und deren Controller verwendet, um Anwendungen +zu skalieren und zu heilen.
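Zur Veranschaulichung des oben beschriebenen "Ein-Container-per-Pod"-Modells könnte ein minimales Pod-Manifest etwa so aussehen (eine Skizze; Name und Image sind reine Platzhalter und stammen nicht von dieser Seite):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx            # Platzhalter-Name
spec:
  containers:
  - name: nginx
    image: nginx:1.14.2  # Platzhalter-Image
    ports:
    - containerPort: 80
```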
+ +### Wie Pods mehrere Container verwalten + +Pods unterstützen mehrere kooperierende Prozesse (als Container), die eine +zusammenhängende Serviceeinheit bilden. Kubernetes plant und stellt automatisch +sicher, dass sich die Container in einem Pod auf demselben physischen oder +virtuellen Server im Cluster befinden. Die Container können Ressourcen und +Abhängigkeiten gemeinsam nutzen, miteinander kommunizieren und +ferner koordinieren, wann und wie sie beendet werden. + +Zum Beispiel könntest du einen Container haben, der als Webserver für Dateien in +einem gemeinsamen Volume arbeitet. Und ein separater "Sidecar"-Container +aktualisiert die Daten von einer externen Datenquelle, siehe folgende +Abbildung: + +{{< figure src="/images/docs/pod.svg" alt="Pod-Beispieldiagramm" width="50%" >}} + +Einige Pods haben sowohl {{< glossary_tooltip text="Initialisierungs-Container" term_id="init-container" >}} als auch {{< glossary_tooltip text="Anwendungs-Container" term_id="app-container" >}}. +Initialisierungs-Container werden gestartet und beendet bevor die +Anwendungs-Container gestartet werden. + +Pods stellen standardmäßig zwei Arten von gemeinsamen Ressourcen für die +enthaltenen Container bereit: +[Netzwerk](#pod-netzwerk) und [Speicher](#datenspeicherung-in-pods). + + +## Mit Pods arbeiten + +Du wirst selten einzelne Pods direkt in Kubernetes erstellen, selbst +Singleton-Pods. Das liegt daran, dass Pods als relativ kurzlebige +Einweg-Einheiten konzipiert sind. Wenn ein Pod erstellt wird (entweder direkt +von Ihnen oder indirekt von einem +{{< glossary_tooltip text="Controller" term_id="controller" >}}), wird die +Ausführung auf einem {{< glossary_tooltip text="Node" term_id="node" >}} in Ihrem Cluster +geplant. Der Pod bleibt auf diesem (virtuellen) Server, bis entweder der Pod die +Ausführung beendet hat, das Pod-Objekt gelöscht wird, der Pod aufgrund +mangelnder Ressourcen *evakuiert* wird oder der Node ausfällt. + +{{< note >}} +Das Neustarten eines Containers in einem Pod sollte nicht mit dem Neustarten +eines Pods verwechselt werden. Ein Pod ist kein Prozess, sondern eine Umgebung +zur Ausführung von Containern. Ein Pod bleibt bestehen, bis er gelöscht wird. +{{< /note >}} + +Stelle beim Erstellen des Manifests für ein Pod-Objekt sicher, dass der +angegebene Name ein gültiger +[DNS-Subdomain-Name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) +ist. + +### Pods und Controller + +Mit Workload-Ressourcen kannst du mehrere Pods erstellen und verwalten. Ein +Controller für die Ressource kümmert sich um Replikation, Roll-Out sowie +automatische Wiederherstellung im Fall von versagenden Pods. Wenn beispielsweise ein Node +ausfällt, bemerkt ein Controller, dass die Pods auf dem Node nicht mehr laufen +und plant die Ausführung eines Ersatzpods auf einem funktionierenden Node. +Hier sind einige Beispiele für Workload-Ressourcen, die einen oder mehrere Pods +verwalten: + +* {{< glossary_tooltip text="Deployment" term_id="deployment" >}} +* {{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}} +* {{< glossary_tooltip text="DaemonSet" term_id="daemonset" >}} + +### Pod Vorlagen + +Controller für +{{< glossary_tooltip text="Workload" term_id="workload" >}}-Ressourcen +erstellen Pods von einer _Pod Vorlage_ und verwalten diese Pods für dich. + +Pod Vorlagen sind Spezifikationen zum Erstellen von Pods und sind in +Workload-Ressourcen enthalten wie z. B. +[Deployments](/docs/concepts/workloads/controllers/deployment/), +[Jobs](/docs/concepts/workloads/controllers/job/), und +[DaemonSets](/docs/concepts/workloads/controllers/daemonset/). + +Jeder Controller für eine Workload-Ressource verwendet die Pod Vorlage innerhalb +des Workload-Objektes, um Pods zu erzeugen. 
Die Pod Vorlage ist Teil des +gewünschten Zustands der Workload-Ressource, mit der du deine Anwendung +ausgeführt hast. + +Das folgende Beispiel ist ein Manifest für einen einfachen Job mit einer +`Vorlage`, die einen Container startet. Der Container in diesem Pod druckt +eine Nachricht und pausiert dann. + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: hello +spec: + template: + # Dies ist die Pod Vorlage + spec: + containers: + - name: hello + image: busybox + command: ['sh', '-c', 'echo "Hello, Kubernetes!" && sleep 3600'] + restartPolicy: OnFailure + # Die Pod Vorlage endet hier +``` +Das Ändern der Pod Vorlage oder der Wechsel zu einer neuen Pod Vorlage hat keine +direkten Auswirkungen auf bereits existierende Pods. Wenn du die Pod Vorlage für +eine Workload-Ressource änderst, dann muss diese Ressource die Ersatz-Pods +erstellen, welche die aktualisierte Vorlage verwenden. + +Beispielsweise stellt der StatefulSet-Controller sicher, dass für jedes +StatefulSet-Objekt die ausgeführten Pods mit der aktuellen Pod Vorlage +übereinstimmen. Wenn du das StatefulSet bearbeitest und die Vorlage änderst, +beginnt das StatefulSet mit der Erstellung neuer Pods basierend auf der +aktualisierten Vorlage. Schließlich werden alle alten Pods durch neue Pods +ersetzt, und das Update ist abgeschlossen. + +Jede Workload-Ressource implementiert eigene Regeln für die Umsetzung von +Änderungen der Pod Vorlage. Wenn du mehr über StatefulSet erfahren möchtest, +dann lies die Seite +[Update-Strategien](/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets) +im Tutorial StatefulSet Basics. + + +Auf Nodes beobachtet oder verwaltet das +{{< glossary_tooltip term_id="kubelet" text="Kubelet" >}} +nicht direkt die Details zu Pod Vorlagen und Updates. Diese Details sind +abstrahiert. Die Abstraktion und Trennung von Aufgaben vereinfacht die +Systemsemantik und ermöglicht es so, das Verhalten des Clusters zu ändern, ohne +vorhandenen Code zu ändern. + +## Pod Update und Austausch + +Wie im vorherigen Abschnitt erwähnt, erstellt der Controller neue Pods basierend +auf der aktualisierten Vorlage, wenn die Pod Vorlage für eine Workload-Ressource +geändert wird, anstatt die vorhandenen Pods zu aktualisieren oder zu patchen. + +Kubernetes hindert dich nicht daran, Pods direkt zu verwalten. Es ist möglich, +einige Felder eines laufenden Pods zu aktualisieren. Allerdings haben +Pod-Aktualisierungsvorgänge wie zum Beispiel +[`patch`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#patch-pod-v1-core) +und +[`replace`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#replace-pod-v1-core) +einige Einschränkungen: + +- Die meisten Metadaten zu einem Pod können nicht verändert werden. Zum Beispiel kannst + du nicht die Felder `namespace`, `name`, `uid`, oder `creationTimestamp` + ändern. Das `generation`-Feld muss eindeutig sein. Es werden nur Aktualisierungen + akzeptiert, die den Wert des Feldes inkrementieren. +- Wenn das Feld `metadata.deletionTimestamp` gesetzt ist, kann kein neuer + Eintrag zur Liste `metadata.finalizers` hinzugefügt werden. +- Pod-Updates dürfen keine Felder ändern, die Ausnahmen sind + `spec.containers[*].image`, + `spec.initContainers[*].image`, `spec.activeDeadlineSeconds` oder + `spec.tolerations`. Für `spec.tolerations` kannst du nur neue Einträge + hinzufügen. +- Für `spec.activeDeadlineSeconds` sind nur zwei Änderungen erlaubt: + + 1. ungesetztes Feld in eine positive Zahl + 1. 
positive Zahl in eine kleinere positive Zahl, die nicht negativ ist + +## Gemeinsame Nutzung von Ressourcen und Kommunikation + +Pods ermöglichen den Datenaustausch und die Kommunikation zwischen den +Containern, die im Pod enthalten sind. + +### Datenspeicherung in Pods + +Ein Pod kann eine Reihe von gemeinsam genutzten Speicher- +{{< glossary_tooltip text="Volumes" term_id="volume" >}} spezifizieren. Alle +Container im Pod können auf die gemeinsamen Volumes zugreifen und dadurch Daten +austauschen. Volumes ermöglichen auch, dass Daten ohne Verlust gespeichert +werden, falls einer der Container neu gestartet werden muss. +Im Kapitel [Datenspeicherung](/docs/concepts/storage/) findest du weitere +Informationen, wie Kubernetes gemeinsam genutzten Speicher implementiert und +Pods zur Verfügung stellt. + +### Pod-Netzwerk + +Jedem Pod wird für jede Adressenfamilie eine eindeutige IP-Adresse zugewiesen. +Jeder Container in einem Pod nutzt den gemeinsamen Netzwerk-Namespace, +einschließlich der IP-Adresse und der Ports. In einem Pod (und **nur** dann) +können die Container, die zum Pod gehören, über `localhost` miteinander +kommunizieren. Wenn Container in einem Pod mit Entitäten *außerhalb des Pods* +kommunizieren, müssen sie koordinieren, wie die gemeinsam genutzten +Netzwerkressourcen (z. B. Ports) verwendet werden. Innerhalb eines Pods teilen +sich Container eine IP-Adresse und eine Reihe von Ports und können sich +gegenseitig über `localhost` finden. Die Container in einem Pod können auch die +üblichen Kommunikationsverfahren zwischen Prozessen nutzen, wie z. B. +SystemV-Semaphoren oder "POSIX Shared Memory". Container in verschiedenen Pods +haben unterschiedliche IP-Adressen und können nicht per IPC ohne +[spezielle Konfiguration](/docs/concepts/policy/pod-security-policy/) +kommunizieren. Container, die mit einem Container in einem anderen Pod +interagieren möchten, müssen IP-Netzwerke verwenden. + +Für die Container innerhalb eines Pods stimmt der "hostname" mit dem +konfigurierten `Namen` des Pods überein. Mehr dazu im Kapitel +[Netzwerke](/docs/concepts/cluster-administration/networking/). + +## Privilegierter Modus für Container + +Jeder Container in einem Pod kann den privilegierten Modus aktivieren, indem +das Flag `privileged` im +[Sicherheitskontext](/docs/tasks/configure-pod-container/security-context/) +der Container-Spezifikation verwendet wird. +Dies ist nützlich für Container, die Verwaltungsfunktionen des Betriebssystems +verwenden möchten, z. B. das Manipulieren des Netzwerk-Stacks oder den Zugriff +auf Hardware. Prozesse innerhalb eines privilegierten Containers erhalten fast +die gleichen Rechte wie sie Prozessen außerhalb eines Containers zur Verfügung +stehen. + +{{< note >}} +Ihre +{{< glossary_tooltip text="Container-Laufzeitumgebung" term_id="container-runtime" >}} +muss das Konzept eines privilegierten Containers unterstützen, damit diese +Einstellung relevant ist. +{{< /note >}} + + +## Statische Pods + +_Statische Pods_ werden direkt vom Kubelet-Daemon auf einem bestimmten Node +verwaltet, ohne dass sie vom +{{< glossary_tooltip text="API-Server" term_id="kube-apiserver" >}} überwacht +werden. + +Die meisten Pods werden von der Kontrollebene verwaltet (z. B. +{{< glossary_tooltip text="Deployment" term_id="deployment" >}}). Aber für +statische Pods überwacht das Kubelet jeden statischen Pod direkt (und startet +ihn neu, wenn er ausfällt). + +Statische Pods sind immer an ein {{< glossary_tooltip text="Kubelet" term_id="kubelet" >}} auf +einem bestimmten Node gebunden. Der Hauptanwendungsfall für statische Pods +besteht darin, eine selbst gehostete Steuerebene auszuführen. 
Mit anderen +Worten: Das Kubelet dient zur Überwachung der einzelnen +[Komponenten der Kontrollebene](/docs/concepts/overview/components/#control-plane-components). + +Das Kubelet versucht automatisch auf dem Kubernetes API-Server für jeden +statischen Pod einen spiegelbildlichen Pod +(im Englischen: {{< glossary_tooltip text="Mirror Pod" term_id="mirror-pod" >}}) +zu erstellen. +Das bedeutet, dass die auf einem Node ausgeführten Pods auf dem API-Server +sichtbar sind, jedoch von dort nicht gesteuert werden können. + +## {{% heading "whatsnext" %}} + +* Verstehe den + [Lebenszyklus eines Pods](/docs/concepts/workloads/pods/pod-lifecycle/). +* Erfahre mehr über [RuntimeClass](/docs/concepts/containers/runtime-class/) + und wie du damit verschiedene Pods mit unterschiedlichen + Container-Laufzeitumgebungen konfigurieren kannst. +* Mehr zum Thema + [Restriktionen für die Verteilung von Pods](/docs/concepts/workloads/pods/pod-topology-spread-constraints/). +* Lies + [Pod-Disruption-Budget](/docs/concepts/workloads/pods/disruptions/) + und wie du es verwenden kannst, um die Verfügbarkeit von Anwendungen bei + Störungen zu verwalten. Die + [Pod](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core) + -Objektdefinition beschreibt das Objekt im Detail. +* [The Distributed System Toolkit: Patterns for Composite Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns) + erläutert allgemeine Layouts für Pods mit mehr als einem Container. + +Um den Hintergrund zu verstehen, warum Kubernetes eine gemeinsame Pod-API in +andere Ressourcen, wie z. B. +{{< glossary_tooltip text="StatefulSets" term_id="statefulset" >}} +oder {{< glossary_tooltip text="Deployments" term_id="deployment" >}} einbindet, +kannst du Artikel zu früheren Technologien lesen, unter anderem: + * [Aurora](https://aurora.apache.org/documentation/latest/reference/configuration/#job-schema) + * [Borg](https://research.google.com/pubs/pub43438.html) + * [Marathon](https://mesosphere.github.io/marathon/docs/rest-api.html) + * [Omega](https://research.google/pubs/pub41684/) + * [Tupperware](https://engineering.fb.com/data-center-engineering/tupperware/). \ No newline at end of file diff --git a/content/de/docs/setup/_index.md b/content/de/docs/setup/_index.md index d7f074efb30e8..2203fcc19cf59 100644 --- a/content/de/docs/setup/_index.md +++ b/content/de/docs/setup/_index.md @@ -9,7 +9,7 @@ content_type: concept Diese Sektion umfasst verschiedene Optionen zum Einrichten und Betrieb von Kubernetes. -Verschiedene Kubernetes Lösungen haben verschiedene Anforderungen: Einfache Wartung, Sicherheit, Kontrolle, verfügbare Resourcen und erforderliches Fachwissen zum Betrieb und zur Verwaltung dess folgende Diagramm zeigt die möglichen Abstraktionen eines Kubernetes-Clusters und ob eine Abstraktion selbst verwaltet oder von einem Anbieter verwaltet wird. +Verschiedene Kubernetes Lösungen haben verschiedene Anforderungen: Einfache Wartung, Sicherheit, Kontrolle, verfügbare Ressourcen und erforderliches Fachwissen zum Betrieb und zur Verwaltung. Das folgende Diagramm zeigt die möglichen Abstraktionen eines Kubernetes-Clusters und ob eine Abstraktion selbst verwaltet oder von einem Anbieter verwaltet wird. Sie können einen Kubernetes-Cluster auf einer lokalen Maschine, Cloud, On-Prem Datacenter bereitstellen; oder wählen Sie einen verwalteten Kubernetes-Cluster. Sie können auch eine individuelle Lösung über eine grosse Auswahl an Cloud Anbietern oder Bare-Metal-Umgebungen nutzen. 
diff --git a/content/en/blog/_posts/2017-03-00-Advanced-Scheduling-In-Kubernetes.md b/content/en/blog/_posts/2017-03-00-Advanced-Scheduling-In-Kubernetes.md index 5d7f0383c5e2f..722b1e59b0356 100644 --- a/content/en/blog/_posts/2017-03-00-Advanced-Scheduling-In-Kubernetes.md +++ b/content/en/blog/_posts/2017-03-00-Advanced-Scheduling-In-Kubernetes.md @@ -20,21 +20,14 @@ For example, if we want to require scheduling on a node that is in the us-centra ``` -affinity: - - nodeAffinity: - - requiredDuringSchedulingIgnoredDuringExecution: - - nodeSelectorTerms: - - - matchExpressions: - - - key: "failure-domain.beta.kubernetes.io/zone" - - operator: In - - values: ["us-central1-a"] + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "failure-domain.beta.kubernetes.io/zone" + operator: In + values: ["us-central1-a"] ``` @@ -44,21 +37,14 @@ Preferred rules mean that if nodes match the rules, they will be chosen first, a ``` -affinity: - - nodeAffinity: - - preferredDuringSchedulingIgnoredDuringExecution: - - nodeSelectorTerms: - - - matchExpressions: - - - key: "failure-domain.beta.kubernetes.io/zone" - - operator: In - - values: ["us-central1-a"] + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "failure-domain.beta.kubernetes.io/zone" + operator: In + values: ["us-central1-a"] ``` @@ -67,21 +53,14 @@ Node anti-affinity can be achieved by using negative operators. So for instance ``` -affinity: - - nodeAffinity: - - requiredDuringSchedulingIgnoredDuringExecution: - - nodeSelectorTerms: - - - matchExpressions: - - - key: "failure-domain.beta.kubernetes.io/zone" - - operator: NotIn - - values: ["us-central1-a"] + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "failure-domain.beta.kubernetes.io/zone" + operator: NotIn + values: ["us-central1-a"] ``` @@ -99,7 +78,7 @@ The kubectl command allows you to set taints on nodes, for example: ``` kubectl taint nodes node1 key=value:NoSchedule - ``` +``` creates a taint that marks the node as unschedulable by any pods that do not have a toleration for taint with key key, value value, and effect NoSchedule. (The other taint effects are PreferNoSchedule, which is the preferred version of NoSchedule, and NoExecute, which means any pods that are running on the node when the taint is applied will be evicted unless they tolerate the taint.) The toleration you would add to a PodSpec to have the corresponding pod tolerate this taint would look like this @@ -107,15 +86,11 @@ creates a taint that marks the node as unschedulable by any pods that do not hav ``` -tolerations: - -- key: "key" - - operator: "Equal" - - value: "value" - - effect: "NoSchedule" + tolerations: + - key: "key" + operator: "Equal" + value: "value" + effect: "NoSchedule" ``` @@ -138,21 +113,13 @@ Let’s look at an example. 
Say you have front-ends in service S1, and they comm ``` affinity: - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: service - operator: In - values: [“S1”] - topologyKey: failure-domain.beta.kubernetes.io/zone ``` @@ -172,25 +139,15 @@ Here we have a Pod where we specify the schedulerName field: ``` apiVersion: v1 - kind: Pod - metadata: - name: nginx - labels: - app: nginx - spec: - schedulerName: my-scheduler - containers: - - name: nginx - image: nginx:1.10 ``` diff --git a/content/en/blog/_posts/2018-08-03-make-kubernetes-production-grade-anywhere.md b/content/en/blog/_posts/2018-08-03-make-kubernetes-production-grade-anywhere.md index 024506a2de670..a28196d56834f 100644 --- a/content/en/blog/_posts/2018-08-03-make-kubernetes-production-grade-anywhere.md +++ b/content/en/blog/_posts/2018-08-03-make-kubernetes-production-grade-anywhere.md @@ -176,7 +176,7 @@ Cluster-distributed stateful services (e.g., Cassandra) can benefit from splitti [Logs](/docs/concepts/cluster-administration/logging/) and [metrics](/docs/tasks/debug-application-cluster/resource-usage-monitoring/) (if collected and persistently retained) are valuable to diagnose outages, but given the variety of technologies available it will not be addressed in this blog. If Internet connectivity is available, it may be desirable to retain logs and metrics externally at a central location. -Your production deployment should utilize an automated installation, configuration and update tool (e.g., [Ansible](https://github.com/kubernetes-incubator/kubespray), [BOSH](https://github.com/cloudfoundry-incubator/kubo-deployment), [Chef](https://github.com/chef-cookbooks/kubernetes), [Juju](/docs/getting-started-guides/ubuntu/installation/), [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/), [Puppet](https://forge.puppet.com/puppetlabs/kubernetes), etc.). A manual process will have repeatability issues, be labor intensive, error prone, and difficult to scale. [Certified distributions](https://www.cncf.io/certification/software-conformance/#logos) are likely to include a facility for retaining configuration settings across updates, but if you implement your own install and config toolchain, then retention, backup and recovery of the configuration artifacts is essential. Consider keeping your deployment components and settings under a version control system such as Git. +Your production deployment should utilize an automated installation, configuration and update tool (e.g., [Ansible](https://github.com/kubernetes-incubator/kubespray), [BOSH](https://github.com/cloudfoundry-incubator/kubo-deployment), [Chef](https://github.com/chef-cookbooks/kubernetes), [Juju](/docs/getting-started-guides/ubuntu/installation/), [kubeadm](/docs/reference/setup-tools/kubeadm/), [Puppet](https://forge.puppet.com/puppetlabs/kubernetes), etc.). A manual process will have repeatability issues, be labor intensive, error prone, and difficult to scale. [Certified distributions](https://www.cncf.io/certification/software-conformance/#logos) are likely to include a facility for retaining configuration settings across updates, but if you implement your own install and config toolchain, then retention, backup and recovery of the configuration artifacts is essential. Consider keeping your deployment components and settings under a version control system such as Git. 
## Outage recovery
diff --git a/content/en/blog/_posts/2018-12-03-kubernetes-1-13-release-announcement.md b/content/en/blog/_posts/2018-12-03-kubernetes-1-13-release-announcement.md
index 247bfa2c8d0eb..8aba0dc232506 100644
--- a/content/en/blog/_posts/2018-12-03-kubernetes-1-13-release-announcement.md
+++ b/content/en/blog/_posts/2018-12-03-kubernetes-1-13-release-announcement.md
@@ -17,7 +17,7 @@ Let's dive into the key features of this release:

## Simplified Kubernetes Cluster Management with kubeadm in GA

-Most people who have gotten hands-on with Kubernetes have at some point been hands-on with kubeadm. It's an essential tool for managing the cluster lifecycle, from creation to configuration to upgrade, and now kubeadm is officially GA. [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) handles the bootstrapping of production clusters on existing hardware, configures the core Kubernetes components in a best-practice manner, provides a secure yet easy joining flow for new nodes, and supports easy upgrades. What's notable about this GA release are the now graduated advanced features, specifically around pluggability and configurability. The scope of kubeadm is to be a toolbox for both admins and automated, higher-level systems, and this release is a significant step in that direction.
+Most people who have gotten hands-on with Kubernetes have at some point been hands-on with kubeadm. It's an essential tool for managing the cluster lifecycle, from creation to configuration to upgrade, and now kubeadm is officially GA. [kubeadm](/docs/reference/setup-tools/kubeadm/) handles the bootstrapping of production clusters on existing hardware, configures the core Kubernetes components in a best-practice manner, provides a secure yet easy joining flow for new nodes, and supports easy upgrades. What's notable about this GA release are the now graduated advanced features, specifically around pluggability and configurability. The scope of kubeadm is to be a toolbox for both admins and automated, higher-level systems, and this release is a significant step in that direction.

## Container Storage Interface (CSI) Goes GA
diff --git a/content/en/blog/_posts/2019-03-15-Kubernetes-setup-using-Ansible-and-Vagrant.md b/content/en/blog/_posts/2019-03-15-Kubernetes-setup-using-Ansible-and-Vagrant.md
index 8b31d1df0b79b..247748b6f0059 100644
--- a/content/en/blog/_posts/2019-03-15-Kubernetes-setup-using-Ansible-and-Vagrant.md
+++ b/content/en/blog/_posts/2019-03-15-Kubernetes-setup-using-Ansible-and-Vagrant.md
@@ -66,6 +66,7 @@ Vagrant.configure("2") do |config|
 end
 end
 end
+end
```

### Step 2: Create an Ansible playbook for Kubernetes master.
diff --git a/content/en/blog/_posts/2019-11-26-cloud-native-java-controller-sdk.md b/content/en/blog/_posts/2019-11-26-cloud-native-java-controller-sdk.md
index 2474d07a95052..80926b6bc4d29 100644
--- a/content/en/blog/_posts/2019-11-26-cloud-native-java-controller-sdk.md
+++ b/content/en/blog/_posts/2019-11-26-cloud-native-java-controller-sdk.md
@@ -58,7 +58,7 @@ Take maven project as example, adding the following dependencies into your depen

Then we can make use of the provided builder libraries to write your own controller.
For example, the following one is a simple controller that prints out node information
-on watch notification; see the complete example [here](https://github.com/kubernetes-client/java/blob/master/examples/src/main/java/io/kubernetes/client/examples/ControllerExample.java):
+on watch notification; see the complete example [here](https://github.com/kubernetes-client/java/blob/master/examples/examples-release-13/src/main/java/io/kubernetes/client/examples/ControllerExample.java):

```java
...
diff --git a/content/en/blog/_posts/2019-12-09-kubernetes-1.17-release-announcement.md b/content/en/blog/_posts/2019-12-09-kubernetes-1.17-release-announcement.md
index f5c6761eab373..983d7ba31ee09 100644
--- a/content/en/blog/_posts/2019-12-09-kubernetes-1.17-release-announcement.md
+++ b/content/en/blog/_posts/2019-12-09-kubernetes-1.17-release-announcement.md
@@ -31,9 +31,9 @@ Standard labels are used by Kubernetes components to support some features. For

The labels are reaching general availability in this release. Kubernetes components have been updated to populate the GA and beta labels and to react to both. However, if you are using the beta labels in your pod specs for features such as node affinity, or in your custom controllers, we recommend that you start migrating them to the new GA labels. You can find the documentation for the new labels here:

-- [node.kubernetes.io/instance-type](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#nodekubernetesioinstance-type)
-- [topology.kubernetes.io/region](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#topologykubernetesioregion)
-- [topology.kubernetes.io/zone](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#topologykubernetesiozone)
+- [node.kubernetes.io/instance-type](/docs/reference/labels-annotations-taints/#nodekubernetesioinstance-type)
+- [topology.kubernetes.io/region](/docs/reference/labels-annotations-taints/#topologykubernetesioregion)
+- [topology.kubernetes.io/zone](/docs/reference/labels-annotations-taints/#topologykubernetesiozone)

## Volume Snapshot Moves to Beta
diff --git a/content/en/blog/_posts/2020-09-03-warnings/index.md b/content/en/blog/_posts/2020-09-03-warnings/index.md
index d88dd8328dba8..50576c032939e 100644
--- a/content/en/blog/_posts/2020-09-03-warnings/index.md
+++ b/content/en/blog/_posts/2020-09-03-warnings/index.md
@@ -325,7 +325,7 @@ Now that we have a way to communicate helpful information to users in context,

we're already considering other ways we can use this to improve people's experience with Kubernetes. A couple of areas we're looking at next are warning about [known problematic values](http://issue.k8s.io/64841#issuecomment-395141013) we cannot reject outright for compatibility reasons, and warning about use of deprecated fields or field values
-(like selectors using beta os/arch node labels, [deprecated in v1.14](/docs/reference/kubernetes-api/labels-annotations-taints/#beta-kubernetes-io-arch-deprecated)).
+(like selectors using beta os/arch node labels, [deprecated in v1.14](/docs/reference/labels-annotations-taints/#beta-kubernetes-io-arch-deprecated)).

I'm excited to see progress in this area, continuing to make it easier to use Kubernetes.
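As a concrete sketch of that label migration (my illustration, not part of
either post; the Pod name and image are arbitrary), a spec that selects nodes
by the GA labels rather than the deprecated beta ones looks like this:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: arch-pinned-pod
spec:
  nodeSelector:
    # GA labels; these replace the deprecated beta.kubernetes.io/os
    # and beta.kubernetes.io/arch selectors mentioned above.
    kubernetes.io/os: linux
    kubernetes.io/arch: amd64
  containers:
  - name: app
    image: nginx
```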
--- diff --git a/content/en/blog/_posts/2020-12-02-dockershim-faq.md b/content/en/blog/_posts/2020-12-02-dockershim-faq.md index f8dbe7f7c7499..918a969e5132a 100644 --- a/content/en/blog/_posts/2020-12-02-dockershim-faq.md +++ b/content/en/blog/_posts/2020-12-02-dockershim-faq.md @@ -114,7 +114,7 @@ will have strictly better performance and less overhead. However, we encourage y to explore all the options from the [CNCF landscape] in case another would be an even better fit for your environment. -[CNCF landscape]: https://landscape.cncf.io/category=container-runtime&format=card-mode&grouping=category +[CNCF landscape]: https://landscape.cncf.io/card-mode?category=container-runtime&grouping=category ### What should I look out for when changing CRI implementations? diff --git a/content/en/blog/_posts/2020-12-08-kubernetes-release-1.20.md b/content/en/blog/_posts/2020-12-08-kubernetes-release-1.20.md index e0fef7ab90f2c..deb459c4bea0d 100644 --- a/content/en/blog/_posts/2020-12-08-kubernetes-release-1.20.md +++ b/content/en/blog/_posts/2020-12-08-kubernetes-release-1.20.md @@ -64,7 +64,7 @@ The Kubernetes community has written a [detailed blog post about deprecation](ht A longstanding bug regarding exec probe timeouts that may impact existing pod definitions has been fixed. Prior to this fix, the field `timeoutSeconds` was not respected for exec probes. Instead, probes would run indefinitely, even past their configured deadline, until a result was returned. With this change, the default value of `1 second` will be applied if a value is not specified and existing pod definitions may no longer be sufficient if a probe takes longer than one second. A feature gate, called `ExecProbeTimeout`, has been added with this fix that enables cluster operators to revert to the previous behavior, but this will be locked and removed in subsequent releases. In order to revert to the previous behavior, cluster operators should set this feature gate to `false`. -Please review the updated documentation regarding [configuring probes](docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes) for more details. +Please review the updated documentation regarding [configuring probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes) for more details. 
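To make the probe change concrete, here is a minimal sketch (the Pod name,
image, and file path are illustrative) of an exec liveness probe that sets
`timeoutSeconds` explicitly instead of relying on the one-second default that
now applies:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: exec-probe-demo
spec:
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "touch /tmp/healthy && sleep 3600"]
    livenessProbe:
      exec:
        command: ["cat", "/tmp/healthy"]
      # With the ExecProbeTimeout fix this deadline is now enforced;
      # when unset it defaults to 1 second.
      timeoutSeconds: 5
      periodSeconds: 10
```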
## Other Updates diff --git a/content/en/blog/_posts/2021-03-09-The-Evolution-of-Kubernetes-Dashboard/along-the-way-ui.png b/content/en/blog/_posts/2021-03-09-The-Evolution-of-Kubernetes-Dashboard/along-the-way-ui.png new file mode 100644 index 0000000000000..e83656a624b22 Binary files /dev/null and b/content/en/blog/_posts/2021-03-09-The-Evolution-of-Kubernetes-Dashboard/along-the-way-ui.png differ diff --git a/content/en/blog/_posts/2021-03-09-The-Evolution-of-Kubernetes-Dashboard/current-ui.png b/content/en/blog/_posts/2021-03-09-The-Evolution-of-Kubernetes-Dashboard/current-ui.png new file mode 100644 index 0000000000000..7d96058165fe7 Binary files /dev/null and b/content/en/blog/_posts/2021-03-09-The-Evolution-of-Kubernetes-Dashboard/current-ui.png differ diff --git a/content/en/blog/_posts/2021-03-09-The-Evolution-of-Kubernetes-Dashboard/first-ui.png b/content/en/blog/_posts/2021-03-09-The-Evolution-of-Kubernetes-Dashboard/first-ui.png new file mode 100644 index 0000000000000..ba7fec54088ee Binary files /dev/null and b/content/en/blog/_posts/2021-03-09-The-Evolution-of-Kubernetes-Dashboard/first-ui.png differ diff --git a/content/en/blog/_posts/2021-03-09-The-Evolution-of-Kubernetes-Dashboard/index.md b/content/en/blog/_posts/2021-03-09-The-Evolution-of-Kubernetes-Dashboard/index.md new file mode 100644 index 0000000000000..345394f809880 --- /dev/null +++ b/content/en/blog/_posts/2021-03-09-The-Evolution-of-Kubernetes-Dashboard/index.md @@ -0,0 +1,63 @@ +--- +layout: blog +title: "The Evolution of Kubernetes Dashboard" +date: 2021-03-09 +slug: the-evolution-of-kubernetes-dashboard +--- + +Authors: Marcin Maciaszczyk, Kubermatic & Sebastian Florek, Kubermatic + +In October 2020, the Kubernetes Dashboard officially turned five. As main project maintainers, we can barely believe that so much time has passed since our very first commits to the project. However, looking back with a bit of nostalgia, we realize that quite a lot has happened since then. Now it’s due time to celebrate “our baby” with a short recap. + +## How It All Began + +The initial idea behind the Kubernetes Dashboard project was to provide a web interface for Kubernetes. We wanted to reflect the kubectl functionality through an intuitive web UI. The main benefit from using the UI is to be able to quickly see things that do not work as expected (monitoring and troubleshooting). Also, the Kubernetes Dashboard is a great starting point for users that are new to the Kubernetes ecosystem. + +The very [first commit](https://github.com/kubernetes/dashboard/commit/5861187fa807ac1cc2d9b2ac786afeced065076c) to the Kubernetes Dashboard was made by Filip Grządkowski from Google on 16th October 2015 – just a few months from the initial commit to the Kubernetes repository. Our initial commits go back to November 2015 ([Sebastian committed on 16 November 2015](https://github.com/kubernetes/dashboard/commit/09e65b6bb08c49b926253de3621a73da05e400fd); [Marcin committed on 23 November 2015](https://github.com/kubernetes/dashboard/commit/1da4b1c25ef040818072c734f71333f9b4733f55)). Since that time, we’ve become regular contributors to the project. For the next two years, we worked closely with the Googlers, eventually becoming main project maintainers ourselves. 
{{< figure src="first-ui.png" caption="The First Version of the User Interface" >}}

{{< figure src="along-the-way-ui.png" caption="Prototype of the New User Interface" >}}

{{< figure src="current-ui.png" caption="The Current User Interface" >}}

As you can see, the initial look and feel of the project were completely different from the current one. We have changed the design multiple times. The same has happened with the code itself.

## Growing Up - The Big Migration

At [the beginning of 2018](https://github.com/kubernetes/dashboard/pull/2727), we reached a point where AngularJS was getting close to the end of its life, while new Angular versions were being published quite often. Many of the libraries and modules we were using followed the same trend. That forced us to spend a lot of time rewriting the frontend part of the project to make it work with the newer technologies.

The migration came with many benefits, like being able to refactor a lot of the code, introduce design patterns, reduce code complexity, and benefit from the new modules. However, you can imagine that the scale of the migration was huge. Luckily, there were a number of contributions from the community helping us with resource support, new Kubernetes version support, i18n, and much more. After many long days and nights, we finally released the [first beta version](https://github.com/kubernetes/dashboard/releases/tag/v2.0.0-beta1) in July 2019, followed by the [2.0 release](https://github.com/kubernetes/dashboard/releases/tag/v2.0.0) in April 2020 — our baby had grown up.

## Where Are We Standing in 2021?

Due to limited resources, we were unfortunately not able to offer extensive support for many different Kubernetes versions. So, we've decided to always try to support the latest Kubernetes version available at the time of each Kubernetes Dashboard release. The latest release, [Dashboard v2.2.0](https://github.com/kubernetes/dashboard/releases/tag/v2.2.0), provides support for Kubernetes v1.20.

On top of that, we put a great deal of effort into [improving resource support](https://github.com/kubernetes/dashboard/issues/5232). By now, we offer support for most of the Kubernetes resources. The Kubernetes Dashboard also supports multiple languages: English, German, French, Japanese, Korean, and Chinese (Traditional, Simplified, Traditional Hong Kong). Persian and Russian localizations are currently in progress. Moreover, we are working on support for third-party themes and on the design of the app in general. As you can see, quite a lot of things are going on.

Luckily, we do have regular contributors with domain knowledge who are taking care of the project, updating the Helm charts, translations, Go modules, and more. But as always, there could be many more hands on deck. So if you are thinking about contributing to Kubernetes, keep us in mind ;)

## What's Next

The Kubernetes Dashboard has been growing and prospering for more than 5 years now. It provides the community with an intuitive Web UI, thereby decreasing the complexity of Kubernetes and increasing its accessibility to new community members. We are proud of what the project has achieved so far, but this is by far not the end.
These are our priorities for the future:

* Keep providing support for new Kubernetes versions
* Keep improving support for existing resources
* Keep working on auth system improvements
* [Rewrite the API to use gRPC and shared informers](https://github.com/kubernetes/dashboard/pull/5449): This will allow us to improve the performance of the application but, most importantly, to support live updates coming from the Kubernetes project. It is one of the most requested features from the community.
* Split the application into two containers, one with the UI and the second with the API running inside.

## The Kubernetes Dashboard in Numbers

* Initial commit made on October 16, 2015
* Over 100 million pulls from Docker Hub since the v2 release
* 8 supported languages and the next 2 in progress
* Over 3360 closed PRs
* Over 2260 closed issues
* 100% coverage of the supported core Kubernetes resources
* Over 9000 stars on GitHub
* Over 237,000 lines of code

## Join Us

As mentioned earlier, we are currently looking for more people to help us further develop and grow the project. We are open to contributions in multiple areas, for example, [issues with the help wanted label](https://github.com/kubernetes/dashboard/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). Please feel free to reach out via GitHub or the #sig-ui channel in the [Kubernetes Slack](https://slack.k8s.io/).
diff --git a/content/en/blog/_posts/2021-04-06-PodSecurityPolicy-Past-Present-and-Future.md b/content/en/blog/_posts/2021-04-06-PodSecurityPolicy-Past-Present-and-Future.md
new file mode 100644
index 0000000000000..c65069460a38f
--- /dev/null
+++ b/content/en/blog/_posts/2021-04-06-PodSecurityPolicy-Past-Present-and-Future.md
@@ -0,0 +1,74 @@
---
layout: blog
title: "PodSecurityPolicy Deprecation: Past, Present, and Future"
date: 2021-04-06
slug: podsecuritypolicy-deprecation-past-present-and-future
---

**Author:** Tabitha Sable (Kubernetes SIG Security)

PodSecurityPolicy (PSP) is being deprecated in Kubernetes 1.21, to be released later this week. This starts the countdown to its removal, but doesn't change anything else. PodSecurityPolicy will continue to be fully functional for several more releases before being removed completely. In the meantime, we are developing a replacement for PSP that covers key use cases more easily and sustainably.

What are Pod Security Policies? Why did we need them? Why are they going away, and what's next? How does this affect you? These key questions come to mind as we prepare to say goodbye to PSP, so let's walk through them together. We'll start with an overview of how features get removed from Kubernetes.

## What does deprecation mean in Kubernetes?

Whenever a Kubernetes feature is set to go away, our [deprecation policy](/docs/reference/using-api/deprecation-policy/) is our guide. First the feature is marked as deprecated, then after enough time has passed, it can finally be removed.

Kubernetes 1.21 starts the deprecation process for PodSecurityPolicy. As with all feature deprecations, PodSecurityPolicy will continue to be fully functional for several more releases. The current plan is to remove PSP from Kubernetes in the 1.25 release.

Until then, PSP is still PSP. There will be at least a year during which the newest Kubernetes releases will still support PSP, and nearly two years until PSP will pass fully out of all supported Kubernetes versions.

## What is PodSecurityPolicy?
+ +[PodSecurityPolicy](/docs/concepts/policy/pod-security-policy/) is a built-in [admission controller](/blog/2019/03/21/a-guide-to-kubernetes-admission-controllers/) that allows a cluster administrator to control security-sensitive aspects of the Pod specification. + +First, one or more PodSecurityPolicy resources are created in a cluster to define the requirements Pods must meet. Then, RBAC rules are created to control which PodSecurityPolicy applies to a given pod. If a pod meets the requirements of its PSP, it will be admitted to the cluster as usual. In some cases, PSP can also modify Pod fields, effectively creating new defaults for those fields. If a Pod does not meet the PSP requirements, it is rejected, and cannot run. + +One more important thing to know about PodSecurityPolicy: it’s not the same as [PodSecurityContext](/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context). + +A part of the Pod specification, PodSecurityContext (and its per-container counterpart `SecurityContext`) is the collection of fields that specify many of the security-relevant settings for a Pod. The security context dictates to the kubelet and container runtime how the Pod should actually be run. In contrast, the PodSecurityPolicy only constrains (or defaults) the values that may be set on the security context. + +The deprecation of PSP does not affect PodSecurityContext in any way. + +## Why did we need PodSecurityPolicy? + +In Kubernetes, we define resources such as Deployments, StatefulSets, and Services that represent the building blocks of software applications. The various controllers inside a Kubernetes cluster react to these resources, creating further Kubernetes resources or configuring some software or hardware to accomplish our goals. + +In most Kubernetes clusters, RBAC (Role-Based Access Control) [rules](/docs/reference/access-authn-authz/rbac/#role-and-clusterrole) control access to these resources. `list`, `get`, `create`, `edit`, and `delete` are the sorts of API operations that RBAC cares about, but _RBAC does not consider what settings are being put into the resources it controls_. For example, a Pod can be almost anything from a simple webserver to a privileged command prompt offering full access to the underlying server node and all the data. It’s all the same to RBAC: a Pod is a Pod is a Pod. + +To control what sorts of settings are allowed in the resources defined in your cluster, you need Admission Control in addition to RBAC. Since Kubernetes 1.3, PodSecurityPolicy has been the built-in way to do that for security-related Pod fields. Using PodSecurityPolicy, you can prevent “create Pod” from automatically meaning “root on every cluster node,” without needing to deploy additional external admission controllers. + +## Why is PodSecurityPolicy going away? + +In the years since PodSecurityPolicy was first introduced, we have realized that PSP has some serious usability problems that can’t be addressed without making breaking changes. + +The way PSPs are applied to Pods has proven confusing to nearly everyone that has attempted to use them. It is easy to accidentally grant broader permissions than intended, and difficult to inspect which PSP(s) apply in a given situation. The “changing Pod defaults” feature can be handy, but is only supported for certain Pod settings and it’s not obvious when they will or will not apply to your Pod. 
Without a “dry run” or audit mode, it’s impractical to retrofit PSP to existing clusters safely, and it’s impossible for PSP to ever be enabled by default. + +For more information about these and other PSP difficulties, check out SIG Auth’s KubeCon NA 2019 Maintainer Track session video: {{< youtube "SFtHRmPuhEw?start=953" youtube-quote-sm >}} + +Today, you’re not limited only to deploying PSP or writing your own custom admission controller. Several external admission controllers are available that incorporate lessons learned from PSP to provide a better user experience. [K-Rail](https://github.com/cruise-automation/k-rail), [Kyverno](https://github.com/kyverno/kyverno/), and [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper/) are all well-known, and each has its fans. + +Although there are other good options available now, we believe there is still value in having a built-in admission controller available as a choice for users. With this in mind, we turn toward building what’s next, inspired by the lessons learned from PSP. + +## What’s next? + +Kubernetes SIG Security, SIG Auth, and a diverse collection of other community members have been working together for months to ensure that what’s coming next is going to be awesome. We have developed a Kubernetes Enhancement Proposal ([KEP 2579](https://github.com/kubernetes/enhancements/issues/2579)) and a prototype for a new feature, currently being called by the temporary name "PSP Replacement Policy." We are targeting an Alpha release in Kubernetes 1.22. + +PSP Replacement Policy starts with the realization that since there is a robust ecosystem of external admission controllers already available, PSP’s replacement doesn’t need to be all things to all people. Simplicity of deployment and adoption is the key advantage a built-in admission controller has compared to an external webhook, so we have focused on how to best utilize that advantage. + +PSP Replacement Policy is designed to be as simple as practically possible while providing enough flexibility to really be useful in production at scale. It has soft rollout features to enable retrofitting it to existing clusters, and is configurable enough that it can eventually be active by default. It can be deactivated partially or entirely, to coexist with external admission controllers for advanced use cases. + +## What does this mean for you? + +What this all means for you depends on your current PSP situation. If you’re already using PSP, there’s plenty of time to plan your next move. Please review the PSP Replacement Policy KEP and think about how well it will suit your use case. + +If you’re making extensive use of the flexibility of PSP with numerous PSPs and complex binding rules, you will likely find the simplicity of PSP Replacement Policy too limiting. Use the next year to evaluate the other admission controller choices in the ecosystem. There are resources available to ease this transition, such as the [Gatekeeper Policy Library](https://github.com/open-policy-agent/gatekeeper-library). + +If your use of PSP is relatively simple, with a few policies and straightforward binding to service accounts in each namespace, you will likely find PSP Replacement Policy to be a good match for your needs. Evaluate your PSPs compared to the Kubernetes [Pod Security Standards](/docs/concepts/security/pod-security-standards/) to get a feel for where you’ll be able to use the Restricted, Baseline, and Privileged policies. 
Please follow along with or contribute to the KEP and subsequent development, and try out the Alpha release of PSP Replacement Policy when it becomes available. + +If you’re just beginning your PSP journey, you will save time and effort by keeping it simple. You can approximate the functionality of PSP Replacement Policy today by using the Pod Security Standards’ PSPs. If you set the cluster default by binding a Baseline or Restricted policy to the `system:serviceaccounts` group, and then make a more-permissive policy available as needed in certain Namespaces [using ServiceAccount bindings](/docs/concepts/policy/pod-security-policy/#run-another-pod), you will avoid many of the PSP pitfalls and have an easy migration to PSP Replacement Policy. If your needs are much more complex than this, your effort is probably better spent adopting one of the more fully-featured external admission controllers mentioned above. + +We’re dedicated to making Kubernetes the best container orchestration tool we can, and sometimes that means we need to remove longstanding features to make space for better things to come. When that happens, the Kubernetes deprecation policy ensures you have plenty of time to plan your next move. In the case of PodSecurityPolicy, several options are available to suit a range of needs and use cases. Start planning ahead now for PSP’s eventual removal, and please consider contributing to its replacement! Happy securing! + +**Acknowledgment:** It takes a wonderful group to make wonderful software. Thanks are due to everyone who has contributed to the PSP replacement effort, especially (in alphabetical order) Tim Allclair, Ian Coldwater, and Jordan Liggitt. It’s been a joy to work with y’all on this. diff --git a/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/controller-flowchart.svg b/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/controller-flowchart.svg new file mode 100644 index 0000000000000..9357c89d40a74 --- /dev/null +++ b/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/controller-flowchart.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/index.md b/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/index.md new file mode 100644 index 0000000000000..bb9214b027aa1 --- /dev/null +++ b/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/index.md @@ -0,0 +1,105 @@ +--- +layout: blog +title: 'Kubernetes 1.21: CronJob Reaches GA' +date: 2021-04-09 +slug: kubernetes-release-1.21-cronjob-ga +--- + + **Authors:** Alay Patel (Red Hat), and Maciej Szulik (Red Hat) + +In Kubernetes v1.21, the +[CronJob](/docs/concepts/workloads/controllers/cron-jobs/) resource +reached general availability (GA). We've also substantially improved the +performance of CronJobs since Kubernetes v1.19, by implementing a new +controller. + +In Kubernetes v1.20 we launched a revised v2 controller for CronJobs, +initially as an alpha feature. Kubernetes 1.21 uses the newer controller by +default, and the CronJob resource itself is now GA (group version: `batch/v1`). + +In this article, we'll take you through the driving forces behind this new +development, give you a brief description of controller design for core +Kubernetes, and we'll outline what you will gain from this improved controller. + +The driving force behind promoting the API was Kubernetes' policy choice to +[ensure APIs move beyond beta](/blog/2020/08/21/moving-forward-from-beta/). +That policy aims to prevent APIs from being stuck in a “permanent beta” state. 
Over the years, the old CronJob controller implementation had received healthy
feedback from the community, with reports of several widely recognized
[issues](https://github.com/kubernetes/kubernetes/issues/82659).

If the beta API for CronJob was to be supported as GA, the existing controller
code would need substantial rework. Instead, the SIG Apps community decided
to introduce a new controller and gradually replace the old one.

## How do controllers work?

Kubernetes [controllers](/docs/concepts/architecture/controller/) are control
loops that watch the state of resource(s) in your cluster, then make or
request changes where needed. Each controller tries to move part of the
current cluster state closer to the desired state.

The v1 CronJob controller works by performing a periodic poll and sweep of all
the CronJob objects in your cluster, in order to act on them. It is a
single-worker implementation that gets all CronJobs every 10 seconds, iterates
over each one of them, and syncs them to their desired state. This was the
default way of doing things almost 5 years ago when the controller was
initially written. In hindsight, we can certainly say that such an approach
can overload the API server at scale.

These days, every core controller in Kubernetes must follow the guidelines
described in [Writing Controllers](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/controllers.md#readme).
Among many details, that document prescribes using
[shared informers](https://www.cncf.io/blog/2019/10/15/extend-kubernetes-via-a-shared-informer/)
to "receive notifications of adds, updates, and deletes for a particular
resource". Upon any such event, the related objects are placed in a queue.
Workers pull items from the queue and process them one at a time. This
approach ensures consistency and scalability.

The picture below shows the flow of information from the Kubernetes API server,
through shared informers and the queue, to the main part of a controller: a
reconciliation loop that is responsible for performing the core functionality.

![Controller flowchart](controller-flowchart.svg)

The CronJob controller V2 uses a queue that implements the DelayingInterface to
handle the scheduling aspect. This queue allows processing an element after a
specific time interval. Every time there is a change in a CronJob or its related
Jobs, the key that represents the CronJob is pushed to the queue. The main
handler pops the key, processes the CronJob, and after completion
pushes the key back into the queue for the next scheduled time interval. This is
immediately a more performant implementation, as it no longer requires a linear
scan of all the CronJobs. On top of that, this controller can be scaled by
increasing the number of workers processing the CronJobs in parallel.

## Performance impact of the new controller {#performance-impact}

In order to test the performance difference of the two controllers, a VM instance
with 128 GiB RAM and 64 vCPUs was used to set up a single-node Kubernetes cluster.
Initially, a sample workload was created with 20 CronJob instances with a schedule
to run every minute, and 2100 CronJobs running every 20 hours. Additionally,
over the next few minutes we added 1000 CronJobs with a schedule to run every
20 hours, until we reached a total of 5120 CronJobs.
![Visualization of performance](performance-impact-graph.svg)

We observed that for every 1000 CronJobs added, the old controller used
around 90 to 120 seconds more wall-clock time to schedule 20 Jobs every cycle.
That is, at 5120 CronJobs, the old controller took approximately 9 minutes
to create 20 Jobs. Hence, during each cycle, about 8 schedules were missed.
The new controller, implemented with the architectural change explained above,
created 20 Jobs without any delay, even when we created an additional batch
of 1000 CronJobs, reaching a total of 6120.

As a closing remark, the new controller exposes a histogram metric
`cronjob_controller_cronjob_job_creation_skew_duration_seconds` which helps
monitor the time difference between when a CronJob is meant to run and when
the actual Job is created.

Hopefully the above description is a convincing argument for following the
guidelines and standards set in the Kubernetes project, even for your own
controllers. As mentioned before, the new controller is on by default starting
from Kubernetes v1.21; if you want to check it out in the previous release (1.20),
you can enable the `CronJobControllerV2`
[feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
for the kube-controller-manager: `--feature-gates="CronJobControllerV2=true"`.
diff --git a/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/performance-impact-graph.svg b/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/performance-impact-graph.svg
new file mode 100644
index 0000000000000..976b428d91aa1
--- /dev/null
+++ b/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/performance-impact-graph.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/content/en/blog/_posts/2021-04-08-kubernetes-release-1.21.md b/content/en/blog/_posts/2021-04-08-kubernetes-release-1.21.md
new file mode 100644
index 0000000000000..ed0da32f2506d
--- /dev/null
+++ b/content/en/blog/_posts/2021-04-08-kubernetes-release-1.21.md
@@ -0,0 +1,128 @@
---
layout: blog
title: 'Kubernetes 1.21: Power to the Community'
date: 2021-04-08
slug: kubernetes-1-21-release-announcement
---

**Authors:** [Kubernetes 1.21 Release Team](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.21/release-team.md)

We're pleased to announce the release of Kubernetes 1.21, our first release of 2021! This release consists of 51 enhancements: 13 enhancements have graduated to stable, 16 enhancements are moving to beta, 20 enhancements are entering alpha, and 2 features have been deprecated.

This release cycle, we saw a major shift in ownership of processes around the release team. We moved from a synchronous mode of communication, where we periodically asked the community for input, to a mode where the community opts in to contribute features and/or blogs to the release. These changes have resulted in an increase in collaboration and teamwork across the community. The result of all that is reflected in Kubernetes 1.21 having the highest number of features of any recent release.

## Major Themes

### CronJobs Graduate to Stable!
[CronJobs](/docs/concepts/workloads/controllers/cron-jobs/) (previously ScheduledJobs) have been a beta feature since Kubernetes 1.8! With 1.21 we finally get to see this widely used API graduate to stable.

CronJobs are meant for performing regular scheduled actions such as backups, report generation, and so on. Each of those tasks should be configured to recur indefinitely (for example: once a day / week / month); you can define the point in time within that interval when the job should start.
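As a minimal sketch of the now-stable `batch/v1` API (the name, schedule, and
image below are illustrative), a CronJob that runs every night at 03:00 looks
like this:

```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: nightly-report
spec:
  schedule: "0 3 * * *"   # standard cron syntax: every day at 03:00
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: report
            image: busybox
            command: ["sh", "-c", "echo generating report"]
          restartPolicy: OnFailure
```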
### Immutable Secrets and ConfigMaps
[Immutable Secrets](/docs/concepts/configuration/secret/#secret-immutable) and [ConfigMaps](/docs/concepts/configuration/configmap/#configmap-immutable) add a new field to those resource types that will reject changes to those objects if set. Secrets and ConfigMaps are mutable by default, which is beneficial for pods that are able to consume changes. Mutating Secrets and ConfigMaps can also cause problems if a bad configuration is pushed for pods that use them.

By marking Secrets and ConfigMaps as immutable you can be sure your application configuration won't change. If you want to make changes, you'll need to create a new, uniquely named Secret or ConfigMap and deploy a new pod to consume that resource (a minimal example appears at the end of these themes). Immutable resources also have scaling benefits because controllers do not need to poll the API server to watch for changes.

This feature has graduated to stable in Kubernetes 1.21.

### IPv4/IPv6 dual-stack support
IP addresses are a consumable resource that cluster operators and administrators need to make sure are not exhausted. In particular, public IPv4 addresses are now scarce. Having dual-stack support enables native IPv6 routing to pods and services, whilst still allowing your cluster to talk IPv4 where needed. Dual-stack cluster networking also improves a possible scaling limitation for workloads.

Dual-stack support in Kubernetes means that pods, services, and nodes can get IPv4 addresses and IPv6 addresses. In Kubernetes 1.21 [dual-stack networking](/docs/concepts/services-networking/dual-stack/) has graduated from alpha to beta, and is now enabled by default.

### Graceful Node Shutdown
[Graceful Node Shutdown](/docs/concepts/architecture/nodes/#graceful-node-shutdown) also graduated to beta with this release (and will now be available to a much larger group of users)! This is a hugely beneficial feature that allows the kubelet to be aware of node shutdown, and gracefully terminate pods that are scheduled to that node.

Currently, when a node shuts down, pods do not follow the expected termination lifecycle and are not shut down gracefully. This can introduce problems with a lot of different workloads. Going forward, the kubelet will be able to detect imminent system shutdown through systemd, then inform running pods so they can terminate as gracefully as possible.

### PersistentVolume Health Monitor
Persistent Volumes (PV) are commonly used in applications to get local, file-based storage. They can be used in many different ways and help users migrate applications without needing to re-write storage backends.

Kubernetes 1.21 has a new alpha feature which allows PVs to be monitored for the health of the volume and marked accordingly if the volume becomes unhealthy. Workloads will be able to react to the health state to protect data from being written to or read from a volume that is unhealthy.

### Reducing Kubernetes Build Maintenance
Previously Kubernetes has maintained multiple build systems. This has often been a source of friction and complexity for new and current contributors.

Over the last release cycle, a lot of work has been put in to simplify the build process, and standardize on the native Golang build tools. This should empower broader community maintenance, and lower the barrier to entry for new contributors.
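Picking up the immutable ConfigMaps theme above, here is a minimal sketch (the
name and data are illustrative) showing that immutability is a single top-level
field on the object:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config-v1
data:
  LOG_LEVEL: "info"
# Once set, the data above can no longer be changed; to roll out new
# configuration, create a differently named ConfigMap (for example,
# app-config-v2) and point your pods at it.
immutable: true
```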
+ +## Major Changes + +### PodSecurityPolicy Deprecation +In Kubernetes 1.21, PodSecurityPolicy is deprecated. As with all Kubernetes feature deprecations, PodSecurityPolicy will continue to be available and fully-functional for several more releases. PodSecurityPolicy, previously in the beta stage, is planned for removal in Kubernetes 1.25. + +What's next? We're developing a new built-in mechanism to help limit Pod privileges, with a working title of “PSP Replacement Policy.” Our plan is for this new mechanism to cover the key PodSecurityPolicy use cases, with greatly improved ergonomics and maintainability. To learn more, read [PodSecurityPolicy Deprecation: Past, Present, and Future](/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future). + +### TopologyKeys Deprecation +The Service field `topologyKeys` is now deprecated; all the component features that used this field were previously alpha, and are now also deprecated. +We've replaced `topologyKeys` with a way to implement topology-aware routing, called topology-aware hints. Topology-aware hints are an alpha feature in Kubernetes 1.21. You can read more details about the replacement feature in [Topology Aware Hints](/docs/concepts/services-networking/service-topology/); the related [KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/2433-topology-aware-hints/README.md) explains the context for why we switched. + +## Other Updates + +### Graduated to Stable + +* [EndpointSlice](https://github.com/kubernetes/enhancements/issues/752) +* [Add sysctl support](https://github.com/kubernetes/enhancements/issues/34) +* [PodDisruptionBudgets](https://github.com/kubernetes/enhancements/issues/85) + +### Notable Feature Updates + +* [External client-go credential providers](https://github.com/kubernetes/enhancements/issues/541) - beta in 1.21 +* [Structured logging](https://github.com/kubernetes/enhancements/issues/1602) - graduating to beta in 1.22 +* [TTL after finish cleanup for Jobs and Pods](https://github.com/kubernetes/enhancements/issues/592) - graduated to beta + +# Release notes + +You can check out the full details of the 1.21 release in the [release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.21.md). + +# Availability of release + +Kubernetes 1.21 is available for [download on GitHub](https://github.com/kubernetes/kubernetes/releases/tag/v1.21.0). There are some great resources out there for getting started with Kubernetes. You can check out some [interactive tutorials](https://kubernetes.io/docs/tutorials/) on the main Kubernetes site, or run a local cluster on your machine using Docker containers with [kind](https://kind.sigs.k8s.io). If you’d like to try building a cluster from scratch, check out the [Kubernetes the Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way) tutorial by Kelsey Hightower. + +# Release Team + +This release was made possible by a very dedicated group of individuals, who came together as a team in the midst of a lot of things happening out in the world. A huge thank you to the release lead Nabarun Pal, and to everyone else on the release team for supporting each other, and working so hard to deliver the 1.21 release for the community. 
# Release Logo

![Kubernetes 1.21 Release Logo](/images/blog/2021-04-08-kubernetes-release-1.21/globe_250px.png)

The Kubernetes 1.21 Release Logo portrays the global nature of the Release Team, with release team members residing in timezones from UTC+8 all the way to UTC-8. The diversity of the release team brought in a lot of challenges, but the team tackled them all by adopting more asynchronous communication practices. The heptagonal globe in the release logo signifies the sheer determination of the community to overcome the challenges as they come. It celebrates the amazing teamwork of the release team over the last 3 months to bring in a fun-packed Kubernetes release!

The logo is designed by [Aravind Sekar](https://www.behance.net/noblebatman), an independent designer based out of India. Aravind helps open source communities like PyCon India in their design efforts.

# User Highlights

- CNCF welcomes 47 new organizations across the globe as members to advance Cloud Native technology further at the start of 2021! These [new members](https://www.cncf.io/announcements/2021/02/24/cloud-native-computing-foundation-welcomes-47-new-members-at-the-start-of-2021/) will join CNCF at the upcoming 2021 KubeCon + CloudNativeCon events, including [KubeCon + CloudNativeCon EU – Virtual](https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/) from May 4 – 7, 2021, and [KubeCon + CloudNativeCon NA in Los Angeles](https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/) from October 12 – 15, 2021.

# Project Velocity

The [CNCF K8s DevStats project](https://k8s.devstats.cncf.io/) aggregates a number of interesting data points related to the velocity of Kubernetes and various sub-projects. This includes everything from individual contributions to the number of companies that are contributing, and is a neat illustration of the depth and breadth of effort that goes into evolving this ecosystem.

In the v1.21 release cycle, which ran for 12 weeks (January 11 to April 8), we saw contributions from [999 companies](https://k8s.devstats.cncf.io/d/9/companies-table?orgId=1&var-period_name=v1.20.0%20-%20now&var-metric=contributions) and [1279 individuals](https://k8s.devstats.cncf.io/d/66/developer-activity-counts-by-companies?orgId=1&var-period_name=v1.20.0%20-%20now&var-metric=contributions&var-repogroup_name=Kubernetes&var-country_name=All&var-companies=All).

# Ecosystem Updates

- In the wake of rising racism & attacks on global Asian communities, read the statement from CNCF General Manager Priyanka Sharma on the [CNCF blog](https://www.cncf.io/blog/2021/03/18/statement-from-cncf-general-manager-priyanka-sharma-on-the-unacceptable-attacks-against-aapi-and-asian-communities/) restating the community's commitment towards inclusive values & diversity-powered resilience.
- We now have a process in place for migration of the default branch from master → main. Learn more about the guidelines [here](https://k8s.dev/rename).
- CNCF and the Linux Foundation have announced the availability of their new training course, [LFS260 – Kubernetes Security Essentials](https://training.linuxfoundation.org/training/kubernetes-security-essentials-lfs260/).
In addition to providing skills and knowledge on a broad range of best practices for securing container-based applications and Kubernetes platforms, the course is also a great way to prepare for the recently launched [Certified Kubernetes Security Specialist](https://training.linuxfoundation.org/certification/certified-kubernetes-security-specialist/) certification exam. + +# Event Updates + +- KubeCon + CloudNativeCon Europe 2021 will take place May 4 - 7, 2021! You can find more information about the conference [here](https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/). +- [Kubernetes Community Days](https://kubernetescommunitydays.org/) are being relaunched! Q2 2021 will start with Africa and Bengaluru. + +# Upcoming release webinar + +Join the members of the Kubernetes 1.21 release team on May 13th, 2021 to learn about the major features in this release including IPv4/IPv6 dual-stack support, PersistentVolume Health Monitor, Immutable Secrets and ConfigMaps, and many more. Register here: [https://community.cncf.io/events/details/cncf-cncf-online-programs-presents-cncf-live-webinar-kubernetes-121-release/](https://community.cncf.io/events/details/cncf-cncf-online-programs-presents-cncf-live-webinar-kubernetes-121-release/) + +# Get Involved + +If you’re interested in contributing to the Kubernetes community, Special Interest Groups (SIGs) are a great starting point. Many of them may align with your interests! If there are things you’d like to share with the community, you can join the weekly community meeting, or use any of the following channels: + +* Find out more about contributing to Kubernetes at the [Kubernetes Contributor website](https://www.kubernetes.dev/) +* Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates +* Join the community discussion on [Discuss](https://discuss.kubernetes.io/) +* Join the community on [Slack](http://slack.k8s.io/) +* Share your Kubernetes [story](https://github.com/cncf/foundation/blob/master/case-study-guidelines.md) +* Read more about what’s happening with Kubernetes on the [blog](https://kubernetes.io/blog/) +* Learn more about the [Kubernetes Release Team](https://github.com/kubernetes/sig-release/tree/master/release-team) diff --git a/content/en/blog/_posts/2021-04-12-introducing-suspended-jobs.md b/content/en/blog/_posts/2021-04-12-introducing-suspended-jobs.md new file mode 100644 index 0000000000000..d03c9d0c25d2c --- /dev/null +++ b/content/en/blog/_posts/2021-04-12-introducing-suspended-jobs.md @@ -0,0 +1,110 @@ +--- +title: "Introducing Suspended Jobs" +date: 2021-04-12 +slug: introducing-suspended-jobs +layout: blog +--- + +**Author:** Adhityaa Chandrasekar (Google) + +[Jobs](/docs/concepts/workloads/controllers/job/) are a crucial part of +Kubernetes' API. While other kinds of workloads such as [Deployments](/docs/concepts/workloads/controllers/deployment/), +[ReplicaSets](/docs/concepts/workloads/controllers/replicaset/), +[StatefulSets](/docs/concepts/workloads/controllers/statefulset/), and +[DaemonSets](/docs/concepts/workloads/controllers/daemonset/) +solve use-cases that require Pods to run forever, Jobs are useful when Pods need +to run to completion. Commonly used in parallel batch processing, Jobs can be +used in a variety of applications ranging from video rendering and database +maintenance to sending bulk emails and scientific computing. 
While the amount of parallelism and the conditions for Job completion are
configurable, the Kubernetes API lacked the ability to suspend and resume Jobs.
This is often desired when cluster resources are limited and a higher-priority
Job needs to execute in the place of another Job. Deleting the lower-priority
Job is a poor workaround as Pod completion history and other metrics associated
with the Job will be lost.

With the recent Kubernetes 1.21 release, you will be able to suspend a Job by
updating its spec. The feature is currently in **alpha** and requires you to
enable the `SuspendJob` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
on the [API server](/docs/reference/command-line-tools-reference/kube-apiserver/)
and the [controller manager](/docs/reference/command-line-tools-reference/kube-controller-manager/)
in order to use it.

## API changes

We introduced a new boolean field `suspend` into the `.spec` of Jobs. Let's say
I create the following Job:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: my-job
spec:
  suspend: true
  parallelism: 2
  completions: 10
  template:
    spec:
      containers:
      - name: my-container
        image: busybox
        command: ["sleep", "5"]
      restartPolicy: Never
```

Jobs are not suspended by default, so I'm explicitly setting the `suspend` field
to _true_ in the `.spec` of the above Job manifest. In the above example, the
Job controller will refrain from creating Pods until I'm ready to start the Job,
which I can do by updating `suspend` to false.

As another example, consider a Job that was created with the `suspend` field
omitted. The Job controller will happily create Pods to work towards Job
completion. However, before the Job completes, if I explicitly set the field to
true with a Job update, the Job controller will terminate all active Pods that
are running and will wait indefinitely for the flag to be flipped back to false.
Typically, Pod termination is done by sending a SIGTERM signal to all container
processes in the Pod; the [graceful termination period](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination)
defined in the Pod spec will be honoured. Pods terminated this way will not be
counted as failures by the Job controller.

It is important to understand that succeeded and failed Pods from the past will
continue to exist after you suspend a Job. That is, they will count towards
Job completion once you resume it. You can verify this by looking at the Job's
status before and after suspension.

Read the [documentation](/docs/concepts/workloads/controllers/job#suspending-a-job)
for a full overview of this new feature.

## Where is this useful?

Let's say I'm the operator of a large cluster. I have many users submitting Jobs
to the cluster, but not all Jobs are created equal — some Jobs are more
important than others. Cluster resources aren't infinite either, so all users
must share resources. If all Jobs were created in the suspended state and placed
in a pending queue, I can achieve priority-based Job scheduling by resuming Jobs
in the right order.

As another motivational use-case, consider a cloud provider where compute
resources are cheaper at night than in the morning. If I have a long-running Job
that takes multiple days to complete, being able to suspend the Job in the
morning and then resume it in the evening every day can reduce costs.
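In practice, flipping the field is an ordinary spec update; for example,
something like `kubectl patch job my-job --type=strategic --patch '{"spec":{"suspend":true}}'`
(reusing the Job name from the manifest above) should suspend the Job, and the
same command with `false` resumes it, assuming the `SuspendJob` feature gate is
enabled on your cluster.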
+
+Since this field is a part of the Job spec, [CronJobs](/docs/concepts/workloads/controllers/cron-jobs/) automatically get this feature for free too.
+
+## References and next steps
+
+If you're interested in a deeper dive into the rationale behind this feature and the decisions we have taken, consider reading the [enhancement proposal](https://github.com/kubernetes/enhancements/tree/master/keps/sig-apps/2232-suspend-jobs). There's more detail on suspending and resuming jobs in the documentation for [Job](/docs/concepts/workloads/controllers/job#suspending-a-job).
+
+As previously mentioned, this feature is currently in alpha and is available only if you explicitly opt-in through the `SuspendJob` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/). If this is a feature you're interested in, please consider testing suspended Jobs in your cluster and providing feedback. You can discuss this enhancement [on GitHub](https://github.com/kubernetes/enhancements/issues/2232). The SIG Apps community also [meets regularly](https://github.com/kubernetes/community/tree/master/sig-apps#meetings) and can be reached through [Slack or the mailing list](https://github.com/kubernetes/community/tree/master/sig-apps#contact). Barring any unexpected changes to the API, we intend to graduate the feature to beta in Kubernetes 1.22, so that the feature becomes available by default.
diff --git a/content/en/blog/_posts/2021-04-13-kube-state-metrics-goes-v-2-0.md b/content/en/blog/_posts/2021-04-13-kube-state-metrics-goes-v-2-0.md
new file mode 100644
index 0000000000000..822880547d0c2
--- /dev/null
+++ b/content/en/blog/_posts/2021-04-13-kube-state-metrics-goes-v-2-0.md
@@ -0,0 +1,45 @@
+---
+layout: blog
+title: "kube-state-metrics goes v2.0"
+date: 2021-04-13
+slug: kube-state-metrics-v-2-0
+---
+
+**Authors:** Lili Cosic (Red Hat), Frederic Branczyk (Polar Signals), Manuel Rüger (Sony Interactive Entertainment), Tariq Ibrahim (Salesforce)
+
+## What?
+
+[kube-state-metrics](https://github.com/kubernetes/kube-state-metrics), a project under the Kubernetes organization, generates Prometheus format metrics based on the current state of the Kubernetes native resources. It does this by listening to the Kubernetes API and gathering information about resources and objects, e.g. Deployments, Pods, Services, and StatefulSets. A full list of resources is available in the [documentation](https://github.com/kubernetes/kube-state-metrics/tree/master/docs) of kube-state-metrics.
+
+## Why?
+
+There are numerous useful metrics and insights provided by `kube-state-metrics` right out of the box! These metrics can give you insight into your cluster, either on their own, in the form of dashboards, or through an alerting pipeline. To provide a few examples:
+
+* `kube_pod_container_status_restarts_total` can be used to alert on a crashing pod.
+* `kube_deployment_status_replicas`, which together with `kube_deployment_status_replicas_available` can be used to alert on whether a deployment rolled out successfully or is stuck.
+* `kube_pod_container_resource_requests` and `kube_pod_container_resource_limits` can be used in capacity planning dashboards.
+
+And there are many more metrics available! To learn more about the other metrics and their details, please check out the [documentation](https://github.com/kubernetes/kube-state-metrics/tree/master/docs#readme).
+
+## What is new in v2.0?
+
+So now that we know what kube-state-metrics is, we are excited to announce the next release: kube-state-metrics v2.0! This release was long-awaited and started with an alpha release in September 2020. To ease maintenance, we removed tech debt and also adjusted some confusing wording around user-facing flags and APIs. We also removed some metrics that caused unnecessarily high cardinality in Prometheus! For the 2.0 release, we took the time to set up scale and performance testing. This allows us to better understand if we hit any issues in large clusters and also to document resource request recommendations for your clusters. In this release (and v1.9.8), container builds providing support for multiple architectures were introduced, allowing you to run kube-state-metrics on ARM, ARM64, PPC64, and S390x as well!
+
+So without further ado, here is the list of the more noteworthy user-facing breaking changes; a short migration sketch follows at the end of this post. A full list of changes, features, and bug fixes is available in the changelog linked below.
+
+* Flag `--namespace` was renamed to `--namespaces`. If you are using the former, please make sure to update the flag before deploying the latest release.
+* Flag `--collectors` was renamed to `--resources`.
+* Flags `--metric-blacklist` and `--metric-whitelist` were renamed to `--metric-denylist` and `--metric-allowlist`.
+* Flag `--metric-labels-allowlist` allows you to specify a list of Kubernetes labels that get turned into the dimensions of the `kube_<resource>_labels` metrics. By default, the metric contains only name and namespace labels.
+* All metrics with a prefix of `kube_hpa_*` were renamed to `kube_horizontalpodautoscaler_*`.
+* Metric labels that relate to Kubernetes were converted to snake_case.
+* If you are importing kube-state-metrics as a library, we have updated our Go module path to `k8s.io/kube-state-metrics/v2`.
+* All deprecated stable metrics were removed as per the [notice in the v1.9 release](https://github.com/kubernetes/kube-state-metrics/tree/release-1.9/docs#metrics-deprecation).
+* `quay.io/coreos/kube-state-metrics` images will no longer be updated. `k8s.gcr.io/kube-state-metrics/kube-state-metrics` is the new canonical location.
+* The Helm chart that is part of the kubernetes/kube-state-metrics repository is deprecated. Its new location is https://github.com/prometheus-community/helm-charts.
+
+For the full list of v2.0 release changes, including features, bug fixes, and other breaking changes, see the full [CHANGELOG](https://github.com/kubernetes/kube-state-metrics/blob/master/CHANGELOG.md).
+
+## Found a problem?
+
+Thanks to all our users for testing so far and thank you to all our contributors for your issue reports as well as code and documentation changes! If you find any problems, we, the [maintainers](https://github.com/kubernetes/kube-state-metrics/blob/master/OWNERS), are more than happy to look into them, so please report them by opening a [GitHub issue](https://github.com/kubernetes/kube-state-metrics/issues/new/choose).
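+
+To make the flag renames listed above concrete, here is a minimal before/after sketch of a kube-state-metrics container's arguments (a fragment of a Deployment spec; the image tag and the flag values are illustrative assumptions, not recommendations):
+
+```yaml
+containers:
+  - name: kube-state-metrics
+    image: k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.0.0
+    args:
+      # the v1.9.x spellings of these flags were:
+      #   --namespace=default --collectors=pods,deployments --metric-whitelist=kube_pod_.*
+      - --namespaces=default
+      - --resources=pods,deployments
+      - --metric-allowlist=kube_pod_.*
+```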
diff --git a/content/en/blog/_posts/2021-04-14-local-storage-features-go-beta.md b/content/en/blog/_posts/2021-04-14-local-storage-features-go-beta.md
new file mode 100644
index 0000000000000..457e9238f3c8d
--- /dev/null
+++ b/content/en/blog/_posts/2021-04-14-local-storage-features-go-beta.md
@@ -0,0 +1,216 @@
+---
+layout: blog
+title: "Local Storage: Storage Capacity Tracking, Distributed Provisioning and Generic Ephemeral Volumes hit Beta"
+date: 2021-04-14
+slug: local-storage-features-go-beta
+---
+
+**Author:** Patrick Ohly (Intel)
+
+The ["generic ephemeral volumes"](/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes) and ["storage capacity tracking"](/docs/concepts/storage/storage-capacity/) features are getting promoted to beta in Kubernetes 1.21. Together with the [distributed provisioning support](https://github.com/kubernetes-csi/external-provisioner#deployment-on-each-node) in the CSI external-provisioner, development and deployment of Container Storage Interface (CSI) drivers that manage storage locally on a node become a lot easier.
+
+This blog post explains how such drivers worked before and how these features can be used to make drivers simpler.
+
+## Problems we are solving
+
+There are drivers for local storage, like [TopoLVM](https://github.com/cybozu-go/topolvm) for traditional disks and [PMEM-CSI](https://intel.github.io/pmem-csi/latest/README.html) for [persistent memory](https://pmem.io/). They work and are ready for use today, even on older Kubernetes releases, but making that possible was not trivial.
+
+### Central component required
+
+The first problem is volume provisioning: it is handled through the Kubernetes control plane. Some component must react to [PersistentVolumeClaims](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) (PVCs) and create volumes. Usually, that is handled by a central deployment of the [CSI external-provisioner](https://kubernetes-csi.github.io/docs/external-provisioner.html) and a CSI driver component that then connects to the storage backplane. But for local storage, there is no such backplane.
+
+TopoLVM solved this by having its different components communicate with each other through the Kubernetes API server by creating and reacting to custom resources. So although TopoLVM is based on CSI, a standard that is independent of a particular container orchestrator, TopoLVM only works on Kubernetes.
+
+PMEM-CSI created its own storage backplane with communication through gRPC calls. Securing that communication depends on TLS certificates, which made driver deployment more complicated.
+
+### Informing Pod scheduler about capacity
+
+The next problem is scheduling. When volumes get created independently of pods ("immediate binding"), the CSI driver must pick a node without knowing anything about the pod(s) that are going to use it. Topology information then forces those pods to run on the node where the volume was created. If other resources like RAM or CPU are exhausted there, the pod cannot start. This can be avoided by configuring in the StorageClass that volume creation is meant to wait for the first pod that uses a volume (`volumeBindingMode: WaitForFirstConsumer`). In that mode, the Kubernetes scheduler tentatively picks a node based on other constraints and then the external-provisioner is asked to create a volume such that it is usable there.
If local storage is exhausted, the provisioner [can ask](https://github.com/kubernetes-csi/external-provisioner/blob/master/doc/design.md) for another scheduling round. But without information about available capacity, the scheduler might always pick the same unsuitable node.
+
+Both TopoLVM and PMEM-CSI solved this with scheduler extenders. This works, but it is hard to configure when deploying the driver because communication between kube-scheduler and the driver is very dependent on how the cluster was set up.
+
+### Rescheduling
+
+A common use case for local storage is scratch space. Ephemeral volumes that get created for a pod and destroyed together with it are a better fit for that use case than persistent volumes. The initial API for supporting ephemeral volumes with CSI drivers (hence called ["*CSI* ephemeral volumes"](/docs/concepts/storage/ephemeral-volumes/#csi-ephemeral-volumes)) was [designed for light-weight volumes](https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/20190122-csi-inline-volumes.md) where volume creation is unlikely to fail. Volume creation happens after pods have been permanently scheduled onto a node, in contrast to the traditional provisioning where volume creation is tried before scheduling a pod onto a node. CSI drivers must be modified to support "CSI ephemeral volumes", which was done for TopoLVM and PMEM-CSI. But due to the design of the feature in Kubernetes, pods can get stuck permanently if storage capacity runs out on a node. The scheduler extenders try to avoid that, but cannot be 100% reliable.
+
+## Enhancements in Kubernetes 1.21
+
+### Distributed provisioning
+
+Starting with [external-provisioner v2.1.0](https://github.com/kubernetes-csi/external-provisioner/releases/tag/v2.1.0), released for Kubernetes 1.20, provisioning can be handled by external-provisioner instances that get [deployed together with the CSI driver on each node](https://github.com/kubernetes-csi/external-provisioner#deployment-on-each-node) and then cooperate to provision volumes ("distributed provisioning"). There is no longer any need for a central component, and thus no need for communication between nodes, at least not for provisioning.
+
+### Storage capacity tracking
+
+A scheduler extender still needs some way to find out about capacity on each node. When PMEM-CSI switched to distributed provisioning in v0.9.0, this was done by querying the metrics data exposed by the local driver containers. But it is better for users, too, to eliminate the need for a scheduler extender completely, because driver deployment becomes simpler. [Storage capacity tracking](/docs/concepts/storage/storage-capacity/), [introduced in 1.19](/blog/2020/09/01/ephemeral-volumes-with-storage-capacity-tracking/) and promoted to beta in Kubernetes 1.21, achieves that. It works by publishing information about capacity in `CSIStorageCapacity` objects. The scheduler itself then uses that information to filter out unsuitable nodes. Because the information might not be quite up-to-date, pods may still get assigned to nodes with insufficient storage; it's just less likely, and the next scheduling attempt for a pod should work better once the information has been refreshed.
+
+### Generic ephemeral volumes
+
+So CSI drivers still need the ability to recover from a bad scheduling decision, something that turned out to be impossible to implement for "CSI ephemeral volumes".
["*Generic* ephemeral +volumes"](/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes), +another feature that got promoted to beta in 1.21, don't have that +limitation. This feature adds a controller that will create and manage +PVCs with the lifetime of the Pod and therefore the normal recovery +mechanism also works for them. Existing storage drivers will be able +to process these PVCs without any new logic to handle this new +scenario. + +## Known limitations + +Both generic ephemeral volumes and storage capacity tracking increase +the load on the API server. Whether that is a problem depends a lot on +the kind of workload, in particular how many pods have volumes and how +often those need to be created and destroyed. + +No attempt was made to model how scheduling decisions affect storage +capacity. That's because the effect can vary considerably depending on +how the storage system handles storage. The effect is that multiple +pods with unbound volumes might get assigned to the same node even +though there is only sufficient capacity for one pod. Scheduling +should recover, but it would be more efficient if the scheduler knew +more about storage. + +Because storage capacity gets published by a running CSI driver and +the cluster autoscaler needs information about a node that hasn't been +created yet, it will currently not scale up a cluster for pods that +need volumes. There is an [idea how to provide that +information](https://github.com/kubernetes/autoscaler/pull/3887), but +more work is needed in that area. + +Distributed snapshotting and resizing are not currently supported. It +should be doable to adapt the respective sidecar and there are +tracking issues for external-snapshotter and external-resizer open +already, they just need some volunteer. + +The recovery from a bad scheduling decising can fail for pods with +multiple volumes, in particular when those volumes are local to nodes: +if one volume can be created and then storage is insufficient for +another volume, the first volume continues to exist and forces the +scheduler to put the pod onto the node of that volume. There is an +idea how do deal with this, [rolling back the provision of the +volume](https://github.com/kubernetes/enhancements/pull/1703), but +this is only in the very early stages of brainstorming and not even a +merged KEP yet. For now it is better to avoid creating pods with more +than one persistent volume. + +## Enabling the new features and next steps + +With the feature entering beta in the 1.21 release, no additional actions are needed to enable it. Generic +ephemeral volumes also work without changes in CSI drivers. For more +information, see the +[documentation](/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes) +and the [previous blog +post](/blog/2020/09/01/ephemeral-volumes-with-storage-capacity-tracking/) +about it. The API has not changed at all between alpha and beta. + +For the other two features, the external-provisioner documentation +explains how CSI driver developers must change how their driver gets +deployed to support [storage capacity +tracking](https://github.com/kubernetes-csi/external-provisioner#capacity-support) +and [distributed +provisioning](https://github.com/kubernetes-csi/external-provisioner#deployment-on-each-node). +These two features are independent, therefore it is okay to enable +only one of them. 
+
+[SIG Storage](https://github.com/kubernetes/community/tree/master/sig-storage) would like to hear from you if you are using these new features. We can be reached through [email](https://groups.google.com/forum/#!forum/kubernetes-sig-storage), on [Slack](https://slack.k8s.io/) (channel [`#sig-storage`](https://kubernetes.slack.com/messages/sig-storage)), and in the [regular SIG meeting](https://github.com/kubernetes/community/tree/master/sig-storage#meeting). A description of your workload would be very useful to validate design decisions, set up performance tests, and eventually promote these features to GA.
+
+## Acknowledgements
+
+Thanks a lot to the members of the community who have contributed to these features or given feedback, including members of SIG Scheduling, SIG Auth, and of course SIG Storage!
diff --git a/content/en/blog/_posts/2021-04-15-Three-Tenancy-Models-for-Kubernetes.md b/content/en/blog/_posts/2021-04-15-Three-Tenancy-Models-for-Kubernetes.md
new file mode 100644
index 0000000000000..a3687cf5fd111
--- /dev/null
+++ b/content/en/blog/_posts/2021-04-15-Three-Tenancy-Models-for-Kubernetes.md
@@ -0,0 +1,80 @@
+---
+layout: blog
+title: 'Three Tenancy Models For Kubernetes'
+date: 2021-04-15
+slug: three-tenancy-models-for-kubernetes
+---
+
+**Authors:** Ryan Bezdicek (Medtronic), Jim Bugwadia (Nirmata), Tasha Drew (VMware), Fei Guo (Alibaba), Adrian Ludwin (Google)
+
+Kubernetes clusters are typically used by several teams in an organization. In other cases, Kubernetes may be used to deliver applications to end users requiring segmentation and isolation of resources across users from different organizations. In both cases, securely sharing Kubernetes control plane and worker node resources helps maximize productivity and save costs.
+
+The Kubernetes Multi-Tenancy Working Group is chartered with defining tenancy models for Kubernetes and making it easier to operationalize tenancy-related use cases. This blog post, from the working group members, describes three common tenancy models and introduces related working group projects.
+
+We will also be presenting on this content and discussing different use cases at our KubeCon EU 2021 panel session, [Multi-tenancy vs. Multi-cluster: When Should you Use What?](https://sched.co/iE66).
+
+## Namespaces as a Service
+
+With the *namespaces-as-a-service* model, tenants share a cluster and tenant workloads are restricted to a set of Namespaces assigned to the tenant. Cluster control plane resources, like the API server and scheduler, and worker node resources, like CPU and memory, are available for use across all tenants.
+
+To isolate tenant workloads, each namespace must also contain:
+* **[role bindings](/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding):** for controlling access to the namespace
+* **[network policies](/docs/concepts/services-networking/network-policies/):** to prevent network traffic across tenants
+* **[resource quotas](/docs/concepts/policy/resource-quotas/):** to limit usage and ensure fairness across tenants
+
+With this model, tenants share cluster-wide resources like ClusterRoles and CustomResourceDefinitions (CRDs) and hence cannot create or update these cluster-wide resources.
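+
+As an illustration of those per-namespace guardrails, here is a minimal sketch for a hypothetical tenant namespace `tenant-a`, combining a deny-all network policy with a resource quota (the quota values are arbitrary placeholders; a role binding would accompany these in practice):
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: default-deny-all
+  namespace: tenant-a
+spec:
+  podSelector: {}        # selects every Pod in the namespace
+  policyTypes:
+    - Ingress
+    - Egress
+---
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: tenant-a-quota
+  namespace: tenant-a
+spec:
+  hard:
+    requests.cpu: "4"
+    requests.memory: 8Gi
+    pods: "20"
+```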
+
+The [Hierarchical Namespace Controller (HNC)](/blog/2020/08/14/introducing-hierarchical-namespaces/) project makes it easier to manage namespace-based tenancy by allowing users to create additional namespaces under a namespace, and propagating resources within the namespace hierarchy. This allows self-service namespaces for tenants, without requiring cluster-wide permissions.
+
+The [Multi-Tenancy Benchmarks (MTB)](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks) project provides benchmarks and a command-line tool that performs several configuration and runtime checks to report if tenant namespaces are properly isolated and the necessary security controls are implemented.
+
+## Clusters as a Service
+
+With the *clusters-as-a-service* usage model, each tenant gets their own cluster. This model allows tenants to have different versions of cluster-wide resources such as CRDs, and provides full isolation of the Kubernetes control plane.
+
+The tenant clusters may be provisioned using projects like [Cluster API (CAPI)](https://cluster-api.sigs.k8s.io/) where a management cluster is used to provision multiple workload clusters. A workload cluster is assigned to a tenant and tenants have full control over cluster resources. Note that in most enterprises a central platform team may be responsible for managing required add-on services such as security and monitoring services, and for providing cluster lifecycle management services such as patching and upgrades. A tenant administrator may be restricted from modifying the centrally managed services and other critical cluster information.
+
+## Control planes as a Service
+
+In a variation of the *clusters-as-a-service* model, the tenant cluster may be a **virtual cluster** where each tenant gets their own dedicated Kubernetes control plane but shares worker node resources. As with other forms of virtualization, users of a virtual cluster see no significant differences between a virtual cluster and other Kubernetes clusters. This is sometimes referred to as `Control Planes as a Service` (CPaaS).
+
+A virtual cluster of this type shares worker node resources and workload-state-independent control plane components, like the scheduler. Other workload-aware control plane components, like the API server, are created on a per-tenant basis to allow overlaps, and additional components are used to synchronize and manage state across the per-tenant control plane and the underlying shared cluster resources. With this model, users can manage their own cluster-wide resources.
+
+The [Virtual Cluster](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/incubator/virtualcluster) project implements this model, where a `supercluster` is shared by multiple `virtual clusters`. The [Cluster API Nested](https://github.com/kubernetes-sigs/cluster-api-provider-nested) project is extending this work to conform to the CAPI model, allowing use of familiar API resources to provision and manage virtual clusters.
+
+## Security considerations
+
+Cloud native security involves different system layers and lifecycle phases as described in the [Cloud Native Security Whitepaper](/blog/2020/11/18/cloud-native-security-for-your-clusters) from CNCF SIG Security. Without proper security measures implemented across all layers and phases, Kubernetes tenant isolation can be compromised and a security breach with one tenant can threaten other tenants.
+
+It is important for anyone new to Kubernetes to realize that the default installation of a new upstream Kubernetes cluster is not secure, and you will need to invest in hardening it in order to avoid security issues.
+
+At a minimum, the following security measures are required:
+* image scanning: container image vulnerabilities can be exploited to execute commands and access additional resources.
+* [RBAC](/docs/reference/access-authn-authz/rbac/): for *namespaces-as-a-service*, user roles and permissions must be properly configured at a per-namespace level; for other models, tenants may need to be restricted from accessing centrally managed add-on services and other cluster-wide resources.
+* [network policies](/docs/concepts/services-networking/network-policies/): for *namespaces-as-a-service*, default network policies that deny all ingress and egress traffic are recommended to prevent cross-tenant network traffic, and may also be used as a best practice for other tenancy models.
+* [Kubernetes Pod Security Standards](/docs/concepts/security/pod-security-standards/): to enforce Pod hardening best practices, the `Restricted` policy is recommended as the default for tenant workloads, with exclusions configured only as needed.
+* [CIS Benchmarks for Kubernetes](https://www.cisecurity.org/benchmark/kubernetes/): these guidelines should be used to properly configure Kubernetes control plane and worker node components.
+
+Additional recommendations include using:
+* policy engines: for configuration security best practices, such as only allowing trusted registries.
+* runtime scanners: to detect and report runtime security events.
+* VM-based container sandboxing: for stronger data plane isolation.
+
+While proper security is required independently of tenancy models, not having essential security controls like [pod security](/docs/concepts/security/pod-security-standards/) in a shared cluster provides attackers with means to compromise tenancy models and possibly access sensitive information across tenants, increasing the overall risk profile.
+
+## Summary
+
+A 2020 CNCF survey showed that production Kubernetes usage has increased by over 300% since 2016. As an increasing number of Kubernetes workloads move to production, organizations are looking for ways to share Kubernetes resources across teams for agility and cost savings.
+
+The **namespaces as a service** tenancy model allows sharing clusters and hence enables resource efficiencies. However, it requires proper security configurations and has limitations as all tenants share the same cluster-wide resources.
+
+The **clusters as a service** tenancy model addresses these limitations, but with higher management and resource overhead.
+
+The **control planes as a service** model provides a way to share resources of a single Kubernetes cluster and also lets tenants manage their own cluster-wide resources. Sharing worker node resources increases resource efficiencies, but also exposes cross-tenant security and isolation concerns that exist for shared clusters.
+
+In many cases, organizations will use multiple tenancy models to address different use cases, as different product and development teams will have varying needs. Following security and management best practices, such as applying [Pod Security Standards](/docs/concepts/security/pod-security-standards/) and not using the `default` namespace, makes it easier to switch from one model to another.
+
+The [Kubernetes Multi-Tenancy Working Group](https://github.com/kubernetes-sigs/multi-tenancy) has created several projects like [Hierarchical Namespaces Controller](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/incubator/hnc), [Virtual Cluster](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/incubator/virtualcluster) / [CAPI Nested](https://github.com/kubernetes-sigs/cluster-api-provider-nested), and [Multi-Tenancy Benchmarks](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks) to make it easier to provision and manage multi-tenancy models.
+
+If you are interested in multi-tenancy topics, or would like to share your use cases, please join us in an upcoming [community meeting](https://github.com/kubernetes/community/blob/master/wg-multitenancy/README.md) or reach out on the *wg-multitenancy channel* on the [Kubernetes Slack](https://slack.k8s.io/).
+
diff --git a/content/en/blog/_posts/2021-04-16-volume-health-monitoring-alpha.md b/content/en/blog/_posts/2021-04-16-volume-health-monitoring-alpha.md
new file mode 100644
index 0000000000000..99b2fd65b2e32
--- /dev/null
+++ b/content/en/blog/_posts/2021-04-16-volume-health-monitoring-alpha.md
@@ -0,0 +1,95 @@
+---
+layout: blog
+title: "Volume Health Monitoring Alpha Update"
+date: 2021-04-16
+slug: volume-health-monitoring-alpha-update
+---
+
+**Author:** Xing Yang (VMware)
+
+The CSI Volume Health Monitoring feature, originally introduced in 1.19, has undergone a large update for the 1.21 release.
+
+## Why add Volume Health Monitoring to Kubernetes?
+
+Without Volume Health Monitoring, Kubernetes has no knowledge of the state of the underlying volumes of a storage system after a PVC is provisioned and used by a Pod. Many things could happen to the underlying storage system after a volume is provisioned in Kubernetes. For example, the volume could be deleted by accident outside of Kubernetes, the disk that the volume resides on could fail, it could be out of capacity, the disk may be degraded, which affects its performance, and so on. Even when the volume is mounted on a pod and used by an application, there could be problems later on such as read/write I/O errors, file system corruption, accidental unmounting of the volume outside of Kubernetes, etc. It is very hard to detect and debug root causes when something like this happens.
+
+Volume health monitoring can be very beneficial to Kubernetes users. It can communicate with the CSI driver to retrieve errors detected by the underlying storage system. Events can then be reported on PVCs so that the user can take action. For example, if the volume is out of capacity, the user could request a volume expansion to get more space.
+
+## What is Volume Health Monitoring?
+
+CSI Volume Health Monitoring allows CSI Drivers to detect abnormal volume conditions from the underlying storage systems and report them as events on PVCs or Pods.
+
+The Kubernetes components that monitor the volumes and report events with volume health information include the following:
+
+* Kubelet, in addition to gathering the existing volume stats, will watch the volume health of the PVCs on that node. If a PVC has an abnormal health condition, an event will be reported on the pod object that is using the PVC. If multiple pods are using the same PVC, events will be reported on all pods using that PVC.
+* An [External Volume Health Monitor Controller](https://github.com/kubernetes-csi/external-health-monitor) watches the volume health of PVCs and reports events on them.
+
+Note that the node side volume health monitoring logic lived in an external agent when this feature was first introduced in the Kubernetes 1.19 release. In Kubernetes 1.21, the node side volume health monitoring logic was moved from the external agent into the Kubelet, to avoid making duplicate CSI function calls. With this change in 1.21, a new alpha [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) `CSIVolumeHealth` was introduced for the volume health monitoring logic in Kubelet.
+
+Currently, the Volume Health Monitoring feature is informational only, as it only reports abnormal volume health events on PVCs or Pods. Users will need to check these events and manually fix the problems. This feature serves as a stepping stone towards programmatic detection and resolution of volume health issues by Kubernetes in the future.
+
+## How do I use Volume Health on my Kubernetes Cluster?
+
+To use the Volume Health feature, first make sure the CSI driver you are using supports this feature. Refer to this [CSI drivers doc](https://kubernetes-csi.github.io/docs/drivers.html) to find out which CSI drivers support this feature.
+
+To enable Volume Health Monitoring from the node side, the alpha feature gate `CSIVolumeHealth` needs to be enabled.
+
+If a CSI driver supports the Volume Health Monitoring feature from the controller side, events regarding abnormal volume conditions will be recorded on PVCs.
+
+If a CSI driver supports the Volume Health Monitoring feature from the controller side, users can also get events regarding node failures if the `enable-node-watcher` flag is set to true when deploying the External Health Monitor Controller. When a node failure event is detected, an event will be reported on the PVC to indicate that pods using this PVC are on a failed node.
+
+If a CSI driver supports the Volume Health Monitoring feature from the node side, events regarding abnormal volume conditions will be recorded on pods using the PVCs.
+
+## As a storage vendor, how do I add support for volume health to my CSI driver?
+
+Volume Health Monitoring includes two parts:
+* An External Volume Health Monitoring Controller monitors volume health from the controller side.
+* Kubelet monitors volume health from the node side.
+
+For details, see the [CSI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md) and the [Kubernetes-CSI Driver Developer Guide](https://kubernetes-csi.github.io/docs/volume-health-monitor.html).
+
+There is a sample implementation for volume health in the [CSI host path driver](https://github.com/kubernetes-csi/csi-driver-host-path).
+
+### Controller Side Volume Health Monitoring
+
+To learn how to deploy the External Volume Health Monitoring controller, see [CSI external-health-monitor-controller](https://kubernetes-csi.github.io/docs/external-health-monitor-controller.html) in the CSI documentation.
+
+The External Health Monitor Controller calls either the `ListVolumes` or the `ControllerGetVolume` CSI RPC and reports VolumeConditionAbnormal events with messages on PVCs if abnormal volume conditions are detected. Only CSI drivers with the `LIST_VOLUMES` and `VOLUME_CONDITION` controller capabilities, or the `GET_VOLUME` and `VOLUME_CONDITION` controller capabilities, support Volume Health Monitoring in the external controller.
+
+To implement the volume health feature from the controller side, a CSI driver **must** add support for the new controller capabilities.
+
+If a CSI driver supports the `LIST_VOLUMES` and `VOLUME_CONDITION` controller capabilities, it **must** implement the controller RPC `ListVolumes` and report the volume condition in the response.
+
+If a CSI driver supports the `GET_VOLUME` and `VOLUME_CONDITION` controller capabilities, it **must** implement the controller RPC `ControllerGetVolume` and report the volume condition in the response.
+
+If a CSI driver supports the `LIST_VOLUMES`, `GET_VOLUME`, and `VOLUME_CONDITION` controller capabilities, only the `ListVolumes` CSI RPC will be invoked by the External Health Monitor Controller.
+
+### Node Side Volume Health Monitoring
+
+Kubelet calls the `NodeGetVolumeStats` CSI RPC and reports VolumeConditionAbnormal events with messages on Pods if abnormal volume conditions are detected. Only CSI drivers with the `VOLUME_CONDITION` node capability support Volume Health Monitoring in Kubelet.
+
+To implement the volume health feature from the node side, a CSI driver **must** add support for the new node capabilities.
+
+If a CSI driver supports the `VOLUME_CONDITION` node capability, it **must** report the volume condition in the node RPC `NodeGetVolumeStats`.
+
+## What’s next?
+
+Depending on feedback and adoption, the Kubernetes team plans to push the CSI volume health implementation to beta in either 1.22 or 1.23.
+
+We are also exploring how to use volume health information for programmatic detection and automatic reconciliation in Kubernetes.
+
+## How can I learn more?
+
+To learn the design details for Volume Health Monitoring, read the [Volume Health Monitor](https://github.com/kubernetes/enhancements/tree/master/keps/sig-storage/1432-volume-health-monitor) enhancement proposal.
+
+The Volume Health Monitor controller source code is at [https://github.com/kubernetes-csi/external-health-monitor](https://github.com/kubernetes-csi/external-health-monitor).
+
+There are also more details about volume health checks in the [Container Storage Interface Documentation](https://kubernetes-csi.github.io/docs/).
+
+## How do I get involved?
+
+The [Kubernetes Slack channel #csi](https://kubernetes.slack.com/messages/csi) and any of the [standard SIG Storage communication channels](https://github.com/kubernetes/community/blob/master/sig-storage/README.md#contact) are great ways to reach out to SIG Storage and the CSI team.
+
+We offer a huge thank you to the contributors who helped release this feature in 1.21. We want to thank Yuquan Ren ([NickrenREN](https://github.com/nickrenren)), who implemented the initial volume health monitor controller and agent in the external health monitor repo, and Ran Xu ([fengzixu](https://github.com/fengzixu)), who moved the volume health monitoring logic from the external agent to Kubelet in 1.21. We offer special thanks to the following people for their insightful reviews: David Ashpole ([dashpole](https://github.com/dashpole)), Michelle Au ([msau42](https://github.com/msau42)), David Eads ([deads2k](https://github.com/deads2k)), Elana Hashman ([ehashman](https://github.com/ehashman)), Seth Jennings ([sjenning](https://github.com/sjenning)), and Jiawei Wang ([Jiawei0227](https://github.com/Jiawei0227)).
+
+If you're interested in getting involved with the design and development of CSI or any part of the Kubernetes Storage system, join the [Kubernetes Storage Special Interest Group](https://github.com/kubernetes/community/tree/master/sig-storage) (SIG). We’re rapidly growing and always welcome new contributors.
diff --git a/content/en/blog/_posts/2021-04-19-introducing-indexed-jobs.md b/content/en/blog/_posts/2021-04-19-introducing-indexed-jobs.md
new file mode 100644
index 0000000000000..990dd6308b825
--- /dev/null
+++ b/content/en/blog/_posts/2021-04-19-introducing-indexed-jobs.md
@@ -0,0 +1,95 @@
+---
+title: "Introducing Indexed Jobs"
+date: 2021-04-19
+slug: introducing-indexed-jobs
+---
+
+**Author:** Aldo Culquicondor (Google)
+
+Once you have containerized a non-parallel [Job](/docs/concepts/workloads/controllers/job/), it is quite easy to get it up and running on Kubernetes without modifications to the binary. In most cases, when running parallel distributed Jobs, you had to set up a separate system to partition the work among the workers. For example, you could set up a task queue to [assign one work item to each Pod](/docs/tasks/job/coarse-parallel-processing-work-queue/) or [multiple items to each Pod until the queue is emptied](/docs/tasks/job/fine-parallel-processing-work-queue/).
+
+The Kubernetes 1.21 release introduces a new field to control Job _completion mode_, a configuration option that allows you to control how Pod completions affect the overall progress of a Job, with two possible options (for now):
+
+- `NonIndexed` (default): the Job is considered complete when there has been a number of successfully completed Pods equal to the specified number in `.spec.completions`. In other words, each Pod completion is equivalent to any other. Any Job you might have created before the introduction of completion modes is implicitly NonIndexed.
+- `Indexed`: the Job is considered complete when there is one successfully completed Pod associated with each index from 0 to `.spec.completions-1`. The index is exposed to each Pod in the `batch.kubernetes.io/job-completion-index` annotation and the `JOB_COMPLETION_INDEX` environment variable.
+
+You can start using Jobs with Indexed completion mode, or Indexed Jobs for short, to easily start parallel Jobs. Then, each worker Pod can have a statically assigned partition of the data based on the index. This saves you from having to set up a queuing system or even having to modify your binary!
+
+## Creating an Indexed Job
+
+To create an Indexed Job, you just have to add `completionMode: Indexed` to the Job spec and make use of the `JOB_COMPLETION_INDEX` environment variable.
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: 'sample-job'
+spec:
+  completions: 3
+  parallelism: 3
+  completionMode: Indexed
+  template:
+    spec:
+      restartPolicy: Never
+      containers:
+      - command:
+        - 'bash'
+        - '-c'
+        - 'echo "My partition: ${JOB_COMPLETION_INDEX}"'
+        image: 'docker.io/library/bash'
+        name: 'sample-load'
+```
+
+Note that completion mode is an alpha feature in the 1.21 release. To be able to use it in your cluster, make sure to enable the `IndexedJob` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) on the [API server](/docs/reference/command-line-tools-reference/kube-apiserver/) and the [controller manager](/docs/reference/command-line-tools-reference/kube-controller-manager/).
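+
+If you just want to experiment locally, one convenient way (an assumption on our part, not the only option) to get a test cluster with the gate enabled on all components is a [kind](https://kind.sigs.k8s.io/) cluster configuration like the following, created with `kind create cluster --config indexed-jobs.yaml`:
+
+```yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+featureGates:
+  IndexedJob: true   # enables the gate on the API server, controller manager, and kubelets
+nodes:
+  - role: control-plane
+  - role: worker
+```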
+
+When you run the example, you will see that each of the three created Pods gets a different completion index. For the user's convenience, the control plane sets the `JOB_COMPLETION_INDEX` environment variable, but you can choose to [set your own](/docs/tasks/inject-data-application/environment-variable-expose-pod-information/) or [expose the index as a file](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/).
+
+See [Indexed Job for parallel processing with static work assignment](/docs/tasks/job/indexed-parallel-processing-static/) for a step-by-step guide, and a few more examples.
+
+## Future plans
+
+SIG Apps envisions that there might be more completion modes that enable more use cases for the Job API. We welcome you to open issues in [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) with your suggestions.
+
+In particular, we are considering an `IndexedAndUnique` mode where the indexes are not just available as an annotation, but are also part of the Pod names, similar to {{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}}. This should facilitate inter-Pod communication for tightly coupled Pods. You can join the discussion in the [open issue](https://github.com/kubernetes/kubernetes/issues/99497).
+
+## Wrap-up
+
+Indexed Jobs allow you to statically partition work among the workers of your parallel Jobs. SIG Apps hopes that this feature facilitates the migration of more batch workloads to Kubernetes.
\ No newline at end of file
diff --git a/content/en/blog/_posts/2021-04-20-Defining-NetworkPolicy-Conformance-For-CNIs.md b/content/en/blog/_posts/2021-04-20-Defining-NetworkPolicy-Conformance-For-CNIs.md
new file mode 100644
index 0000000000000..86c005eed1136
--- /dev/null
+++ b/content/en/blog/_posts/2021-04-20-Defining-NetworkPolicy-Conformance-For-CNIs.md
@@ -0,0 +1,479 @@
+---
+layout: blog
+title: "Defining Network Policy Conformance for Container Network Interface (CNI) providers"
+date: 2021-04-20
+slug: defining-networkpolicy-conformance-cni-providers
+---
+
+**Authors:** Matt Fenwick (Synopsys), Jay Vyas (VMWare), Ricardo Katz, Amim Knabben (Loadsmart), Douglas Schilling Landgraf (Red Hat), Christopher Tomkins (Tigera)
+
+Special thanks to Tim Hockin and Bowie Du (Google), Dan Winship and Antonio Ojea (Red Hat), Casey Davenport and Shaun Crampton (Tigera), and Abhishek Raut and Antonin Bas (VMware) for being supportive of this work, and working with us to resolve issues in different Container Network Interfaces (CNIs) over time.
+
+A brief conversation around "node local" Network Policies in April of 2020 inspired the creation of a NetworkPolicy subproject from SIG Network. It became clear that as a community, we need a rock-solid story around how to do pod network security on Kubernetes, and that this story needs a community around it, in order to grow the cultural adoption of enterprise security patterns in K8s.
+
+In this post we'll discuss:
+
+- Why we created a subproject for [Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
+- How we changed the Kubernetes e2e framework to `visualize` NetworkPolicy implementation of your CNI provider
+- The initial results of our comprehensive NetworkPolicy conformance validator, _Cyclonus_, built around these principles
+- Improvements subproject contributors have made to the NetworkPolicy user experience
+
+## Why we created a subproject for NetworkPolicies
+
+In April of 2020 it was becoming clear that many CNIs were emerging, and that vendors implement these CNIs in subtly different ways. Users were beginning to express a little bit of confusion around how to implement policies for different scenarios, and to ask for new features. It was clear that we needed to begin unifying the way we think about Network Policies in Kubernetes, to avoid API fragmentation and unnecessary complexity.
+
+For example:
+- In order to be flexible to the user’s environment, Calico as a CNI provider can be run using IPIP or VXLAN mode, or without encapsulation overhead. CNIs such as Antrea and Cilium offer similar configuration options as well.
+- Some CNI plugins offer iptables for NetworkPolicies amongst other options, whereas other CNIs use a completely different technology stack (for example, the Antrea project uses Open vSwitch rules).
+- Some CNI plugins only implement a subset of the Kubernetes NetworkPolicy API, and some a superset. For example, certain plugins don't support the ability to target a named port; others don't work with certain IP address types, and there are diverging semantics for similar policy types.
+- Some CNI plugins combine with OTHER CNI plugins in order to implement NetworkPolicies (Canal), some CNIs might mix implementations (Multus), and some clouds do routing separately from NetworkPolicy implementation.
+
+Although this complexity is to some extent necessary to support different environments, end-users find that they need to follow a multistep process to implement Network Policies to secure their applications:
+- Confirm that their network plugin supports NetworkPolicies (some don't, such as Flannel)
+- Confirm that their cluster's network plugin supports the specific NetworkPolicy features that they are interested in (again, the named port or port range examples come to mind here)
+- Confirm that their application's Network Policy definitions are doing the right thing
+- Find out the nuances of a vendor's implementation of policy, and check whether or not that implementation has a CNI neutral implementation (which is sometimes adequate for users)
+
+The NetworkPolicy project in upstream Kubernetes aims to provide a community where people can learn about, and contribute to, the Kubernetes NetworkPolicy API and the surrounding ecosystem.
+
+## The First step: A validation framework for NetworkPolicies that was intuitive to use and understand
+
+The Kubernetes end to end suite has always had NetworkPolicy tests, but these weren't run in CI, and the way they were implemented didn't provide holistic, easily consumable information about how a policy was working in a cluster. This is because the original tests didn't provide any kind of visual summary of connectivity across a cluster.
We thus initially set out to make it easy to confirm CNI support for NetworkPolicies by +making the end to end tests (which are often used by administrators or users to diagnose cluster conformance) easy to interpret. + +To solve the problem of confirming that CNIs support the basic features most users care about +for a policy, we built a new NetworkPolicy validation tool into the Kubernetes e2e +framework which allows for visual inspection of policies and their effect on a standard set of pods in a cluster. +For example, take the following test output. We found a bug in +[OVN Kubernetes](https://github.com/ovn-org/ovn-kubernetes/issues/1782). This bug has now been resolved. With this tool the bug was really +easy to characterize, wherein certain policies caused a state-modification that, +later on, caused traffic to incorrectly be blocked (even after all Network Policies were deleted from the cluster). + +This is the network policy for the test in question: +```yaml +metadata: + creationTimestamp: null + name: allow-ingress-port-80 +spec: + ingress: + - ports: + - port: serve-80-tcp + podSelector: {} +``` + +These are the expected connectivity results. The test setup is 9 pods (3 namespaces: x, y, and z; +and 3 pods in each namespace: a, b, and c); each pod runs a server on the same port and protocol +that can be reached through HTTP calls in the absence of network policies. Connectivity is verified +by using the [agnhost](https://github.com/kubernetes/kubernetes/tree/master/test/images/agnhost) network utility to issue HTTP calls on a port and protocol that other pods are +expected to be serving. A test scenario first +runs a connectivity check to ensure that each pod can reach each other pod, for 81 (= 9 x 9) data +points. This is the "control". Then perturbations are applied, depending on the test scenario: +policies are created, updated, and deleted; labels are added and removed from pods and namespaces, +and so on. After each change, the connectivity matrix is recollected and compared to the expected +connectivity. + +These results give a visual indication of connectivity in a simple matrix. Going down the leftmost column is the "source" +pod, or the pod issuing the request; going across the topmost row is the "destination" pod, or the pod +receiving the request. A `.` means that the connection was allowed; an `X` means the connection was +blocked. For example: + +``` +Nov 4 16:58:43.449: INFO: expected: + +- x/a x/b x/c y/a y/b y/c z/a z/b z/c +x/a . . . . . . . . . +x/b . . . . . . . . . +x/c . . . . . . . . . +y/a . . . . . . . . . +y/b . . . . . . . . . +y/c . . . . . . . . . +z/a . . . . . . . . . +z/b . . . . . . . . . +z/c . . . . . . . . . +``` + +Below are the observed connectivity results in the case of the OVN Kubernetes bug. Notice how the top three rows indicate that +all requests from namespace x regardless of pod and destination were blocked. Since these +experimental results do not match the expected results, a failure will be reported. Note +how the specific pattern of failure provides clear insight into the nature of the problem -- +since all requests from a specific namespace fail, we have a clear clue to start our +investigation. + +``` +Nov 4 16:58:43.449: INFO: observed: + +- x/a x/b x/c y/a y/b y/c z/a z/b z/c +x/a X X X X X X X X X +x/b X X X X X X X X X +x/c X X X X X X X X X +y/a . . . . . . . . . +y/b . . . . . . . . . +y/c . . . . . . . . . +z/a . . . . . . . . . +z/b . . . . . . . . . +z/c . . . . . . . . . 
+```
+
+This was one of our earliest wins in the Network Policy group, as we were able to identify and work with the OVN Kubernetes group to fix a bug in egress policy processing.
+
+However, even though this tool has made it easy to validate roughly 30 common scenarios, it doesn't validate *all* Network Policy scenarios, because there is an enormous number of possible permutations that one might create (technically infinite, given the unbounded namespace/pod/port/protocol variations one can create).
+
+Once these tests were in play, we worked with the Upstream SIG Network and SIG Testing communities (thanks to Antonio Ojea and Ben Elder) to put a testgrid Network Policy job in place. This job continuously runs the entire suite of Network Policy tests against [GCE with Calico as a Network Policy provider](https://testgrid.k8s.io/sig-network-gce#presubmit-network-policies,%20google-gce).
+
+Part of our role as a subproject is to help make sure that, when these tests break, we can help triage them effectively.
+
+## Cyclonus: The next step towards Network Policy conformance {#cyclonus}
+
+Around the time that we were finishing the validation work, it became clear from the community that, in general, we needed to solve the overall problem of testing ALL possible Network Policy implementations. For example, Dan Winship recently wrote a KEP which introduced the concept of micro-versioning to Network Policies to accommodate [describing this at the API level](https://github.com/kubernetes/enhancements/pull/2137/files).
+
+In response to this increasingly obvious need to comprehensively evaluate Network Policy implementations from all vendors, Matt Fenwick decided to evolve our approach to Network Policy validation again by creating Cyclonus.
+
+Cyclonus is a comprehensive Network Policy fuzzing tool which verifies a CNI provider against hundreds of different Network Policy scenarios, by defining similar truth table/policy combinations as demonstrated in the end to end tests, while also providing a hierarchical representation of policy "categories". We've found some interesting nuances and issues in almost every CNI we've tested so far, and have even contributed some fixes back.
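+
+Before trying it yourself, note that Cyclonus creates and deletes namespaces, pods, and policies, so the Job manifest shown next runs under a dedicated `cyclonus` service account with broad permissions. A minimal (deliberately permissive) RBAC sketch for a disposable test cluster might look like the following, assuming the `default` namespace; on anything shared, scope the role down instead of binding `cluster-admin`:
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: cyclonus
+  namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: cyclonus
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin   # blunt but simple for a throwaway test cluster
+subjects:
+  - kind: ServiceAccount
+    name: cyclonus
+    namespace: default
+```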
+ +To perform a Cyclonus validation run, you create a Job manifest similar to: + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: cyclonus +spec: + template: + spec: + restartPolicy: Never + containers: + - command: + - ./cyclonus + - generate + - --perturbation-wait-seconds=15 + - --server-protocol=tcp,udp + name: cyclonus + imagePullPolicy: IfNotPresent + image: mfenwick100/cyclonus:latest + serviceAccount: cyclonus +``` + +Cyclonus outputs a report of all the test cases it will run: +``` +test cases to run by tag: +- target: 6 +- peer-ipblock: 4 +- udp: 16 +- delete-pod: 1 +- conflict: 16 +- multi-port/protocol: 14 +- ingress: 51 +- all-pods: 14 +- egress: 51 +- all-namespaces: 10 +- sctp: 10 +- port: 56 +- miscellaneous: 22 +- direction: 100 +- multi-peer: 0 +- any-port-protocol: 2 +- set-namespace-labels: 1 +- upstream-e2e: 0 +- allow-all: 6 +- namespaces-by-label: 6 +- deny-all: 10 +- pathological: 6 +- action: 6 +- rule: 30 +- policy-namespace: 4 +- example: 0 +- tcp: 16 +- target-namespace: 3 +- named-port: 24 +- update-policy: 1 +- any-peer: 2 +- target-pod-selector: 3 +- IP-block-with-except: 2 +- pods-by-label: 6 +- numbered-port: 28 +- protocol: 42 +- peer-pods: 20 +- create-policy: 2 +- policy-stack: 0 +- any-port: 14 +- delete-namespace: 1 +- delete-policy: 1 +- create-pod: 1 +- IP-block-no-except: 2 +- create-namespace: 1 +- set-pod-labels: 1 +testing 112 cases +``` + +Note that Cyclonus tags its tests based on the type of policy being created, because +the policies themselves are auto-generated, and thus have no meaningful names to be recognized by. + +For each test, Cyclonus outputs a truth table, which is again similar to that of the +E2E tests, along with the policy being validated: + +``` +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + creationTimestamp: null + name: base + namespace: x +spec: + egress: + - ports: + - port: 81 + to: + - namespaceSelector: + matchExpressions: + - key: ns + operator: In + values: + - "y" + - z + podSelector: + matchExpressions: + - key: pod + operator: In + values: + - a + - b + - ports: + - port: 53 + protocol: UDP + ingress: + - from: + - namespaceSelector: + matchExpressions: + - key: ns + operator: In + values: + - x + - "y" + podSelector: + matchExpressions: + - key: pod + operator: In + values: + - b + - c + ports: + - port: 80 + protocol: TCP + podSelector: + matchLabels: + pod: a + policyTypes: + - Ingress + - Egress + +0 wrong, 0 ignored, 81 correct ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| TCP/80 | X/A | X/B | X/C | Y/A | Y/B | Y/C | Z/A | Z/B | Z/C | +| TCP/81 | | | | | | | | | | +| UDP/80 | | | | | | | | | | +| UDP/81 | | | | | | | | | | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| x/a | X | X | X | X | X | X | X | X | X | +| | X | X | X | . | . | X | . | . | X | +| | X | X | X | X | X | X | X | X | X | +| | X | X | X | X | X | X | X | X | X | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| x/b | . | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| x/c | . | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| y/a | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . 
| . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| y/b | . | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| y/c | . | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| z/a | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| z/b | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| z/c | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +``` + +Both Cyclonus and the e2e tests use the same strategy to validate a Network Policy - probing pods over TCP or UDP, with +SCTP support available as well for CNIs that support it (such as Calico). + +As examples of how we use Cyclonus to help make CNI implementations better from a Network Policy perspective, you can see the following issues: + +- [Antrea: NetworkPolicy: unable to allow ingress by CIDR](https://github.com/vmware-tanzu/antrea/issues/1764) +- [Calico: default missing protocol to TCP; don't let single port overwrite all ports](https://github.com/projectcalico/libcalico-go/pull/1373) +- [Cilium: Egress Network Policy allows traffic that should be denied](https://github.com/cilium/cilium/issues/14678) + +The good news is that Antrea and Calico have already merged fixes for all the issues found and other CNI providers are working on it, +with the support of SIG Network and the Network Policy subproject. + +Are you interested in verifying NetworkPolicy functionality on your cluster? +(if you care about security or offer multi-tenant SaaS, you should be) +If so, you can run the upstream end to end tests, or Cyclonus, or both. +- If you're just getting started with NetworkPolicies and want to simply + verify the "common" NetworkPolicy cases that most CNIs should be + implementing correctly, in a way that is quick to diagnose, then you're + better off running the e2e tests only. +- If you are deeply curious about your CNI provider's NetworkPolicy + implementation, and want to verify it: use Cyclonus. +- If you want to test *hundreds* of policies, and evaluate your CNI plugin + for comprehensive functionality, for deep discovery of potential security + holes: use Cyclonus, and also consider running end-to-end cluster tests. +- If you're thinking of getting involved with the upstream NetworkPolicy efforts: + use Cyclonus, and read at least an outline of which e2e tests are relevant. + +## Where to start with NetworkPolicy testing? 
+
+## Improvements to the NetworkPolicy API and user experience
+
+In addition to cleaning up the validation story for CNI plugins that implement NetworkPolicies,
+subproject contributors have also spent some time improving the Kubernetes NetworkPolicy API for a few commonly requested features.
+After months of deliberation, we eventually settled on a few core areas for improvement:
+
+- Port Range policies: We now allow you to specify a *range* of ports for a policy.
+  This allows users interested in scenarios like FTP or virtualization to enable advanced policies.
+  The port range option for network policies will be available to use in Kubernetes 1.21.
+  Read more in [targeting a range of ports](/docs/concepts/services-networking/network-policies/#targeting-a-range-of-ports).
+- Namespace as name policies: Allowing users in Kubernetes >= 1.21 to target namespaces by name
+  when building Network Policy objects. This was done in collaboration with Jordan Liggitt and Tim Hockin on the API Machinery side.
+  This change allowed us to improve the Network Policy user experience without actually
+  changing the API! For more details, you can read
+  [Automatic labelling](/docs/concepts/overview/working-with-objects/namespaces/#automatic-labelling) in the page about Namespaces.
+  The TL;DR is that for Kubernetes 1.21 and later, **all namespaces** have the following label added by default:
+
+  ```
+  kubernetes.io/metadata.name: <name-of-namespace>
+  ```
+
+This means you can write a namespace policy against this namespace, even if you can't edit its labels.
+For example, this policy will 'just work', without needing to run a command such as `kubectl edit namespace`.
+In fact, it will even work if you can't edit or view this namespace's data at all, because of the magic of API server defaulting.
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: test-network-policy
+  namespace: default
+spec:
+  podSelector:
+    matchLabels:
+      role: db
+  policyTypes:
+  - Ingress
+  # Allow inbound traffic to Pods labelled role=db, in the namespace 'default',
+  # provided that the source is a Pod in the namespace 'my-namespace'
+  ingress:
+  - from:
+    - namespaceSelector:
+        matchLabels:
+          kubernetes.io/metadata.name: my-namespace
+```
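+
+As a quick sanity check, you can confirm that the API server has applied the automatic label to a namespace (a sketch, assuming a cluster running Kubernetes 1.21 or later and a namespace named `my-namespace`):
+
+```
+# The kubernetes.io/metadata.name label is added and kept in sync by the API server
+kubectl get namespace my-namespace --show-labels
+```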
+
+## Results
+
+In our tests, we found that:
+
+- Antrea and Calico are at a point where they support all of Cyclonus's scenarios, modulo a few very minor tweaks which we've made.
+- Cilium also conformed to the majority of the policies, apart from known features that aren't fully supported (for example, related to the way Cilium deals with pod CIDR policies).
+
+If you are a CNI provider and interested in helping us to do a better job curating large tests of network policies, please reach out!
+We are continuing to curate the Network Policy conformance results from Cyclonus [here](https://raw.githubusercontent.com/K8sbykeshed/cyclonus-artifacts/), but
+we cannot maintain all of the subtleties in NetworkPolicy testing data on our own. For now, we use GitHub Actions and kind to test in CI.
+
+## The Future
+
+We're also working on some improvements for the future of Network Policies, including:
+
+- Fully qualified domain policies: The Google Cloud team created a prototype (which
+  we are really excited about) of [FQDN policies](https://github.com/GoogleCloudPlatform/gke-fqdnnetworkpolicies-golang).
+  This tool uses the Network Policy API to enforce policies against L7 URLs, by finding
+  their IPs and blocking them proactively when requests are made.
+- Cluster Administrative policies: We're working hard at enabling *administrative* or
+  *cluster scoped* Network Policies for the future. These are being presented iteratively to the NetworkPolicy subproject.
+  You can read about them in [Cluster Scoped Network Policy](https://docs.google.com/presentation/d/1Jk86jtS3TcGAugVSM_I4Yds5ukXFJ4F1ZCvxN5v2BaY/).
+
+The Network Policy subproject meets on Mondays at 4PM EST. For details, check out the
+[SIG Network community repo](https://github.com/kubernetes/community/tree/master/sig-network). We'd love
+to hang out with you, hack on stuff, and help you adopt K8s Network Policies for your cluster wherever possible.
+
+### A quick note on user feedback
+
+We've gotten a lot of ideas and feedback from users on Network Policies. A lot of people have interesting ideas about Network Policies,
+but we've found that, as a subproject, very few people were deeply interested in implementing these ideas to the full extent.
+
+Almost every change to the NetworkPolicy API involves weeks or months of discussion to cover different cases and ensure no CVEs are being introduced. Thus, long-term ownership
+is the biggest impediment to improving the NetworkPolicy user experience for us over time.
+
+- We've documented a lot of the history of the Network Policy dialogue [here](https://github.com/jayunit100/network-policy-subproject/blob/master/history.md).
+- We've also polled users about what they'd like to see in the Network Policy API [here](https://github.com/jayunit100/network-policy-subproject/blob/master/p0_user_stories.md).
+
+We encourage anyone to provide us with feedback, but our most pressing issues right now
+involve finding *long-term owners to help us drive changes*.
+
+This doesn't require a lot of technical knowledge, but rather a long-term commitment to helping us stay organized, do paperwork,
+and iterate through the many stages of the K8s feature process. If you want to help us and get involved, please reach out on the SIG Network mailing list, or in the SIG Network channel on the Kubernetes (k8s.io) Slack!
+
+Anyone can put an oar in the water and help make NetworkPolicies better!
diff --git a/content/en/blog/_posts/2021-04-20-annotating-k8s-for-humans.md b/content/en/blog/_posts/2021-04-20-annotating-k8s-for-humans.md
new file mode 100644
index 0000000000000..155ff5a3b31f5
--- /dev/null
+++ b/content/en/blog/_posts/2021-04-20-annotating-k8s-for-humans.md
@@ -0,0 +1,100 @@
+---
+layout: blog
+title: 'Annotating Kubernetes Services for Humans'
+date: 2021-04-20
+slug: annotating-k8s-for-humans
+---
+
+**Author:** Richard Li, Ambassador Labs
+
+Have you ever been asked to troubleshoot a failing Kubernetes service and struggled to find basic information about the service, such as the source repository and owner?
+
+One of the problems as Kubernetes applications grow is the proliferation of services. As the number of services grows, developers start to specialize in working with specific services. When it comes to troubleshooting, however, developers need to be able to find the source, understand the service and its dependencies, and chat with the owning team for any service.
+
+## Human service discovery
+
+Troubleshooting always begins with information gathering. While much attention has been paid to centralizing machine data (e.g., logs, metrics), much less attention has been given to the human aspect of service discovery. Who owns a particular service? What Slack channel does the team work on? Where is the source for the service? What issues are currently known and being tracked?
+
+## Kubernetes annotations
+
+Kubernetes annotations are designed to solve exactly this problem. Oft-overlooked, annotations attach metadata to Kubernetes objects. The Kubernetes documentation says annotations can "attach arbitrary non-identifying metadata to objects." This means that annotations should be used for attaching metadata that is external to Kubernetes (i.e., metadata that Kubernetes won't use to identify objects). As such, annotations can contain any type of data. This is in contrast to labels, which are designed for uses internal to Kubernetes. As such, label structure and values are [constrained](/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) so they can be efficiently used by Kubernetes.
+
+## Kubernetes annotations in action
+
+Here is an example. Imagine you have a Kubernetes service for quoting, called the quote service. You can do the following:
+
+```
+kubectl annotate service quote a8r.io/owner="@sally"
+```
+
+In this example, we've just added an annotation called `a8r.io/owner` with the value of `@sally`. Now, we can use `kubectl describe` to get the information.
+
+```
+Name:              quote
+Namespace:         default
+Labels:            <none>
+Annotations:       a8r.io/owner: @sally
+Selector:          app=quote
+Type:              ClusterIP
+IP:                10.109.142.131
+Port:              http  80/TCP
+TargetPort:        8080/TCP
+Endpoints:         <none>
+Session Affinity:  None
+Events:            <none>
+```
+
+If you're practicing GitOps (and you should be!) you'll want to code these values directly into your Kubernetes manifest, e.g.,
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: quote
+  annotations:
+    a8r.io/owner: "@sally"
+spec:
+  ports:
+  - name: http
+    port: 80
+    targetPort: 8080
+  selector:
+    app: quote
+```
+
+## A Convention for Annotations
+
+Adopting a common convention for annotations ensures consistency and understandability. Typically, you'll want to attach the annotation to the service object, as services are the high-level resource that maps most clearly to a team's responsibility. Namespacing your annotations is also very important.
+Here is one set of conventions, documented at [a8r.io](https://a8r.io), and reproduced below. For a manifest that applies several of these conventions at once, see the example at the end of this post.
+
+{{< table caption="Annotation convention for human-readable services" >}}
+| Annotation | Description |
+| ------------------------------------------ | ------------------------------------------- |
+| `a8r.io/description` | Unstructured text description of the service for humans. |
+| `a8r.io/owner` | SSO username (GitHub), email address (linked to GitHub account), or unstructured owner description. |
+| `a8r.io/chat` | Slack channel, or link to external chat system. |
+| `a8r.io/bugs` | Link to external bug tracker. |
+| `a8r.io/logs` | Link to external log viewer. |
+| `a8r.io/documentation` | Link to external project documentation. |
+| `a8r.io/repository` | Link to external VCS repository. |
+| `a8r.io/support` | Link to external support center. |
+| `a8r.io/runbook` | Link to external project runbook. |
+| `a8r.io/incidents` | Link to external incident dashboard. |
+| `a8r.io/uptime` | Link to external uptime dashboard. |
+| `a8r.io/performance` | Link to external performance dashboard. |
+| `a8r.io/dependencies` | Unstructured text describing the service dependencies for humans. |
+{{< /table >}}
+
+## Visualizing annotations: Service Catalogs
+
+As the number of microservices and annotations proliferates, running `kubectl describe` can get tedious. Moreover, using `kubectl describe` requires every developer to have some direct access to the Kubernetes cluster. Over the past few years, service catalogs have gained greater visibility in the Kubernetes ecosystem. Popularized by tools such as [Shopify's ServicesDB](https://shopify.engineering/scaling-mobile-development-by-treating-apps-as-services) and [Spotify's System Z](https://dzone.com/articles/modeling-microservices-at-spotify-with-petter-mari), service catalogs are internally-facing developer portals that present critical information about microservices.
+
+Note that these service catalogs should not be confused with the [Kubernetes Service Catalog project](https://svc-cat.io/). Built on the Open Service Broker API, the Kubernetes Service Catalog enables Kubernetes operators to plug in different services (e.g., databases) to their cluster.
+
+## Annotate your services now and thank yourself later
+
+Much like implementing observability within microservice systems, you often don't realize that you need human service discovery until it's too late. Don't wait until something is on fire in production to start wishing you had implemented better metrics and also documented how to get in touch with the part of your organization that looks after it.
+
+There are enormous benefits to building an effective "version 0" service: a [_dancing skeleton_](https://containerjournal.com/topics/container-management/dancing-skeleton-apis-and-microservices/) application with a thin slice of complete functionality that can be deployed to production with a minimal yet effective continuous delivery pipeline.
+
+Adding service annotations should be an essential part of your "version 0" for all of your services. Add them now, and you'll thank yourself later.
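+
+To make the conventions above concrete, here is what several of them can look like together in a single Service manifest. This is a sketch: the chat channel, URLs, and description are placeholders you would replace with your own team's details.
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: quote
+  annotations:
+    a8r.io/owner: "@sally"
+    # Placeholder values below; substitute your own channel, repo, and runbook
+    a8r.io/chat: "#quote-team"
+    a8r.io/repository: "https://github.com/example/quote"
+    a8r.io/runbook: "https://wiki.example.com/quote-runbook"
+    a8r.io/description: "Returns a random quote over HTTP."
+spec:
+  ports:
+  - name: http
+    port: 80
+    targetPort: 8080
+  selector:
+    app: quote
+```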
diff --git a/content/en/blog/_posts/2021-04-21-Graceful-Node-Shutdown-Beta.md b/content/en/blog/_posts/2021-04-21-Graceful-Node-Shutdown-Beta.md
new file mode 100644
index 0000000000000..2c169674723cb
--- /dev/null
+++ b/content/en/blog/_posts/2021-04-21-Graceful-Node-Shutdown-Beta.md
@@ -0,0 +1,80 @@
+---
+layout: blog
+title: 'Graceful Node Shutdown Goes Beta'
+date: 2021-04-21
+slug: graceful-node-shutdown-beta
+---
+
+**Authors:** David Porter (Google), Mrunal Patel (Red Hat), and Tim Bannister (The Scale Factory)
+
+Graceful node shutdown, beta in 1.21, enables the kubelet to gracefully evict pods during a node shutdown.
+
+Kubernetes is a distributed system, and as such we need to be prepared for inevitable failures: nodes will fail, containers might crash or be restarted, and, ideally, your workloads will be able to withstand these catastrophic events.
+
+One of the common classes of issues is workload failure on node shutdown or restart. The best practice prior to bringing your node down is to [safely drain and cordon your node](/docs/tasks/administer-cluster/safely-drain-node/). This will ensure that all pods running on this node can safely be evicted. An eviction will ensure your pods can follow the expected [pod termination lifecycle](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination), meaning your containers receive a SIGTERM and/or run their `preStop` hooks.
+
+Prior to Kubernetes 1.20 (when graceful node shutdown was introduced as an alpha feature), safe node draining was not easy: it required users to manually take action and drain the node beforehand. If someone or something shut down your node without draining it first, your pods would most likely not be safely evicted and would instead shut down abruptly. Other services talking to those pods might see errors due to the pods exiting abruptly. Examples of this situation include a reboot to apply security patches, or the preemption of short-lived cloud compute instances.
+
+Kubernetes 1.21 brings graceful node shutdown to beta. Graceful node shutdown gives you more control over some of those unexpected shutdown situations. With graceful node shutdown, the kubelet is aware of underlying system shutdown events and can propagate these events to pods, ensuring containers can shut down as gracefully as possible. This gives the containers a chance to checkpoint their state or release back any resources they are holding.
+
+Note that, even with graceful node shutdown, you should still design your deployments to be resilient to node failures for the best availability.
+
+## How does it work?
+On Linux, your system can shut down in many different situations. For example:
+* A user or script running `shutdown -h now` or `systemctl poweroff` or `systemctl reboot`.
+* Physically pressing a power button on the machine.
+* Stopping a VM instance on a cloud provider, e.g. `gcloud compute instances stop` on GCP.
+* A Preemptible VM or Spot Instance that your cloud provider can terminate unexpectedly, but with a brief warning.
+
+Many of these situations can be unexpected, and there is no guarantee that a cluster administrator drained the node prior to these events. With the graceful node shutdown feature, the kubelet uses a systemd mechanism called ["Inhibitor Locks"](https://www.freedesktop.org/wiki/Software/systemd/inhibit) to allow draining in most cases.
+Using Inhibitor Locks, the kubelet instructs systemd to postpone system shutdown for a specified duration, giving the node a chance to drain and evict pods on the system.
+
+The kubelet makes use of this mechanism to ensure your pods will be terminated cleanly. When the kubelet starts, it acquires a systemd delay-type inhibitor lock. When the system is about to shut down, the kubelet can delay that shutdown for a configurable, short duration using the delay-type inhibitor lock it acquired earlier. This gives your pods extra time to terminate. As a result, even during unexpected shutdowns, your application will receive a SIGTERM, [preStop hooks](/docs/concepts/containers/container-lifecycle-hooks/#container-hooks) will execute, and the kubelet will properly update the node's `Ready` condition and the respective pod statuses in the API server.
+
+For example, on a node with graceful node shutdown enabled, you can see that the inhibitor lock is taken by the kubelet:
+
+```
+kubelet-node ~ # systemd-inhibit --list
+    Who: kubelet (UID 0/root, PID 1515/kubelet)
+    What: shutdown
+    Why: Kubelet needs time to handle node shutdown
+    Mode: delay
+
+1 inhibitors listed.
+```
+
+One important consideration in designing this feature is that not all pods are created equal. For example, some of the pods running on a node, such as a logging-related DaemonSet, should stay running as long as possible to capture important logs during the shutdown itself. As a result, pods are split into two categories: "regular" and "critical". [Critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical) are those that have `priorityClassName` set to `system-cluster-critical` or `system-node-critical`; all other pods are considered regular.
+
+In our example, the logging DaemonSet would run as a critical pod. During the graceful node shutdown, regular pods are terminated first, followed by critical pods. This would allow, for example, a critical pod associated with a logging DaemonSet to continue functioning and collecting logs during the termination of regular pods.
+
+We will evaluate during the beta phase whether we need more flexibility for different pod priority classes, and will add support if needed; please let us know if you have some scenarios in mind.
+
+## How do I use it?
+
+Graceful node shutdown is controlled with the `GracefulNodeShutdown` [feature gate](/docs/reference/command-line-tools-reference/feature-gates) and is enabled by default in Kubernetes 1.21.
+
+You can configure the graceful node shutdown behavior using two kubelet configuration options: `ShutdownGracePeriod` and `ShutdownGracePeriodCriticalPods`. To configure these options, you edit the kubelet configuration file that is passed to the kubelet via the `--config` flag; for more details, refer to [Set kubelet parameters via a configuration file](/docs/tasks/administer-cluster/kubelet-config-file/).
+
+During a shutdown, the kubelet terminates pods in two phases. You can configure how long each of these phases lasts.
+1. Terminate regular pods running on the node.
+2. Terminate critical pods running on the node.
+
+The settings that control the duration of shutdown are:
+* `ShutdownGracePeriod`
+  * Specifies the total duration that the node should delay the shutdown by. This is the total grace period for pod termination, for both regular and critical pods.
+* `ShutdownGracePeriodCriticalPods`
+  * Specifies the duration used to terminate critical pods during a node shutdown. This should be less than `ShutdownGracePeriod`.
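+
+For example, a kubelet configuration file that turns this on might look like the following. This is a minimal sketch: the durations are illustrative, and the field names are the camelCase forms used in the `KubeletConfiguration` v1beta1 API.
+
+```yaml
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# Delay node shutdown by 30s in total; the last 10s are reserved for critical pods
+shutdownGracePeriod: "30s"
+shutdownGracePeriodCriticalPods: "10s"
+```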
+
+With those values, if `ShutdownGracePeriod=30s` and `ShutdownGracePeriodCriticalPods=10s`, the kubelet will delay the node shutdown by 30 seconds. During this time, the first 20 seconds (30-10) would be reserved for gracefully terminating normal pods, and the last 10 seconds would be reserved for terminating critical pods.
+
+Note that by default, both configuration options described above, `ShutdownGracePeriod` and `ShutdownGracePeriodCriticalPods`, are set to zero, so you will need to configure them as appropriate for your environment to activate graceful node shutdown functionality.
+
+## How can I learn more?
+* Read the [documentation](/docs/concepts/architecture/nodes/#graceful-node-shutdown)
+* Read the enhancement proposal, [KEP 2000](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2000-graceful-node-shutdown)
+* View the [code](https://github.com/kubernetes/kubernetes/tree/release-1.21/pkg/kubelet/nodeshutdown)
+
+## How do I get involved?
+Your feedback is always welcome! SIG Node meets regularly and can be reached via [Slack](https://slack.k8s.io) (channel `#sig-node`), or the SIG's [mailing list](https://github.com/kubernetes/community/tree/master/sig-node#contact).
diff --git a/content/en/blog/_posts/2021-04-22-gateway-api/gateway-api-resources.png b/content/en/blog/_posts/2021-04-22-gateway-api/gateway-api-resources.png
new file mode 100644
index 0000000000000..ef589bc9144e0
Binary files /dev/null and b/content/en/blog/_posts/2021-04-22-gateway-api/gateway-api-resources.png differ
diff --git a/content/en/blog/_posts/2021-04-22-gateway-api/httproute.png b/content/en/blog/_posts/2021-04-22-gateway-api/httproute.png
new file mode 100644
index 0000000000000..4dc0cb9457f2f
Binary files /dev/null and b/content/en/blog/_posts/2021-04-22-gateway-api/httproute.png differ
diff --git a/content/en/blog/_posts/2021-04-22-gateway-api/index.md b/content/en/blog/_posts/2021-04-22-gateway-api/index.md
new file mode 100644
index 0000000000000..a7d54ad6450fc
--- /dev/null
+++ b/content/en/blog/_posts/2021-04-22-gateway-api/index.md
@@ -0,0 +1,197 @@
+
+---
+layout: blog
+title: 'Evolving Kubernetes networking with the Gateway API'
+date: 2021-04-22
+slug: evolving-kubernetes-networking-with-the-gateway-api
+---
+
+**Authors:** Mark Church (Google), Harry Bagdi (Kong), Daneyon Hansen (Red Hat), Nick Young (VMware), Manuel Zapf (Traefik Labs)
+
+The Ingress resource is one of the many Kubernetes success stories. It created a [diverse ecosystem of Ingress controllers](/docs/concepts/services-networking/ingress-controllers/) which were used across hundreds of thousands of clusters in a standardized and consistent way. This standardization helped users adopt Kubernetes. However, five years after the creation of Ingress, there are signs of fragmentation into different but [strikingly similar CRDs](https://dave.cheney.net/paste/ingress-is-dead-long-live-ingressroute.pdf) and [overloaded annotations](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/). The same portability that made Ingress pervasive also limited its future.
+
+It was at KubeCon San Diego 2019 that a passionate group of contributors gathered to discuss the [evolution of Ingress](https://static.sched.com/hosted_files/kccncna19/a5/Kubecon%20San%20Diego%202019%20-%20Evolving%20the%20Kubernetes%20Ingress%20APIs%20to%20GA%20and%20Beyond%20%5BPUBLIC%5D.pdf).
+The discussion overflowed to the hotel lobby across the street, and what came out of it would later be known as the [Gateway API](https://gateway-api.sigs.k8s.io). This discussion was based on a few key assumptions:
+
+1. The API standards underlying route matching, traffic management, and service exposure are commoditized and provide little value to their implementers and users as custom APIs.
+2. It's possible to represent L4/L7 routing and traffic management through common core API resources.
+3. It's possible to provide extensibility for more complex capabilities in a way that does not sacrifice the user experience of the core API.
+
+## Introducing the Gateway API
+
+This led to design principles that allow the Gateway API to improve upon Ingress:
+
+- **Expressiveness** - In addition to HTTP host/path matching and TLS, the Gateway API can express capabilities like HTTP header manipulation, traffic weighting & mirroring, TCP/UDP routing, and other capabilities that were only possible in Ingress through custom annotations.
+- **Role-oriented design** - The API resource model reflects the separation of responsibilities that is common in routing and Kubernetes service networking.
+- **Extensibility** - The resources allow arbitrary configuration attachment at various layers within the API. This makes granular customization possible at the most appropriate places.
+- **Flexible conformance** - The Gateway API defines varying conformance levels - core (mandatory support), extended (portable if supported), and custom (no portability guarantee), known together as [flexible conformance](https://gateway-api.sigs.k8s.io/concepts/guidelines/#conformance). This promotes a highly portable core API (like Ingress) that still gives flexibility for Gateway controller implementers.
+
+### What does the Gateway API look like?
+
+The Gateway API introduces a few new resource types (a minimal GatewayClass manifest follows this list):
+
+- **[GatewayClasses](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.GatewayClass)** are cluster-scoped resources that act as templates to explicitly define behavior for Gateways derived from them. This is similar in concept to StorageClasses, but for networking data-planes.
+- **[Gateways](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.Gateway)** are the deployed instances of GatewayClasses. They are the logical representation of the data-plane which performs routing, which may be in-cluster proxies, hardware LBs, or cloud LBs.
+- **Routes** are not a single resource, but represent many different protocol-specific Route resources. The [HTTPRoute](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.HTTPRoute) has matching, filtering, and routing rules that get applied to Gateways that can process HTTP and HTTPS traffic. Similarly, there are [TCPRoutes](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.TCPRoute), [UDPRoutes](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.UDPRoute), and [TLSRoutes](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.TLSRoute) which also have protocol-specific semantics. This model also allows the Gateway API to incrementally expand its protocol support in the future.
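+
+As a taste of the syntax, here is a minimal GatewayClass sketch. The `acme-lb` name matches the Gateway example later in this post, and the controller name is a placeholder that a real implementation would define:
+
+```yaml
+kind: GatewayClass
+apiVersion: networking.x-k8s.io/v1alpha1
+metadata:
+  name: acme-lb
+spec:
+  # Placeholder: the controller implementation that manages Gateways of this class
+  controller: acme.io/gateway-controller
+```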
+
+![The resources of the Gateway API](gateway-api-resources.png)
+
+### Gateway Controller Implementations
+
+The good news is that although the Gateway API is in [Alpha](https://github.com/kubernetes-sigs/gateway-api/releases), there are already several [Gateway controller implementations](https://gateway-api.sigs.k8s.io/references/implementations/) that you can run. Since it's a standardized spec, the following example could be run on any of them and should function in exactly the same way. Check out [getting started](https://gateway-api.sigs.k8s.io/guides/getting-started/) to see how to install and use one of these Gateway controllers.
+
+## Getting Hands-on with the Gateway API
+
+In the following example, we'll demonstrate the relationships between the different API resources and walk you through a common use case:
+
+* Team foo has their app deployed in the foo Namespace. They need to control the routing logic for the different pages of their app.
+* Team bar is running in the bar Namespace. They want to be able to do blue-green rollouts of their application to reduce risk.
+* The platform team is responsible for managing the load balancer and network security of all the apps in the Kubernetes cluster.
+
+The following foo-route does path matching to various Services in the foo Namespace and also has a default route to a 404 server. This exposes the foo-auth and foo-home Services via `foo.example.com/login` and `foo.example.com/home` respectively:
+
+```yaml
+kind: HTTPRoute
+apiVersion: networking.x-k8s.io/v1alpha1
+metadata:
+  name: foo-route
+  namespace: foo
+  labels:
+    gateway: external-https-prod
+spec:
+  hostnames:
+  - "foo.example.com"
+  rules:
+  - matches:
+    - path:
+        type: Prefix
+        value: /login
+    forwardTo:
+    - serviceName: foo-auth
+      port: 8080
+  - matches:
+    - path:
+        type: Prefix
+        value: /home
+    forwardTo:
+    - serviceName: foo-home
+      port: 8080
+  - matches:
+    - path:
+        type: Prefix
+        value: /
+    forwardTo:
+    - serviceName: foo-404
+      port: 8080
+```
+
+The bar team, operating in the bar Namespace of the same Kubernetes cluster, also wishes to expose their application to the internet, but they want to control their own canary and blue-green rollouts. The following HTTPRoute is configured for the following behavior:
+
+* For traffic to `bar.example.com`:
+  * Send 90% of the traffic to bar-v1
+  * Send 10% of the traffic to bar-v2
+* For traffic to `bar.example.com` with the HTTP header `env: canary`:
+  * Send all the traffic to bar-v2
+
+![The routing rules configured for the bar-v1 and bar-v2 Services](httproute.png)
+
+```yaml
+kind: HTTPRoute
+apiVersion: networking.x-k8s.io/v1alpha1
+metadata:
+  name: bar-route
+  namespace: bar
+  labels:
+    gateway: external-https-prod
+spec:
+  hostnames:
+  - "bar.example.com"
+  rules:
+  - forwardTo:
+    - serviceName: bar-v1
+      port: 8080
+      weight: 90
+    - serviceName: bar-v2
+      port: 8080
+      weight: 10
+  - matches:
+    - headers:
+        values:
+          env: canary
+    forwardTo:
+    - serviceName: bar-v2
+      port: 8080
+```
+
+### Route and Gateway Binding
+
+So we have two HTTPRoutes matching and routing traffic to different Services. You might be wondering, where are these Services accessible? Through which networks or IPs are they exposed?
+
+How Routes are exposed to clients is governed by [Route binding](https://gateway-api.sigs.k8s.io/concepts/api-overview/#route-binding), which describes how Routes and Gateways create a bidirectional relationship between each other.
+When Routes are bound to a Gateway, their collective routing rules are configured on the underlying load balancers or proxies, and the Routes become accessible through the Gateway. Thus, a Gateway is a logical representation of a networking data plane that can be configured through Routes.
+
+![How Routes bind with Gateways](route-binding.png)
+
+### Administrative Delegation
+
+The split between Gateway and Route resources allows the cluster administrator to delegate some of the routing configuration to individual teams while still retaining centralized control. The following Gateway resource exposes HTTPS on port 443 and terminates all traffic on the port with a certificate controlled by the cluster administrator.
+
+```yaml
+kind: Gateway
+apiVersion: networking.x-k8s.io/v1alpha1
+metadata:
+  name: prod-web
+spec:
+  gatewayClassName: acme-lb
+  listeners:
+  - protocol: HTTPS
+    port: 443
+    routes:
+      kind: HTTPRoute
+      selector:
+        matchLabels:
+          gateway: external-https-prod
+      namespaces:
+        from: All
+    tls:
+      certificateRef:
+        name: admin-controlled-cert
+```
+
+The following HTTPRoute shows how the Route can ensure it matches the Gateway's selector via its `kind` (HTTPRoute) and resource labels (`gateway=external-https-prod`).
+
+```yaml
+# Matches the required kind selector on the Gateway
+kind: HTTPRoute
+apiVersion: networking.x-k8s.io/v1alpha1
+metadata:
+  name: foo-route
+  namespace: foo-ns
+  labels:
+    # Matches the required label selector on the Gateway
+    gateway: external-https-prod
+...
+```
+
+### Role Oriented Design
+
+When you put it all together, you have a single load balancing infrastructure that can be safely shared by multiple teams. The Gateway API is not only a more expressive API for advanced routing, but also a role-oriented API, designed for multi-tenant infrastructure. Its extensibility ensures that it will evolve for future use-cases while preserving portability. Ultimately, these characteristics will allow the Gateway API to adapt to different organizational models and implementations well into the future.
+
+### Try it out and get involved
+
+There are many resources to check out to learn more.
+
+* Check out the [user guides](https://gateway-api.sigs.k8s.io/guides/getting-started/) to see what use-cases can be addressed.
+* Try out one of the [existing Gateway controllers](https://gateway-api.sigs.k8s.io/references/implementations/).
+* Or [get involved](https://gateway-api.sigs.k8s.io/contributing/community/) and help design and influence the future of Kubernetes service networking!
\ No newline at end of file
diff --git a/content/en/blog/_posts/2021-04-22-gateway-api/route-binding.png b/content/en/blog/_posts/2021-04-22-gateway-api/route-binding.png
new file mode 100644
index 0000000000000..4e42643316584
Binary files /dev/null and b/content/en/blog/_posts/2021-04-22-gateway-api/route-binding.png differ
diff --git a/content/en/blog/_posts/2021-04-23-metrics-stability-ga/index.md b/content/en/blog/_posts/2021-04-23-metrics-stability-ga/index.md
new file mode 100644
index 0000000000000..686016e3c5cef
--- /dev/null
+++ b/content/en/blog/_posts/2021-04-23-metrics-stability-ga/index.md
@@ -0,0 +1,80 @@
+---
+layout: blog
+title: 'Kubernetes 1.21: Metrics Stability hits GA'
+date: 2021-04-23
+slug: kubernetes-release-1.21-metrics-stability-ga
+---
+
+**Authors**: Han Kang (Google), Elana Hashman (Red Hat)
+
+Kubernetes 1.21 marks the graduation of the metrics stability framework and, along with it, the first officially supported stable metrics.
+Not only do stable metrics come with supportability guarantees; the metrics stability framework also brings escape hatches that you can use if you encounter problematic metrics.
+
+See the list of [stable Kubernetes metrics](https://github.com/kubernetes/kubernetes/blob/master/test/instrumentation/testdata/stable-metrics-list.yaml).
+
+### What are stable metrics and why do we need them?
+A stable metric is one which, from a consumption point of view, can be reliably consumed across a number of Kubernetes versions without risk of ingestion failure.
+
+Metrics stability is an ongoing community concern. Cluster monitoring infrastructure often assumes the stability of some control plane metrics, so we have introduced a mechanism for versioning metrics as a proper API, with stability guarantees around a formal metrics deprecation process.
+
+### What are the stability levels for metrics?
+
+Metrics can currently have one of two stability levels: alpha or stable.
+
+_Alpha metrics_ have no stability guarantees; as such, they can be modified or deleted at any time. At this time, all Kubernetes metrics implicitly fall into this category.
+
+_Stable metrics_ are guaranteed to not change, except that the metric may become marked deprecated for a future Kubernetes version. By "not change", we mean three things:
+
+1. the metric itself will not be deleted or renamed
+2. the type of metric will not be modified
+3. no labels can be added or removed from this metric
+
+From an ingestion point of view, it is backwards-compatible to add or remove possible values for labels which already exist, but not labels themselves. Therefore, adding or removing values from an existing label is permitted. Stable metrics can also be marked as deprecated for a future Kubernetes version, since this is tracked in a metadata field and does not actually change the metric itself.
+
+Removing or adding labels on stable metrics is not permitted. In order to add or remove a label from an existing stable metric, one would have to introduce a new metric and deprecate the stable one; otherwise this would violate compatibility agreements.
+
+#### How are metrics deprecated?
+
+While deprecation policies only affect stability guarantees for stable metrics (and not alpha ones), deprecation information may be optionally provided on alpha metrics to help component owners inform users of future intent and assist with transition plans.
+
+A stable metric undergoing the deprecation process signals that the metric will eventually be deleted. The metrics deprecation lifecycle looks roughly like this (with each stage representing a Kubernetes release):
+
+![Stable metric → Deprecated metric → Hidden metric → Deletion](lifecycle-metric.png)
+
+_Deprecated metrics_ have the same stability guarantees as their stable counterparts. If a stable metric is deprecated, then a deprecated stable metric is guaranteed to not change. When deprecating a stable metric, a future Kubernetes release is specified as the point from which the metric will be considered deprecated.
+
+Deprecated metrics will have their description text prefixed with a deprecation notice string "(Deprecated from x.y)", and a warning log will be emitted during metric registration, in the spirit of the official Kubernetes deprecation policy.
+
+Like their stable metric counterparts, deprecated metrics will be automatically registered to the metrics endpoint. On a subsequent release (when the metric's `deprecatedVersion` is equal to _current\_kubernetes\_version - 4_), a deprecated metric will become a _hidden_ metric. _Hidden metrics_ are not automatically registered, and hence are hidden by default from end users. These hidden metrics can be explicitly re-enabled for one release after they reach the hidden state, to provide a migration path for cluster operators.
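+
+Re-enabling hidden metrics is done via a command-line flag on the affected component. As a sketch, assuming the kubelet is the component in question and that the metrics you rely on were hidden as of release 1.20 (the flag only accepts the previous minor version as its value):
+
+```
+# Temporarily re-register metrics hidden since 1.20, buying one release
+# to migrate dashboards and alerts away from them
+kubelet --show-hidden-metrics-for-version=1.20
+```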
+
+#### As an owner of a Kubernetes component, how do I add stable metrics?
+
+During metric instantiation, stability can be specified by setting the metadata field `StabilityLevel` to `Stable`. When a `StabilityLevel` is not explicitly set, metrics default to `Alpha` stability. Note that metrics which have fields determined at runtime cannot be marked as Stable. Stable metrics will be detected by static analysis during the pre-commit phase, and must be reviewed by SIG Instrumentation.
+
+```golang
+var metricDefinition = kubemetrics.CounterOpts{
+    Name: "some_metric",
+    Help: "some description",
+    StabilityLevel: kubemetrics.STABLE,
+}
+```
+For more examples of setting metrics stability and deprecation, see the [Metrics Stability KEP](http://bit.ly/metrics-stability).
+
+### How do I get involved?
+
+This project, like all of Kubernetes, is the result of hard work by many contributors from diverse backgrounds working together.
+We offer a huge thank you to all the contributors in the Kubernetes community who helped review the design and implementation of the project, including but not limited to the following:
+
+- Han Kang (logicalhan)
+- Frederic Branczyk (brancz)
+- Marek Siarkowicz (serathius)
+- Elana Hashman (ehashman)
+- Solly Ross (DirectXMan12)
+- Stefan Schimanski (sttts)
+- David Ashpole (dashpole)
+- Yuchen Zhou (yoyinzyc)
+- Yu Yi (erain)
+
+If you're interested in getting involved with the design and development of instrumentation or any part of the Kubernetes metrics system, join the [Kubernetes Instrumentation Special Interest Group (SIG)](https://github.com/kubernetes/community/tree/master/sig-instrumentation). We're rapidly growing and always welcome new contributors.
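+
+As a closing illustration, here is a sketch of what the stability level looks like when you scrape a component's metrics endpoint, using the `some_metric` counter registered above; the framework injects the stability level into the HELP text (the exact description varies per metric):
+
+```
+# HELP some_metric [STABLE] some description
+# TYPE some_metric counter
+some_metric 0
+```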
diff --git a/content/en/blog/_posts/2021-04-23-metrics-stability-ga/lifecycle-metric.png b/content/en/blog/_posts/2021-04-23-metrics-stability-ga/lifecycle-metric.png new file mode 100644 index 0000000000000..7618a98c5c7c6 Binary files /dev/null and b/content/en/blog/_posts/2021-04-23-metrics-stability-ga/lifecycle-metric.png differ diff --git a/content/en/case-studies/adform/adform_featured_logo.svg b/content/en/case-studies/adform/adform_featured_logo.svg index ce058af82ed09..b31ef3235a471 100644 --- a/content/en/case-studies/adform/adform_featured_logo.svg +++ b/content/en/case-studies/adform/adform_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/adidas/adidas-featured.svg b/content/en/case-studies/adidas/adidas-featured.svg index 07e595356aa78..a53d4675522fe 100644 --- a/content/en/case-studies/adidas/adidas-featured.svg +++ b/content/en/case-studies/adidas/adidas-featured.svg @@ -1 +1 @@ -kubernetes.io-54664 \ No newline at end of file +kubernetes.io-54664 \ No newline at end of file diff --git a/content/en/case-studies/amadeus/amadeus_featured.svg b/content/en/case-studies/amadeus/amadeus_featured.svg index 6b711f9baeccb..9d0c40b8e18c3 100644 --- a/content/en/case-studies/amadeus/amadeus_featured.svg +++ b/content/en/case-studies/amadeus/amadeus_featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/ancestry/ancestry_featured.svg b/content/en/case-studies/ancestry/ancestry_featured.svg index 301e6fec92327..9a3e80186b0be 100644 --- a/content/en/case-studies/ancestry/ancestry_featured.svg +++ b/content/en/case-studies/ancestry/ancestry_featured.svg @@ -1 +1 @@ -kubernetes.io-logos-ancestry \ No newline at end of file +kubernetes.io-logos-ancestry \ No newline at end of file diff --git a/content/en/case-studies/ant-financial/ant-financial_featured_logo.svg b/content/en/case-studies/ant-financial/ant-financial_featured_logo.svg index 1d20786a5d2ef..4eb8a51127ea2 100644 --- a/content/en/case-studies/ant-financial/ant-financial_featured_logo.svg +++ b/content/en/case-studies/ant-financial/ant-financial_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/appdirect/appdirect_featured_logo.svg b/content/en/case-studies/appdirect/appdirect_featured_logo.svg index d655c7f2fa0de..36fcba1abba36 100644 --- a/content/en/case-studies/appdirect/appdirect_featured_logo.svg +++ b/content/en/case-studies/appdirect/appdirect_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/babylon/babylon_featured_logo.svg b/content/en/case-studies/babylon/babylon_featured_logo.svg index 8bea0b8fc3392..e84da19268e33 100644 --- a/content/en/case-studies/babylon/babylon_featured_logo.svg +++ b/content/en/case-studies/babylon/babylon_featured_logo.svg @@ -1 +1 @@ -babylon_featured_logo \ No newline at end of file +babylon_featured_logo \ No newline at end of file diff --git a/content/en/case-studies/blablacar/blablacar_featured.svg b/content/en/case-studies/blablacar/blablacar_featured.svg index f66f6ca95485d..5b887f24a8722 100644 --- a/content/en/case-studies/blablacar/blablacar_featured.svg +++ b/content/en/case-studies/blablacar/blablacar_featured.svg @@ -1 +1 @@ 
-kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/blackrock/blackrock_featured.svg b/content/en/case-studies/blackrock/blackrock_featured.svg index d70c169bc8a12..f98ea323d73e1 100644 --- a/content/en/case-studies/blackrock/blackrock_featured.svg +++ b/content/en/case-studies/blackrock/blackrock_featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/booking-com/booking.com_featured_logo.svg b/content/en/case-studies/booking-com/booking.com_featured_logo.svg index 298c77c773922..0b245c27001af 100644 --- a/content/en/case-studies/booking-com/booking.com_featured_logo.svg +++ b/content/en/case-studies/booking-com/booking.com_featured_logo.svg @@ -1 +1 @@ -booking.com_featured_logo \ No newline at end of file +booking.com_featured_logo \ No newline at end of file diff --git a/content/en/case-studies/booz-allen/booz-allen-featured-logo.svg b/content/en/case-studies/booz-allen/booz-allen-featured-logo.svg index b844d998462b0..3ce58c68f7858 100644 --- a/content/en/case-studies/booz-allen/booz-allen-featured-logo.svg +++ b/content/en/case-studies/booz-allen/booz-allen-featured-logo.svg @@ -1 +1 @@ -booz-allen-featured \ No newline at end of file +booz-allen-featured \ No newline at end of file diff --git a/content/en/case-studies/bose/bose_featured_logo.svg b/content/en/case-studies/bose/bose_featured_logo.svg index 13f1ff261622c..58b2add6144b8 100644 --- a/content/en/case-studies/bose/bose_featured_logo.svg +++ b/content/en/case-studies/bose/bose_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/box/box_featured.svg b/content/en/case-studies/box/box_featured.svg index c4ebadaffe526..2b4fb6552b610 100644 --- a/content/en/case-studies/box/box_featured.svg +++ b/content/en/case-studies/box/box_featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/buffer/buffer_featured.svg b/content/en/case-studies/buffer/buffer_featured.svg index 6527f94f4d64c..b8e321f31d633 100644 --- a/content/en/case-studies/buffer/buffer_featured.svg +++ b/content/en/case-studies/buffer/buffer_featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/capital-one/capitalone_featured_logo.svg b/content/en/case-studies/capital-one/capitalone_featured_logo.svg index 28552e569c441..124adae9af21e 100644 --- a/content/en/case-studies/capital-one/capitalone_featured_logo.svg +++ b/content/en/case-studies/capital-one/capitalone_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/chinaunicom/chinaunicom_featured_logo.svg b/content/en/case-studies/chinaunicom/chinaunicom_featured_logo.svg index a1b39cc109112..aae1978cf2129 100644 --- a/content/en/case-studies/chinaunicom/chinaunicom_featured_logo.svg +++ b/content/en/case-studies/chinaunicom/chinaunicom_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/city-of-montreal/city-of-montreal_featured_logo.svg 
b/content/en/case-studies/city-of-montreal/city-of-montreal_featured_logo.svg index 1d90a6536f014..44ac9b0b1d9fa 100644 --- a/content/en/case-studies/city-of-montreal/city-of-montreal_featured_logo.svg +++ b/content/en/case-studies/city-of-montreal/city-of-montreal_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/crowdfire/crowdfire_featured_logo.svg b/content/en/case-studies/crowdfire/crowdfire_featured_logo.svg index 3c7a565f11563..a4f020161a784 100644 --- a/content/en/case-studies/crowdfire/crowdfire_featured_logo.svg +++ b/content/en/case-studies/crowdfire/crowdfire_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/denso/denso_featured_logo.svg b/content/en/case-studies/denso/denso_featured_logo.svg index 375d9cefbc091..e2b26b2c8c30c 100644 --- a/content/en/case-studies/denso/denso_featured_logo.svg +++ b/content/en/case-studies/denso/denso_featured_logo.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/content/en/case-studies/golfnow/golfnow_featured.svg b/content/en/case-studies/golfnow/golfnow_featured.svg index 761782a7563c2..b5b42d6fcdc08 100644 --- a/content/en/case-studies/golfnow/golfnow_featured.svg +++ b/content/en/case-studies/golfnow/golfnow_featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/haufegroup/haufegroup_featured.svg b/content/en/case-studies/haufegroup/haufegroup_featured.svg index b552d117739db..a61b577ab884e 100644 --- a/content/en/case-studies/haufegroup/haufegroup_featured.svg +++ b/content/en/case-studies/haufegroup/haufegroup_featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/huawei/huawei_featured.svg b/content/en/case-studies/huawei/huawei_featured.svg index 860f62dd4e7a9..a8a8f22c8f9a1 100644 --- a/content/en/case-studies/huawei/huawei_featured.svg +++ b/content/en/case-studies/huawei/huawei_featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/ibm/ibm_featured_logo.svg b/content/en/case-studies/ibm/ibm_featured_logo.svg index 577d8e97d960d..f79fd7847bf7c 100644 --- a/content/en/case-studies/ibm/ibm_featured_logo.svg +++ b/content/en/case-studies/ibm/ibm_featured_logo.svg @@ -1 +1 @@ -ibm_featured_logo \ No newline at end of file +ibm_featured_logo \ No newline at end of file diff --git a/content/en/case-studies/ing/ing_featured_logo.svg b/content/en/case-studies/ing/ing_featured_logo.svg index 20418a0dbfcbf..5a2df497c7bb0 100644 --- a/content/en/case-studies/ing/ing_featured_logo.svg +++ b/content/en/case-studies/ing/ing_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/naic/naic_featured_logo.svg b/content/en/case-studies/naic/naic_featured_logo.svg index 100d158b9bd1c..b4af63931dbb4 100644 --- a/content/en/case-studies/naic/naic_featured_logo.svg +++ b/content/en/case-studies/naic/naic_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git 
a/content/en/case-studies/nav/nav_featured_logo.svg b/content/en/case-studies/nav/nav_featured_logo.svg index 79ae4384598c1..42b4ffa9674d7 100644
[SVG image diffs elided: this part of the patch updates the featured-logo SVGs under content/en/case-studies/ for nav, nerdalize, netease, newyorktimes, nokia, nordstrom, northwestern-mutual, ocado, openAI, peardeck, pingcap, pinterest, prowise, ricardo-ch, slamtec, slingtv, spotify, squarespace, thredup, vsco, wikimedia, wink, woorank, workiva, yahoo-japan, ygrene, and zalando. Only each file's title text ("kubernetes.io-logos", "kubernetes.io-logos2", or "nokia") survived extraction; the image markup itself is not recoverable.]
diff --git a/content/en/community/_index.html b/content/en/community/_index.html index e1ebb9e9cb1ac..ad9cab5d945a3 100644 --- a/content/en/community/_index.html +++ b/content/en/community/_index.html @@ -19,6 +19,7 @@
+Community Values
 Code of conduct
 Videos
 Discussions
@@ -41,10 +42,28 @@ Kubernetes Conference Gallery
Kubernetes Conference Gallery
[HTML markup not recoverable: this hunk adds a "Community Values" banner to the community page whose visible text follows.]
+Community Values
+The Kubernetes Community values are the keystone to the ongoing success of the project.
+These principles guide every aspect of the Kubernetes project.
+READ MORE
diff --git a/content/en/community/static/community-values.md b/content/en/community/static/community-values.md new file mode 100644 index 0000000000000..f6469a3e61ad2 --- /dev/null +++ b/content/en/community/static/community-values.md @@ -0,0 +1,28 @@
+
+
+# Kubernetes Community Values
+
+Kubernetes Community culture is frequently cited as a substantial contributor to the meteoric rise of this Open Source project. Below are the distilled values that have evolved over the years in our community, pushing our project and peers toward constant improvement.
+
+## Distribution is better than centralization
+
+The scale of the Kubernetes project is only viable through high-trust and high-visibility distribution of work, which includes delegation of authority, decision making, technical design, code ownership, and documentation. Distributed asynchronous ownership, collaboration, communication and decision making are the cornerstone of our world-wide community.
+
+## Community over product or company
+
+We are here as a community first; our allegiance is to the intentional stewardship of the Kubernetes project for the benefit of all its members and users everywhere. We support working together publicly for the common goal of a vibrant interoperable ecosystem providing an excellent experience for our users. Individuals gain status through work; companies gain status through their commitments to support this community and fund the resources necessary for the project to operate.
+
+## Automation over process
+
+Large projects have a lot of less exciting yet hard work. We value time spent automating repetitive work more highly than toil. Where that work cannot be automated, it is our culture to recognize and reward all types of contributions. However, heroism is not sustainable.
+
+## Inclusive is better than exclusive
+
+Broadly successful and useful technology requires different perspectives and skill sets, which can only be heard in a welcoming and respectful environment. Community membership is a privilege, not a right. Community Leadership is earned through effort, scope, quality, quantity, and duration of contributions. Our community shows respect for the time and effort put into a discussion, regardless of where a contributor is on their growth path.
+
+## Evolution is better than stagnation
+
+Openness to new ideas and studied technological evolution make Kubernetes a stronger project. Continual improvement, servant leadership, mentorship, and respect are the foundations of the Kubernetes project culture. It is the duty of leaders in the Kubernetes community to find, sponsor, and promote new community members. Leaders should expect to step aside. Community members should expect to step up.
+
+**"Culture eats strategy for breakfast." --Peter Drucker**
diff --git a/content/en/community/values.md b/content/en/community/values.md new file mode 100644 index 0000000000000..4ae1fe30b6d55 --- /dev/null +++ b/content/en/community/values.md @@ -0,0 +1,13 @@
+---
+title: Community
+layout: basic
+cid: community
+css: /css/community.css
+---
+
+ +
+{{< include "/static/community-values.md" >}} +
+
diff --git a/content/en/docs/concepts/architecture/cloud-controller.md b/content/en/docs/concepts/architecture/cloud-controller.md index d97fc94a4cdac..9b64289e82345 100644 --- a/content/en/docs/concepts/architecture/cloud-controller.md +++ b/content/en/docs/concepts/architecture/cloud-controller.md @@ -206,6 +206,8 @@ rules:
[Cloud Controller Manager Administration](/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager) has instructions on running and managing the cloud controller manager.
+To upgrade an HA control plane to use the cloud controller manager, see [Migrate Replicated Control Plane To Use Cloud Controller Manager](/docs/tasks/administer-cluster/controller-manager-leader-migration/).
+
Want to know how to implement your own cloud controller manager, or extend an existing project? The cloud controller manager uses Go interfaces to allow implementations from any cloud to be plugged in. Specifically, it uses the `CloudProvider` interface defined in [`cloud.go`](https://github.com/kubernetes/cloud-provider/blob/release-1.17/cloud.go#L42-L62) from [kubernetes/cloud-provider](https://github.com/kubernetes/cloud-provider).
diff --git a/content/en/docs/concepts/architecture/control-plane-node-communication.md b/content/en/docs/concepts/architecture/control-plane-node-communication.md index 2e7235a89f793..a4814aab4b45e 100644 --- a/content/en/docs/concepts/architecture/control-plane-node-communication.md +++ b/content/en/docs/concepts/architecture/control-plane-node-communication.md @@ -11,20 +11,20 @@ aliases:
-This document catalogs the communication paths between the control plane (really the apiserver) and the Kubernetes cluster. The intent is to allow users to customize their installation to harden the network configuration such that the cluster can be run on an untrusted network (or on fully public IPs on a cloud provider).
+This document catalogs the communication paths between the control plane (apiserver) and the Kubernetes cluster. The intent is to allow users to customize their installation to harden the network configuration such that the cluster can be run on an untrusted network (or on fully public IPs on a cloud provider).
## Node to Control Plane
-Kubernetes has a "hub-and-spoke" API pattern. All API usage from nodes (or the pods they run) terminate at the apiserver (none of the other control plane components are designed to expose remote services). The apiserver is configured to listen for remote connections on a secure HTTPS port (typically 443) with one or more forms of client [authentication](/docs/reference/access-authn-authz/authentication/) enabled.
+Kubernetes has a "hub-and-spoke" API pattern. All API usage from nodes (or the pods they run) terminates at the apiserver. None of the other control plane components are designed to expose remote services. The apiserver is configured to listen for remote connections on a secure HTTPS port (typically 443) with one or more forms of client [authentication](/docs/reference/access-authn-authz/authentication/) enabled.
One or more forms of [authorization](/docs/reference/access-authn-authz/authorization/) should be enabled, especially if [anonymous requests](/docs/reference/access-authn-authz/authentication/#anonymous-requests) or [service account tokens](/docs/reference/access-authn-authz/authentication/#service-account-tokens) are allowed.
Nodes should be provisioned with the public root certificate for the cluster such that they can connect securely to the apiserver along with valid client credentials. A good approach is that the client credentials provided to the kubelet are in the form of a client certificate. See [kubelet TLS bootstrapping](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/) for automated provisioning of kubelet client certificates. Pods that wish to connect to the apiserver can do so securely by leveraging a service account so that Kubernetes will automatically inject the public root certificate and a valid bearer token into the pod when it is instantiated. -The `kubernetes` service (in all namespaces) is configured with a virtual IP address that is redirected (via kube-proxy) to the HTTPS endpoint on the apiserver. +The `kubernetes` service (in `default` namespace) is configured with a virtual IP address that is redirected (via kube-proxy) to the HTTPS endpoint on the apiserver. The control plane components also communicate with the cluster apiserver over the secure port. @@ -42,7 +42,7 @@ The connections from the apiserver to the kubelet are used for: * Attaching (through kubectl) to running pods. * Providing the kubelet's port-forwarding functionality. -These connections terminate at the kubelet's HTTPS endpoint. By default, the apiserver does not verify the kubelet's serving certificate, which makes the connection subject to man-in-the-middle attacks, and **unsafe** to run over untrusted and/or public networks. +These connections terminate at the kubelet's HTTPS endpoint. By default, the apiserver does not verify the kubelet's serving certificate, which makes the connection subject to man-in-the-middle attacks and **unsafe** to run over untrusted and/or public networks. To verify this connection, use the `--kubelet-certificate-authority` flag to provide the apiserver with a root certificate bundle to use to verify the kubelet's serving certificate. @@ -53,20 +53,20 @@ Finally, [Kubelet authentication and/or authorization](/docs/reference/command-l ### apiserver to nodes, pods, and services -The connections from the apiserver to a node, pod, or service default to plain HTTP connections and are therefore neither authenticated nor encrypted. They can be run over a secure HTTPS connection by prefixing `https:` to the node, pod, or service name in the API URL, but they will not validate the certificate provided by the HTTPS endpoint nor provide client credentials so while the connection will be encrypted, it will not provide any guarantees of integrity. These connections **are not currently safe** to run over untrusted and/or public networks. +The connections from the apiserver to a node, pod, or service default to plain HTTP connections and are therefore neither authenticated nor encrypted. They can be run over a secure HTTPS connection by prefixing `https:` to the node, pod, or service name in the API URL, but they will not validate the certificate provided by the HTTPS endpoint nor provide client credentials. So while the connection will be encrypted, it will not provide any guarantees of integrity. These connections **are not currently safe** to run over untrusted or public networks. ### SSH tunnels Kubernetes supports SSH tunnels to protect the control plane to nodes communication paths. 
In this configuration, the apiserver initiates an SSH tunnel to each node in the cluster (connecting to the ssh server listening on port 22) and passes all traffic destined for a kubelet, node, pod, or service through the tunnel. This tunnel ensures that the traffic is not exposed outside of the network in which the nodes are running.
-SSH tunnels are currently deprecated so you shouldn't opt to use them unless you know what you are doing. The Konnectivity service is a replacement for this communication channel.
+SSH tunnels are currently deprecated, so you shouldn't opt to use them unless you know what you are doing. The Konnectivity service is a replacement for this communication channel.
### Konnectivity service
{{< feature-state for_k8s_version="v1.18" state="beta" >}}
-As a replacement to the SSH tunnels, the Konnectivity service provides TCP level proxy for the control plane to cluster communication. The Konnectivity service consists of two parts: the Konnectivity server and the Konnectivity agents, running in the control plane network and the nodes network respectively. The Konnectivity agents initiate connections to the Konnectivity server and maintain the network connections.
+As a replacement for the SSH tunnels, the Konnectivity service provides a TCP-level proxy for the control plane to cluster communication. The Konnectivity service consists of two parts: the Konnectivity server in the control plane network and the Konnectivity agents in the nodes network. The Konnectivity agents initiate connections to the Konnectivity server and maintain the network connections.
After enabling the Konnectivity service, all control plane to nodes traffic goes through these connections.
Follow the [Konnectivity service task](/docs/tasks/extend-kubernetes/setup-konnectivity/) to set up the Konnectivity service in your cluster.
diff --git a/content/en/docs/concepts/architecture/controller.md b/content/en/docs/concepts/architecture/controller.md index 5ab2cb970a20e..711cf38363202 100644 --- a/content/en/docs/concepts/architecture/controller.md +++ b/content/en/docs/concepts/architecture/controller.md @@ -102,7 +102,7 @@ Other control loops can observe that reported data and take their own actions.
In the thermostat example, if the room is very cold then a different controller might also turn on a frost protection heater. With Kubernetes clusters, the control plane indirectly works with IP address management tools, storage services,
-cloud provider APIS, and other services by
+cloud provider APIs, and other services by
[extending Kubernetes](/docs/concepts/extend-kubernetes/) to implement that.
## Desired versus current state {#desired-vs-current}
diff --git a/content/en/docs/concepts/architecture/nodes.md b/content/en/docs/concepts/architecture/nodes.md index 7bd4b355b60ef..c5830986935e1 100644 --- a/content/en/docs/concepts/architecture/nodes.md +++ b/content/en/docs/concepts/architecture/nodes.md @@ -11,12 +11,13 @@ weight: 10
Kubernetes runs your workload by placing containers into Pods to run on _Nodes_. A node may be a virtual or physical machine, depending on the cluster. Each node
-contains the services necessary to run
-{{< glossary_tooltip text="Pods" term_id="pod" >}}, managed by the
-{{< glossary_tooltip text="control plane" term_id="control-plane" >}}.
+is managed by the
+{{< glossary_tooltip text="control plane" term_id="control-plane" >}}
+and contains the services necessary to run
+{{< glossary_tooltip text="Pods" term_id="pod" >}}.
Typically you have several nodes in a cluster; in a learning or resource-limited
-environment, you might have just one.
+environment, you might have only one node.
The [components](/docs/concepts/overview/components/#node-components) on a node include the {{< glossary_tooltip text="kubelet" term_id="kubelet" >}}, a @@ -30,7 +31,7 @@ The [components](/docs/concepts/overview/components/#node-components) on a node
There are two main ways to have Nodes added to the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}}:
1. The kubelet on a node self-registers to the control plane
-2. You, or another human user, manually add a Node object
+2. You (or another human user) manually add a Node object
After you create a Node object, or the kubelet on a node self-registers, the control plane checks whether the new Node object is valid. For example, if you @@ -51,8 +52,8 @@ try to create a Node from the following JSON manifest:
Kubernetes creates a Node object internally (the representation). Kubernetes checks that a kubelet has registered to the API server that matches the `metadata.name`
-field of the Node. If the node is healthy (if all necessary services are running),
-it is eligible to run a Pod. Otherwise, that node is ignored for any cluster activity
+field of the Node. If the node is healthy (i.e. all necessary services are running),
+then it is eligible to run a Pod. Otherwise, that node is ignored for any cluster activity
until it becomes healthy.
{{< note >}} @@ -66,6 +67,16 @@ delete the Node object to stop that health checking.
The name of a Node object must be a valid [DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names).
+### Node name uniqueness
+
+The [name](/docs/concepts/overview/working-with-objects/names#names) identifies a Node. Two Nodes
+cannot have the same name at the same time. Kubernetes also assumes that a resource with the same
+name is the same object. In the case of a Node, it is implicitly assumed that an instance using the
+same name will have the same state (e.g. network settings, root disk contents). This may lead to
+inconsistencies if an instance was modified without changing its name. If the Node needs to be
+replaced or updated significantly, the existing Node object needs to be removed from the API server
+first and re-added after the update.
+
### Self-registration of Nodes
When the kubelet flag `--register-node` is true (the default), the kubelet will attempt to @@ -95,14 +106,14 @@ You can create and modify Node objects using
When you want to create Node objects manually, set the kubelet flag `--register-node=false`.
You can modify Node objects regardless of the setting of `--register-node`.
-For example, you can set labels on an existing Node, or mark it unschedulable.
+For example, you can set labels on an existing Node or mark it unschedulable.
You can use labels on Nodes in conjunction with node selectors on Pods to control scheduling. For example, you can constrain a Pod to only be eligible to run on a subset of the available nodes (see the sketch below).
Marking a node as unschedulable prevents the scheduler from placing new pods onto
-that Node, but does not affect existing Pods on the Node. This is useful as a
+that Node but does not affect existing Pods on the Node. This is useful as a
preparatory step before a node reboot or other maintenance.
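A minimal sketch of that label/selector pairing. The `disktype: ssd` label is purely illustrative (Kubernetes does not define it); it would first be set on a Node, for example with `kubectl label nodes <node-name> disktype=ssd`:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  # Schedule this Pod only onto Nodes carrying the (hypothetical)
  # label disktype=ssd.
  nodeSelector:
    disktype: ssd
  containers:
  - name: nginx
    image: nginx
```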
To mark a Node unschedulable, run: @@ -178,14 +189,14 @@ The node condition is represented as a JSON object. For example, the following s ] ``` -If the Status of the Ready condition remains `Unknown` or `False` for longer than the `pod-eviction-timeout` (an argument passed to the {{< glossary_tooltip text="kube-controller-manager" term_id="kube-controller-manager" >}}), all the Pods on the node are scheduled for deletion by the node controller. The default eviction timeout duration is **five minutes**. In some cases when the node is unreachable, the API server is unable to communicate with the kubelet on the node. The decision to delete the pods cannot be communicated to the kubelet until communication with the API server is re-established. In the meantime, the pods that are scheduled for deletion may continue to run on the partitioned node. +If the Status of the Ready condition remains `Unknown` or `False` for longer than the `pod-eviction-timeout` (an argument passed to the {{< glossary_tooltip text="kube-controller-manager" term_id="kube-controller-manager" >}}), then all the Pods on the node are scheduled for deletion by the node controller. The default eviction timeout duration is **five minutes**. In some cases when the node is unreachable, the API server is unable to communicate with the kubelet on the node. The decision to delete the pods cannot be communicated to the kubelet until communication with the API server is re-established. In the meantime, the pods that are scheduled for deletion may continue to run on the partitioned node. The node controller does not force delete pods until it is confirmed that they have stopped running in the cluster. You can see the pods that might be running on an unreachable node as being in the `Terminating` or `Unknown` state. In cases where Kubernetes cannot deduce from the underlying infrastructure if a node has permanently left a cluster, the cluster administrator -may need to delete the node object by hand. Deleting the node object from Kubernetes causes -all the Pod objects running on the node to be deleted from the API server, and frees up their +may need to delete the node object by hand. Deleting the node object from Kubernetes causes +all the Pod objects running on the node to be deleted from the API server and frees up their names. The node lifecycle controller automatically creates @@ -198,7 +209,7 @@ for more details. ### Capacity and Allocatable {#capacity} -Describes the resources available on the node: CPU, memory and the maximum +Describes the resources available on the node: CPU, memory, and the maximum number of pods that can be scheduled onto the node. The fields in the capacity block indicate the total amount of resources that a @@ -224,18 +235,20 @@ CIDR block to the node when it is registered (if CIDR assignment is turned on). The second is keeping the node controller's internal list of nodes up to date with the cloud provider's list of available machines. When running in a cloud -environment, whenever a node is unhealthy, the node controller asks the cloud +environment and whenever a node is unhealthy, the node controller asks the cloud provider if the VM for that node is still available. If not, the node controller deletes the node from its list of nodes. The third is monitoring the nodes' health. The node controller is -responsible for updating the NodeReady condition of NodeStatus to -ConditionUnknown when a node becomes unreachable (i.e. 
the node controller stops
-receiving heartbeats for some reason, for example due to the node being down), and then later evicting
-all the pods from the node (using graceful termination) if the node continues
-to be unreachable. (The default timeouts are 40s to start reporting
-ConditionUnknown and 5m after that to start evicting pods.) The node controller
-checks the state of each node every `--node-monitor-period` seconds.
+responsible for:
+- Updating the NodeReady condition of NodeStatus to ConditionUnknown when a node
+  becomes unreachable, as the node controller stops receiving heartbeats for some
+  reason such as the node being down.
+- Evicting all the pods from the node using graceful termination if
+  the node continues to be unreachable. The default timeouts are 40s to start
+  reporting ConditionUnknown and 5m after that to start evicting pods.
+
+The node controller checks the state of each node every `--node-monitor-period` seconds.
#### Heartbeats
@@ -251,13 +264,14 @@ of the node heartbeats as the cluster scales.
The kubelet is responsible for creating and updating the `NodeStatus` and
a Lease object.
-- The kubelet updates the `NodeStatus` either when there is change in status,
+- The kubelet updates the `NodeStatus` either when there is a change in status
  or if there has been no update for a configured interval. The default interval
-  for `NodeStatus` updates is 5 minutes (much longer than the 40 second default
-  timeout for unreachable nodes).
+  for `NodeStatus` updates is 5 minutes, which is much longer than the 40 second default
+  timeout for unreachable nodes.
- The kubelet creates and then updates its Lease object every 10 seconds
  (the default update interval). Lease updates occur independently from the
-  `NodeStatus` updates. If the Lease update fails, the kubelet retries with exponential backoff starting at 200 milliseconds and capped at 7 seconds.
+  `NodeStatus` updates. If the Lease update fails, the kubelet retries with
+  exponential backoff starting at 200 milliseconds and capped at 7 seconds.
#### Reliability
@@ -268,23 +282,25 @@ from more than 1 node per 10 seconds.
The node eviction behavior changes when a node in a given availability zone
becomes unhealthy. The node controller checks what percentage of nodes in the zone
are unhealthy (NodeReady condition is ConditionUnknown or ConditionFalse) at
-the same time. If the fraction of unhealthy nodes is at least
-`--unhealthy-zone-threshold` (default 0.55) then the eviction rate is reduced:
-if the cluster is small (i.e. has less than or equal to
-`--large-cluster-size-threshold` nodes - default 50) then evictions are
-stopped, otherwise the eviction rate is reduced to
-`--secondary-node-eviction-rate` (default 0.01) per second. The reason these
-policies are implemented per availability zone is because one availability zone
-might become partitioned from the master while the others remain connected. If
-your cluster does not span multiple cloud provider availability zones, then
-there is only one availability zone (the whole cluster).
+the same time:
+- If the fraction of unhealthy nodes is at least `--unhealthy-zone-threshold`
+  (default 0.55), then the eviction rate is reduced.
+- If the cluster is small (i.e. has less than or equal to
+  `--large-cluster-size-threshold` nodes - default 50), then evictions are stopped.
+- Otherwise, the eviction rate is reduced to `--secondary-node-eviction-rate`
+  (default 0.01) per second.
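For reference, a sketch of where the thresholds above live in practice: they are kube-controller-manager flags, shown here as they might appear in a static Pod manifest. The values are simply the defaults quoted above, and the image tag is an assumption:

```yaml
# Excerpt (sketch) from a kube-controller-manager static Pod manifest.
spec:
  containers:
  - name: kube-controller-manager
    image: k8s.gcr.io/kube-controller-manager:v1.21.0  # illustrative tag
    command:
    - kube-controller-manager
    - --node-monitor-period=5s             # how often the node controller checks node state
    - --node-eviction-rate=0.1             # normal eviction rate, in nodes per second
    - --secondary-node-eviction-rate=0.01  # reduced rate used in an unhealthy zone
    - --unhealthy-zone-threshold=0.55      # fraction of not-ready nodes that marks a zone unhealthy
    - --large-cluster-size-threshold=50    # clusters at or below this size stop evictions instead
```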
+ +The reason these policies are implemented per availability zone is because one +availability zone might become partitioned from the master while the others remain +connected. If your cluster does not span multiple cloud provider availability zones, +then there is only one availability zone (i.e. the whole cluster). A key reason for spreading your nodes across availability zones is so that the workload can be shifted to healthy zones when one entire zone goes down. -Therefore, if all nodes in a zone are unhealthy then the node controller evicts at +Therefore, if all nodes in a zone are unhealthy, then the node controller evicts at the normal rate of `--node-eviction-rate`. The corner case is when all zones are completely unhealthy (i.e. there are no healthy nodes in the cluster). In such a -case, the node controller assumes that there's some problem with master +case, the node controller assumes that there is some problem with master connectivity and stops all evictions until some connectivity is restored. The node controller is also responsible for evicting pods running on nodes with @@ -293,17 +309,10 @@ The node controller also adds {{< glossary_tooltip text="taints" term_id="taint" corresponding to node problems like node unreachable or not ready. This means that the scheduler won't place Pods onto unhealthy nodes. - -{{< caution >}} -`kubectl cordon` marks a node as 'unschedulable', which has the side effect of the service -controller removing the node from any LoadBalancer node target lists it was previously -eligible for, effectively removing incoming load balancer traffic from the cordoned node(s). -{{< /caution >}} - ### Node capacity -Node objects track information about the Node's resource capacity (for example: the amount -of memory available, and the number of CPUs). +Node objects track information about the Node's resource capacity: for example, the amount +of memory available and the number of CPUs. Nodes that [self register](#self-registration-of-nodes) report their capacity during registration. If you [manually](#manual-node-administration) add a Node, then you need to set the node's capacity information when you add it. @@ -330,26 +339,43 @@ the kubelet can use topology hints when making resource assignment decisions. See [Control Topology Management Policies on a Node](/docs/tasks/administer-cluster/topology-manager/) for more information. -## Graceful Node Shutdown {#graceful-node-shutdown} +## Graceful node shutdown {#graceful-node-shutdown} + +{{< feature-state state="beta" for_k8s_version="v1.21" >}} -{{< feature-state state="alpha" for_k8s_version="v1.20" >}} +The kubelet attempts to detect node system shutdown and terminates pods running on the node. -If you have enabled the `GracefulNodeShutdown` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/), then the kubelet attempts to detect the node system shutdown and terminates pods running on the node. Kubelet ensures that pods follow the normal [pod termination process](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination) during the node shutdown. -When the `GracefulNodeShutdown` feature gate is enabled, kubelet uses [systemd inhibitor locks](https://www.freedesktop.org/wiki/Software/systemd/inhibit/) to delay the node shutdown with a given duration. 
During a shutdown kubelet terminates pods in two phases:
+The Graceful node shutdown feature depends on systemd since it takes advantage of
+[systemd inhibitor locks](https://www.freedesktop.org/wiki/Software/systemd/inhibit/) to
+delay the node shutdown with a given duration.
+
+Graceful node shutdown is controlled with the `GracefulNodeShutdown`
+[feature gate](/docs/reference/command-line-tools-reference/feature-gates/), which is
+enabled by default in 1.21.
+
+Note that by default, both configuration options described below,
+`ShutdownGracePeriod` and `ShutdownGracePeriodCriticalPods`, are set to zero,
+thus not activating the Graceful node shutdown functionality.
+To activate the feature, set both kubelet config settings to appropriate non-zero values.
+
+During a graceful shutdown, kubelet terminates pods in two phases:
1. Terminate regular pods running on the node.
2. Terminate [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical) running on the node.
-Graceful Node Shutdown feature is configured with two [`KubeletConfiguration`](/docs/tasks/administer-cluster/kubelet-config-file/) options:
+The Graceful node shutdown feature is configured with two [`KubeletConfiguration`](/docs/tasks/administer-cluster/kubelet-config-file/) options:
* `ShutdownGracePeriod`:
  * Specifies the total duration that the node should delay the shutdown by. This is the total grace period for pod termination for both regular and [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical).
* `ShutdownGracePeriodCriticalPods`:
-  * Specifies the duration used to terminate [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical) during a node shutdown. This should be less than `ShutdownGracePeriod`.
-
-For example, if `ShutdownGracePeriod=30s`, and `ShutdownGracePeriodCriticalPods=10s`, kubelet will delay the node shutdown by 30 seconds. During the shutdown, the first 20 (30-10) seconds would be reserved for gracefully terminating normal pods, and the last 10 seconds would be reserved for terminating [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical).
+  * Specifies the duration used to terminate [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical) during a node shutdown. This value should be less than `ShutdownGracePeriod`.
+For example, if `ShutdownGracePeriod=30s` and
+`ShutdownGracePeriodCriticalPods=10s`, kubelet will delay the node shutdown by
+30 seconds. During the shutdown, the first 20 (30-10) seconds would be reserved
+for gracefully terminating normal pods, and the last 10 seconds would be
+reserved for terminating [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical), as illustrated in the configuration sketch below.
## {{% heading "whatsnext" %}} @@ -358,4 +384,3 @@ For example, if `ShutdownGracePeriod=30s`, and `ShutdownGracePeriodCriticalPods=
* Read the [Node](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node) section of the architecture design document.
* Read about [taints and tolerations](/docs/concepts/scheduling-eviction/taint-and-toleration/).
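Tying the worked example above together, here is a minimal sketch of a kubelet configuration file enabling graceful node shutdown with those values. The field names come from the `KubeletConfiguration` v1beta1 API; the file path is an assumption about how your kubelet is launched (point the kubelet at it with `--config`):

```yaml
# Sketch: /var/lib/kubelet/config.yaml (path is an assumption).
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Total shutdown delay: the first 20s for regular pods, ...
shutdownGracePeriod: "30s"
# ... and the final 10s reserved for critical pods.
shutdownGracePeriodCriticalPods: "10s"
```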
- diff --git a/content/en/docs/concepts/cluster-administration/_index.md b/content/en/docs/concepts/cluster-administration/_index.md index 14b8165ebf330..7d5aec5078f35 100644 --- a/content/en/docs/concepts/cluster-administration/_index.md +++ b/content/en/docs/concepts/cluster-administration/_index.md @@ -26,12 +26,12 @@ See the guides in [Setup](/docs/setup/) for examples of how to plan, set up, and Before choosing a guide, here are some considerations: - - Do you just want to try out Kubernetes on your computer, or do you want to build a high-availability, multi-node cluster? Choose distros best suited for your needs. + - Do you want to try out Kubernetes on your computer, or do you want to build a high-availability, multi-node cluster? Choose distros best suited for your needs. - Will you be using **a hosted Kubernetes cluster**, such as [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/), or **hosting your own cluster**? - Will your cluster be **on-premises**, or **in the cloud (IaaS)**? Kubernetes does not directly support hybrid clusters. Instead, you can set up multiple clusters. - **If you are configuring Kubernetes on-premises**, consider which [networking model](/docs/concepts/cluster-administration/networking/) fits best. - Will you be running Kubernetes on **"bare metal" hardware** or on **virtual machines (VMs)**? - - Do you **just want to run a cluster**, or do you expect to do **active development of Kubernetes project code**? If the + - Do you **want to run a cluster**, or do you expect to do **active development of Kubernetes project code**? If the latter, choose an actively-developed distro. Some distros only use binary releases, but offer a greater variety of choices. - Familiarize yourself with the [components](/docs/concepts/overview/components/) needed to run a cluster. @@ -45,7 +45,7 @@ Before choosing a guide, here are some considerations: ## Securing a cluster -* [Certificates](/docs/concepts/cluster-administration/certificates/) describes the steps to generate certificates using different tool chains. +* [Generate Certificates](/docs/tasks/administer-cluster/certificates/) describes the steps to generate certificates using different tool chains. * [Kubernetes Container Environment](/docs/concepts/containers/container-environment/) describes the environment for Kubelet managed containers on a Kubernetes node. diff --git a/content/en/docs/concepts/cluster-administration/addons.md b/content/en/docs/concepts/cluster-administration/addons.md index 8a356a826a3a8..726a714151d27 100644 --- a/content/en/docs/concepts/cluster-administration/addons.md +++ b/content/en/docs/concepts/cluster-administration/addons.md @@ -16,6 +16,7 @@ This page lists some of the available add-ons and links to their respective inst ## Networking and Network Policy * [ACI](https://www.github.com/noironetworks/aci-containers) provides integrated container networking and network security with Cisco ACI. +* [Antrea](https://antrea.io/) operates at Layer 3/4 to provide networking and security services for Kubernetes, leveraging Open vSwitch as the networking data plane. * [Calico](https://docs.projectcalico.org/latest/introduction/) is a networking and network policy provider. Calico supports a flexible set of networking options so you can choose the most efficient option for your situation, including non-overlay and overlay networks, with or without BGP. 
Calico uses the same engine to enforce network policy for hosts, pods, and (if using Istio & Envoy) applications at the service mesh layer. * [Canal](https://github.com/tigera/canal/tree/master/k8s-install) unites Flannel and Calico, providing networking and network policy. * [Cilium](https://github.com/cilium/cilium) is a L3 network and network policy plugin that can enforce HTTP/API/L7 policies transparently. Both routing and overlay/encapsulation mode are supported, and it can work on top of other CNI plugins. diff --git a/content/en/docs/concepts/cluster-administration/certificates.md b/content/en/docs/concepts/cluster-administration/certificates.md index 6314420c01e2a..6cce47f13cce8 100644 --- a/content/en/docs/concepts/cluster-administration/certificates.md +++ b/content/en/docs/concepts/cluster-administration/certificates.md @@ -4,249 +4,6 @@ content_type: concept weight: 20 --- - -When using client certificate authentication, you can generate certificates -manually through `easyrsa`, `openssl` or `cfssl`. - - - - - - -### easyrsa - -**easyrsa** can manually generate certificates for your cluster. - -1. Download, unpack, and initialize the patched version of easyrsa3. - - curl -LO https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz - tar xzf easy-rsa.tar.gz - cd easy-rsa-master/easyrsa3 - ./easyrsa init-pki -1. Generate a new certificate authority (CA). `--batch` sets automatic mode; - `--req-cn` specifies the Common Name (CN) for the CA's new root certificate. - - ./easyrsa --batch "--req-cn=${MASTER_IP}@`date +%s`" build-ca nopass -1. Generate server certificate and key. - The argument `--subject-alt-name` sets the possible IPs and DNS names the API server will - be accessed with. The `MASTER_CLUSTER_IP` is usually the first IP from the service CIDR - that is specified as the `--service-cluster-ip-range` argument for both the API server and - the controller manager component. The argument `--days` is used to set the number of days - after which the certificate expires. - The sample below also assumes that you are using `cluster.local` as the default - DNS domain name. - - ./easyrsa --subject-alt-name="IP:${MASTER_IP},"\ - "IP:${MASTER_CLUSTER_IP},"\ - "DNS:kubernetes,"\ - "DNS:kubernetes.default,"\ - "DNS:kubernetes.default.svc,"\ - "DNS:kubernetes.default.svc.cluster,"\ - "DNS:kubernetes.default.svc.cluster.local" \ - --days=10000 \ - build-server-full server nopass -1. Copy `pki/ca.crt`, `pki/issued/server.crt`, and `pki/private/server.key` to your directory. -1. Fill in and add the following parameters into the API server start parameters: - - --client-ca-file=/yourdirectory/ca.crt - --tls-cert-file=/yourdirectory/server.crt - --tls-private-key-file=/yourdirectory/server.key - -### openssl - -**openssl** can manually generate certificates for your cluster. - -1. Generate a ca.key with 2048bit: - - openssl genrsa -out ca.key 2048 -1. According to the ca.key generate a ca.crt (use -days to set the certificate effective time): - - openssl req -x509 -new -nodes -key ca.key -subj "/CN=${MASTER_IP}" -days 10000 -out ca.crt -1. Generate a server.key with 2048bit: - - openssl genrsa -out server.key 2048 -1. Create a config file for generating a Certificate Signing Request (CSR). - Be sure to substitute the values marked with angle brackets (e.g. ``) - with real values before saving this to a file (e.g. `csr.conf`). - Note that the value for `MASTER_CLUSTER_IP` is the service cluster IP for the - API server as described in previous subsection. 
- The sample below also assumes that you are using `cluster.local` as the default - DNS domain name. - - [ req ] - default_bits = 2048 - prompt = no - default_md = sha256 - req_extensions = req_ext - distinguished_name = dn - - [ dn ] - C = - ST = - L = - O = - OU = - CN = - - [ req_ext ] - subjectAltName = @alt_names - - [ alt_names ] - DNS.1 = kubernetes - DNS.2 = kubernetes.default - DNS.3 = kubernetes.default.svc - DNS.4 = kubernetes.default.svc.cluster - DNS.5 = kubernetes.default.svc.cluster.local - IP.1 = - IP.2 = - - [ v3_ext ] - authorityKeyIdentifier=keyid,issuer:always - basicConstraints=CA:FALSE - keyUsage=keyEncipherment,dataEncipherment - extendedKeyUsage=serverAuth,clientAuth - subjectAltName=@alt_names -1. Generate the certificate signing request based on the config file: - - openssl req -new -key server.key -out server.csr -config csr.conf -1. Generate the server certificate using the ca.key, ca.crt and server.csr: - - openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key \ - -CAcreateserial -out server.crt -days 10000 \ - -extensions v3_ext -extfile csr.conf -1. View the certificate: - - openssl x509 -noout -text -in ./server.crt - -Finally, add the same parameters into the API server start parameters. - -### cfssl - -**cfssl** is another tool for certificate generation. - -1. Download, unpack and prepare the command line tools as shown below. - Note that you may need to adapt the sample commands based on the hardware - architecture and cfssl version you are using. - - curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssl_1.5.0_linux_amd64 -o cfssl - chmod +x cfssl - curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssljson_1.5.0_linux_amd64 -o cfssljson - chmod +x cfssljson - curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssl-certinfo_1.5.0_linux_amd64 -o cfssl-certinfo - chmod +x cfssl-certinfo -1. Create a directory to hold the artifacts and initialize cfssl: - - mkdir cert - cd cert - ../cfssl print-defaults config > config.json - ../cfssl print-defaults csr > csr.json -1. Create a JSON config file for generating the CA file, for example, `ca-config.json`: - - { - "signing": { - "default": { - "expiry": "8760h" - }, - "profiles": { - "kubernetes": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "8760h" - } - } - } - } -1. Create a JSON config file for CA certificate signing request (CSR), for example, - `ca-csr.json`. Be sure to replace the values marked with angle brackets with - real values you want to use. - - { - "CN": "kubernetes", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names":[{ - "C": "", - "ST": "", - "L": "", - "O": "", - "OU": "" - }] - } -1. Generate CA key (`ca-key.pem`) and certificate (`ca.pem`): - - ../cfssl gencert -initca ca-csr.json | ../cfssljson -bare ca -1. Create a JSON config file for generating keys and certificates for the API - server, for example, `server-csr.json`. Be sure to replace the values in angle brackets with - real values you want to use. The `MASTER_CLUSTER_IP` is the service cluster - IP for the API server as described in previous subsection. - The sample below also assumes that you are using `cluster.local` as the default - DNS domain name. 
- - { - "CN": "kubernetes", - "hosts": [ - "127.0.0.1", - "", - "", - "kubernetes", - "kubernetes.default", - "kubernetes.default.svc", - "kubernetes.default.svc.cluster", - "kubernetes.default.svc.cluster.local" - ], - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [{ - "C": "", - "ST": "", - "L": "", - "O": "", - "OU": "" - }] - } -1. Generate the key and certificate for the API server, which are by default - saved into file `server-key.pem` and `server.pem` respectively: - - ../cfssl gencert -ca=ca.pem -ca-key=ca-key.pem \ - --config=ca-config.json -profile=kubernetes \ - server-csr.json | ../cfssljson -bare server - - -## Distributing Self-Signed CA Certificate - -A client node may refuse to recognize a self-signed CA certificate as valid. -For a non-production deployment, or for a deployment that runs behind a company -firewall, you can distribute a self-signed CA certificate to all clients and -refresh the local list for valid certificates. - -On each client, perform the following operations: - -```bash -sudo cp ca.crt /usr/local/share/ca-certificates/kubernetes.crt -sudo update-ca-certificates -``` - -``` -Updating certificates in /etc/ssl/certs... -1 added, 0 removed; done. -Running hooks in /etc/ca-certificates/update.d.... -done. -``` - -## Certificates API - -You can use the `certificates.k8s.io` API to provision -x509 certificates to use for authentication as documented -[here](/docs/tasks/tls/managing-tls-in-a-cluster). - - +To learn how to generate certificates for your cluster, see [Certificates](/docs/tasks/administer-cluster/certificates/). diff --git a/content/en/docs/concepts/cluster-administration/flow-control.md b/content/en/docs/concepts/cluster-administration/flow-control.md index 6cb4c386d3616..3e94277d93107 100644 --- a/content/en/docs/concepts/cluster-administration/flow-control.md +++ b/content/en/docs/concepts/cluster-administration/flow-control.md @@ -59,7 +59,7 @@ kube-apiserver \ ``` Alternatively, you can enable the v1alpha1 version of the API group -with `--runtime-config=flowcontrol.apiserver.k8s.io/v1beta1=true`. +with `--runtime-config=flowcontrol.apiserver.k8s.io/v1alpha1=true`. The command-line flag `--enable-priority-and-fairness=false` will disable the API Priority and Fairness feature, even if other flags have enabled it. @@ -427,7 +427,7 @@ poorly-behaved workloads that may be harming system health. histogram vector of queue lengths for the queues, broken down by the labels `priority_level` and `flow_schema`, as sampled by the enqueued requests. Each request that gets queued contributes one - sample to its histogram, reporting the length of the queue just + sample to its histogram, reporting the length of the queue immediately after the request was added. Note that this produces different statistics than an unbiased survey would. {{< note >}} diff --git a/content/en/docs/concepts/cluster-administration/logging.md b/content/en/docs/concepts/cluster-administration/logging.md index 80c610f963859..e75fdea4a502b 100644 --- a/content/en/docs/concepts/cluster-administration/logging.md +++ b/content/en/docs/concepts/cluster-administration/logging.md @@ -9,23 +9,22 @@ weight: 60 -Application logs can help you understand what is happening inside your application. The logs are particularly useful for debugging problems and monitoring cluster activity. Most modern applications have some kind of logging mechanism; as such, most container engines are likewise designed to support some kind of logging. 
The easiest and most embraced logging method for containerized applications is to write to the standard output and standard error streams.
+Application logs can help you understand what is happening inside your application. The logs are particularly useful for debugging problems and monitoring cluster activity. Most modern applications have some kind of logging mechanism. Likewise, container engines are designed to support logging. The easiest and most adopted logging method for containerized applications is writing to standard output and standard error streams.
-However, the native functionality provided by a container engine or runtime is usually not enough for a complete logging solution. For example, if a container crashes, a pod is evicted, or a node dies, you'll usually still want to access your application's logs. As such, logs should have a separate storage and lifecycle independent of nodes, pods, or containers. This concept is called _cluster-level-logging_. Cluster-level logging requires a separate backend to store, analyze, and query logs. Kubernetes provides no native storage solution for log data, but you can integrate many existing logging solutions into your Kubernetes cluster.
+However, the native functionality provided by a container engine or runtime is usually not enough for a complete logging solution.
+For example, you may want to access your application's logs if a container crashes, a pod gets evicted, or a node dies.
+In a cluster, logs should have a separate storage and lifecycle independent of nodes, pods, or containers. This concept is called _cluster-level logging_.
-Cluster-level logging architectures are described in assumption that
-a logging backend is present inside or outside of your cluster. If you're
-not interested in having cluster-level logging, you might still find
-the description of how logs are stored and handled on the node to be useful.
+Cluster-level logging architectures require a separate backend to store, analyze, and query logs. Kubernetes
+does not provide a native storage solution for log data. Instead, there are many logging solutions that
+integrate with Kubernetes. The following sections describe how to handle and store logs on nodes.
## Basic logging in Kubernetes
-In this section, you can see an example of basic logging in Kubernetes that
-outputs data to the standard output stream. This demonstration uses
-a pod specification with a container that writes some text to standard output
-once per second.
+This example uses a `Pod` specification with a container
+to write text to the standard output stream once per second.
{{< codenew file="debug/counter-pod.yaml" >}}
@@ -34,8 +33,10 @@ To run this pod, use the following command:
```shell
kubectl apply -f https://k8s.io/examples/debug/counter-pod.yaml
```
+
The output is:
-```
+
+```console
pod/counter created
```
@@ -44,73 +45,76 @@ To fetch the logs, use the `kubectl logs` command, as follows:
```shell
kubectl logs counter
```
+
The output is:
-```
+
+```console
0: Mon Jan 1 00:00:00 UTC 2001
1: Mon Jan 1 00:00:01 UTC 2001
2: Mon Jan 1 00:00:02 UTC 2001
...
```
-You can use `kubectl logs` to retrieve logs from a previous instantiation of a container with `--previous` flag, in case the container has crashed. If your pod has multiple containers, you should specify which container's logs you want to access by appending a container name to the command. See the [`kubectl logs` documentation](/docs/reference/generated/kubectl/kubectl-commands#logs) for more details.
+You can use `kubectl logs --previous` to retrieve logs from a previous instantiation of a container. If your pod has multiple containers, specify which container's logs you want to access by appending a container name to the command. See the [`kubectl logs` documentation](/docs/reference/generated/kubectl/kubectl-commands#logs) for more details.
## Logging at the node level
![Node level logging](/images/docs/user-guide/logging/logging-node-level.png)
-Everything a containerized application writes to `stdout` and `stderr` is handled and redirected somewhere by a container engine. For example, the Docker container engine redirects those two streams to [a logging driver](https://docs.docker.com/engine/admin/logging/overview), which is configured in Kubernetes to write to a file in json format.
+A container engine handles and redirects any output a containerized application writes to its `stdout` and `stderr` streams.
+For example, the Docker container engine redirects those two streams to [a logging driver](https://docs.docker.com/engine/admin/logging/overview), which is configured in Kubernetes to write to a file in JSON format.
{{< note >}}
-The Docker json logging driver treats each line as a separate message. When using the Docker logging driver, there is no direct support for multi-line messages. You need to handle multi-line messages at the logging agent level or higher.
+The Docker JSON logging driver treats each line as a separate message. When using the Docker logging driver, there is no direct support for multi-line messages. You need to handle multi-line messages at the logging agent level or higher.
{{< /note >}}
By default, if a container restarts, the kubelet keeps one terminated container with its logs. If a pod is evicted from the node, all corresponding containers are also evicted, along with their logs.
An important consideration in node-level logging is implementing log rotation, so that logs don't consume all available storage on the node. Kubernetes
-currently is not responsible for rotating logs, but rather a deployment tool
+is not responsible for rotating logs, but rather a deployment tool
should set up a solution to address that.
For example, in Kubernetes clusters deployed by the `kube-up.sh` script, there is a [`logrotate`](https://linux.die.net/man/8/logrotate) tool configured to run each hour. You can also set up a container runtime to
-rotate application's logs automatically, for example by using Docker's `log-opt`.
-In the `kube-up.sh` script, the latter approach is used for COS image on GCP,
-and the former approach is used in any other environment. In both cases, by
-default rotation is configured to take place when log file exceeds 10MB.
+rotate an application's logs automatically.
As an example, you can find detailed information about how `kube-up.sh` sets up logging for the COS image on GCP in the corresponding
-[script](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh).
+[`configure-helper` script](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh).
+
+When using a **CRI container runtime**, the kubelet is responsible for rotating the logs and managing the logging directory structure. The kubelet
+sends this information to the CRI container runtime and the runtime writes the container logs to the given location.
The two kubelet flags `container-log-max-size` and `container-log-max-files` can be used to configure the maximum size for each log file and the maximum number of files allowed for each container, respectively.
When you run [`kubectl logs`](/docs/reference/generated/kubectl/kubectl-commands#logs) as in the basic logging example, the kubelet on the node handles the request and
-reads directly from the log file, returning the contents in the response.
+reads directly from the log file. The kubelet returns the content of the log file.
{{< note >}}
-Currently, if some external system has performed the rotation,
+If an external system has performed the rotation or a CRI container runtime is used,
only the contents of the latest log file will be available through
-`kubectl logs`. E.g. if there's a 10MB file, `logrotate` performs
-the rotation and there are two files, one 10MB in size and one empty,
-`kubectl logs` will return an empty response.
+`kubectl logs`. For example, if there's a 10MB file, `logrotate` performs
+the rotation and there are two files: one file that is 10MB in size and a second file that is empty.
+`kubectl logs` returns the latest log file, which in this example is empty.
{{< /note >}}
-[cosConfigureHelper]: https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh
### System component logs
There are two types of system components: those that run in a container and those that do not run in a container. For example:
* The Kubernetes scheduler and kube-proxy run in a container.
-* The kubelet and container runtime, for example Docker, do not run in containers.
+* The kubelet and container runtime do not run in containers.
On machines with systemd, the kubelet and container runtime write to journald. If
-systemd is not present, they write to `.log` files in the `/var/log` directory.
-System components inside containers always write to the `/var/log` directory,
-bypassing the default logging mechanism. They use the [klog](https://github.com/kubernetes/klog)
+systemd is not present, the kubelet and container runtime write to `.log` files
+in the `/var/log` directory. System components inside containers always write
+to the `/var/log` directory, bypassing the default logging mechanism.
+They use the [`klog`](https://github.com/kubernetes/klog)
logging library. You can find the conventions for logging severity for those components in the [development docs on logging](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md).
-Similarly to the container logs, system component logs in the `/var/log`
+Similar to the container logs, system component logs in the `/var/log`
directory should be rotated. In Kubernetes clusters brought up by the `kube-up.sh` script, those logs are configured to be rotated by the `logrotate` tool daily or once the size exceeds 100MB.
@@ -129,13 +133,14 @@ While Kubernetes does not provide a native solution for cluster-level logging, t
You can implement cluster-level logging by including a _node-level logging agent_ on each node. The logging agent is a dedicated tool that exposes logs or pushes logs to a backend. Commonly, the logging agent is a container that has access to a directory with log files from all of the application containers on that node.
-Because the logging agent must run on every node, it's common to implement it as either a DaemonSet replica, a manifest pod, or a dedicated native process on the node.
However the latter two approaches are deprecated and highly discouraged. +Because the logging agent must run on every node, it is recommended to run the agent +as a `DaemonSet`. -Using a node-level logging agent is the most common and encouraged approach for a Kubernetes cluster, because it creates only one agent per node, and it doesn't require any changes to the applications running on the node. However, node-level logging _only works for applications' standard output and standard error_. +Node-level logging creates only one agent per node and doesn't require any changes to the applications running on the node. -Kubernetes doesn't specify a logging agent, but two optional logging agents are packaged with the Kubernetes release: [Stackdriver Logging](/docs/tasks/debug-application-cluster/logging-stackdriver/) for use with Google Cloud Platform, and [Elasticsearch](/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana/). You can find more information and instructions in the dedicated documents. Both use [fluentd](https://www.fluentd.org/) with custom configuration as an agent on the node. +Containers write to stdout and stderr, but with no agreed format. A node-level agent collects these logs and forwards them for aggregation. -### Using a sidecar container with the logging agent +### Using a sidecar container with the logging agent {#sidecar-container-with-logging-agent} You can use a sidecar container in one of the following ways: @@ -146,28 +151,27 @@ You can use a sidecar container in one of the following ways: ![Sidecar container with a streaming container](/images/docs/user-guide/logging/logging-with-streaming-sidecar.png) -By having your sidecar containers stream to their own `stdout` and `stderr` +By having your sidecar containers write to their own `stdout` and `stderr` streams, you can take advantage of the kubelet and the logging agent that already run on each node. The sidecar containers read logs from a file, a socket, -or the journald. Each individual sidecar container prints log to its own `stdout` -or `stderr` stream. +or journald. Each sidecar container prints a log to its own `stdout` or `stderr` stream. This approach allows you to separate several log streams from different parts of your application, some of which can lack support for writing to `stdout` or `stderr`. The logic behind redirecting logs -is minimal, so it's hardly a significant overhead. Additionally, because +is minimal, so it's not a significant overhead. Additionally, because `stdout` and `stderr` are handled by the kubelet, you can use built-in tools like `kubectl logs`. -Consider the following example. A pod runs a single container, and the container -writes to two different log files, using two different formats. Here's a +For example, a pod runs a single container, and the container +writes to two different log files using two different formats. Here's a configuration file for the Pod: {{< codenew file="admin/logging/two-files-counter-pod.yaml" >}} -It would be a mess to have log entries of different formats in the same log +It is not recommended to write log entries with different formats to the same log stream, even if you managed to redirect both components to the `stdout` stream of -the container. Instead, you could introduce two sidecar containers. Each sidecar +the container. Instead, you can create two sidecar containers. Each sidecar container could tail a particular log file from a shared volume and then redirect the logs to its own `stdout` stream.
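The redirection logic in such a sidecar is minimal. As a sketch (the container name `count-log-1` matches the `kubectl logs` commands shown next; the path `/var/log/1.log` is an assumed location on the shared volume), each sidecar only needs to follow one file and echo it to its own `stdout`:

```shell
# Command a hypothetical count-log-1 sidecar container could run:
# follow the first log file from the shared volume, starting at line 1,
# and print it to the sidecar's own stdout, where the kubelet collects it.
tail -n+1 -F /var/log/1.log
```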
@@ -181,7 +185,10 @@ running the following commands: ```shell kubectl logs counter count-log-1 ``` -``` + +The output is: + +```console 0: Mon Jan 1 00:00:00 UTC 2001 1: Mon Jan 1 00:00:01 UTC 2001 2: Mon Jan 1 00:00:02 UTC 2001 @@ -191,7 +198,10 @@ kubectl logs counter count-log-2 ```shell kubectl logs counter count-log-2 ``` -``` + +The output is: + +```console Mon Jan 1 00:00:00 UTC 2001 INFO 0 Mon Jan 1 00:00:01 UTC 2001 INFO 1 Mon Jan 1 00:00:02 UTC 2001 INFO 2 @@ -202,16 +212,15 @@ The node-level agent installed in your cluster picks up those log streams automatically without any further configuration. If you like, you can configure the agent to parse log lines depending on the source container. -Note, that despite low CPU and memory usage (order of couple of millicores +Note that despite low CPU and memory usage (order of a couple of millicores for cpu and order of several megabytes for memory), writing logs to a file and then streaming them to `stdout` can double disk usage. If you have -an application that writes to a single file, it's generally better to set -`/dev/stdout` as destination rather than implementing the streaming sidecar +an application that writes to a single file, it's recommended to set +`/dev/stdout` as the destination rather than implement the streaming sidecar container approach. Sidecar containers can also be used to rotate log files that cannot be -rotated by the application itself. An example -of this approach is a small container running logrotate periodically. +rotated by the application itself. An example of this approach is a small container running `logrotate` periodically. However, it's recommended to use `stdout` and `stderr` directly and leave rotation and retention policies to the kubelet. @@ -226,21 +235,17 @@ configured specifically to run with your application. {{< note >}} Using a logging agent in a sidecar container can lead to significant resource consumption. Moreover, you won't be able to access -those logs using `kubectl logs` command, because they are not controlled +those logs using `kubectl logs` because they are not controlled by the kubelet. {{< /note >}} -As an example, you could use [Stackdriver](/docs/tasks/debug-application-cluster/logging-stackdriver/), -which uses fluentd as a logging agent. Here are two configuration files that -you can use to implement this approach. The first file contains -a [ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/) to configure fluentd. +Here are two configuration files that you can use to implement a sidecar container with a logging agent. The first file contains +a [`ConfigMap`](/docs/tasks/configure-pod-container/configure-pod-configmap/) to configure fluentd. {{< codenew file="admin/logging/fluentd-sidecar-config.yaml" >}} {{< note >}} -The configuration of fluentd is beyond the scope of this article. For -information about configuring fluentd, see the -[official fluentd documentation](https://docs.fluentd.org/). +For information about configuring fluentd, see the [fluentd documentation](https://docs.fluentd.org/). {{< /note >}} The second file describes a pod that has a sidecar container running fluentd. @@ -248,18 +253,10 @@ The pod mounts a volume where fluentd can pick up its configuration data. {{< codenew file="admin/logging/two-files-counter-pod-agent-sidecar.yaml" >}} -After some time you can find log messages in the Stackdriver interface.
- -Remember, that this is just an example and you can actually replace fluentd -with any logging agent, reading from any source inside an application -container. +In the sample configurations, you can replace fluentd with any logging agent, reading from any source inside an application container. ### Exposing logs directly from the application ![Exposing logs directly from the application](/images/docs/user-guide/logging/logging-from-application.png) -You can implement cluster-level logging by exposing or pushing logs directly from -every application; however, the implementation for such a logging mechanism -is outside the scope of Kubernetes. - - +Cluster-level logging that exposes or pushes logs directly from every application is outside the scope of Kubernetes. diff --git a/content/en/docs/concepts/cluster-administration/manage-deployment.md b/content/en/docs/concepts/cluster-administration/manage-deployment.md index 50ed69ff42127..f51911116d4a4 100644 --- a/content/en/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/en/docs/concepts/cluster-administration/manage-deployment.md @@ -45,9 +45,9 @@ kubectl apply -f https://k8s.io/examples/application/nginx/ `kubectl` will read any files with suffixes `.yaml`, `.yml`, or `.json`. -It is a recommended practice to put resources related to the same microservice or application tier into the same file, and to group all of the files associated with your application in the same directory. If the tiers of your application bind to each other using DNS, then you can then simply deploy all of the components of your stack en masse. +It is a recommended practice to put resources related to the same microservice or application tier into the same file, and to group all of the files associated with your application in the same directory. If the tiers of your application bind to each other using DNS, you can deploy all of the components of your stack together. -A URL can also be specified as a configuration source, which is handy for deploying directly from configuration files checked into github: +A URL can also be specified as a configuration source, which is handy for deploying directly from configuration files checked into GitHub: ```shell kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/application/nginx/nginx-deployment.yaml @@ -70,7 +70,7 @@ deployment.apps "my-nginx" deleted service "my-nginx-svc" deleted ``` -In the case of just two resources, it's also easy to specify both on the command line using the resource/name syntax: +In the case of two resources, you can specify both resources on the command line using the resource/name syntax: ```shell kubectl delete deployments/my-nginx services/my-nginx-svc @@ -87,10 +87,11 @@ deployment.apps "my-nginx" deleted service "my-nginx-svc" deleted ``` -Because `kubectl` outputs resource names in the same syntax it accepts, it's easy to chain operations using `$()` or `xargs`: +Because `kubectl` outputs resource names in the same syntax it accepts, you can chain operations using `$()` or `xargs`: ```shell kubectl get $(kubectl create -f docs/concepts/cluster-administration/nginx/ -o name | grep service) +kubectl create -f docs/concepts/cluster-administration/nginx/ -o name | grep service | xargs -i kubectl get {} ``` ```shell @@ -264,7 +265,7 @@ For a more concrete example, check the [tutorial of deploying Ghost](https://git ## Updating labels Sometimes existing pods and other resources need to be relabeled before creating new resources.
This can be done with `kubectl label`. -For example, if you want to label all your nginx pods as frontend tier, simply run: +For example, if you want to label all your nginx pods as frontend tier, run: ```shell kubectl label pods -l app=nginx tier=fe @@ -277,7 +278,7 @@ pod/my-nginx-2035384211-u3t6x labeled ``` This first filters all pods with the label "app=nginx", and then labels them with the "tier=fe". -To see the pods you just labeled, run: +To see the pods you labeled, run: ```shell kubectl get pods -l app=nginx -L tier @@ -301,6 +302,7 @@ Sometimes you would want to attach annotations to resources. Annotations are arb kubectl annotate pods my-nginx-v4-9gw19 description='my frontend running nginx' kubectl get pods my-nginx-v4-9gw19 -o yaml ``` + ```shell apiVersion: v1 kind: pod @@ -314,11 +316,12 @@ For more information, please see [annotations](/docs/concepts/overview/working-w ## Scaling your application -When load on your application grows or shrinks, it's easy to scale with `kubectl`. For instance, to decrease the number of nginx replicas from 3 to 1, do: +When load on your application grows or shrinks, use `kubectl` to scale your application. For instance, to decrease the number of nginx replicas from 3 to 1, do: ```shell kubectl scale deployment/my-nginx --replicas=1 ``` + ```shell deployment.apps/my-nginx scaled ``` @@ -328,6 +331,7 @@ Now you only have one pod managed by the deployment. ```shell kubectl get pods -l app=nginx ``` + ```shell NAME READY STATUS RESTARTS AGE my-nginx-2035384211-j5fhi 1/1 Running 0 30m @@ -338,6 +342,7 @@ To have the system automatically choose the number of nginx replicas as needed, ```shell kubectl autoscale deployment/my-nginx --min=1 --max=3 ``` + ```shell horizontalpodautoscaler.autoscaling/my-nginx autoscaled ``` @@ -406,11 +411,12 @@ and ## Disruptive updates -In some cases, you may need to update resource fields that cannot be updated once initialized, or you may just want to make a recursive change immediately, such as to fix broken pods created by a Deployment. To change such fields, use `replace --force`, which deletes and re-creates the resource. In this case, you can simply modify your original configuration file: +In some cases, you may need to update resource fields that cannot be updated once initialized, or you may want to make a recursive change immediately, such as to fix broken pods created by a Deployment. To change such fields, use `replace --force`, which deletes and re-creates the resource. In this case, you can modify your original configuration file: ```shell kubectl replace -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml --force ``` + ```shell deployment.apps/my-nginx deleted deployment.apps/my-nginx replaced @@ -427,19 +433,22 @@ Let's say you were running version 1.14.2 of nginx: ```shell kubectl create deployment my-nginx --image=nginx:1.14.2 ``` + ```shell deployment.apps/my-nginx created ``` with 3 replicas (so the old and new revisions can coexist): + ```shell kubectl scale deployment my-nginx --current-replicas=1 --replicas=3 ``` + ``` deployment.apps/my-nginx scaled ``` -To update to version 1.16.1, simply change `.spec.template.spec.containers[0].image` from `nginx:1.14.2` to `nginx:1.16.1`, with the kubectl commands we learned above. +To update to version 1.16.1, change `.spec.template.spec.containers[0].image` from `nginx:1.14.2` to `nginx:1.16.1` using the previous kubectl commands. 
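As a sketch of doing that in a single imperative step (assuming the container is named `nginx`, the name kubectl derives from the image when it creates the Deployment):

```shell
# Update the image of the nginx container; the Deployment rolls out the change.
kubectl set image deployment/my-nginx nginx=nginx:1.16.1
```

Alternatively, open the Deployment in an editor: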
```shell kubectl edit deployment/my-nginx diff --git a/content/en/docs/concepts/cluster-administration/proxies.md b/content/en/docs/concepts/cluster-administration/proxies.md index 9bf204bd9f246..ba86c969b8d7a 100644 --- a/content/en/docs/concepts/cluster-administration/proxies.md +++ b/content/en/docs/concepts/cluster-administration/proxies.md @@ -39,7 +39,7 @@ There are several different proxies you may encounter when using Kubernetes: - proxies UDP, TCP and SCTP - does not understand HTTP - provides load balancing - - is just used to reach services + - is only used to reach services 1. A Proxy/Load-balancer in front of apiserver(s): diff --git a/content/en/docs/concepts/cluster-administration/system-logs.md b/content/en/docs/concepts/cluster-administration/system-logs.md index 9ccae3adf734b..0466837356023 100644 --- a/content/en/docs/concepts/cluster-administration/system-logs.md +++ b/content/en/docs/concepts/cluster-administration/system-logs.md @@ -31,22 +31,24 @@ I1025 00:15:15.525108 1 httplog.go:79] GET /api/v1/namespaces/kube-system/ {{< feature-state for_k8s_version="v1.19" state="alpha" >}} -{{<warning>}} +{{< warning >}} Migration to structured log messages is an ongoing process. Not all log messages are structured in this version. When parsing log files, you must also handle unstructured log messages. Log formatting and value serialization are subject to change. {{< /warning>}} -Structured logging is a effort to introduce a uniform structure in log messages allowing for easy extraction of information, making logs easier and cheaper to store and process. +Structured logging introduces a uniform structure in log messages allowing for programmatic extraction of information. You can store and process structured logs with less effort and cost. New message format is backward compatible and enabled by default. Format of structured logs: -``` + +```ini <klog header> "<message>" <key1>="<value1>" <key2>="<value2>" ... ``` Example: -``` + +```ini I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kube-system/kubedns" status="ready" ``` diff --git a/content/en/docs/concepts/cluster-administration/system-metrics.md b/content/en/docs/concepts/cluster-administration/system-metrics.md index 3c7e137ded72c..bfcfec9ecb212 100644 --- a/content/en/docs/concepts/cluster-administration/system-metrics.md +++ b/content/en/docs/concepts/cluster-administration/system-metrics.md @@ -134,7 +134,7 @@ cloudprovider_gce_api_request_duration_seconds { request = "list_disk"} ### kube-scheduler metrics -{{< feature-state for_k8s_version="v1.20" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} The scheduler exposes optional metrics that reports the requested resources and the desired limits of all running pods. These metrics can be used to build capacity planning dashboards, assess current or historical scheduling limits, quickly identify workloads that cannot schedule due to lack of resources, and compare actual usage to the pod's request. @@ -152,8 +152,27 @@ Once a pod reaches completion (has a `restartPolicy` of `Never` or `OnFailure` a The metrics are exposed at the HTTP endpoint `/metrics/resources` and require the same authorization as the `/metrics` endpoint on the scheduler. You must use the `--show-hidden-metrics-for-version=1.20` flag to expose these alpha stability metrics. +## Disabling metrics + +You can explicitly turn off metrics via the command line flag `--disabled-metrics`. This may be desired if, for example, a metric is causing a performance problem. The input is a list of disabled metrics (for example,
`--disabled-metrics=metric1,metric2`). + +## Metric cardinality enforcement + +Metrics with unbounded dimensions could cause memory issues in the components they instrument. To limit resource use, you can use the `--allow-label-value` command line option to dynamically configure an allow-list of label values for a metric. + +In alpha stage, the flag can only take in a series of mappings as metric label allow-list. +Each mapping is of the format `<metric_name>,<label_name>=<allowed_labels>` where +`<allowed_labels>` is a comma-separated list of acceptable label names. + +The overall format looks like: +`--allow-label-value <metric_name>,<label_name>='<allowed_value_1>, <allowed_value_2>...', <metric_name_2>,<label_name>='<allowed_value_1>, <allowed_value_2>...', ...`. + +Here is an example: +`--allow-label-value number_count_metric,odd_number='1,3,5', number_count_metric,even_number='2,4,6', date_gauge_metric,weekend='Saturday,Sunday'` + ## {{% heading "whatsnext" %}} * Read about the [Prometheus text format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format) for metrics +* See the list of [stable Kubernetes metrics](https://github.com/kubernetes/kubernetes/blob/master/test/instrumentation/testdata/stable-metrics-list.yaml) * Read about the [Kubernetes deprecation policy](/docs/reference/using-api/deprecation-policy/#deprecating-a-feature-or-behavior) diff --git a/content/en/docs/concepts/configuration/configmap.md b/content/en/docs/concepts/configuration/configmap.md index 9a134dfc9901c..cb98bf7439cc3 100644 --- a/content/en/docs/concepts/configuration/configmap.md +++ b/content/en/docs/concepts/configuration/configmap.md @@ -43,7 +43,7 @@ Kubernetes objects that have a `spec`, a ConfigMap has `data` and `binaryData` fields. These fields accept key-value pairs as their values. Both the `data` field and the `binaryData` are optional. The `data` field is designed to contain UTF-8 byte sequences while the `binaryData` field is designed to -contain binary data. +contain binary data as base64-encoded strings. The name of a ConfigMap must be a valid [DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). @@ -224,8 +224,8 @@ When a ConfigMap currently consumed in a volume is updated, projected keys are e The kubelet checks whether the mounted ConfigMap is fresh on every periodic sync. However, the kubelet uses its local cache for getting the current value of the ConfigMap. The type of the cache is configurable using the `ConfigMapAndSecretChangeDetectionStrategy` field in -the [KubeletConfiguration struct](https://github.com/kubernetes/kubernetes/blob/{{< param "docsbranch" >}}/staging/src/k8s.io/kubelet/config/v1beta1/types.go). -A ConfigMap can be either propagated by watch (default), ttl-based, or simply redirecting +the [KubeletConfiguration struct](/docs/reference/config-api/kubelet-config.v1beta1/). +A ConfigMap can be either propagated by watch (default), ttl-based, or by redirecting all requests directly to the API server. As a result, the total delay from the moment when the ConfigMap is updated to the moment when new keys are projected to the Pod can be as long as the kubelet sync period + cache @@ -233,11 +233,12 @@ propagation delay, where the cache propagation delay depends on the chosen cache (it equals to watch propagation delay, ttl of cache, or zero correspondingly). ConfigMaps consumed as environment variables are not updated automatically and require a pod restart.
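For example, one way to trigger that restart without editing the Pod template yourself (a sketch; `my-deployment` is a placeholder name) is a rolling restart:

```shell
# Replacement Pods re-read the ConfigMap into their environment on startup.
kubectl rollout restart deployment/my-deployment
```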
+ ## Immutable ConfigMaps {#configmap-immutable} -{{< feature-state for_k8s_version="v1.19" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} -The Kubernetes beta feature _Immutable Secrets and ConfigMaps_ provides an option to set +The Kubernetes feature _Immutable Secrets and ConfigMaps_ provides an option to set individual Secrets and ConfigMaps as immutable. For clusters that extensively use ConfigMaps (at least tens of thousands of unique ConfigMap to Pod mounts), preventing changes to their data has the following advantages: diff --git a/content/en/docs/concepts/configuration/manage-resources-containers.md b/content/en/docs/concepts/configuration/manage-resources-containers.md index 2668050d26554..ee4669641cb2c 100644 --- a/content/en/docs/concepts/configuration/manage-resources-containers.md +++ b/content/en/docs/concepts/configuration/manage-resources-containers.md @@ -21,9 +21,6 @@ allowed to use more of that resource than the limit you set. The kubelet also re at least the _request_ amount of that system resource specifically for that container to use. - - - ## Requests and limits @@ -72,8 +69,7 @@ You cannot overcommit `hugepages-*` resources. This is different from the `memory` and `cpu` resources. {{< /note >}} -CPU and memory are collectively referred to as *compute resources*, or just -*resources*. Compute +CPU and memory are collectively referred to as *compute resources*, or *resources*. Compute resources are measurable quantities that can be requested, allocated, and consumed. They are distinct from [API resources](/docs/concepts/overview/kubernetes-api/). API resources, such as Pods and @@ -443,12 +439,15 @@ If you want to use project quotas, you should: * Enable the `LocalStorageCapacityIsolationFSQuotaMonitoring=true` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) - in the kubelet configuration. + using the `featureGates` field in the + [kubelet configuration](/docs/reference/config-api/kubelet-config.v1beta1/) + or the `--feature-gates` command line flag. * Ensure that the root filesystem (or optional runtime filesystem) has project quotas enabled. All XFS filesystems support project quotas. For ext4 filesystems, you need to enable the project quota tracking feature while the filesystem is not mounted. + ```bash # For ext4, with /dev/block-device not mounted sudo tune2fs -O project -Q prjquota /dev/block-device @@ -519,8 +518,7 @@ Cluster-level extended resources are not tied to nodes. They are usually managed by scheduler extenders, which handle the resource consumption and resource quota. You can specify the extended resources that are handled by scheduler extenders -in [scheduler policy -configuration](https://github.com/kubernetes/kubernetes/blob/release-1.10/pkg/scheduler/api/v1/types.go#L31). +in [scheduler policy configuration](/docs/reference/config-api/kube-scheduler-policy-config.v1/) **Example:** @@ -554,7 +552,7 @@ extender. ### Consuming extended resources -Users can consume extended resources in Pod specs just like CPU and memory. +Users can consume extended resources in Pod specs like CPU and memory. The scheduler takes care of the resource accounting so that no more than the available amount is simultaneously allocated to Pods. @@ -743,23 +741,14 @@ LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-0 You can see that the Container was terminated because of `reason:OOM Killed`, where `OOM` stands for Out Of Memory. 
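As a quick sketch for checking this on your own workloads (the pod name `simmemleak-hra99` is illustrative), you can query the last observed state of the first container directly:

```shell
# Print why the previous instance of the first container terminated,
# for example OOMKilled together with exit code 137.
kubectl get pod simmemleak-hra99 -o jsonpath='{.status.containerStatuses[0].lastState.terminated.reason}'
```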
- - - - - ## {{% heading "whatsnext" %}} - * Get hands-on experience [assigning Memory resources to Containers and Pods](/docs/tasks/configure-pod-container/assign-memory-resource/). - * Get hands-on experience [assigning CPU resources to Containers and Pods](/docs/tasks/configure-pod-container/assign-cpu-resource/). - * For more details about the difference between requests and limits, see [Resource QoS](https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md). - * Read the [Container](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core) API reference - * Read the [ResourceRequirements](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcerequirements-v1-core) API reference - * Read about [project quotas](https://xfs.org/docs/xfsdocs-xml-dev/XFS_User_Guide/tmp/en-US/html/xfs-quotas.html) in XFS +* Read more about the [kube-scheduler Policy reference (v1)](/docs/reference/config-api/kube-scheduler-policy-config.v1/) + diff --git a/content/en/docs/concepts/configuration/overview.md b/content/en/docs/concepts/configuration/overview.md index 2239fe08acd19..25cfb2e7f1949 100644 --- a/content/en/docs/concepts/configuration/overview.md +++ b/content/en/docs/concepts/configuration/overview.md @@ -59,13 +59,13 @@ DNS server watches the Kubernetes API for new `Services` and creates a set of DN - Avoid using `hostNetwork`, for the same reasons as `hostPort`. -- Use [headless Services](/docs/concepts/services-networking/service/#headless-services) (which have a `ClusterIP` of `None`) for easy service discovery when you don't need `kube-proxy` load balancing. +- Use [headless Services](/docs/concepts/services-networking/service/#headless-services) (which have a `ClusterIP` of `None`) for service discovery when you don't need `kube-proxy` load balancing. ## Using Labels - Define and use [labels](/docs/concepts/overview/working-with-objects/labels/) that identify __semantic attributes__ of your application or Deployment, such as `{ app: myapp, tier: frontend, phase: test, deployment: v3 }`. You can use these labels to select the appropriate Pods for other resources; for example, a Service that selects all `tier: frontend` Pods, or all `phase: test` components of `app: myapp`. See the [guestbook](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/) app for examples of this approach. -A Service can be made to span multiple Deployments by omitting release-specific labels from its selector. [Deployments](/docs/concepts/workloads/controllers/deployment/) make it easy to update a running service without downtime. +A Service can be made to span multiple Deployments by omitting release-specific labels from its selector. When you need to update a running service without downtime, use a [Deployment](/docs/concepts/workloads/controllers/deployment/). A desired state of an object is described by a Deployment, and if changes to that spec are _applied_, the deployment controller changes the actual state to the desired state at a controlled rate. @@ -81,9 +81,9 @@ The [imagePullPolicy](/docs/concepts/containers/images/#updating-images) and the - `imagePullPolicy: Always`: every time the kubelet launches a container, the kubelet queries the container image registry to resolve the name to an image digest. 
If the kubelet has a container image with that exact digest cached locally, the kubelet uses its cached image; otherwise, the kubelet downloads (pulls) the image with the resolved digest, and uses that image to launch the container. -- `imagePullPolicy` is omitted and either the image tag is `:latest` or it is omitted: `Always` is applied. +- `imagePullPolicy` is omitted and either the image tag is `:latest` or it is omitted: `imagePullPolicy` is automatically set to `Always`. Note that this will _not_ be updated to `IfNotPresent` if the tag changes value. -- `imagePullPolicy` is omitted and the image tag is present but not `:latest`: `IfNotPresent` is applied. +- `imagePullPolicy` is omitted and the image tag is present but not `:latest`: `imagePullPolicy` is automatically set to `IfNotPresent`. Note that this will _not_ be updated to `Always` if the tag is later removed or changed to `:latest`. - `imagePullPolicy: Never`: the image is assumed to exist locally. No attempt is made to pull the image. @@ -96,7 +96,7 @@ You should avoid using the `:latest` tag when deploying containers in production {{< /note >}} {{< note >}} -The caching semantics of the underlying image provider make even `imagePullPolicy: Always` efficient. With Docker, for example, if the image already exists, the pull attempt is fast because all image layers are cached and no image download is needed. +The caching semantics of the underlying image provider make even `imagePullPolicy: Always` efficient, as long as the registry is reliably accessible. With Docker, for example, if the image already exists, the pull attempt is fast because all image layers are cached and no image download is needed. {{< /note >}} ## Using kubectl diff --git a/content/en/docs/concepts/configuration/secret.md b/content/en/docs/concepts/configuration/secret.md index 58b57a0b042fa..111a405a7afe5 100644 --- a/content/en/docs/concepts/configuration/secret.md +++ b/content/en/docs/concepts/configuration/secret.md @@ -109,14 +109,14 @@ empty-secret Opaque 0 2m6s ``` The `DATA` column shows the number of data items stored in the Secret. -In this case, `0` means we have just created an empty Secret. +In this case, `0` means we have created an empty Secret. ### Service account token Secrets A `kubernetes.io/service-account-token` type of Secret is used to store a token that identifies a service account. When using this Secret type, you need to ensure that the `kubernetes.io/service-account.name` annotation is set to an -existing service account name. An Kubernetes controller fills in some other +existing service account name. A Kubernetes controller fills in some other fields such as the `kubernetes.io/service-account.uid` annotation and the `token` key in the `data` field set to actual token content. @@ -668,8 +668,8 @@ When a secret currently consumed in a volume is updated, projected keys are even The kubelet checks whether the mounted secret is fresh on every periodic sync. However, the kubelet uses its local cache for getting the current value of the Secret. The type of the cache is configurable using the `ConfigMapAndSecretChangeDetectionStrategy` field in -the [KubeletConfiguration struct](https://github.com/kubernetes/kubernetes/blob/{{< param "docsbranch" >}}/staging/src/k8s.io/kubelet/config/v1beta1/types.go). -A Secret can be either propagated by watch (default), ttl-based, or simply redirecting +the [KubeletConfiguration struct](/docs/reference/config-api/kubelet-config.v1beta1/). 
+A Secret can be either propagated by watch (default), ttl-based, or by redirecting all requests directly to the API server. As a result, the total delay from the moment when the Secret is updated to the moment when new keys are projected to the Pod can be as long as the kubelet sync period + cache @@ -718,7 +718,7 @@ spec: #### Consuming Secret Values from environment variables -Inside a container that consumes a secret in an environment variables, the secret keys appear as +Inside a container that consumes a secret in the environment variables, the secret keys appear as normal environment variables containing the base64 decoded values of the secret data. This is the result of commands executed inside the container from the example above: @@ -749,9 +749,9 @@ There are third party solutions for triggering restarts when secrets change. ## Immutable Secrets {#secret-immutable} -{{< feature-state for_k8s_version="v1.19" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} -The Kubernetes beta feature _Immutable Secrets and ConfigMaps_ provides an option to set +The Kubernetes feature _Immutable Secrets and ConfigMaps_ provides an option to set individual Secrets and ConfigMaps as immutable. For clusters that extensively use Secrets (at least tens of thousands of unique Secret to Pod mounts), preventing changes to their data has the following advantages: @@ -760,8 +760,8 @@ data has the following advantages: - improves performance of your cluster by significantly reducing load on kube-apiserver, by closing watches for secrets marked as immutable. -This feature is controlled by the `ImmutableEphemeralVolumes` [feature -gate](/docs/reference/command-line-tools-reference/feature-gates/), +This feature is controlled by the `ImmutableEphemeralVolumes` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/), which is enabled by default since v1.19. You can create an immutable Secret by setting the `immutable` field to `true`. For example, ```yaml @@ -801,11 +801,6 @@ field set to that of the service account. See [Add ImagePullSecrets to a service account](/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account) for a detailed explanation of that process. -### Automatic mounting of manually created Secrets - -Manually created secrets (for example, one containing a token for accessing a GitHub account) -can be automatically attached to pods based on their service account. - ## Details ### Restrictions @@ -870,6 +865,7 @@ start until all the Pod's volumes are mounted. ### Use-Case: As container environment variables Create a secret + ```yaml apiVersion: v1 kind: Secret @@ -882,6 +878,7 @@ data: ``` Create the Secret: + ```shell kubectl apply -f mysecret.yaml ``` @@ -997,7 +994,7 @@ For example, if your actual password is `S!B\*d$zDsb=`, you should execute the c kubectl create secret generic dev-db-secret --from-literal=username=devuser --from-literal=password='S!B\*d$zDsb=' ``` - You do not need to escape special characters in passwords from files (`--from-file`). +You do not need to escape special characters in passwords from files (`--from-file`). {{< /note >}} Now make the Pods: @@ -1178,14 +1175,12 @@ privileged, system-level components. Applications that need to access the Secret API should perform `get` requests on the secrets they need. 
This lets administrators restrict access to all secrets -while [white-listing access to individual instances]( -/docs/reference/access-authn-authz/rbac/#referring-to-resources) that +while [white-listing access to individual instances](/docs/reference/access-authn-authz/rbac/#referring-to-resources) that the app needs. For improved performance over a looping `get`, clients can design resources that reference a secret then `watch` the resource, re-requesting the secret when the -reference changes. Additionally, a ["bulk watch" API]( -https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/bulk_watch.md) +reference changes. Additionally, a ["bulk watch" API](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/bulk_watch.md) to let clients `watch` individual resources has also been proposed, and will likely be available in future releases of Kubernetes. diff --git a/content/en/docs/concepts/containers/container-environment.md b/content/en/docs/concepts/containers/container-environment.md index 7ec28e97b4eff..a1eba4d96da06 100644 --- a/content/en/docs/concepts/containers/container-environment.md +++ b/content/en/docs/concepts/containers/container-environment.md @@ -40,6 +40,7 @@ as are any environment variables specified statically in the Docker image. ### Cluster information A list of all services that were running when a Container was created is available to that Container as environment variables. +This list is limited to services within the same namespace as the new Container's Pod and Kubernetes control plane services. Those environment variables match the syntax of Docker links. For a service named *foo* that maps to a Container named *bar*, diff --git a/content/en/docs/concepts/containers/container-lifecycle-hooks.md b/content/en/docs/concepts/containers/container-lifecycle-hooks.md index b315ba6f597ad..96569f95189cf 100644 --- a/content/en/docs/concepts/containers/container-lifecycle-hooks.md +++ b/content/en/docs/concepts/containers/container-lifecycle-hooks.md @@ -36,10 +36,13 @@ No parameters are passed to the handler. `PreStop` -This hook is called immediately before a container is terminated due to an API request or management event such as liveness probe failure, preemption, resource contention and others. A call to the preStop hook fails if the container is already in terminated or completed state. -It is blocking, meaning it is synchronous, -so it must complete before the signal to stop the container can be sent. -No parameters are passed to the handler. +This hook is called immediately before a container is terminated due to an API request or management +event such as a liveness/startup probe failure, preemption, resource contention and others. A call +to the `PreStop` hook fails if the container is already in a terminated or completed state and the +hook must complete before the TERM signal to stop the container can be sent. The Pod's termination +grace period countdown begins before the `PreStop` hook is executed, so regardless of the outcome of +the handler, the container will eventually terminate within the Pod's termination grace period. No +parameters are passed to the handler. A more detailed description of the termination behavior can be found in [Termination of Pods](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination). @@ -65,19 +68,15 @@ the Container ENTRYPOINT and hook fire asynchronously. 
However, if the hook takes too long to run or hangs, the Container cannot reach a `running` state. -`PreStop` hooks are not executed asynchronously from the signal -to stop the Container; the hook must complete its execution before -the signal can be sent. -If a `PreStop` hook hangs during execution, -the Pod's phase will be `Terminating` and remain there until the Pod is -killed after its `terminationGracePeriodSeconds` expires. -This grace period applies to the total time it takes for both -the `PreStop` hook to execute and for the Container to stop normally. -If, for example, `terminationGracePeriodSeconds` is 60, and the hook -takes 55 seconds to complete, and the Container takes 10 seconds to stop -normally after receiving the signal, then the Container will be killed -before it can stop normally, since `terminationGracePeriodSeconds` is -less than the total time (55+10) it takes for these two things to happen. +`PreStop` hooks are not executed asynchronously from the signal to stop the Container; the hook must +complete its execution before the TERM signal can be sent. If a `PreStop` hook hangs during +execution, the Pod's phase will be `Terminating` and remain there until the Pod is killed after its +`terminationGracePeriodSeconds` expires. This grace period applies to the total time it takes for +both the `PreStop` hook to execute and for the Container to stop normally. If, for example, +`terminationGracePeriodSeconds` is 60, and the hook takes 55 seconds to complete, and the Container +takes 10 seconds to stop normally after receiving the signal, then the Container will be killed +before it can stop normally, since `terminationGracePeriodSeconds` is less than the total time +(55+10) it takes for these two things to happen. If either a `PostStart` or `PreStop` hook fails, it kills the Container. diff --git a/content/en/docs/concepts/containers/images.md b/content/en/docs/concepts/containers/images.md index 99698668c4d00..6d0db16fe8f34 100644 --- a/content/en/docs/concepts/containers/images.md +++ b/content/en/docs/concepts/containers/images.md @@ -49,16 +49,32 @@ Instead, specify a meaningful tag such as `v1.42.0`. ## Updating images -The default pull policy is `IfNotPresent` which causes the -{{< glossary_tooltip text="kubelet" term_id="kubelet" >}} to skip -pulling an image if it already exists. If you would like to always force a pull, -you can do one of the following: +When you first create a {{< glossary_tooltip text="Deployment" term_id="deployment" >}}, +{{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}}, Pod, or other +object that includes a Pod template, then by default the pull policy of all +containers in that pod will be set to `IfNotPresent` if it is not explicitly +specified. This policy causes the +{{< glossary_tooltip text="kubelet" term_id="kubelet" >}} to skip pulling an +image if it already exists. + +If you would like to always force a pull, you can do one of the following: - set the `imagePullPolicy` of the container to `Always`. -- omit the `imagePullPolicy` and use `:latest` as the tag for the image to use. +- omit the `imagePullPolicy` and use `:latest` as the tag for the image to use; + Kubernetes will set the policy to `Always`. - omit the `imagePullPolicy` and the tag for the image to use. - enable the [AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) admission controller. 
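As a minimal sketch of the first option (the pod, container, and image names are illustrative assumptions), an explicit `imagePullPolicy` looks like this:

```shell
# Apply a Pod whose image is re-resolved by the kubelet on every container start.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: always-pull-example
spec:
  containers:
  - name: app
    image: registry.example/app:v1.42.0
    imagePullPolicy: Always
EOF
```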
+{{< note >}} +The value of `imagePullPolicy` of the container is always set when the object is +first _created_, and is not updated if the image's tag later changes. + +For example, if you create a Deployment with an image whose tag is _not_ +`:latest`, and later update that Deployment's image to a `:latest` tag, the +`imagePullPolicy` field will _not_ change to `Always`. You must manually change +the pull policy of any object after its initial creation. +{{< /note >}} + When `imagePullPolicy` is defined without a specific value, it is also set to `Always`. ## Multi-architecture images with image indexes @@ -119,7 +135,7 @@ Here are the recommended steps to configuring your nodes to use a private regist example, run these on your desktop/laptop: 1. Run `docker login [server]` for each set of credentials you want to use. This updates `$HOME/.docker/config.json` on your PC. - 1. View `$HOME/.docker/config.json` in an editor to ensure it contains just the credentials you want to use. + 1. View `$HOME/.docker/config.json` in an editor to ensure it contains only the credentials you want to use. 1. Get a list of your nodes; for example: - if you want the names: `nodes=$( kubectl get nodes -o jsonpath='{range.items[*].metadata}{.name} {end}' )` - if you want to get the IP addresses: `nodes=$( kubectl get nodes -o jsonpath='{range .items[*].status.addresses[?(@.type=="ExternalIP")]}{.address} {end}' )` diff --git a/content/en/docs/concepts/containers/runtime-class.md b/content/en/docs/concepts/containers/runtime-class.md index abfec1ef6cb66..6af609636eddf 100644 --- a/content/en/docs/concepts/containers/runtime-class.md +++ b/content/en/docs/concepts/containers/runtime-class.md @@ -109,7 +109,8 @@ For more details on setting up CRI runtimes, see [CRI installation](/docs/setup/ #### dockershim -Kubernetes built-in dockershim CRI does not support runtime handlers. +RuntimeClasses with dockershim must set the runtime handler to `docker`. Dockershim does not support +custom configurable runtime handlers. #### {{< glossary_tooltip term_id="containerd" >}} @@ -163,7 +164,7 @@ Nodes](/docs/concepts/scheduling-eviction/assign-pod-node/). {{< feature-state for_k8s_version="v1.18" state="beta" >}} You can specify _overhead_ resources that are associated with running a Pod. Declaring overhead allows -the cluster (including the scheduler) to account for it when making decisions about Pods and resources. +the cluster (including the scheduler) to account for it when making decisions about Pods and resources. To use Pod overhead, you must have the PodOverhead [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) enabled (it is on by default). diff --git a/content/en/docs/concepts/extend-kubernetes/_index.md b/content/en/docs/concepts/extend-kubernetes/_index.md index 429912a9eda36..083dca6964545 100644 --- a/content/en/docs/concepts/extend-kubernetes/_index.md +++ b/content/en/docs/concepts/extend-kubernetes/_index.md @@ -7,6 +7,10 @@ reviewers: - lavalamp - cheftako - chenopis +feature: + title: Designed for extensibility + description: > + Add features to your Kubernetes cluster without changing upstream source code. content_type: concept no_list: true --- @@ -80,18 +84,15 @@ and by kubectl. Below is a diagram showing how the extension points interact with the Kubernetes control plane. - - - +![Extension Points and the Control Plane](/docs/concepts/extend-kubernetes/control-plane.png) ## Extension Points This diagram shows the extension points in a Kubernetes system. 
- - +![Extension Points](/docs/concepts/extend-kubernetes/extension-points.png) 1. Users often interact with the Kubernetes API using `kubectl`. [Kubectl plugins](/docs/tasks/extend-kubectl/kubectl-plugins/) extend the kubectl binary. They only affect the individual user's local environment, and so cannot enforce site-wide policies. 2. The apiserver handles all requests. Several types of extension points in the apiserver allow authenticating requests, or blocking them based on their content, editing content, and handling deletion. These are described in the [API Access Extensions](#api-access-extensions) section. @@ -103,12 +104,11 @@ This diagram shows the extension points in a Kubernetes system. If you are unsure where to start, this flowchart can help. Note that some solutions may involve several types of extensions. - - - +![Flowchart for Extension](/docs/concepts/extend-kubernetes/flowchart.png) ## API Extensions + ### User-Defined Types Consider adding a Custom Resource to Kubernetes if you want to define new controllers, application configuration objects or other declarative APIs, and to manage them using Kubernetes tools, such as `kubectl`. @@ -145,7 +145,7 @@ Kubernetes provides several built-in authentication methods, and an [Authenticat ### Authorization -[Authorization](/docs/reference/access-authn-authz/webhook/) determines whether specific users can read, write, and do other operations on API resources. It just works at the level of whole resources -- it doesn't discriminate based on arbitrary object fields. If the built-in authorization options don't meet your needs, and [Authorization webhook](/docs/reference/access-authn-authz/webhook/) allows calling out to user-provided code to make an authorization decision. +[Authorization](/docs/reference/access-authn-authz/webhook/) determines whether specific users can read, write, and do other operations on API resources. It works at the level of whole resources -- it doesn't discriminate based on arbitrary object fields. If the built-in authorization options don't meet your needs, an [Authorization webhook](/docs/reference/access-authn-authz/webhook/) allows calling out to user-provided code to make an authorization decision. ### Dynamic Admission Control @@ -157,7 +157,6 @@ After a request is authorized, if it is a write operation, it also goes through ## Infrastructure Extensions - ### Storage Plugins [Flex Volumes](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/flexvolume-deployment.md diff --git a/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md b/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md index 74147624f584e..d9fe184f85ad0 100644 --- a/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md +++ b/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md @@ -28,9 +28,7 @@ The most common way to implement the APIService is to run an *extension API serv Extension API servers should have low latency networking to and from the kube-apiserver. Discovery requests are required to round-trip from the kube-apiserver in five seconds or less. -If your extension API server cannot achieve that latency requirement, consider making changes that let you meet it. You can also set the -`EnableAggregatedDiscoveryTimeout=false` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) on the kube-apiserver -to disable the timeout restriction.
This deprecated feature gate will be removed in a future release. +If your extension API server cannot achieve that latency requirement, consider making changes that let you meet it. ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index dcfef3f6b6180..f37a71f278a39 100644 --- a/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -31,7 +31,7 @@ Once a custom resource is installed, users can create and access its objects usi ## Custom controllers -On their own, custom resources simply let you store and retrieve structured data. +On their own, custom resources let you store and retrieve structured data. When you combine a custom resource with a *custom controller*, custom resources provide a true _declarative API_. @@ -44,7 +44,7 @@ desired state, and continually maintains this state. You can deploy and update a custom controller on a running cluster, independently of the cluster's lifecycle. Custom controllers can work with any kind of resource, but they are especially effective when combined with custom resources. The -[Operator pattern](https://coreos.com/blog/introducing-operators.html) combines custom +[Operator pattern](/docs/concepts/extend-kubernetes/operator/) combines custom resources and custom controllers. You can use custom controllers to encode domain knowledge for specific applications into an extension of the Kubernetes API. @@ -120,7 +120,7 @@ Kubernetes provides two ways to add custom resources to your cluster: Kubernetes provides these two options to meet the needs of different users, so that neither ease of use nor flexibility is compromised. -Aggregated APIs are subordinate API servers that sit behind the primary API server, which acts as a proxy. This arrangement is called [API Aggregation](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) (AA). To users, it simply appears that the Kubernetes API is extended. +Aggregated APIs are subordinate API servers that sit behind the primary API server, which acts as a proxy. This arrangement is called [API Aggregation](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) (AA). To users, the Kubernetes API appears extended. CRDs allow users to create new types of resources without adding another API server. You do not need to understand API Aggregation to use CRDs. diff --git a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md index 8b1747b857618..8f39284a96681 100644 --- a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md +++ b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md @@ -193,9 +193,69 @@ for these devices: // node resources consumed by pods and containers on the node service PodResourcesLister { rpc List(ListPodResourcesRequest) returns (ListPodResourcesResponse) {} + rpc GetAllocatableResources(AllocatableResourcesRequest) returns (AllocatableResourcesResponse) {} } ``` +The `List` endpoint provides information on resources of running pods, with details such as the +IDs of exclusively allocated CPUs, the device IDs as reported by device plugins, and the ID of +the NUMA node where these devices are allocated.
+ +```gRPC +// ListPodResourcesResponse is the response returned by List function +message ListPodResourcesResponse { + repeated PodResources pod_resources = 1; +} + +// PodResources contains information about the node resources assigned to a pod +message PodResources { + string name = 1; + string namespace = 2; + repeated ContainerResources containers = 3; +} + +// ContainerResources contains information about the resources assigned to a container +message ContainerResources { + string name = 1; + repeated ContainerDevices devices = 2; + repeated int64 cpu_ids = 3; +} + +// Topology describes hardware topology of the resource +message TopologyInfo { + repeated NUMANode nodes = 1; +} + +// NUMA representation of NUMA node +message NUMANode { + int64 ID = 1; +} + +// ContainerDevices contains information about the devices assigned to a container +message ContainerDevices { + string resource_name = 1; + repeated string device_ids = 2; + TopologyInfo topology = 3; +} +``` + +GetAllocatableResources provides information on resources initially available on the worker node. +It provides more information than the kubelet exports to the API server. + +```gRPC +// AllocatableResourcesResponse contains information about all the devices known by the kubelet +message AllocatableResourcesResponse { + repeated ContainerDevices devices = 1; + repeated int64 cpu_ids = 2; +} + +``` + +`ContainerDevices` expose the topology information declaring to which NUMA cells the device is affine. +The NUMA cells are identified using an opaque integer ID, whose value is consistent with what device +plugins report [when they register themselves to the kubelet](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#device-plugin-integration-with-the-topology-manager). + + The gRPC service is served over a unix socket at `/var/lib/kubelet/pod-resources/kubelet.sock`. Monitoring agents for device plugin resources can be deployed as a daemon, or as a DaemonSet. The canonical directory `/var/lib/kubelet/pod-resources` requires privileged access, so monitoring @@ -204,7 +264,7 @@ DaemonSet, `/var/lib/kubelet/pod-resources` must be mounted as a {{< glossary_tooltip term_id="volume" >}} in the device monitoring agent's [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core). -Support for the "PodResources service" requires `KubeletPodResources` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to be enabled. +Support for the `PodResourcesLister` service requires `KubeletPodResources` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to be enabled. It is enabled by default starting with Kubernetes 1.15 and is v1 since Kubernetes 1.20.
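As a sketch of that volume mount (the DaemonSet name, namespace, and agent image are illustrative assumptions; only the `/var/lib/kubelet/pod-resources` path comes from the text above), a device monitoring agent could be deployed like this:

```shell
# Hypothetical monitoring agent that reaches the kubelet's pod-resources
# socket through a hostPath volume.
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: device-monitoring-agent
  namespace: kube-system
spec:
  selector:
    matchLabels:
      name: device-monitoring-agent
  template:
    metadata:
      labels:
        name: device-monitoring-agent
    spec:
      containers:
      - name: agent
        image: registry.example/device-monitor:v0.1.0  # hypothetical image
        volumeMounts:
        - name: pod-resources
          mountPath: /var/lib/kubelet/pod-resources
      volumes:
      - name: pod-resources
        hostPath:
          path: /var/lib/kubelet/pod-resources
EOF
```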
## Device Plugin integration with the Topology Manager @@ -256,5 +316,3 @@ Here are some examples of device plugin implementations: * Learn about [advertising extended resources](/docs/tasks/administer-cluster/extended-resource-node/) on a node * Read about using [hardware acceleration for TLS ingress](https://kubernetes.io/blog/2019/04/24/hardware-accelerated-ssl/tls-termination-in-ingress-controllers-using-kubernetes-device-plugins-and-runtimeclass/) with Kubernetes * Learn about the [Topology Manager](/docs/tasks/administer-cluster/topology-manager/) - - diff --git a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md index 7b53fa326f3d5..0ec8bf81b1d00 100644 --- a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md +++ b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md @@ -24,7 +24,7 @@ Network plugins in Kubernetes come in a few flavors: The kubelet has a single default network plugin, and a default network common to the entire cluster. It probes for plugins when it starts up, remembers what it finds, and executes the selected plugin at appropriate times in the pod lifecycle (this is only true for Docker, as CRI manages its own CNI plugins). There are two Kubelet command line parameters to keep in mind when using plugins: * `cni-bin-dir`: Kubelet probes this directory for plugins on startup -* `network-plugin`: The network plugin to use from `cni-bin-dir`. It must match the name reported by a plugin probed from the plugin directory. For CNI plugins, this is simply "cni". +* `network-plugin`: The network plugin to use from `cni-bin-dir`. It must match the name reported by a plugin probed from the plugin directory. For CNI plugins, this is `cni`. ## Network Plugin Requirements diff --git a/content/en/docs/concepts/extend-kubernetes/control-plane.png b/content/en/docs/concepts/extend-kubernetes/control-plane.png new file mode 100644 index 0000000000000..fa61599e94118 Binary files /dev/null and b/content/en/docs/concepts/extend-kubernetes/control-plane.png differ diff --git a/content/en/docs/concepts/extend-kubernetes/extend-cluster.md b/content/en/docs/concepts/extend-kubernetes/extend-cluster.md deleted file mode 100644 index 84d14cee3e722..0000000000000 --- a/content/en/docs/concepts/extend-kubernetes/extend-cluster.md +++ /dev/null @@ -1,207 +0,0 @@ ---- -title: Extending your Kubernetes Cluster -reviewers: -- erictune -- lavalamp -- cheftako -- chenopis -content_type: concept -weight: 10 ---- - - - -Kubernetes is highly configurable and extensible. As a result, -there is rarely a need to fork or submit patches to the Kubernetes -project code. - -This guide describes the options for customizing a Kubernetes cluster. It is -aimed at {{< glossary_tooltip text="cluster operators" term_id="cluster-operator" >}} -who want to understand how to adapt their -Kubernetes cluster to the needs of their work environment. Developers who are prospective -{{< glossary_tooltip text="Platform Developers" term_id="platform-developer" >}} -or Kubernetes Project {{< glossary_tooltip text="Contributors" term_id="contributor" >}} -will also find it useful as an introduction to what extension points and -patterns exist, and their trade-offs and limitations. 
- - - - -## Overview - -Customization approaches can be broadly divided into *configuration*, which only involves changing flags, local configuration files, or API resources; and *extensions*, which involve running additional programs or services. This document is primarily about extensions. - -## Configuration - -*Configuration files* and *flags* are documented in the Reference section of the online documentation, under each binary: - -* [kubelet](/docs/reference/command-line-tools-reference/kubelet/) -* [kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/) -* [kube-controller-manager](/docs/reference/command-line-tools-reference/kube-controller-manager/) -* [kube-scheduler](/docs/reference/command-line-tools-reference/kube-scheduler/). - -Flags and configuration files may not always be changeable in a hosted Kubernetes service or a distribution with managed installation. When they are changeable, they are usually only changeable by the cluster administrator. Also, they are subject to change in future Kubernetes versions, and setting them may require restarting processes. For those reasons, they should be used only when there are no other options. - -*Built-in Policy APIs*, such as [ResourceQuota](/docs/concepts/policy/resource-quotas/), [PodSecurityPolicies](/docs/concepts/policy/pod-security-policy/), [NetworkPolicy](/docs/concepts/services-networking/network-policies/) and Role-based Access Control ([RBAC](/docs/reference/access-authn-authz/rbac/)), are built-in Kubernetes APIs. APIs are typically used with hosted Kubernetes services and with managed Kubernetes installations. They are declarative and use the same conventions as other Kubernetes resources like pods, so new cluster configuration can be repeatable and be managed the same way as applications. And, where they are stable, they enjoy a [defined support policy](/docs/reference/using-api/deprecation-policy/) like other Kubernetes APIs. For these reasons, they are preferred over *configuration files* and *flags* where suitable. - -## Extensions - -Extensions are software components that extend and deeply integrate with Kubernetes. -They adapt it to support new types and new kinds of hardware. - -Most cluster administrators will use a hosted or distribution -instance of Kubernetes. As a result, most Kubernetes users will not need to -install extensions and fewer will need to author new ones. - -## Extension Patterns - -Kubernetes is designed to be automated by writing client programs. Any -program that reads and/or writes to the Kubernetes API can provide useful -automation. *Automation* can run on the cluster or off it. By following -the guidance in this doc you can write highly available and robust automation. -Automation generally works with any Kubernetes cluster, including hosted -clusters and managed installations. - -There is a specific pattern for writing client programs that work well with -Kubernetes called the *Controller* pattern. Controllers typically read an -object's `.spec`, possibly do things, and then update the object's `.status`. - -A controller is a client of Kubernetes. When Kubernetes is the client and -calls out to a remote service, it is called a *Webhook*. The remote service -is called a *Webhook Backend*. Like Controllers, Webhooks do add a point of -failure. - -In the webhook model, Kubernetes makes a network request to a remote service. -In the *Binary Plugin* model, Kubernetes executes a binary (program). -Binary plugins are used by the kubelet (e.g. 
-[Flex Volume Plugins](/docs/concepts/storage/volumes/#flexvolume) -and [Network Plugins](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/)) -and by kubectl. - -Below is a diagram showing how the extension points interact with the -Kubernetes control plane. - - - - - - -## Extension Points - -This diagram shows the extension points in a Kubernetes system. - - - - - -1. Users often interact with the Kubernetes API using `kubectl`. [Kubectl plugins](/docs/tasks/extend-kubectl/kubectl-plugins/) extend the kubectl binary. They only affect the individual user's local environment, and so cannot enforce site-wide policies. -2. The apiserver handles all requests. Several types of extension points in the apiserver allow authenticating requests, or blocking them based on their content, editing content, and handling deletion. These are described in the [API Access Extensions](/docs/concepts/extend-kubernetes/#api-access-extensions) section. -3. The apiserver serves various kinds of *resources*. *Built-in resource kinds*, like `pods`, are defined by the Kubernetes project and can't be changed. You can also add resources that you define, or that other projects have defined, called *Custom Resources*, as explained in the [Custom Resources](/docs/concepts/extend-kubernetes/#user-defined-types) section. Custom Resources are often used with API Access Extensions. -4. The Kubernetes scheduler decides which nodes to place pods on. There are several ways to extend scheduling. These are described in the [Scheduler Extensions](/docs/concepts/extend-kubernetes/#scheduler-extensions) section. -5. Much of the behavior of Kubernetes is implemented by programs called Controllers which are clients of the API-Server. Controllers are often used in conjunction with Custom Resources. -6. The kubelet runs on servers, and helps pods appear like virtual servers with their own IPs on the cluster network. [Network Plugins](/docs/concepts/extend-kubernetes/#network-plugins) allow for different implementations of pod networking. -7. The kubelet also mounts and unmounts volumes for containers. New types of storage can be supported via [Storage Plugins](/docs/concepts/extend-kubernetes/#storage-plugins). - -If you are unsure where to start, this flowchart can help. Note that some solutions may involve several types of extensions. - - - - - - -## API Extensions -### User-Defined Types - -Consider adding a Custom Resource to Kubernetes if you want to define new controllers, application configuration objects or other declarative APIs, and to manage them using Kubernetes tools, such as `kubectl`. - -Do not use a Custom Resource as data storage for application, user, or monitoring data. - -For more about Custom Resources, see the [Custom Resources concept guide](/docs/concepts/extend-kubernetes/api-extension/custom-resources/). - - -### Combining New APIs with Automation - -The combination of a custom resource API and a control loop is called the [Operator pattern](/docs/concepts/extend-kubernetes/operator/). The Operator pattern is used to manage specific, usually stateful, applications. These custom APIs and control loops can also be used to control other resources, such as storage or policies. - -### Changing Built-in Resources - -When you extend the Kubernetes API by adding custom resources, the added resources always fall into a new API Groups. You cannot replace or change existing API groups. -Adding an API does not directly let you affect the behavior of existing APIs (e.g. 
Pods), but API Access Extensions do. - - -### API Access Extensions - -When a request reaches the Kubernetes API Server, it is first Authenticated, then Authorized, then subject to various types of Admission Control. See [Controlling Access to the Kubernetes API](/docs/concepts/security/controlling-access/) for more on this flow. - -Each of these steps offers extension points. - -Kubernetes has several built-in authentication methods that it supports. It can also sit behind an authenticating proxy, and it can send a token from an Authorization header to a remote service for verification (a webhook). All of these methods are covered in the [Authentication documentation](/docs/reference/access-authn-authz/authentication/). - -### Authentication - -[Authentication](/docs/reference/access-authn-authz/authentication/) maps headers or certificates in all requests to a username for the client making the request. - -Kubernetes provides several built-in authentication methods, and an [Authentication webhook](/docs/reference/access-authn-authz/authentication/#webhook-token-authentication) method if those don't meet your needs. - - -### Authorization - -[Authorization](/docs/reference/access-authn-authz/webhook/) determines whether specific users can read, write, and do other operations on API resources. It just works at the level of whole resources -- it doesn't discriminate based on arbitrary object fields. If the built-in authorization options don't meet your needs, and [Authorization webhook](/docs/reference/access-authn-authz/webhook/) allows calling out to user-provided code to make an authorization decision. - - -### Dynamic Admission Control - -After a request is authorized, if it is a write operation, it also goes through [Admission Control](/docs/reference/access-authn-authz/admission-controllers/) steps. In addition to the built-in steps, there are several extensions: - -* The [Image Policy webhook](/docs/reference/access-authn-authz/admission-controllers/#imagepolicywebhook) restricts what images can be run in containers. -* To make arbitrary admission control decisions, a general [Admission webhook](/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks) can be used. Admission Webhooks can reject creations or updates. - -## Infrastructure Extensions - - -### Storage Plugins - -[Flex Volumes](/docs/concepts/storage/volumes/#flexvolume) -allow users to mount volume types without built-in support by having the -Kubelet call a Binary Plugin to mount the volume. - - -### Device Plugins - -Device plugins allow a node to discover new Node resources (in addition to the -builtin ones like cpu and memory) via a -[Device Plugin](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/). - -### Network Plugins - -Different networking fabrics can be supported via node-level -[Network Plugins](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/). - -### Scheduler Extensions - -The scheduler is a special type of controller that watches pods, and assigns -pods to nodes. The default scheduler can be replaced entirely, while -continuing to use other Kubernetes components, or -[multiple schedulers](/docs/tasks/extend-kubernetes/configure-multiple-schedulers/) -can run at the same time. - -This is a significant undertaking, and almost all Kubernetes users find they -do not need to modify the scheduler. 
-
-The scheduler also supports a
-[webhook](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/scheduler_extender.md)
-that permits a webhook backend (scheduler extension) to filter and prioritize
-the nodes chosen for a pod.
-
-
-## {{% heading "whatsnext" %}}
-
-* Learn more about [Custom Resources](/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
-* Learn about [Dynamic admission control](/docs/reference/access-authn-authz/extensible-admission-controllers/)
-* Learn more about Infrastructure extensions
-  * [Network Plugins](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/)
-  * [Device Plugins](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/)
-* Learn about [kubectl plugins](/docs/tasks/extend-kubectl/kubectl-plugins/)
-* Learn about the [Operator pattern](/docs/concepts/extend-kubernetes/operator/)
-
-
diff --git a/content/en/docs/concepts/extend-kubernetes/extension-points.png b/content/en/docs/concepts/extend-kubernetes/extension-points.png
new file mode 100644
index 0000000000000..01fb689e7bc42
Binary files /dev/null and b/content/en/docs/concepts/extend-kubernetes/extension-points.png differ
diff --git a/content/en/docs/concepts/extend-kubernetes/flowchart.png b/content/en/docs/concepts/extend-kubernetes/flowchart.png
new file mode 100644
index 0000000000000..e75802c0483cc
Binary files /dev/null and b/content/en/docs/concepts/extend-kubernetes/flowchart.png differ
diff --git a/content/en/docs/concepts/extend-kubernetes/operator.md b/content/en/docs/concepts/extend-kubernetes/operator.md
index 0e9d227a53653..323200ec3adab 100644
--- a/content/en/docs/concepts/extend-kubernetes/operator.md
+++ b/content/en/docs/concepts/extend-kubernetes/operator.md
@@ -103,27 +103,28 @@ as well as keeping the existing service in good shape.
 ## Writing your own Operator {#writing-operator}
 
 If there isn't an Operator in the ecosystem that implements the behavior you
-want, you can code your own. In [What's next](#what-s-next) you'll find a few
-links to libraries and tools you can use to write your own cloud native
-Operator.
+want, you can code your own. You can also implement an Operator (that is, a Controller) using any language or runtime that can act as a [client for the Kubernetes API](/docs/reference/using-api/client-libraries/).
+The following are a few libraries and tools you can use to write your own cloud native
+Operator.
+ +{{% thirdparty-content %}} +* [kubebuilder](https://book.kubebuilder.io/) +* [KUDO](https://kudo.dev/) (Kubernetes Universal Declarative Operator) +* [Metacontroller](https://metacontroller.app/) along with WebHooks that + you implement yourself +* [Operator Framework](https://operatorframework.io) ## {{% heading "whatsnext" %}} * Learn more about [Custom Resources](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) * Find ready-made operators on [OperatorHub.io](https://operatorhub.io/) to suit your use case -* Use existing tools to write your own operator, eg: - * using [KUDO](https://kudo.dev/) (Kubernetes Universal Declarative Operator) - * using [kubebuilder](https://book.kubebuilder.io/) - * using [Metacontroller](https://metacontroller.app/) along with WebHooks that - you implement yourself - * using the [Operator Framework](https://operatorframework.io) * [Publish](https://operatorhub.io/) your operator for other people to use -* Read [CoreOS' original article](https://coreos.com/blog/introducing-operators.html) that introduced the Operator pattern +* Read [CoreOS' original article](https://web.archive.org/web/20170129131616/https://coreos.com/blog/introducing-operators.html) that introduced the Operator pattern (this is an archived version of the original article). * Read an [article](https://cloud.google.com/blog/products/containers-kubernetes/best-practices-for-building-kubernetes-operators-and-stateful-apps) from Google Cloud about best practices for building Operators diff --git a/content/en/docs/concepts/extend-kubernetes/service-catalog.md b/content/en/docs/concepts/extend-kubernetes/service-catalog.md index 3aa967578841c..af0271d9aba70 100644 --- a/content/en/docs/concepts/extend-kubernetes/service-catalog.md +++ b/content/en/docs/concepts/extend-kubernetes/service-catalog.md @@ -26,7 +26,7 @@ Fortunately, there is a cloud provider that offers message queuing as a managed A cluster operator can setup Service Catalog and use it to communicate with the cloud provider's service broker to provision an instance of the message queuing service and make it available to the application within the Kubernetes cluster. The application developer therefore does not need to be concerned with the implementation details or management of the message queue. -The application can simply use it as a service. +The application can access the message queue as a service. ## Architecture diff --git a/content/en/docs/concepts/overview/components.md b/content/en/docs/concepts/overview/components.md index eb17e2dd7ea68..61cd2b0d30027 100644 --- a/content/en/docs/concepts/overview/components.md +++ b/content/en/docs/concepts/overview/components.md @@ -33,7 +33,8 @@ The control plane's components make global decisions about the cluster (for exam Control plane components can be run on any machine in the cluster. However, for simplicity, set up scripts typically start all control plane components on the same machine, and do not run user containers on this machine. See -[Building High-Availability Clusters](/docs/admin/high-availability/) for an example multi-master-VM setup. +[Creating Highly Available clusters with kubeadm](/docs/setup/production-environment/tools/kubeadm/high-availability/) +for an example control plane setup that runs across multiple VMs. ### kube-apiserver @@ -51,11 +52,11 @@ the same machine, and do not run user containers on this machine. 
 See
 
 {{< glossary_definition term_id="kube-controller-manager" length="all" >}}
 
-These controllers include:
+Some types of these controllers are:
 
  * Node controller: Responsible for noticing and responding when nodes go down.
- * Replication controller: Responsible for maintaining the correct number of pods for every replication
-   controller object in the system.
+ * Job controller: Watches for Job objects that represent one-off tasks, then creates
+   Pods to run those tasks to completion.
  * Endpoints controller: Populates the Endpoints object (that is, joins Services & Pods).
  * Service Account & Token controllers: Create default accounts and API access tokens for new
    namespaces.
@@ -132,4 +133,3 @@ saving container logs to a central log store with search/browsing interface.
 * Learn about [Controllers](/docs/concepts/architecture/controller/)
 * Learn about [kube-scheduler](/docs/concepts/scheduling-eviction/kube-scheduler/)
 * Read etcd's official [documentation](https://etcd.io/docs/)
-
diff --git a/content/en/docs/concepts/overview/what-is-kubernetes.md b/content/en/docs/concepts/overview/what-is-kubernetes.md
index f3c4a1da02b8d..b19c4155ce1c6 100644
--- a/content/en/docs/concepts/overview/what-is-kubernetes.md
+++ b/content/en/docs/concepts/overview/what-is-kubernetes.md
@@ -43,7 +43,7 @@ Each VM is a full machine running all the components, including its own operatin
 Containers have become popular because they provide extra benefits, such as:
 
 * Agile application creation and deployment: increased ease and efficiency of container image creation compared to VM image use.
-* Continuous development, integration, and deployment: provides for reliable and frequent container image build and deployment with quick and easy rollbacks (due to image immutability).
+* Continuous development, integration, and deployment: provides for reliable and frequent container image build and deployment with quick and efficient rollbacks (due to image immutability).
 * Dev and Ops separation of concerns: create application container images at build/release time rather than deployment time, thereby decoupling applications from infrastructure.
 * Observability not only surfaces OS-level information and metrics, but also application health and other signals.
 * Environmental consistency across development, testing, and production: Runs the same on a laptop as it does in the cloud.
diff --git a/content/en/docs/concepts/overview/working-with-objects/labels.md b/content/en/docs/concepts/overview/working-with-objects/labels.md
index 560ab23dff501..25eb5da66ec9c 100644
--- a/content/en/docs/concepts/overview/working-with-objects/labels.md
+++ b/content/en/docs/concepts/overview/working-with-objects/labels.md
@@ -42,7 +42,7 @@ Example labels:
 * `"partition" : "customerA"`, `"partition" : "customerB"`
 * `"track" : "daily"`, `"track" : "weekly"`
 
-These are just examples of commonly used labels; you are free to develop your own conventions. Keep in mind that label Key must be unique for a given object.
+These are examples of commonly used labels; you are free to develop your own conventions. Keep in mind that label keys must be unique for a given object.
 
 ## Syntax and character set
 
@@ -52,7 +52,10 @@ If the prefix is omitted, the label Key is presumed to be private to the user. A
 The `kubernetes.io/` and `k8s.io/` prefixes are reserved for Kubernetes core components.
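+
+For example, the following (illustrative) Pod uses one unprefixed and one prefixed
+label key; `example.com` is a placeholder prefix, standing in for any DNS subdomain
+you control:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: label-prefix-demo         # hypothetical name
+  labels:
+    environment: production       # unprefixed key, private to the user
+    example.com/release: canary   # prefixed key; the prefix must be a DNS subdomain
+spec:
+  containers:
+  - name: nginx
+    image: nginx:1.14.2
+```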
-Valid label values must be 63 characters or less and must be empty or begin and end with an alphanumeric character (`[a-z0-9A-Z]`) with dashes (`-`), underscores (`_`), dots (`.`), and alphanumerics between.
+A valid label value:
+* must be 63 characters or less (can be empty),
+* unless empty, must begin and end with an alphanumeric character (`[a-z0-9A-Z]`),
+* may contain dashes (`-`), underscores (`_`), dots (`.`), and alphanumerics in between.
 
 For example, here's the configuration file for a Pod that has two labels `environment: production` and `app: nginx` :
 
@@ -98,7 +101,7 @@ For both equality-based and set-based conditions there is no logical _OR_ (`||`)
 ### _Equality-based_ requirement
 
 _Equality-_ or _inequality-based_ requirements allow filtering by label keys and values. Matching objects must satisfy all of the specified label constraints, though they may have additional labels as well.
-Three kinds of operators are admitted `=`,`==`,`!=`. The first two represent _equality_ (and are simply synonyms), while the latter represents _inequality_. For example:
+Three kinds of operators are admitted `=`,`==`,`!=`. The first two represent _equality_ (and are synonyms), while the latter represents _inequality_. For example:
 
 ```
 environment = production
@@ -234,4 +237,3 @@ selector:
 One use case for selecting over labels is to constrain the set of nodes onto which a pod can schedule.
 See the documentation on [node selection](/docs/concepts/scheduling-eviction/assign-pod-node/) for more
 information.
-
diff --git a/content/en/docs/concepts/overview/working-with-objects/names.md b/content/en/docs/concepts/overview/working-with-objects/names.md
index bc89d1c30a6ef..8e74eb5c0b912 100644
--- a/content/en/docs/concepts/overview/working-with-objects/names.md
+++ b/content/en/docs/concepts/overview/working-with-objects/names.md
@@ -24,6 +24,10 @@ For non-unique user-provided attributes, Kubernetes provides [labels](/docs/conc
 
 {{< glossary_definition term_id="name" length="all" >}}
 
+{{< note >}}
+In cases when objects represent a physical entity, like a Node representing a physical host, if the host is re-created under the same name without deleting and re-creating the Node, Kubernetes treats the new host as the old one, which may lead to inconsistencies.
+{{< /note >}}
+
 Below are three types of commonly used name constraints for resources.
 
 ### DNS Subdomain Names
 
@@ -86,4 +90,3 @@ UUIDs are standardized as ISO/IEC 9834-8 and as ITU-T X.667.
 
 * Read about [labels](/docs/concepts/overview/working-with-objects/labels/) in Kubernetes.
 * See the [Identifiers and Names in Kubernetes](https://git.k8s.io/community/contributors/design-proposals/architecture/identifiers.md) design document.
-
diff --git a/content/en/docs/concepts/overview/working-with-objects/namespaces.md b/content/en/docs/concepts/overview/working-with-objects/namespaces.md
index f078cb86360d8..8f740c866b54e 100644
--- a/content/en/docs/concepts/overview/working-with-objects/namespaces.md
+++ b/content/en/docs/concepts/overview/working-with-objects/namespaces.md
@@ -28,9 +28,9 @@ resource can only be in one namespace.
 
 Namespaces are a way to divide cluster resources between multiple users (via [resource quota](/docs/concepts/policy/resource-quotas/)).
-It is not necessary to use multiple namespaces just to separate slightly different
+It is not necessary to use multiple namespaces to separate slightly different
 resources, such as different versions of the same software: use
-[labels](/docs/concepts/overview/working-with-objects/labels) to distinguish
+{{< glossary_tooltip text="labels" term_id="label" >}} to distinguish
 resources within the same namespace.
 
 ## Working with Namespaces
 
@@ -91,7 +91,7 @@ kubectl config view --minify | grep namespace:
 
 When you create a [Service](/docs/concepts/services-networking/service/),
 it creates a corresponding [DNS entry](/docs/concepts/services-networking/dns-pod-service/).
 This entry is of the form `<service-name>.<namespace-name>.svc.cluster.local`, which means
-that if a container just uses `<service-name>`, it will resolve to the service which
+that if a container only uses `<service-name>`, it will resolve to the service which
 is local to a namespace. This is useful for using the same configuration across
 multiple namespaces such as Development, Staging and Production. If you want to reach
 across namespaces, you need to use the fully qualified domain name (FQDN).
 
@@ -114,6 +114,16 @@ kubectl api-resources --namespaced=true
 kubectl api-resources --namespaced=false
 ```
 
+## Automatic labelling
+
+{{< feature-state state="beta" for_k8s_version="1.21" >}}
+
+The Kubernetes control plane sets an immutable {{< glossary_tooltip text="label" term_id="label" >}}
+`kubernetes.io/metadata.name` on all namespaces, provided that the `NamespaceDefaultLabelName`
+[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled.
+The value of the label is the namespace name.
+
+
 ## {{% heading "whatsnext" %}}
 
 * Learn more about [creating a new namespace](/docs/tasks/administer-cluster/namespaces/#creating-a-new-namespace).
diff --git a/content/en/docs/concepts/overview/working-with-objects/object-management.md b/content/en/docs/concepts/overview/working-with-objects/object-management.md
index a2dd737e7786d..b85c622823165 100644
--- a/content/en/docs/concepts/overview/working-with-objects/object-management.md
+++ b/content/en/docs/concepts/overview/working-with-objects/object-management.md
@@ -31,7 +31,7 @@ When using imperative commands, a user operates directly on live objects
 in a cluster. The user provides operations to
 the `kubectl` command as arguments or flags.
 
-This is the simplest way to get started or to run a one-off task in
+This is the recommended way to get started or to run a one-off task in
 a cluster. Because this technique operates directly on live
 objects, it provides no history of previous configurations.
 
@@ -47,7 +47,7 @@ kubectl create deployment nginx --image nginx
 
 Advantages compared to object configuration:
 
-- Commands are simple, easy to learn and easy to remember.
+- Commands are expressed as a single action word.
 - Commands require only a single step to make changes to the cluster.
 
 Disadvantages compared to object configuration:
diff --git a/content/en/docs/concepts/policy/node-resource-managers.md b/content/en/docs/concepts/policy/node-resource-managers.md
new file mode 100644
index 0000000000000..719e8b1151f0e
--- /dev/null
+++ b/content/en/docs/concepts/policy/node-resource-managers.md
@@ -0,0 +1,22 @@
+---
+reviewers:
+- derekwaynecarr
+- klueska
+title: Node Resource Managers
+content_type: concept
+weight: 50
+---
+
+
+
+In order to support latency-critical and high-throughput workloads, Kubernetes offers a suite of Resource Managers.
+The managers aim to coordinate and optimize the alignment of node resources for pods configured with specific requirements for CPUs, devices, and memory (hugepages) resources.
+
+
+
+The main manager, the Topology Manager, is a Kubelet component that coordinates the overall resource management process through its [policy](/docs/tasks/administer-cluster/topology-manager/).
+
+The configuration of individual managers is elaborated in dedicated documents:
+
+- [CPU Manager Policies](/docs/tasks/administer-cluster/cpu-management-policies/)
+- [Device Manager](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#device-plugin-integration-with-the-topology-manager)
+- [Memory Manager Policies](/docs/tasks/administer-cluster/memory-manager/)
diff --git a/content/en/docs/concepts/policy/pod-security-policy.md b/content/en/docs/concepts/policy/pod-security-policy.md
index 17f30906bfb7b..f0884c3dea3a3 100644
--- a/content/en/docs/concepts/policy/pod-security-policy.md
+++ b/content/en/docs/concepts/policy/pod-security-policy.md
@@ -9,7 +9,9 @@ weight: 30
 
 
 
-{{< feature-state state="beta" >}}
+{{< feature-state for_k8s_version="v1.21" state="deprecated" >}}
+
+PodSecurityPolicy is deprecated as of Kubernetes v1.21, and will be removed in v1.25.
 
 Pod Security Policies enable fine-grained authorization of pod creation and
 updates.
@@ -197,7 +199,7 @@ alias kubectl-user='kubectl --as=system:serviceaccount:psp-example:fake-user -n
 ### Create a policy and a pod
 
 Define the example PodSecurityPolicy object in a file. This is a policy that
-simply prevents the creation of privileged pods.
+prevents the creation of privileged pods.
 The name of a PodSecurityPolicy object must be a valid
 [DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names).
diff --git a/content/en/docs/concepts/policy/resource-quotas.md b/content/en/docs/concepts/policy/resource-quotas.md
index 0edb1be338047..1d0e9d4ecd001 100644
--- a/content/en/docs/concepts/policy/resource-quotas.md
+++ b/content/en/docs/concepts/policy/resource-quotas.md
@@ -58,7 +58,7 @@ Neither contention nor changes to quota will affect already created resources.
 
 ## Enabling Resource Quota
 
 Resource Quota support is enabled by default for many Kubernetes distributions. It is
-enabled when the API server `--enable-admission-plugins=` flag has `ResourceQuota` as
+enabled when the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}} `--enable-admission-plugins=` flag has `ResourceQuota` as
 one of its arguments.
 
 A resource quota is enforced in a particular namespace when there is a
@@ -124,6 +124,10 @@ In release 1.8, quota support for local ephemeral storage is added as an alpha f
 | `limits.ephemeral-storage` | Across all pods in the namespace, the sum of local ephemeral storage limits cannot exceed this value. |
 | `ephemeral-storage` | Same as `requests.ephemeral-storage`. |
 
+{{< note >}}
+When using a CRI container runtime, container logs will count against the ephemeral storage quota. This can result in the unexpected eviction of pods that have exhausted their storage quotas. Refer to [Logging Architecture](/docs/concepts/cluster-administration/logging/) for details.
+{{< /note >}}
+
 ## Object Count Quota
 
 You can set quota for the total number of certain resources of all standard,
@@ -189,6 +193,7 @@ Resources specified on the quota outside of the allowed set results in a validat
 | `BestEffort` | Match pods that have best effort quality of service. |
 | `NotBestEffort` | Match pods that do not have best effort quality of service. |
 | `PriorityClass` | Match pods that references the specified [priority class](/docs/concepts/configuration/pod-priority-preemption). |
+| `CrossNamespacePodAffinity` | Match pods that have cross-namespace pod [(anti)affinity terms](/docs/concepts/scheduling-eviction/assign-pod-node). |
 
 The `BestEffort` scope restricts a quota to tracking the following resource:
 
@@ -429,6 +434,63 @@ memory      0     20Gi
 pods        0     10
 ```
 
+### Cross-namespace Pod Affinity Quota
+
+{{< feature-state for_k8s_version="v1.21" state="alpha" >}}
+
+Operators can use the `CrossNamespacePodAffinity` quota scope to limit which namespaces are allowed to
+have pods with affinity terms that cross namespaces. Specifically, it controls which pods are allowed
+to set `namespaces` or `namespaceSelector` fields in pod affinity terms.
+
+Preventing users from using cross-namespace affinity terms might be desired since a pod
+with anti-affinity constraints can block pods from all other namespaces
+from getting scheduled in a failure domain.
+
+Using this scope, operators can prevent certain namespaces (`foo-ns` in the example below)
+from having pods that use cross-namespace pod affinity by creating a resource quota object in
+that namespace with `CrossNamespaceAffinity` scope and a hard limit of 0:
+
+```yaml
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: disable-cross-namespace-affinity
+  namespace: foo-ns
+spec:
+  hard:
+    pods: "0"
+  scopeSelector:
+    matchExpressions:
+    - scopeName: CrossNamespaceAffinity
+```
+
+If operators want to disallow using `namespaces` and `namespaceSelector` by default, and
+only allow it for specific namespaces, they could configure `CrossNamespaceAffinity`
+as a limited resource by setting the kube-apiserver flag `--admission-control-config-file`
+to the path of the following configuration file:
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1
+kind: AdmissionConfiguration
+plugins:
+- name: "ResourceQuota"
+  configuration:
+    apiVersion: apiserver.config.k8s.io/v1
+    kind: ResourceQuotaConfiguration
+    limitedResources:
+    - resource: pods
+      matchScopes:
+      - scopeName: CrossNamespaceAffinity
+```
+
+With the above configuration, pods can use `namespaces` and `namespaceSelector` in pod affinity only
+if the namespace where they are created has a resource quota object with
+`CrossNamespaceAffinity` scope and a hard limit greater than or equal to the number of pods using those fields.
+
+This feature is alpha and disabled by default. You can enable it by setting the
+[feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
+`PodAffinityNamespaceSelector` in both kube-apiserver and kube-scheduler.
+
 ## Requests compared to Limits {#requests-vs-limits}
 
 When allocating compute resources, each container may specify a request and a limit value for either CPU or memory.
@@ -610,17 +672,28 @@ plugins:
     values: ["cluster-services"]
 ```
 
-Now, "cluster-services" pods will be allowed in only those namespaces where a quota object with a matching `scopeSelector` is present.
-For example:
+Then, create a resource quota object in the `kube-system` namespace:
 
-```yaml
-  scopeSelector:
-    matchExpressions:
-      - scopeName: PriorityClass
-        operator: In
-        values: ["cluster-services"]
+{{< codenew file="policy/priority-class-resourcequota.yaml" >}}
+
+```shell
+$ kubectl apply -f https://k8s.io/examples/policy/priority-class-resourcequota.yaml -n kube-system
 ```
 
+```
+resourcequota/pods-cluster-services created
+```
+
+In this case, a pod creation will be allowed if:
+
+1. the Pod's `priorityClassName` is not specified.
+1. the Pod's `priorityClassName` is specified to a value other than `cluster-services`.
+1. the Pod's `priorityClassName` is set to `cluster-services`, it is to be created
+   in the `kube-system` namespace, and it has passed the resource quota check.
+
+A Pod creation request is rejected if its `priorityClassName` is set to `cluster-services`
+and it is to be created in a namespace other than `kube-system`.
+
 ## {{% heading "whatsnext" %}}
 
 - See [ResourceQuota design doc](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_resource_quota.md) for more information.
diff --git a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md
index abe4f4b9eb84b..3c779dda79965 100644
--- a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md
+++ b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md
@@ -5,24 +5,23 @@ reviewers:
 - bsalamat
 title: Assigning Pods to Nodes
 content_type: concept
-weight: 50
+weight: 20
 ---
 
 
-You can constrain a {{< glossary_tooltip text="Pod" term_id="pod" >}} to only be able to run on particular
-{{< glossary_tooltip text="Node(s)" term_id="node" >}}, or to prefer to run on particular nodes.
-There are several ways to do this, and the recommended approaches all use
-[label selectors](/docs/concepts/overview/working-with-objects/labels/) to make the selection.
+You can constrain a {{< glossary_tooltip text="Pod" term_id="pod" >}} so that it can only run on a particular set of
+{{< glossary_tooltip text="Node(s)" term_id="node" >}}.
+There are several ways to do this and the recommended approaches all use
+[label selectors](/docs/concepts/overview/working-with-objects/labels/) to facilitate the selection.
 Generally such constraints are unnecessary, as the scheduler will automatically do a reasonable placement
-(e.g. spread your pods across nodes, not place the pod on a node with insufficient free resources, etc.)
-but there are some circumstances where you may want more control on a node where a pod lands, for example to ensure
+(e.g. spread your pods across nodes so as not to place the pod on a node with insufficient free resources, etc.)
+but there are some circumstances where you may want to control which node the pod deploys to - for example, to ensure
 that a pod ends up on a machine with an SSD attached to it, or to co-locate pods from two
 different services that communicate a lot into the same availability zone.
 
-
 ## nodeSelector
 
@@ -73,7 +72,7 @@ verify that it worked by running `kubectl get pods -o wide` and looking at the
 ## Interlude: built-in node labels {#built-in-node-labels}
 
 In addition to labels you [attach](#step-one-attach-label-to-the-node), nodes come pre-populated
-with a standard set of labels. See [Well-Known Labels, Annotations and Taints](/docs/reference/kubernetes-api/labels-annotations-taints/) for a list of these.
+with a standard set of labels.
See [Well-Known Labels, Annotations and Taints](/docs/reference/labels-annotations-taints/) for a list of these. {{< note >}} The value of these labels is cloud provider specific and is not guaranteed to be reliable. @@ -120,12 +119,12 @@ pod is eligible to be scheduled on, based on labels on the node. There are currently two types of node affinity, called `requiredDuringSchedulingIgnoredDuringExecution` and `preferredDuringSchedulingIgnoredDuringExecution`. You can think of them as "hard" and "soft" respectively, -in the sense that the former specifies rules that *must* be met for a pod to be scheduled onto a node (just like +in the sense that the former specifies rules that *must* be met for a pod to be scheduled onto a node (similar to `nodeSelector` but using a more expressive syntax), while the latter specifies *preferences* that the scheduler will try to enforce but will not guarantee. The "IgnoredDuringExecution" part of the names means that, similar to how `nodeSelector` works, if labels on a node change at runtime such that the affinity rules on a pod are no longer -met, the pod will still continue to run on the node. In the future we plan to offer -`requiredDuringSchedulingRequiredDuringExecution` which will be just like `requiredDuringSchedulingIgnoredDuringExecution` +met, the pod continues to run on the node. In the future we plan to offer +`requiredDuringSchedulingRequiredDuringExecution` which will be identical to `requiredDuringSchedulingIgnoredDuringExecution` except that it will evict pods from nodes that cease to satisfy the pods' node affinity requirements. Thus an example of `requiredDuringSchedulingIgnoredDuringExecution` would be "only run the pod on nodes with Intel CPUs" @@ -261,7 +260,7 @@ for performance and security reasons, there are some constraints on topologyKey: and `preferredDuringSchedulingIgnoredDuringExecution`. 2. For pod anti-affinity, empty `topologyKey` is also not allowed in both `requiredDuringSchedulingIgnoredDuringExecution` and `preferredDuringSchedulingIgnoredDuringExecution`. -3. For `requiredDuringSchedulingIgnoredDuringExecution` pod anti-affinity, the admission controller `LimitPodHardAntiAffinityTopology` was introduced to limit `topologyKey` to `kubernetes.io/hostname`. If you want to make it available for custom topologies, you may modify the admission controller, or simply disable it. +3. For `requiredDuringSchedulingIgnoredDuringExecution` pod anti-affinity, the admission controller `LimitPodHardAntiAffinityTopology` was introduced to limit `topologyKey` to `kubernetes.io/hostname`. If you want to make it available for custom topologies, you may modify the admission controller, or disable it. 4. Except for the above cases, the `topologyKey` can be any legal label-key. In addition to `labelSelector` and `topologyKey`, you can optionally specify a list `namespaces` @@ -271,6 +270,18 @@ If omitted or empty, it defaults to the namespace of the pod where the affinity/ All `matchExpressions` associated with `requiredDuringSchedulingIgnoredDuringExecution` affinity and anti-affinity must be satisfied for the pod to be scheduled onto a node. +#### Namespace selector +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +Users can also select matching namespaces using `namespaceSelector`, which is a label query over the set of namespaces. +The affinity term is applied to the union of the namespaces selected by `namespaceSelector` and the ones listed in the `namespaces` field. 
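+For example, here is a sketch of a Pod whose affinity term matches namespaces
+by label; the `team: fraud-detection` namespace label and all names are placeholders:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: with-namespace-selector
+spec:
+  affinity:
+    podAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+      - labelSelector:
+          matchExpressions:
+          - key: app
+            operator: In
+            values:
+            - web-store
+        # matches namespaces whose labels satisfy this selector,
+        # in addition to any listed in `namespaces`
+        namespaceSelector:
+          matchLabels:
+            team: fraud-detection
+        topologyKey: topology.kubernetes.io/zone
+  containers:
+  - name: pause
+    image: k8s.gcr.io/pause:3.2
+```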
+Note that an empty `namespaceSelector` ({}) matches all namespaces, while a null or empty `namespaces` list and +null `namespaceSelector` means "this pod's namespace". + +This feature is alpha and disabled by default. You can enable it by setting the +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +`PodAffinityNamespaceSelector` in both kube-apiserver and kube-scheduler. + #### More Practical Use-cases Interpod Affinity and AntiAffinity can be even more useful when they are used with higher diff --git a/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md b/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md index a3e548d237af5..0944ecc768c5a 100644 --- a/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md +++ b/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md @@ -87,6 +87,7 @@ of the scheduler: * Read about [scheduler performance tuning](/docs/concepts/scheduling-eviction/scheduler-perf-tuning/) * Read about [Pod topology spread constraints](/docs/concepts/workloads/pods/pod-topology-spread-constraints/) * Read the [reference documentation](/docs/reference/command-line-tools-reference/kube-scheduler/) for kube-scheduler +* Read the [kube-scheduler config (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) reference * Learn about [configuring multiple schedulers](/docs/tasks/extend-kubernetes/configure-multiple-schedulers/) * Learn about [topology management policies](/docs/tasks/administer-cluster/topology-manager/) * Learn about [Pod Overhead](/docs/concepts/scheduling-eviction/pod-overhead/) @@ -94,3 +95,4 @@ of the scheduler: * [Volume Topology Support](/docs/concepts/storage/storage-classes/#volume-binding-mode) * [Storage Capacity Tracking](/docs/concepts/storage/storage-capacity/) * [Node-specific Volume Limits](/docs/concepts/storage/storage-limits/) + diff --git a/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md b/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md index a327f1de247db..94bfaa1280625 100644 --- a/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md +++ b/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md @@ -5,7 +5,7 @@ reviewers: - ahg-g title: Resource Bin Packing for Extended Resources content_type: concept -weight: 50 +weight: 30 --- diff --git a/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md b/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md index 932e076dfca76..24283f2efaf9e 100644 --- a/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md +++ b/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md @@ -24,8 +24,6 @@ in a process called _Binding_. This page explains performance tuning optimizations that are relevant for large Kubernetes clusters. - - In large clusters, you can tune the scheduler's behaviour balancing @@ -44,8 +42,10 @@ should use its compiled-in default. If you set `percentageOfNodesToScore` above 100, kube-scheduler acts as if you had set a value of 100. -To change the value, edit the kube-scheduler configuration file (this is likely -to be `/etc/kubernetes/config/kube-scheduler.yaml`), then restart the scheduler. +To change the value, edit the +[kube-scheduler configuration file](/docs/reference/config-api/kube-scheduler-config.v1beta1/) +and then restart the scheduler. +In many cases, the configuration file can be found at `/etc/kubernetes/config/kube-scheduler.yaml`. 
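+
+As an illustration, a minimal sketch of such a file, assuming the v1beta1
+configuration API and a 50% sample size:
+
+```yaml
+apiVersion: kubescheduler.config.k8s.io/v1beta1
+kind: KubeSchedulerConfiguration
+# score only half of the feasible nodes found for each pod
+percentageOfNodesToScore: 50
+```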
After you have made this change, you can run @@ -99,7 +99,6 @@ algorithmSource: percentageOfNodesToScore: 50 ``` - ## Tuning percentageOfNodesToScore `percentageOfNodesToScore` must be a value between 1 and 100 with the default @@ -107,7 +106,7 @@ value being calculated based on the cluster size. There is also a hardcoded minimum value of 50 nodes. {{< note >}}In clusters with less than 50 feasible nodes, the scheduler still -checks all the nodes, simply because there are not enough feasible nodes to stop +checks all the nodes because there are not enough feasible nodes to stop the scheduler's search early. In a small cluster, if you set a low value for `percentageOfNodesToScore`, your @@ -160,4 +159,7 @@ Node 1, Node 5, Node 2, Node 6, Node 3, Node 4 After going over all the Nodes, it goes back to Node 1. +## {{% heading "whatsnext" %}} + +* Check the [kube-scheduler configuration reference (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) diff --git a/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md b/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md index 61b6619d01366..06ed901c2a8bb 100644 --- a/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md +++ b/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md @@ -10,11 +10,9 @@ weight: 70 {{< feature-state for_k8s_version="v1.15" state="alpha" >}} -The scheduling framework is a pluggable architecture for Kubernetes Scheduler -that makes scheduler customizations easy. It adds a new set of "plugin" APIs to -the existing scheduler. Plugins are compiled into the scheduler. The APIs -allow most scheduling features to be implemented as plugins, while keeping the -scheduling "core" simple and maintainable. Refer to the [design proposal of the +The scheduling framework is a pluggable architecture for the Kubernetes scheduler. +It adds a new set of "plugin" APIs to the existing scheduler. Plugins are compiled into the scheduler. The APIs allow most scheduling features to be implemented as plugins, while keeping the +scheduling "core" lightweight and maintainable. Refer to the [design proposal of the scheduling framework][kep] for more technical information on the design of the framework. @@ -185,7 +183,7 @@ the three things: {{< note >}} While any plugin can access the list of "waiting" Pods and approve them -(see [`FrameworkHandle`](https://github.com/kubernetes/enhancements/blob/master/keps/sig-scheduling/20180409-scheduling-framework.md#frameworkhandle)), we expect only the permit +(see [`FrameworkHandle`](https://git.k8s.io/enhancements/keps/sig-scheduling/624-scheduling-framework#frameworkhandle)), we expect only the permit plugins to approve binding of reserved Pods that are in "waiting" state. Once a Pod is approved, it is sent to the [PreBind](#pre-bind) phase. {{< /note >}} diff --git a/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md b/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md index 079024c9d66a8..946e858a02388 100644 --- a/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md +++ b/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md @@ -210,9 +210,9 @@ are true. The following taints are built in: the NodeCondition `Ready` being "`False`". * `node.kubernetes.io/unreachable`: Node is unreachable from the node controller. This corresponds to the NodeCondition `Ready` being "`Unknown`". - * `node.kubernetes.io/out-of-disk`: Node becomes out of disk. 
* `node.kubernetes.io/memory-pressure`: Node has memory pressure. * `node.kubernetes.io/disk-pressure`: Node has disk pressure. + * `node.kubernetes.io/pid-pressure`: Node has PID pressure. * `node.kubernetes.io/network-unavailable`: Node's network is unavailable. * `node.kubernetes.io/unschedulable`: Node is unschedulable. * `node.cloudprovider.kubernetes.io/uninitialized`: When the kubelet is started @@ -275,7 +275,7 @@ tolerations to all daemons, to prevent DaemonSets from breaking. * `node.kubernetes.io/memory-pressure` * `node.kubernetes.io/disk-pressure` - * `node.kubernetes.io/out-of-disk` (*only for critical pods*) + * `node.kubernetes.io/pid-pressure` (1.14 or later) * `node.kubernetes.io/unschedulable` (1.10 or later) * `node.kubernetes.io/network-unavailable` (*host network only*) diff --git a/content/en/docs/concepts/security/controlling-access.md b/content/en/docs/concepts/security/controlling-access.md index 62dc273cf7020..9d6c2b9617ef7 100644 --- a/content/en/docs/concepts/security/controlling-access.md +++ b/content/en/docs/concepts/security/controlling-access.md @@ -28,7 +28,7 @@ a private certificate authority (CA), or based on a public key infrastructure li to a generally recognized CA. If your cluster uses a private certificate authority, you need a copy of that CA -certifcate configured into your `~/.kube/config` on the client, so that you can +certificate configured into your `~/.kube/config` on the client, so that you can trust the connection and be confident it was not intercepted. Your client can present a TLS client certificate at this stage. @@ -43,7 +43,7 @@ Authenticators are described in more detail in [Authentication](/docs/reference/access-authn-authz/authentication/). The input to the authentication step is the entire HTTP request; however, it typically -just examines the headers and/or client certificate. +examines the headers and/or client certificate. Authentication modules include client certificates, password, and plain tokens, bootstrap tokens, and JSON Web Tokens (used for service accounts). @@ -135,7 +135,7 @@ for the corresponding API object, and then written to the object store (shown as The previous discussion applies to requests sent to the secure port of the API server (the typical case). The API server can actually serve on 2 ports: -By default the Kubernetes API server serves HTTP on 2 ports: +By default, the Kubernetes API server serves HTTP on 2 ports: 1. `localhost` port: diff --git a/content/en/docs/concepts/security/overview.md b/content/en/docs/concepts/security/overview.md index fe9129c109bd8..b23a07c79ab2d 100644 --- a/content/en/docs/concepts/security/overview.md +++ b/content/en/docs/concepts/security/overview.md @@ -120,6 +120,7 @@ Area of Concern for Containers | Recommendation | Container Vulnerability Scanning and OS Dependency Security | As part of an image build step, you should scan your containers for known vulnerabilities. Image Signing and Enforcement | Sign container images to maintain a system of trust for the content of your containers. Disallow privileged users | When constructing containers, consult your documentation for how to create users inside of the containers that have the least level of operating system privilege necessary in order to carry out the goal of the container. 
+Use container runtime with stronger isolation | Select [container runtime classes](/docs/concepts/containers/runtime-class/) that provide stronger isolation
 
 ## Code
 
@@ -152,3 +153,4 @@ Learn about related Kubernetes security topics:
 * [Data encryption in transit](/docs/tasks/tls/managing-tls-in-a-cluster/) for the control plane
 * [Data encryption at rest](/docs/tasks/administer-cluster/encrypt-data/)
 * [Secrets in Kubernetes](/docs/concepts/configuration/secret/)
+* [Runtime class](/docs/concepts/containers/runtime-class)
diff --git a/content/en/docs/concepts/security/pod-security-standards.md b/content/en/docs/concepts/security/pod-security-standards.md
index 18c1b7e86269b..a3c9ee138e8fd 100644
--- a/content/en/docs/concepts/security/pod-security-standards.md
+++ b/content/en/docs/concepts/security/pod-security-standards.md
@@ -32,7 +32,7 @@ should range from highly restricted to highly flexible:
 
 - **_Privileged_** - Unrestricted policy, providing the widest possible level of permissions. This policy allows for known privilege escalations.
-- **_Baseline/Default_** - Minimally restrictive policy while preventing known privilege
+- **_Baseline_** - Minimally restrictive policy while preventing known privilege
   escalations. Allows the default (minimally specified) Pod configuration.
 - **_Restricted_** - Heavily restricted policy, following current Pod hardening best practices.
 
@@ -48,9 +48,9 @@ mechanisms (such as gatekeeper), the privileged profile may be an absence of app
 rather than an instantiated policy. In contrast, for a deny-by-default mechanism (such as Pod
 Security Policy) the privileged policy should enable all controls (disable all restrictions).
 
-### Baseline/Default
+### Baseline
 
-The Baseline/Default policy is aimed at ease of adoption for common containerized workloads while
+The Baseline policy is aimed at ease of adoption for common containerized workloads while
 preventing known privilege escalations. This policy is targeted at application operators and
 developers of non-critical applications. The following listed controls should be
 enforced/disallowed:
@@ -115,7 +115,9 @@ enforced/disallowed:
 		AppArmor (optional)
-		On supported hosts, the 'runtime/default' AppArmor profile is applied by default. The default policy should prevent overriding or disabling the policy, or restrict overrides to an allowed set of profiles.
+ On supported hosts, the 'runtime/default' AppArmor profile is applied by default. + The baseline policy should prevent overriding or disabling the default AppArmor + profile, or restrict overrides to an allowed set of profiles.

Restricted Fields:
metadata.annotations['container.apparmor.security.beta.kubernetes.io/*']

Allowed Values: 'runtime/default', undefined
@@ -175,7 +177,7 @@ well as lower-trust users.The following listed controls should be enforced/disal Policy - Everything from the default profile. + Everything from the baseline profile. Volume Types @@ -275,7 +277,7 @@ of individual policies are not defined here. ## FAQ -### Why isn't there a profile between privileged and default? +### Why isn't there a profile between privileged and baseline? The three profiles defined here have a clear linear progression from most secure (restricted) to least secure (privileged), and cover a broad set of workloads. Privileges required above the baseline diff --git a/content/en/docs/concepts/services-networking/connect-applications-service.md b/content/en/docs/concepts/services-networking/connect-applications-service.md index 402c3c57ca1cc..14bc98101fea0 100644 --- a/content/en/docs/concepts/services-networking/connect-applications-service.md +++ b/content/en/docs/concepts/services-networking/connect-applications-service.md @@ -387,7 +387,7 @@ $ curl https://: -k

Welcome to nginx!

 ```
 
-Let's now recreate the Service to use a cloud load balancer, just change the `Type` of `my-nginx` Service from `NodePort` to `LoadBalancer`:
+Let's now recreate the Service to use a cloud load balancer. Change the `Type` of `my-nginx` Service from `NodePort` to `LoadBalancer`:
 
 ```shell
 kubectl edit svc my-nginx
diff --git a/content/en/docs/concepts/services-networking/dns-pod-service.md b/content/en/docs/concepts/services-networking/dns-pod-service.md
index 93474f24fa021..2888064c2e3e9 100644
--- a/content/en/docs/concepts/services-networking/dns-pod-service.md
+++ b/content/en/docs/concepts/services-networking/dns-pod-service.md
@@ -7,8 +7,8 @@ content_type: concept
 weight: 20
 ---
 
-This page provides an overview of DNS support by Kubernetes.
-
+Kubernetes creates DNS records for services and pods. You can contact
+services with consistent DNS names instead of IP addresses.
 
 
 
@@ -18,19 +18,47 @@ Kubernetes DNS schedules a DNS Pod and Service on the cluster, and configures
 the kubelets to tell individual containers to use the DNS Service's IP to
 resolve DNS names.
 
-### What things get DNS names?
-
 Every Service defined in the cluster (including the DNS server itself) is
-assigned a DNS name. By default, a client Pod's DNS search list will
-include the Pod's own namespace and the cluster's default domain. This is best
-illustrated by example:
+assigned a DNS name. By default, a client Pod's DNS search list includes the
+Pod's own namespace and the cluster's default domain.
+
+### Namespaces of Services
+
+A DNS query may return different results based on the namespace of the pod making
+it. DNS queries that don't specify a namespace are limited to the pod's
+namespace. Access services in other namespaces by specifying the namespace in the DNS query.
+
+For example, consider a pod in a `test` namespace. A `data` service is in
+the `prod` namespace.
+
+A query for `data` returns no results, because it uses the pod's `test` namespace.
+
+A query for `data.prod` returns the intended result, because it specifies the
+namespace.
+
+DNS queries may be expanded using the pod's `/etc/resolv.conf`. The kubelet
+sets this file for each pod. For example, a query for just `data` may be
+expanded to `data.test.svc.cluster.local`. The values of the `search` option
+are used to expand queries. To learn more about DNS queries, see
+[the `resolv.conf` manual page.](https://www.man7.org/linux/man-pages/man5/resolv.conf.5.html)
+
+```
+nameserver 10.32.0.10
+search <namespace>.svc.cluster.local svc.cluster.local cluster.local
+options ndots:5
+```
+
+In summary, a pod in the _test_ namespace can successfully resolve either
+`data.prod` or `data.prod.svc.cluster.local`.
+
+### DNS Records
+
+What objects get DNS records?
+
+1. Services
+2. Pods
 
-Assume a Service named `foo` in the Kubernetes namespace `bar`. A Pod running
-in namespace `bar` can look up this service by simply doing a DNS query for
-`foo`. A Pod running in namespace `quux` can look up this service by doing a
-DNS query for `foo.bar`.
 
-The following sections detail the supported record types and layout that is
+The following sections detail the supported DNS record types and layout that is
 supported. Any other layout or names or queries that happen to work are
 considered implementation details and are subject to change without warning.
For more up-to-date specification, see diff --git a/content/en/docs/concepts/services-networking/dual-stack.md b/content/en/docs/concepts/services-networking/dual-stack.md index 2981bffec806b..a85226beed05c 100644 --- a/content/en/docs/concepts/services-networking/dual-stack.md +++ b/content/en/docs/concepts/services-networking/dual-stack.md @@ -16,11 +16,11 @@ weight: 70 -{{< feature-state for_k8s_version="v1.16" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} - IPv4/IPv6 dual-stack enables the allocation of both IPv4 and IPv6 addresses to {{< glossary_tooltip text="Pods" term_id="pod" >}} and {{< glossary_tooltip text="Services" term_id="service" >}}. +IPv4/IPv6 dual-stack networking enables the allocation of both IPv4 and IPv6 addresses to {{< glossary_tooltip text="Pods" term_id="pod" >}} and {{< glossary_tooltip text="Services" term_id="service" >}}. -If you enable IPv4/IPv6 dual-stack networking for your Kubernetes cluster, the cluster will support the simultaneous assignment of both IPv4 and IPv6 addresses. +IPv4/IPv6 dual-stack networking is enabled by default for your Kubernetes cluster starting in 1.21, allowing the simultaneous assignment of both IPv4 and IPv6 addresses. @@ -28,7 +28,7 @@ If you enable IPv4/IPv6 dual-stack networking for your Kubernetes cluster, the c ## Supported Features -Enabling IPv4/IPv6 dual-stack on your Kubernetes cluster provides the following features: +IPv4/IPv6 dual-stack on your Kubernetes cluster provides the following features: * Dual-stack Pod networking (a single IPv4 and IPv6 address assignment per Pod) * IPv4 and IPv6 enabled Services @@ -45,47 +45,45 @@ The following prerequisites are needed in order to utilize IPv4/IPv6 dual-stack * Provider support for dual-stack networking (Cloud provider or otherwise must be able to provide Kubernetes nodes with routable IPv4/IPv6 network interfaces) * A network plugin that supports dual-stack (such as Kubenet or Calico) -## Enable IPv4/IPv6 dual-stack +## Configure IPv4/IPv6 dual-stack -To enable IPv4/IPv6 dual-stack, enable the `IPv6DualStack` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) for the relevant components of your cluster, and set dual-stack cluster network assignments: +To use IPv4/IPv6 dual-stack, ensure the `IPv6DualStack` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled for the relevant components of your cluster. (Starting in 1.21, IPv4/IPv6 dual-stack defaults to enabled.) + +To configure IPv4/IPv6 dual-stack, set dual-stack cluster network assignments: * kube-apiserver: - * `--feature-gates="IPv6DualStack=true"` * `--service-cluster-ip-range=,` * kube-controller-manager: - * `--feature-gates="IPv6DualStack=true"` * `--cluster-cidr=,` * `--service-cluster-ip-range=,` * `--node-cidr-mask-size-ipv4|--node-cidr-mask-size-ipv6` defaults to /24 for IPv4 and /64 for IPv6 - * kubelet: - * `--feature-gates="IPv6DualStack=true"` * kube-proxy: * `--cluster-cidr=,` - * `--feature-gates="IPv6DualStack=true"` {{< note >}} An example of an IPv4 CIDR: `10.244.0.0/16` (though you would supply your own address range) An example of an IPv6 CIDR: `fdXY:IJKL:MNOP:15::/64` (this shows the format but is not a valid address - see [RFC 4193](https://tools.ietf.org/html/rfc4193)) +Starting in 1.21, IPv4/IPv6 dual-stack defaults to enabled. +You can disable it when necessary by specifying `--feature-gates="IPv6DualStack=false"` +on the kube-apiserver, kube-controller-manager, kubelet, and kube-proxy command line. 
{{< /note >}} ## Services -If your cluster has dual-stack enabled, you can create {{< glossary_tooltip text="Services" term_id="service" >}} which can use IPv4, IPv6, or both. +You can create {{< glossary_tooltip text="Services" term_id="service" >}} which can use IPv4, IPv6, or both. -The address family of a Service defaults to the address family of the first service cluster IP range (configured via the `--service-cluster-ip-range` flag to the kube-controller-manager). +The address family of a Service defaults to the address family of the first service cluster IP range (configured via the `--service-cluster-ip-range` flag to the kube-apiserver). When you define a Service you can optionally configure it as dual stack. To specify the behavior you want, you set the `.spec.ipFamilyPolicy` field to one of the following values: * `SingleStack`: Single-stack service. The control plane allocates a cluster IP for the Service, using the first configured service cluster IP range. * `PreferDualStack`: - * Only used if the cluster has dual-stack enabled. Allocates IPv4 and IPv6 cluster IPs for the Service - * If the cluster does not have dual-stack enabled, this setting follows the same behavior as `SingleStack`. + * Allocates IPv4 and IPv6 cluster IPs for the Service. (If the cluster has `--feature-gates="IPv6DualStack=false"`, this setting follows the same behavior as `SingleStack`.) * `RequireDualStack`: Allocates Service `.spec.ClusterIPs` from both IPv4 and IPv6 address ranges. * Selects the `.spec.ClusterIP` from the list of `.spec.ClusterIPs` based on the address family of the first element in the `.spec.ipFamilies` array. - * The cluster must have dual-stack networking configured. If you would like to define which IP family to use for single stack or define the order of IP families for dual-stack, you can choose the address families by setting an optional field, `.spec.ipFamilies`, on the Service. @@ -126,7 +124,7 @@ These examples demonstrate the behavior of various dual-stack Service configurat #### Dual-stack defaults on existing Services -These examples demonstrate the default behavior when dual-stack is newly enabled on a cluster where Services already exist. +These examples demonstrate the default behavior when dual-stack is newly enabled on a cluster where Services already exist. (Upgrading an existing cluster to 1.21 will enable dual-stack unless `--feature-gates="IPv6DualStack=false"` is set.) 1. When dual-stack is enabled on a cluster, existing Services (whether `IPv4` or `IPv6`) are configured by the control plane to set `.spec.ipFamilyPolicy` to `SingleStack` and set `.spec.ipFamilies` to the address family of the existing Service. The existing Service cluster IP will be stored in `.spec.ClusterIPs`. @@ -163,7 +161,7 @@ status: loadBalancer: {} ``` -1. When dual-stack is enabled on a cluster, existing [headless Services](/docs/concepts/services-networking/service/#headless-services) with selectors are configured by the control plane to set `.spec.ipFamilyPolicy` to `SingleStack` and set `.spec.ipFamilies` to the address family of the first service cluster IP range (configured via the `--service-cluster-ip-range` flag to the kube-controller-manager) even though `.spec.ClusterIP` is set to `None`. +1. 
When dual-stack is enabled on a cluster, existing [headless Services](/docs/concepts/services-networking/service/#headless-services) with selectors are configured by the control plane to set `.spec.ipFamilyPolicy` to `SingleStack` and set `.spec.ipFamilies` to the address family of the first service cluster IP range (configured via the `--service-cluster-ip-range` flag to the kube-apiserver) even though `.spec.ClusterIP` is set to `None`. {{< codenew file="service/networking/dual-stack-default-svc.yaml" >}} @@ -242,3 +240,5 @@ Ensure your {{< glossary_tooltip text="CNI" term_id="cni" >}} provider supports * [Validate IPv4/IPv6 dual-stack](/docs/tasks/network/validate-dual-stack) networking +* [Enable dual-stack networking using kubeadm](/docs/setup/production-environment/tools/kubeadm/dual-stack-support/) diff --git a/content/en/docs/concepts/services-networking/endpoint-slices.md b/content/en/docs/concepts/services-networking/endpoint-slices.md index 6abf1f6ed12e2..fdcbd0ed50f87 100644 --- a/content/en/docs/concepts/services-networking/endpoint-slices.md +++ b/content/en/docs/concepts/services-networking/endpoint-slices.md @@ -3,13 +3,13 @@ reviewers: - freehan title: EndpointSlices content_type: concept -weight: 35 +weight: 45 --- -{{< feature-state for_k8s_version="v1.17" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} _EndpointSlices_ provide a simple way to track network endpoints within a Kubernetes cluster. They offer a more scalable and extensible alternative to @@ -52,7 +52,7 @@ As an example, here's a sample EndpointSlice resource for the `example` Kubernetes Service. ```yaml -apiVersion: discovery.k8s.io/v1beta1 +apiVersion: discovery.k8s.io/v1 kind: EndpointSlice metadata: name: example-abc @@ -69,9 +69,8 @@ endpoints: conditions: ready: true hostname: pod-1 - topology: - kubernetes.io/hostname: node-1 - topology.kubernetes.io/zone: us-west2-a + nodeName: node-1 + zone: us-west2-a ``` By default, the control plane creates and manages EndpointSlices to have no @@ -135,30 +134,26 @@ For pods, this is any pod that has a deletion timestamp set. ### Topology information {#topology} -{{< feature-state for_k8s_version="v1.20" state="deprecated" >}} +Each endpoint within an EndpointSlice can contain relevant topology information. +The topology information includes the location of the endpoint and information +about the corresponding Node and zone. These are available in the following +per endpoint fields on EndpointSlices: + +* `nodeName` - The name of the Node this endpoint is on. +* `zone` - The zone this endpoint is in. {{< note >}} -The topology field in EndpointSlices has been deprecated and will be removed in -a future release. A new `nodeName` field will be used instead of setting -`kubernetes.io/hostname` in topology. It was determined that other topology -fields covering zone and region would be better represented as EndpointSlice -labels that would apply to all endpoints within the EndpointSlice. +In the v1 API, the per endpoint `topology` was effectively removed in favor of +the dedicated fields `nodeName` and `zone`. + +Setting arbitrary topology fields on the `endpoint` field of an `EndpointSlice` +resource has been deprecated and is not supported in the v1 API. Instead, +the v1 API supports setting individual `nodeName` and `zone` fields. These +fields are automatically translated between API versions.
For example, the +value of the `"topology.kubernetes.io/zone"` key in the `topology` field in +the v1beta1 API is accessible as the `zone` field in the v1 API. {{< /note >}} -Each endpoint within an EndpointSlice can contain relevant topology information. -This is used to indicate where an endpoint is, containing information about the -corresponding Node, zone, and region. When the values are available, the -control plane sets the following Topology labels for EndpointSlices: - -* `kubernetes.io/hostname` - The name of the Node this endpoint is on. -* `topology.kubernetes.io/zone` - The zone this endpoint is in. -* `topology.kubernetes.io/region` - The region this endpoint is in. - -The values of these labels are derived from resources associated with each -endpoint in a slice. The hostname label represents the value of the NodeName -field on the corresponding Pod. The zone and region labels represent the value -of the labels with the same names on the corresponding Node. - ### Management Most often, the control plane (specifically, the endpoint slice diff --git a/content/en/docs/concepts/services-networking/ingress-controllers.md b/content/en/docs/concepts/services-networking/ingress-controllers.md index 25f7f4363952d..d0405a060da06 100644 --- a/content/en/docs/concepts/services-networking/ingress-controllers.md +++ b/content/en/docs/concepts/services-networking/ingress-controllers.md @@ -31,14 +31,15 @@ Kubernetes as a project supports and maintains [AWS](https://github.com/kubernet * The [Citrix ingress controller](https://github.com/citrix/citrix-k8s-ingress-controller#readme) works with Citrix Application Delivery Controller. * [Contour](https://projectcontour.io/) is an [Envoy](https://www.envoyproxy.io/) based ingress controller. +* [EnRoute](https://getenroute.io/) is an [Envoy](https://www.envoyproxy.io) based API gateway that can run as an ingress controller. * F5 BIG-IP [Container Ingress Services for Kubernetes](https://clouddocs.f5.com/containers/latest/userguide/kubernetes/) lets you use an Ingress to configure F5 BIG-IP virtual servers. * [Gloo](https://gloo.solo.io) is an open-source ingress controller based on [Envoy](https://www.envoyproxy.io), which offers API gateway functionality. * [HAProxy Ingress](https://haproxy-ingress.github.io/) is an ingress controller for - [HAProxy](http://www.haproxy.org/#desc). + [HAProxy](https://www.haproxy.org/#desc). * The [HAProxy Ingress Controller for Kubernetes](https://github.com/haproxytech/kubernetes-ingress#readme) - is also an ingress controller for [HAProxy](http://www.haproxy.org/#desc). + is also an ingress controller for [HAProxy](https://www.haproxy.org/#desc). * [Istio Ingress](https://istio.io/latest/docs/tasks/traffic-management/ingress/kubernetes-ingress/) is an [Istio](https://istio.io/) based ingress controller. * The [Kong Ingress Controller for Kubernetes](https://github.com/Kong/kubernetes-ingress-controller#readme) @@ -48,8 +49,9 @@ Kubernetes as a project supports and maintains [AWS](https://github.com/kubernet * [Skipper](https://opensource.zalando.com/skipper/kubernetes/ingress-controller/) HTTP router and reverse proxy for service composition, including use cases like Kubernetes Ingress, designed as a library to build your custom proxy. * The [Traefik Kubernetes Ingress provider](https://doc.traefik.io/traefik/providers/kubernetes-ingress/) is an ingress controller for the [Traefik](https://traefik.io/traefik/) proxy. 
+* [Tyk Operator](https://github.com/TykTechnologies/tyk-operator) extends Ingress with Custom Resources to bring API Management capabilities to Ingress. Tyk Operator works with the Open Source Tyk Gateway & Tyk Cloud control plane. * [Voyager](https://appscode.com/products/voyager) is an ingress controller for - [HAProxy](http://www.haproxy.org/#desc). + [HAProxy](https://www.haproxy.org/#desc). ## Using multiple Ingress controllers diff --git a/content/en/docs/concepts/services-networking/ingress.md b/content/en/docs/concepts/services-networking/ingress.md index 7a189a401b00c..de4e665af15c9 100644 --- a/content/en/docs/concepts/services-networking/ingress.md +++ b/content/en/docs/concepts/services-networking/ingress.md @@ -220,7 +220,19 @@ of the controller that should implement the class. {{< codenew file="service/networking/external-lb.yaml" >}} IngressClass resources contain an optional parameters field. This can be used to -reference additional configuration for this class. +reference additional implementation-specific configuration for this class. + +#### Namespace-scoped parameters + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +The `parameters` field has a `scope` and `namespace` field that can be used to +reference a namespace-specific resource for configuration of an Ingress class. +The `scope` field defaults to `Cluster`, meaning the default is a cluster-scoped +resource. Setting `scope` to `Namespace` and setting the `namespace` field +will reference a parameters resource in a specific namespace: + +{{< codenew file="service/networking/namespaced-params.yaml" >}} ### Deprecated annotation @@ -260,7 +272,7 @@ There are existing Kubernetes concepts that allow you to expose a single Service {{< codenew file="service/networking/test-ingress.yaml" >}} If you create it using `kubectl apply -f` you should be able to view the state -of the Ingress you just added: +of the Ingress you added: ```bash kubectl get ingress test-ingress diff --git a/content/en/docs/concepts/services-networking/network-policies.md b/content/en/docs/concepts/services-networking/network-policies.md index 2000d838b481c..764fedbcc778f 100644 --- a/content/en/docs/concepts/services-networking/network-policies.md +++ b/content/en/docs/concepts/services-networking/network-policies.md @@ -221,18 +221,72 @@ When the feature gate is enabled, you can set the `protocol` field of a NetworkP You must be using a {{< glossary_tooltip text="CNI" term_id="cni" >}} plugin that supports SCTP protocol NetworkPolicies. {{< /note >}} +## Targeting a range of Ports + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +When writing a NetworkPolicy, you can target a range of ports instead of a single port. + +You can do this using the `endPort` field, as in the following example: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: multi-port-egress + namespace: default +spec: + podSelector: + matchLabels: + role: db + policyTypes: + - Egress + egress: + - to: + - ipBlock: + cidr: 10.0.0.0/24 + ports: + - protocol: TCP + port: 32000 + endPort: 32768 +``` + +The above rule allows any Pod with the label `role=db` in the namespace `default` to communicate with any IP within the range `10.0.0.0/24` over TCP, provided that the target port is between 32000 and 32768. + +The following restrictions apply when using this field: +* As an alpha feature, this is disabled by default.
To enable the `endPort` field at a cluster level, you (or your cluster administrator) need to enable the `NetworkPolicyEndPort` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) for the API server with `--feature-gates=NetworkPolicyEndPort=true,…`. +* The `endPort` field must be equal to or greater than the `port` field. +* `endPort` can only be defined if `port` is also defined. +* Both ports must be numeric. + +{{< note >}} +Your cluster must be using a {{< glossary_tooltip text="CNI" term_id="cni" >}} plugin that +supports the `endPort` field in NetworkPolicy specifications. +{{< /note >}} + +## Targeting a Namespace by its name + +{{< feature-state state="beta" for_k8s_version="1.21" >}} + +The Kubernetes control plane sets an immutable label `kubernetes.io/metadata.name` on all +namespaces, provided that the `NamespaceDefaultLabelName` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled. +The value of the label is the namespace name. + +While NetworkPolicy does not provide a field for targeting a namespace by name, you can use this +standardized label to target a specific namespace. + ## What you can't do with network policies (at least, not yet) -As of Kubernetes 1.20, the following functionality does not exist in the NetworkPolicy API, but you might be able to implement workarounds using Operating System components (such as SELinux, OpenVSwitch, IPTables, and so on) or Layer 7 technologies (Ingress controllers, Service Mesh implementations) or admission controllers. In case you are new to network security in Kubernetes, its worth noting that the following User Stories cannot (yet) be implemented using the NetworkPolicy API. Some (but not all) of these user stories are actively being discussed for future releases of the NetworkPolicy API. +As of Kubernetes {{< skew latestVersion >}}, the following functionality does not exist in the NetworkPolicy API, but you might be able to implement workarounds using Operating System components (such as SELinux, OpenVSwitch, IPTables, and so on) or Layer 7 technologies (Ingress controllers, Service Mesh implementations) or admission controllers. If you are new to network security in Kubernetes, it's worth noting that the following User Stories cannot (yet) be implemented using the NetworkPolicy API. - Forcing internal cluster traffic to go through a common gateway (this might be best served with a service mesh or other proxy). - Anything TLS related (use a service mesh or ingress controller for this). - Node specific policies (you can use CIDR notation for these, but you cannot target nodes by their Kubernetes identities specifically). -- Targeting of namespaces or services by name (you can, however, target pods or namespaces by their {{< glossary_tooltip text="labels" term_id="label" >}}, which is often a viable workaround). +- Targeting of services by name (you can, however, target pods or namespaces by their {{< glossary_tooltip text="labels" term_id="label" >}}, which is often a viable workaround). - Creation or management of "Policy requests" that are fulfilled by a third party. - Default policies which are applied to all namespaces or pods (there are some third party Kubernetes distributions and projects which can do this). - Advanced policy querying and reachability tooling. -- The ability to target ranges of Ports in a single policy declaration. - The ability to log network security events (for example connections that are blocked or accepted).
- The ability to explicitly deny policies (currently the model for NetworkPolicies is deny by default, with only the ability to add allow rules). - The ability to prevent loopback or incoming host traffic (Pods cannot currently block localhost access, nor do they have the ability to block access from their resident node). diff --git a/content/en/docs/concepts/services-networking/service-topology.md b/content/en/docs/concepts/services-networking/service-topology.md index d36b76f55f003..e906dff81d1c5 100644 --- a/content/en/docs/concepts/services-networking/service-topology.md +++ b/content/en/docs/concepts/services-networking/service-topology.md @@ -2,12 +2,7 @@ reviewers: - johnbelamaric - imroc -title: Service Topology -feature: - title: Service Topology - description: > - Routing of service traffic based upon cluster topology. - +title: Topology-aware traffic routing with topology keys content_type: concept weight: 10 --- @@ -15,7 +10,16 @@ weight: 10 -{{< feature-state for_k8s_version="v1.17" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="deprecated" >}} + +{{< note >}} + +This feature, specifically the alpha `topologyKeys` API, is deprecated since +Kubernetes v1.21. +[Topology Aware Hints](/docs/concepts/services-networking/topology-aware-hints/), +introduced in Kubernetes v1.21, provide similar functionality. + +{{< /note >}} _Service Topology_ enables a service to route traffic based upon the Node topology of the cluster. For example, a service can specify that traffic be @@ -23,33 +27,32 @@ preferentially routed to endpoints that are on the same Node as the client, or in the same availability zone. - -## Introduction +## Topology-aware traffic routing By default, traffic sent to a `ClusterIP` or `NodePort` Service may be routed to -any backend address for the Service. Since Kubernetes 1.7 it has been possible -to route "external" traffic to the Pods running on the Node that received the -traffic, but this is not supported for `ClusterIP` Services, and more complex -topologies — such as routing zonally — have not been possible. The -_Service Topology_ feature resolves this by allowing the Service creator to -define a policy for routing traffic based upon the Node labels for the -originating and destination Nodes. - -By using Node label matching between the source and destination, the operator -may designate groups of Nodes that are "closer" and "farther" from one another, -using whatever metric makes sense for that operator's requirements. For many -operators in public clouds, for example, there is a preference to keep service -traffic within the same zone, because interzonal traffic has a cost associated -with it, while intrazonal traffic does not. Other common needs include being able -to route traffic to a local Pod managed by a DaemonSet, or keeping traffic to -Nodes connected to the same top-of-rack switch for the lowest latency. - +any backend address for the Service. Kubernetes 1.7 made it possible to +route "external" traffic to the Pods running on the same Node that received the +traffic. For `ClusterIP` Services, the equivalent same-node preference for +routing wasn't possible; nor could you configure your cluster to favor routing +to endpoints within the same zone. +By setting `topologyKeys` on a Service, you're able to define a policy for routing +traffic based upon the Node labels for the originating and destination Nodes.
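As a sketch of what setting `topologyKeys` looks like (the Service name, selector, and ports here are illustrative; the keys themselves are the standard Node labels, with `"*"` as the catch-all):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service
spec:
  selector:
    app: my-app
  ports:
    - protocol: TCP
      port: 80
      targetPort: 9376
  topologyKeys:
    - "kubernetes.io/hostname"        # prefer an endpoint on the same Node
    - "topology.kubernetes.io/zone"   # otherwise, an endpoint in the same zone
    - "*"                             # otherwise, any endpoint in the cluster
```

Because the keys are evaluated in order, removing the final `"*"` entry would make traffic fail rather than fall back when no same-node or same-zone endpoint exists.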
+ +The label matching between the source and destination lets you, as a cluster +operator, designate sets of Nodes that are "closer" and "farther" from one another. +You can define labels to represent whatever metric makes sense for your own +requirements. +In public clouds, for example, you might prefer to keep network traffic within the +same zone, because interzonal traffic has a cost associated with it (and intrazonal +traffic typically does not). Other common needs include being able to route traffic +to a local Pod managed by a DaemonSet, or directing traffic to Nodes connected to the +same top-of-rack switch for the lowest latency. ## Using Service Topology -If your cluster has Service Topology enabled, you can control Service traffic +If your cluster has the `ServiceTopology` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) enabled, you can control Service traffic routing by specifying the `topologyKeys` field on the Service spec. This field is a preference-order list of Node labels which will be used to sort endpoints when accessing this Service. Traffic will be directed to a Node whose value for @@ -57,7 +60,7 @@ the first label matches the originating Node's value for that label. If there is no backend for the Service on a matching Node, then the second label will be considered, and so forth, until no labels remain. -If no match is found, the traffic will be rejected, just as if there were no +If no match is found, the traffic will be rejected, as if there were no backends for the Service at all. That is, endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no @@ -87,7 +90,7 @@ traffic as follows. * Service topology is not compatible with `externalTrafficPolicy=Local`, and therefore a Service cannot use both of these features. It is possible to use - both features in the same cluster on different Services, just not on the same + both features in the same cluster on different Services, but not on the same Service. * Valid topology keys are currently limited to `kubernetes.io/hostname`, @@ -200,4 +203,3 @@ spec: * Read about [enabling Service Topology](/docs/tasks/administer-cluster/enabling-service-topology) * Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) - diff --git a/content/en/docs/concepts/services-networking/service-traffic-policy.md b/content/en/docs/concepts/services-networking/service-traffic-policy.md new file mode 100644 index 0000000000000..fb55a3d833e8a --- /dev/null +++ b/content/en/docs/concepts/services-networking/service-traffic-policy.md @@ -0,0 +1,73 @@ +--- +reviewers: +- maplain +title: Service Internal Traffic Policy +content_type: concept +weight: 45 +--- + + + + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +_Service Internal Traffic Policy_ enables internal traffic restrictions to route +internal traffic only to endpoints within the node the traffic originated from. The +"internal" traffic here refers to traffic originating from Pods in the current +cluster. This can help to reduce costs and improve performance.
+ + + +## Using Service Internal Traffic Policy + +Once you have enabled the `ServiceInternalTrafficPolicy` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/), +you can enable an internal-only traffic policy for a +{{< glossary_tooltip text="Service" term_id="service" >}} by setting its +`.spec.internalTrafficPolicy` to `Local`. +This tells kube-proxy to only use node local endpoints for cluster internal traffic. + +{{< note >}} +For pods on nodes with no endpoints for a given Service, the Service +behaves as if it has zero endpoints (for Pods on this node) even if the service +does have endpoints on other nodes. +{{< /note >}} + +The following example shows what a Service looks like when you set +`.spec.internalTrafficPolicy` to `Local`: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 + internalTrafficPolicy: Local +``` + +## How it works + +The kube-proxy filters the endpoints it routes to based on the +`spec.internalTrafficPolicy` setting. When it's set to `Local`, only node local +endpoints are considered. When it's `Cluster` or missing, all endpoints are +considered. +When the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +`ServiceInternalTrafficPolicy` is enabled, `spec.internalTrafficPolicy` defaults to "Cluster". + +## Constraints + +* Service Internal Traffic Policy is not used when `externalTrafficPolicy` is set + to `Local` on a Service. It is possible to use both features in the same cluster + on different Services, just not on the same Service. + +## {{% heading "whatsnext" %}} + +* Read about [enabling Topology Aware Hints](/docs/tasks/administer-cluster/enabling-topology-aware-hints) +* Read about [Service External Traffic Policy](/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip) +* Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) diff --git a/content/en/docs/concepts/services-networking/service.md b/content/en/docs/concepts/services-networking/service.md index 368bf02fb1fef..2c9e6e89969f5 100644 --- a/content/en/docs/concepts/services-networking/service.md +++ b/content/en/docs/concepts/services-networking/service.md @@ -74,8 +74,8 @@ a new instance. The name of a Service object must be a valid [DNS label name](/docs/concepts/overview/working-with-objects/names#dns-label-names). -For example, suppose you have a set of Pods that each listen on TCP port 9376 -and carry a label `app=MyApp`: +For example, suppose you have a set of Pods where each listens on TCP port 9376 +and contains a label `app=MyApp`: ```yaml apiVersion: v1 @@ -151,9 +151,9 @@ spec: targetPort: 9376 ``` -Because this Service has no selector, the corresponding Endpoint object is not +Because this Service has no selector, the corresponding Endpoints object is not created automatically. You can manually map the Service to the network address and port -where it's running, by adding an Endpoint object manually: +where it's running, by adding an Endpoints object manually: ```yaml apiVersion: v1 @@ -187,9 +187,14 @@ An ExternalName Service is a special case of Service that does not have selectors and uses DNS names instead. For more information, see the [ExternalName](#externalname) section later in this document.
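For reference, an `ExternalName` Service is essentially a DNS alias. A minimal sketch (the Service name, namespace, and external hostname are placeholders):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-database
  namespace: prod
spec:
  type: ExternalName
  externalName: db.example.com
```

A lookup of `my-database.prod.svc.cluster.local` then returns a CNAME record for `db.example.com` instead of a cluster IP.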
+### Over Capacity Endpoints +If an Endpoints resource has more than 1000 endpoints then a Kubernetes v1.21 (or later) +cluster annotates that Endpoints with `endpoints.kubernetes.io/over-capacity: warning`. +This annotation indicates that the affected Endpoints object is over capacity. + ### EndpointSlices -{{< feature-state for_k8s_version="v1.17" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} EndpointSlices are an API resource that can provide a more scalable alternative to Endpoints. Although conceptually quite similar to Endpoints, EndpointSlices @@ -430,7 +435,7 @@ Services by their DNS name. For example, if you have a Service called `my-service` in a Kubernetes namespace `my-ns`, the control plane and the DNS Service acting together create a DNS record for `my-service.my-ns`. Pods in the `my-ns` namespace -should be able to find it by simply doing a name lookup for `my-service` +should be able to find the service by doing a name lookup for `my-service` (`my-service.my-ns` would also work). Pods in other namespaces must qualify the name as `my-service.my-ns`. These names @@ -463,7 +468,7 @@ selectors defined: For headless Services that define selectors, the endpoints controller creates `Endpoints` records in the API, and modifies the DNS configuration to return -records (addresses) that point directly to the `Pods` backing the `Service`. +A records (IP addresses) that point directly to the `Pods` backing the `Service`. ### Without selectors @@ -513,8 +518,13 @@ allocates a port from a range specified by `--service-node-port-range` flag (def Each node proxies that port (the same port number on every Node) into your Service. Your Service reports the allocated port in its `.spec.ports[*].nodePort` field. -If you want to specify particular IP(s) to proxy the port, you can set the `--nodeport-addresses` flag in kube-proxy to particular IP block(s); this is supported since Kubernetes v1.10. -This flag takes a comma-delimited list of IP blocks (e.g. 10.0.0.0/8, 192.0.2.0/25) to specify IP address ranges that kube-proxy should consider as local to this node. +If you want to specify particular IP(s) to proxy the port, you can set the +`--nodeport-addresses` flag for kube-proxy or the equivalent `nodePortAddresses` +field of the +[kube-proxy configuration file](/docs/reference/config-api/kube-proxy-config.v1alpha1/) +to particular IP block(s). + +This flag takes a comma-delimited list of IP blocks (e.g. `10.0.0.0/8`, `192.0.2.0/25`) to specify IP address ranges that kube-proxy should consider as local to this node. For example, if you start kube-proxy with the `--nodeport-addresses=127.0.0.0/8` flag, kube-proxy only selects the loopback interface for NodePort Services. The default for `--nodeport-addresses` is an empty list. This means that kube-proxy should consider all available network interfaces for NodePort. (That's also compatible with earlier Kubernetes releases). @@ -527,10 +537,12 @@ for NodePort use. Using a NodePort gives you the freedom to set up your own load balancing solution, to configure environments that are not fully supported by Kubernetes, or even -to just expose one or more nodes' IPs directly. +to expose one or more nodes' IPs directly. Note that this Service is visible as `<NodeIP>:spec.ports[*].nodePort` -and `.spec.clusterIP:spec.ports[*].port`. (If the `--nodeport-addresses` flag in kube-proxy is set, `<NodeIP>` would be filtered NodeIP(s).) +and `.spec.clusterIP:spec.ports[*].port`.
+If the `--nodeport-addresses` flag for kube-proxy or the equivalent field +in the kube-proxy configuration file is set, `<NodeIP>` would be filtered node IP(s). For example: @@ -628,6 +640,25 @@ is set to `false` on an existing Service with allocated node ports, those node p You must explicitly remove the `nodePorts` entry in every Service port to de-allocate those node ports. You must enable the `ServiceLBNodePortControl` feature gate to use this field. +#### Specifying class of load balancer implementation {#load-balancer-class} + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +Starting in v1.21, you can optionally specify the class of a load balancer implementation for +`LoadBalancer` type of Service by setting the field `spec.loadBalancerClass`. +By default, `spec.loadBalancerClass` is `nil` and a `LoadBalancer` type of Service uses +the cloud provider's default load balancer implementation. +If `spec.loadBalancerClass` is specified, it is assumed that a load balancer +implementation that matches the specified class is watching for Services. +Any default load balancer implementation (for example, the one provided by +the cloud provider) will ignore Services that have this field set. +`spec.loadBalancerClass` can be set on a Service of type `LoadBalancer` only. +Once set, it cannot be changed. +The value of `spec.loadBalancerClass` must be a label-style identifier, +with an optional prefix such as "`internal-vip`" or "`example.com/internal-vip`". +Unprefixed names are reserved for end-users. +You must enable the `ServiceLoadBalancerClass` feature gate to use this field. + #### Internal load balancer In a mixed environment it is sometimes necessary to route traffic from Services inside the same @@ -785,8 +816,7 @@ you can use the following annotations: ``` In the above example, if the Service contained three ports, `80`, `443`, and -`8443`, then `443` and `8443` would use the SSL certificate, but `80` would just -be proxied HTTP. +`8443`, then `443` and `8443` would use the SSL certificate, but `80` would be proxied HTTP. From Kubernetes v1.9 onwards you can use [predefined AWS SSL policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html) with HTTPS or SSL listeners for your Services. To see which policies are available for use, you can use the `aws` command line tool: @@ -906,11 +936,18 @@ There are other annotations to manage Classic Elastic Load Balancers that are de # value. Defaults to 5, must be between 2 and 60 service.beta.kubernetes.io/aws-load-balancer-security-groups: "sg-53fae93f" - # A list of existing security groups to be added to ELB created. Unlike the annotation - # service.beta.kubernetes.io/aws-load-balancer-extra-security-groups, this replaces all other security groups previously assigned to the ELB. + # A list of existing security groups to be configured on the ELB created. Unlike the annotation + # service.beta.kubernetes.io/aws-load-balancer-extra-security-groups, this replaces all other security groups previously assigned to the ELB and also overrides the creation + # of a uniquely generated security group for this ELB. + # The first security group ID on this list is used as a source to permit incoming traffic to target worker nodes (service traffic and health checks).
+ # If multiple ELBs are configured with the same security group ID, only a single permit line will be added to the worker node security groups; this means that if you delete any + # of those ELBs, the single permit line is removed, blocking access for all ELBs that shared the same security group ID. + # This can cause a cross-service outage if not used properly. service.beta.kubernetes.io/aws-load-balancer-extra-security-groups: "sg-53fae93f,sg-42efd82e" - # A list of additional security groups to be added to the ELB + # A list of additional security groups to be added to the created ELB; this leaves the uniquely generated security group in place and ensures that every ELB + # has a unique security group ID and a matching permit line to allow traffic to the target worker nodes (service traffic and health checks). + # Security groups defined here can be shared between services. service.beta.kubernetes.io/aws-load-balancer-target-node-labels: "ingress-gw,gw-name=public-api" # A comma separated list of key-value pairs which are used @@ -958,7 +995,7 @@ groups are modified with the following IP rules: | Rule | Protocol | Port(s) | IpRange(s) | IpRange Description | |------|----------|---------|------------|---------------------| -| Health Check | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | VPC CIDR | kubernetes.io/rule/nlb/health=\ | +| Health Check | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | Subnet CIDR | kubernetes.io/rule/nlb/health=\ | | Client Traffic | TCP | NodePort(s) | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/client=\ | | MTU Discovery | ICMP | 3,4 | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/mtu=\ | @@ -1107,7 +1144,7 @@ but the current API requires it. ## Virtual IP implementation {#the-gory-details-of-virtual-ips} -The previous information should be sufficient for many people who just want to +The previous information should be sufficient for many people who want to use Services. However, there is a lot going on behind the scenes that may be worth understanding. @@ -1163,7 +1200,7 @@ rule kicks in, and redirects the packets to the proxy's own port. The "Service proxy" chooses a backend, and starts proxying traffic from the client to the backend. This means that Service owners can choose any port they want without risk of -collision. Clients can simply connect to an IP and port, without being aware +collision. Clients can connect to an IP and port, without being aware of which Pods they are actually accessing. #### iptables diff --git a/content/en/docs/concepts/services-networking/topology-aware-hints.md b/content/en/docs/concepts/services-networking/topology-aware-hints.md new file mode 100644 index 0000000000000..c2c15878ff72a --- /dev/null +++ b/content/en/docs/concepts/services-networking/topology-aware-hints.md @@ -0,0 +1,160 @@ +--- +reviewers: +- robscott +title: Topology Aware Hints +content_type: concept +weight: 45 +--- + + + + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +_Topology Aware Hints_ enable topology aware routing by including suggestions +for how clients should consume endpoints. This approach adds metadata to enable +consumers of EndpointSlice and/or Endpoints objects, so that traffic to +those network endpoints can be routed closer to where it originated.
+ +For example, you can route traffic within a locality to reduce +costs, or to improve network performance. + + + +## Motivation + +Kubernetes clusters are increasingly deployed in multi-zone environments. +_Topology Aware Hints_ provides a mechanism to help keep traffic within the zone +it originated from. This concept is commonly referred to as "Topology Aware +Routing". When calculating the endpoints for a {{< glossary_tooltip term_id="Service" >}}, +the EndpointSlice controller considers the topology (region and zone) of each endpoint +and populates the hints field to allocate it to a zone. +Cluster components such as the {{< glossary_tooltip term_id="kube-proxy" text="kube-proxy" >}} +can then consume those hints and use them to influence how traffic is routed +(favoring topologically closer endpoints). + +## Using Topology Aware Hints + +If you have [enabled](/docs/tasks/administer-cluster/enabling-topology-aware-hints) the +overall feature, you can activate Topology Aware Hints for a Service by setting the +`service.kubernetes.io/topology-aware-hints` annotation to `auto`. This tells +the EndpointSlice controller to set topology hints if it is deemed safe. +Importantly, this does not guarantee that hints will always be set. + +## How it works {#implementation} + +The functionality enabling this feature is split into two components: the +EndpointSlice controller and the kube-proxy. This section provides a high level overview +of how each component implements this feature. + +### EndpointSlice controller {#implementation-control-plane} + +The EndpointSlice controller is responsible for setting hints on EndpointSlices +when this feature is enabled. The controller allocates a proportional number of +endpoints to each zone. This proportion is based on the +[allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) +CPU cores for nodes running in that zone. For example, if one zone had 2 CPU +cores and another zone only had 1 CPU core, the controller would allocate twice +as many endpoints to the zone with 2 CPU cores. + +The following example shows what an EndpointSlice looks like when hints have +been populated: + +```yaml +apiVersion: discovery.k8s.io/v1 +kind: EndpointSlice +metadata: + name: example-hints + labels: + kubernetes.io/service-name: example-svc +addressType: IPv4 +ports: + - name: http + protocol: TCP + port: 80 +endpoints: + - addresses: + - "10.1.2.3" + conditions: + ready: true + hostname: pod-1 + zone: zone-a + hints: + forZones: + - name: "zone-a" +``` + +### kube-proxy {#implementation-kube-proxy} + +The kube-proxy component filters the endpoints it routes to based on the hints set by +the EndpointSlice controller. In most cases, this means that the kube-proxy is able +to route traffic to endpoints in the same zone. Sometimes the controller allocates endpoints +from a different zone to ensure more even distribution of endpoints between zones. +This would result in some traffic being routed to other zones. + +## Safeguards + +The Kubernetes control plane and the kube-proxy on each node apply some +safeguard rules before using Topology Aware Hints. If these don't check out, +the kube-proxy selects endpoints from anywhere in your cluster, regardless of the +zone. + +1. **Insufficient number of endpoints:** If there are fewer endpoints than zones + in a cluster, the controller will not assign any hints. + +2.
**Impossible to achieve balanced allocation:** In some cases, it will be + impossible to achieve a balanced allocation of endpoints among zones. For + example, if zone-a is twice as large as zone-b, but there are only 2 + endpoints, an endpoint allocated to zone-a may receive twice as much traffic + as zone-b. The controller does not assign hints if it can't get this "expected + overload" value below an acceptable threshold for each zone. Importantly this + is not based on real-time feedback. It is still possible for individual + endpoints to become overloaded. + +3. **One or more Nodes has insufficient information:** If any node does not have + a `topology.kubernetes.io/zone` label or is not reporting a value for + allocatable CPU, the control plane does not set any topology-aware endpoint + hints and so kube-proxy does not filter endpoints by zone. + +4. **One or more endpoints does not have a zone hint:** When this happens, + the kube-proxy assumes that a transition from or to Topology Aware Hints is + underway. Filtering endpoints for a Service in this state would be dangerous + so the kube-proxy falls back to using all endpoints. + +5. **A zone is not represented in hints:** If the kube-proxy is unable to find + at least one endpoint with a hint targeting the zone it is running in, it falls + back to using endpoints from all zones. This is most likely to happen as you add + a new zone into your existing cluster. + +## Constraints + +* Topology Aware Hints are not used when either `externalTrafficPolicy` or + `internalTrafficPolicy` is set to `Local` on a Service. It is possible to use + both features in the same cluster on different Services, just not on the same + Service. + +* This approach will not work well for Services that have a large proportion of + traffic originating from a subset of zones. Instead, this assumes that incoming + traffic will be roughly proportional to the capacity of the Nodes in each + zone. + +* The EndpointSlice controller ignores unready nodes as it calculates the + proportions of each zone. This could have unintended consequences if a large + portion of nodes are unready. + +* The EndpointSlice controller does not take into account {{< glossary_tooltip + text="tolerations" term_id="toleration" >}} when calculating the + proportions of each zone. If the Pods backing a Service are limited to a + subset of Nodes in the cluster, this will not be taken into account. + +* This may not work well with autoscaling. For example, if a lot of traffic is + originating from a single zone, only the endpoints allocated to that zone will + be handling that traffic. That could result in {{< glossary_tooltip + text="Horizontal Pod Autoscaler" term_id="horizontal-pod-autoscaler" >}} + either not picking up on this event, or newly added pods starting in a + different zone. + +## {{% heading "whatsnext" %}} + +* Read about [enabling Topology Aware Hints](/docs/tasks/administer-cluster/enabling-topology-aware-hints/) +* Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) diff --git a/content/en/docs/concepts/storage/dynamic-provisioning.md b/content/en/docs/concepts/storage/dynamic-provisioning.md index bedd431dc972b..63263fb370890 100644 --- a/content/en/docs/concepts/storage/dynamic-provisioning.md +++ b/content/en/docs/concepts/storage/dynamic-provisioning.md @@ -80,7 +80,7 @@ parameters: Users request dynamically provisioned storage by including a storage class in their `PersistentVolumeClaim`.
Before Kubernetes v1.6, this was done via the `volume.beta.kubernetes.io/storage-class` annotation. However, this annotation -is deprecated since v1.6. Users now can and should instead use the +is deprecated since v1.9. Users now can and should instead use the `storageClassName` field of the `PersistentVolumeClaim` object. The value of this field must match the name of a `StorageClass` configured by the administrator (see [below](#enabling-dynamic-provisioning)). diff --git a/content/en/docs/concepts/storage/ephemeral-volumes.md b/content/en/docs/concepts/storage/ephemeral-volumes.md index 9b0b9464f5c92..e76f76f4922f8 100644 --- a/content/en/docs/concepts/storage/ephemeral-volumes.md +++ b/content/en/docs/concepts/storage/ephemeral-volumes.md @@ -130,13 +130,16 @@ As a cluster administrator, you can use a [PodSecurityPolicy](/docs/concepts/pol ### Generic ephemeral volumes -{{< feature-state for_k8s_version="v1.19" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} This feature requires the `GenericEphemeralVolume` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to be -enabled. Because this is an alpha feature, it is disabled by default. +enabled. Because this is a beta feature, it is enabled by default. + +Generic ephemeral volumes are similar to `emptyDir` volumes in the +sense that they provide a per-pod directory for scratch data that is +usually empty after provisioning. But they may also have additional +features: -Generic ephemeral volumes are similar to `emptyDir` volumes, just more -flexible: - Storage can be local or network-attached. - Volumes can have a fixed size that Pods are not able to exceed. - Volumes may have some initial data, depending on the driver and @@ -242,14 +245,16 @@ PVCs indirectly if they can create Pods, even if they do not have permission to create PVCs directly. Cluster administrators must be aware of this. If this does not fit their security model, they have two choices: -- Explicitly disable the feature through the feature gate, to avoid - being surprised when some future Kubernetes version enables it - by default. +- Explicitly disable the feature through the feature gate. - Use a [Pod Security Policy](/docs/concepts/policy/pod-security-policy/) where the - `volumes` list does not contain the `ephemeral` volume type. + `volumes` list does not contain the `ephemeral` volume type + (deprecated in Kubernetes 1.21). +- Use an [admission webhook](/docs/reference/access-authn-authz/extensible-admission-controllers/) + which rejects objects like Pods that have a generic ephemeral + volume. -The normal namespace quota for PVCs in a namespace still applies, so +The normal [namespace quota for PVCs](/docs/concepts/policy/resource-quotas/#storage-resource-quota) still applies, so even if users are allowed to use this new mechanism, they cannot use it to circumvent other policies. diff --git a/content/en/docs/concepts/storage/persistent-volumes.md b/content/en/docs/concepts/storage/persistent-volumes.md index 971939d882acc..54e42bae9ee50 100644 --- a/content/en/docs/concepts/storage/persistent-volumes.md +++ b/content/en/docs/concepts/storage/persistent-volumes.md @@ -29,7 +29,7 @@ A _PersistentVolume_ (PV) is a piece of storage in the cluster that has been pro A _PersistentVolumeClaim_ (PVC) is a request for storage by a user. It is similar to a Pod. Pods consume node resources and PVCs consume PV resources. Pods can request specific levels of resources (CPU and Memory). 
Claims can request specific size and access modes (e.g., they can be mounted ReadWriteOnce, ReadOnlyMany or ReadWriteMany, see [AccessModes](#access-modes)). -While PersistentVolumeClaims allow a user to consume abstract storage resources, it is common that users need PersistentVolumes with varying properties, such as performance, for different problems. Cluster administrators need to be able to offer a variety of PersistentVolumes that differ in more ways than just size and access modes, without exposing users to the details of how those volumes are implemented. For these needs, there is the _StorageClass_ resource. +While PersistentVolumeClaims allow a user to consume abstract storage resources, it is common that users need PersistentVolumes with varying properties, such as performance, for different problems. Cluster administrators need to be able to offer a variety of PersistentVolumes that differ in more ways than size and access modes, without exposing users to the details of how those volumes are implemented. For these needs, there is the _StorageClass_ resource. See the [detailed walkthrough with working examples](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/). @@ -487,7 +487,7 @@ The following volume types support mount options: * VsphereVolume * iSCSI -Mount options are not validated, so mount will simply fail if one is invalid. +Mount options are not validated. If a mount option is invalid, the mount fails. In the past, the annotation `volume.beta.kubernetes.io/mount-options` was used instead of the `mountOptions` attribute. This annotation is still working; however, @@ -629,6 +629,11 @@ spec: PersistentVolumes binds are exclusive, and since PersistentVolumeClaims are namespaced objects, mounting claims with "Many" modes (`ROX`, `RWX`) is only possible within one namespace. +### PersistentVolumes typed `hostPath` + +A `hostPath` PersistentVolume uses a file or directory on the Node to emulate network-attached storage. +See [an example of `hostPath` typed volume](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolume). + ## Raw Block Volume Support {{< feature-state for_k8s_version="v1.18" state="stable" >}} diff --git a/content/en/docs/concepts/storage/storage-capacity.md b/content/en/docs/concepts/storage/storage-capacity.md index d5993d4f59a13..13ae8ab722a31 100644 --- a/content/en/docs/concepts/storage/storage-capacity.md +++ b/content/en/docs/concepts/storage/storage-capacity.md @@ -17,6 +17,7 @@ which a pod runs: network-attached storage might not be accessible by all nodes, or storage is local to a node to begin with. {{< feature-state for_k8s_version="v1.19" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} This page describes how Kubernetes keeps track of storage capacity and how the scheduler uses that information to schedule Pods onto nodes @@ -103,34 +104,10 @@ to handle this automatically. ## Enabling storage capacity tracking -Storage capacity tracking is an *alpha feature* and only enabled when -the `CSIStorageCapacity` [feature -gate](/docs/reference/command-line-tools-reference/feature-gates/) and -the `storage.k8s.io/v1alpha1` {{< glossary_tooltip text="API group" term_id="api-group" >}} are enabled. For details on -that, see the `--feature-gates` and `--runtime-config` [kube-apiserver -parameters](/docs/reference/command-line-tools-reference/kube-apiserver/). 
- -A quick check -whether a Kubernetes cluster supports the feature is to list -CSIStorageCapacity objects with: -```shell -kubectl get csistoragecapacities --all-namespaces -``` - -If your cluster supports CSIStorageCapacity, the response is either a list of CSIStorageCapacity objects or: -``` -No resources found -``` - -If not supported, this error is printed instead: -``` -error: the server doesn't have a resource type "csistoragecapacities" -``` - -In addition to enabling the feature in the cluster, a CSI -driver also has to -support it. Please refer to the driver's documentation for -details. +Storage capacity tracking is a beta feature and enabled by default in +a Kubernetes cluster since Kubernetes 1.21. In addition to having the +feature enabled in the cluster, a CSI driver also has to support +it. Please refer to the driver's documentation for details. ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/concepts/storage/storage-classes.md b/content/en/docs/concepts/storage/storage-classes.md index e6846c7ea4cd4..0abdf6b545eac 100644 --- a/content/en/docs/concepts/storage/storage-classes.md +++ b/content/en/docs/concepts/storage/storage-classes.md @@ -37,7 +37,7 @@ request a particular class. Administrators set the name and other parameters of a class when first creating StorageClass objects, and the objects cannot be updated once they are created. -Administrators can specify a default StorageClass just for PVCs that don't +Administrators can specify a default StorageClass only for PVCs that don't request any particular class to bind to: see the [PersistentVolumeClaim section](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) for details. @@ -149,7 +149,7 @@ mount options specified in the `mountOptions` field of the class. If the volume plugin does not support mount options but mount options are specified, provisioning will fail. Mount options are not validated on either -the class or PV, so mount of the PV will simply fail if one is invalid. +the class or PV. If a mount option is invalid, the PV mount fails. ### Volume Binding Mode @@ -569,7 +569,7 @@ parameters: `"http(s)://api-server:7860"` * `registry`: Quobyte registry to use to mount the volume. You can specify the registry as a ``<host>:<port>`` pair or if you want to specify multiple - registries you just have to put a comma between them e.q. + registries, put a comma between them. ``<host1>:<port>,<host2>:<port>,<host3>:<port>``. The host can be an IP address or if you have a working DNS you can also provide the DNS names. diff --git a/content/en/docs/concepts/storage/volume-health-monitoring.md b/content/en/docs/concepts/storage/volume-health-monitoring.md new file mode 100644 index 0000000000000..c5fb9c19296e7 --- /dev/null +++ b/content/en/docs/concepts/storage/volume-health-monitoring.md @@ -0,0 +1,35 @@ +--- +reviewers: +- jsafrane +- saad-ali +- msau42 +- xing-yang +title: Volume Health Monitoring +content_type: concept +--- + + + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +{{< glossary_tooltip text="CSI" term_id="csi" >}} volume health monitoring allows CSI Drivers to detect abnormal volume conditions from the underlying storage systems and report them as events on {{< glossary_tooltip text="PVCs" term_id="persistent-volume-claim" >}} or {{< glossary_tooltip text="Pods" term_id="pod" >}}. + + + +## Volume health monitoring + +Kubernetes _volume health monitoring_ is part of how Kubernetes implements the Container Storage Interface (CSI).
The volume health monitoring feature is implemented in two components: an External Health Monitor controller, and the {{< glossary_tooltip term_id="kubelet" text="kubelet" >}}. + +If a CSI Driver supports the Volume Health Monitoring feature from the controller side, an event will be reported on the related {{< glossary_tooltip text="PersistentVolumeClaim" term_id="persistent-volume-claim" >}} (PVC) when an abnormal volume condition is detected on a CSI volume. + +The External Health Monitor {{< glossary_tooltip text="controller" term_id="controller" >}} also watches for node failure events. You can enable node failure monitoring by setting the `enable-node-watcher` flag to true. When the external health monitor detects a node failure event, the controller reports an Event on the PVC to indicate that pods using this PVC are on a failed node. + +If a CSI Driver supports the Volume Health Monitoring feature from the node side, an Event will be reported on every Pod using the PVC when an abnormal volume condition is detected on a CSI volume. + +{{< note >}} +You need to enable the `CSIVolumeHealth` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to use this feature from the node side. +{{< /note >}} + +## {{% heading "whatsnext" %}} + +See the [CSI driver documentation](https://kubernetes-csi.github.io/docs/drivers.html) to find out which CSI drivers have implemented this feature. diff --git a/content/en/docs/concepts/storage/volume-pvc-datasource.md b/content/en/docs/concepts/storage/volume-pvc-datasource.md index ac8d16041da71..9e59560d1d460 100644 --- a/content/en/docs/concepts/storage/volume-pvc-datasource.md +++ b/content/en/docs/concepts/storage/volume-pvc-datasource.md @@ -24,7 +24,7 @@ The {{< glossary_tooltip text="CSI" term_id="csi" >}} Volume Cloning feature add A Clone is defined as a duplicate of an existing Kubernetes Volume that can be consumed as any standard Volume would be. The only difference is that upon provisioning, rather than creating a "new" empty Volume, the back end device creates an exact duplicate of the specified Volume. -The implementation of cloning, from the perspective of the Kubernetes API, simply adds the ability to specify an existing PVC as a dataSource during new PVC creation. The source PVC must be bound and available (not in use). +The implementation of cloning, from the perspective of the Kubernetes API, adds the ability to specify an existing PVC as a dataSource during new PVC creation. The source PVC must be bound and available (not in use). Users need to be aware of the following when using this feature: @@ -40,7 +40,7 @@ Users need to be aware of the following when using this feature: ## Provisioning -Clones are provisioned just like any other PVC with the exception of adding a dataSource that references an existing PVC in the same namespace. +Clones are provisioned like any other PVC with the exception of adding a dataSource that references an existing PVC in the same namespace. ```yaml apiVersion: v1 diff --git a/content/en/docs/concepts/storage/volumes.md b/content/en/docs/concepts/storage/volumes.md index cc5ea38b4e325..d693e057efbdc 100644 --- a/content/en/docs/concepts/storage/volumes.md +++ b/content/en/docs/concepts/storage/volumes.md @@ -33,11 +33,11 @@ drivers, but the functionality is somewhat limited. Kubernetes supports many types of volumes. A {{< glossary_tooltip term_id="pod" text="Pod" >}} can use any number of volume types simultaneously.
Ephemeral volume types have a lifetime of a pod, but persistent volumes exist beyond -the lifetime of a pod. Consequently, a volume outlives any containers -that run within the pod, and data is preserved across container restarts. When a -pod ceases to exist, the volume is destroyed. +the lifetime of a pod. When a pod ceases to exist, Kubernetes destroys ephemeral volumes; +however, Kubernetes does not destroy persistent volumes. +For any kind of volume in a given pod, data is preserved across container restarts. -At its core, a volume is just a directory, possibly with some data in it, which +At its core, a volume is a directory, possibly with some data in it, which is accessible to the containers in a pod. How that directory comes to be, the medium that backs it, and the contents of it are determined by the particular volume type used. @@ -106,6 +106,8 @@ spec: fsType: ext4 ``` +If the EBS volume is partitioned, you can supply the optional field `partition: "<partition number>"` to specify which partition to mount on. + #### AWS EBS CSI migration {{< feature-state for_k8s_version="v1.17" state="beta" >}} @@ -150,14 +152,16 @@ For more details, see the [`azureFile` volume plugin](https://github.com/kuberne #### azureFile CSI migration -{{< feature-state for_k8s_version="v1.15" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} The `CSIMigration` feature for `azureFile`, when enabled, redirects all plugin operations from the existing in-tree plugin to the `file.csi.azure.com` Container Storage Interface (CSI) Driver. In order to use this feature, the [Azure File CSI Driver](https://github.com/kubernetes-sigs/azurefile-csi-driver) must be installed on the cluster and the `CSIMigration` and `CSIMigrationAzureFile` -alpha features must be enabled. +[feature gates](/docs/reference/command-line-tools-reference/feature-gates/) must be enabled. + +The Azure File CSI driver does not support using the same volume with different fsgroups. If Azure File CSI migration is enabled, using the same volume with different fsgroups is not supported at all. ### cephfs @@ -206,14 +210,17 @@ spec: #### OpenStack CSI migration -{{< feature-state for_k8s_version="v1.18" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} -The `CSIMigration` feature for Cinder, when enabled, redirects all plugin operations -from the existing in-tree plugin to the `cinder.csi.openstack.org` Container -Storage Interface (CSI) Driver. In order to use this feature, the [Openstack Cinder CSI -Driver](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/using-cinder-csi-plugin.md) -must be installed on the cluster and the `CSIMigration` and `CSIMigrationOpenStack` -beta features must be enabled. +The `CSIMigration` feature for Cinder is enabled by default in Kubernetes 1.21. +It redirects all plugin operations from the existing in-tree plugin to the +`cinder.csi.openstack.org` Container Storage Interface (CSI) Driver. +[OpenStack Cinder CSI Driver](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) +must be installed on the cluster. +You can disable Cinder CSI migration for your cluster by setting the `CSIMigrationOpenStack` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to `false`. +If you disable the `CSIMigrationOpenStack` feature, the in-tree Cinder volume plugin takes responsibility +for all aspects of Cinder volume storage management.
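For example, to opt out of Cinder CSI migration as described above, a cluster operator could set the feature gate to `false` on the relevant control plane components and kubelets. This is a sketch; how the flag reaches each component depends on how your cluster is deployed:

```
--feature-gates=CSIMigrationOpenStack=false
```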
### configMap @@ -929,7 +936,7 @@ GitHub project has [instructions](https://github.com/quobyte/quobyte-csi#quobyte ### rbd An `rbd` volume allows a -[Rados Block Device](https://ceph.com/docs/master/rbd/rbd/) (RBD) volume to mount into your +[Rados Block Device](https://docs.ceph.com/en/latest/rbd/) (RBD) volume to mount into your Pod. Unlike `emptyDir`, which is erased when a pod is removed, the contents of an `rbd` volume are preserved and the volume is unmounted. This means that a RBD volume can be pre-populated with data, and that data can diff --git a/content/en/docs/concepts/workloads/controllers/cron-jobs.md b/content/en/docs/concepts/workloads/controllers/cron-jobs.md index 481c6f5017559..7127924411dba 100644 --- a/content/en/docs/concepts/workloads/controllers/cron-jobs.md +++ b/content/en/docs/concepts/workloads/controllers/cron-jobs.md @@ -10,7 +10,7 @@ weight: 80 -{{< feature-state for_k8s_version="v1.8" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} A _CronJob_ creates {{< glossary_tooltip term_id="job" text="Jobs" >}} on a repeating schedule. @@ -90,6 +90,11 @@ If `startingDeadlineSeconds` is set to a large value or left unset (the default) and if `concurrencyPolicy` is set to `Allow`, the jobs will always run at least once. +{{< caution >}} +If `startingDeadlineSeconds` is set to a value less than 10 seconds, the CronJob may not be scheduled. This is because the CronJob controller performs its checks every 10 seconds. +{{< /caution >}} + + For every CronJob, the CronJob {{< glossary_tooltip term_id="controller" >}} checks how many schedules it missed in the duration from its last scheduled time until now. If there are more than 100 missed schedules, then it does not start the job and logs the error ```` @@ -111,12 +116,17 @@ be down for the same period as the previous example (`08:29:00` to `10:21:00`,) The CronJob is only responsible for creating Jobs that match its schedule, and the Job in turn is responsible for the management of the Pods it represents. -## New controller +## Controller version {#new-controller} -There's an alternative implementation of the CronJob controller, available as an alpha feature since Kubernetes 1.20. To select version 2 of the CronJob controller, pass the following [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) flag to the {{< glossary_tooltip term_id="kube-controller-manager" text="kube-controller-manager" >}}. +Starting with Kubernetes v1.21, the second version of the CronJob controller +is the default implementation. To disable the default CronJob controller +and use the original CronJob controller instead, pass the `CronJobControllerV2` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +flag to the {{< glossary_tooltip term_id="kube-controller-manager" text="kube-controller-manager" >}}, +and set this flag to `false`. For example: ``` ---feature-gates="CronJobControllerV2=true" +--feature-gates="CronJobControllerV2=false" ``` @@ -128,4 +138,3 @@ documents the format of CronJob `schedule` fields. For instructions on creating and working with cron jobs, and for an example of CronJob manifest, see [Running automated tasks with cron jobs](/docs/tasks/job/automated-tasks-with-cron-jobs).
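To make the `startingDeadlineSeconds` caution above concrete, here is a minimal CronJob sketch; the name, schedule, image, and deadline value are illustrative placeholders, not recommendations:

```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: example-cronjob
spec:
  schedule: "*/5 * * * *"
  # keep this comfortably above the controller's 10-second check interval
  startingDeadlineSeconds: 200
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            command: ["echo", "hello from the CronJob"]
          restartPolicy: OnFailure
```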
- diff --git a/content/en/docs/concepts/workloads/controllers/daemonset.md b/content/en/docs/concepts/workloads/controllers/daemonset.md index 4dd784d905aed..5a98dcf7cbb24 100644 --- a/content/en/docs/concepts/workloads/controllers/daemonset.md +++ b/content/en/docs/concepts/workloads/controllers/daemonset.md @@ -147,8 +147,8 @@ the related features. | ---------------------------------------- | ---------- | ------- | ----------- | | `node.kubernetes.io/not-ready` | NoExecute | 1.13+ | DaemonSet pods will not be evicted when there are node problems such as a network partition. | | `node.kubernetes.io/unreachable` | NoExecute | 1.13+ | DaemonSet pods will not be evicted when there are node problems such as a network partition. | -| `node.kubernetes.io/disk-pressure` | NoSchedule | 1.8+ | | -| `node.kubernetes.io/memory-pressure` | NoSchedule | 1.8+ | | +| `node.kubernetes.io/disk-pressure` | NoSchedule | 1.8+ | DaemonSet pods tolerate disk-pressure attributes by default scheduler. | +| `node.kubernetes.io/memory-pressure` | NoSchedule | 1.8+ | DaemonSet pods tolerate memory-pressure attributes by default scheduler. | | `node.kubernetes.io/unschedulable` | NoSchedule | 1.12+ | DaemonSet pods tolerate unschedulable attributes by default scheduler. | | `node.kubernetes.io/network-unavailable` | NoSchedule | 1.12+ | DaemonSet pods, who uses host network, tolerate network-unavailable attributes by default scheduler. | diff --git a/content/en/docs/concepts/workloads/controllers/deployment.md b/content/en/docs/concepts/workloads/controllers/deployment.md index d8e7646aed1ba..22b95255c50bf 100644 --- a/content/en/docs/concepts/workloads/controllers/deployment.md +++ b/content/en/docs/concepts/workloads/controllers/deployment.md @@ -47,14 +47,14 @@ In this example: * A Deployment named `nginx-deployment` is created, indicated by the `.metadata.name` field. * The Deployment creates three replicated Pods, indicated by the `.spec.replicas` field. * The `.spec.selector` field defines how the Deployment finds which Pods to manage. - In this case, you simply select a label that is defined in the Pod template (`app: nginx`). + In this case, you select a label that is defined in the Pod template (`app: nginx`). However, more sophisticated selection rules are possible, as long as the Pod template itself satisfies the rule. {{< note >}} The `.spec.selector.matchLabels` field is a map of {key,value} pairs. A single {key,value} in the `matchLabels` map is equivalent to an element of `matchExpressions`, - whose key field is "key" the operator is "In", and the values array contains only "value". + whose `key` field is "key", the `operator` is "In", and the `values` array contains only "value". All of the requirements, from both `matchLabels` and `matchExpressions`, must be satisfied in order to match. 
{{< /note >}} @@ -171,13 +171,15 @@ Follow the steps given below to update your Deployment: ```shell kubectl --record deployment.apps/nginx-deployment set image deployment.v1.apps/nginx-deployment nginx=nginx:1.16.1 ``` - or simply use the following command: - + + or use the following command: + ```shell kubectl set image deployment/nginx-deployment nginx=nginx:1.16.1 --record ``` - The output is similar to this: + The output is similar to: + ``` deployment.apps/nginx-deployment image updated ``` @@ -188,7 +190,8 @@ Follow the steps given below to update your Deployment: kubectl edit deployment.v1.apps/nginx-deployment ``` - The output is similar to this: + The output is similar to: + ``` deployment.apps/nginx-deployment edited ``` @@ -200,10 +203,13 @@ Follow the steps given below to update your Deployment: ``` The output is similar to this: + ``` Waiting for rollout to finish: 2 out of 3 new replicas have been updated... ``` + or + ``` deployment "nginx-deployment" successfully rolled out ``` @@ -212,10 +218,11 @@ Get more details on your updated Deployment: * After the rollout succeeds, you can view the Deployment by running `kubectl get deployments`. The output is similar to this: - ``` - NAME READY UP-TO-DATE AVAILABLE AGE - nginx-deployment 3/3 3 3 36s - ``` + + ```ini + NAME READY UP-TO-DATE AVAILABLE AGE + nginx-deployment 3/3 3 3 36s + ``` * Run `kubectl get rs` to see that the Deployment updated the Pods by creating a new ReplicaSet and scaling it up to 3 replicas, as well as scaling down the old ReplicaSet to 0 replicas. @@ -701,7 +708,7 @@ nginx-deployment-618515232 11 11 11 7m You can pause a Deployment before triggering one or more updates and then resume it. This allows you to apply multiple fixes in between pausing and resuming without triggering unnecessary rollouts. -* For example, with a Deployment that was just created: +* For example, with a newly created Deployment: Get the Deployment details: ```shell kubectl get deploy diff --git a/content/en/docs/concepts/workloads/controllers/job.md b/content/en/docs/concepts/workloads/controllers/job.md index 2c99a704d1efd..9a49e2afd794d 100644 --- a/content/en/docs/concepts/workloads/controllers/job.md +++ b/content/en/docs/concepts/workloads/controllers/job.md @@ -16,7 +16,8 @@ weight: 50 A Job creates one or more Pods and will continue to retry execution of the Pods until a specified number of them successfully terminate. As pods successfully complete, the Job tracks the successful completions. When a specified number of successful completions is reached, the task (ie, Job) is complete. Deleting a Job will clean up -the Pods it created. +the Pods it created. Suspending a Job will delete its active Pods until the Job +is resumed again. A simple case is to create one Job object in order to reliably run one Pod to completion. The Job object will start a new Pod if the first Pod fails or is deleted (for example @@ -99,7 +100,7 @@ pi-5rwd7 ``` Here, the selector is the same as the selector for the Job. The `--output=jsonpath` option specifies an expression -that just gets the name from each Pod in the returned list. +that gets the name from each Pod in the returned list. View the standard output of one of the pods: @@ -145,8 +146,8 @@ There are three main types of task suitable to run as a Job: - the Job is complete as soon as its Pod terminates successfully. 1. Parallel Jobs with a *fixed completion count*: - specify a non-zero positive value for `.spec.completions`.
- - the Job represents the overall task, and is complete when there is one successful Pod for each value in the range 1 to `.spec.completions`. - - **not implemented yet:** Each Pod is passed a different index in the range 1 to `.spec.completions`. + - the Job represents the overall task, and is complete when there are `.spec.completions` successful Pods. + - when using `.spec.completionMode="Indexed"`, each Pod gets a different index in the range 0 to `.spec.completions-1`. 1. Parallel Jobs with a *work queue*: - do not specify `.spec.completions`, default to `.spec.parallelism`. - the Pods must coordinate amongst themselves or an external service to determine what each should work on. For example, a Pod might fetch a batch of up to N items from the work queue. @@ -166,7 +167,6 @@ a non-negative integer. For more information about how to make use of the different types of job, see the [job patterns](#job-patterns) section. - #### Controlling parallelism The requested parallelism (`.spec.parallelism`) can be set to any non-negative value. @@ -185,6 +185,33 @@ parallelism, for a variety of reasons: - The Job controller may throttle new Pod creation due to excessive previous pod failures in the same Job. - When a Pod is gracefully shut down, it takes time to stop. +### Completion mode + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +{{< note >}} +To be able to create Indexed Jobs, make sure to enable the `IndexedJob` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +on the [API server](/docs/reference/command-line-tools-reference/kube-apiserver/) +and the [controller manager](/docs/reference/command-line-tools-reference/kube-controller-manager/). +{{< /note >}} + +Jobs with _fixed completion count_ - that is, jobs that have non-null +`.spec.completions` - can have a completion mode that is specified in `.spec.completionMode`: + +- `NonIndexed` (default): the Job is considered complete when there have been + `.spec.completions` successfully completed Pods. In other words, each Pod + completion is equivalent to any other. Note that Jobs that have null + `.spec.completions` are implicitly `NonIndexed`. +- `Indexed`: the Pods of a Job get an associated completion index from 0 to + `.spec.completions-1`, available in the annotation `batch.kubernetes.io/job-completion-index`. + The Job is considered complete when there is one successfully completed Pod + for each index. For more information about how to use this mode, see + [Indexed Job for Parallel Processing with Static Work Assignment](/docs/tasks/job/indexed-parallel-processing-static/). + Note that, although rare, more than one Pod could be started for the same + index, but only one of them will count towards the completion count. + + ## Handling Pod and container failures A container in a Pod may fail for a number of reasons, such as because the process in it exited with @@ -348,12 +375,12 @@ The tradeoffs are: The tradeoffs are summarized here, with columns 2 to 4 corresponding to the above tradeoffs. The pattern names are also links to examples and more detailed description. -| Pattern | Single Job object | Fewer pods than work items? | Use app unmodified? | Works in Kube 1.1?
| -| -------------------------------------------------------------------- |:-----------------:|:---------------------------:|:-------------------:|:-------------------:| -| [Job Template Expansion](/docs/tasks/job/parallel-processing-expansion/) | | | ✓ | ✓ | -| [Queue with Pod Per Work Item](/docs/tasks/job/coarse-parallel-processing-work-queue/) | ✓ | | sometimes | ✓ | -| [Queue with Variable Pod Count](/docs/tasks/job/fine-parallel-processing-work-queue/) | ✓ | ✓ | | ✓ | -| Single Job with Static Work Assignment | ✓ | | ✓ | | +| Pattern | Single Job object | Fewer pods than work items? | Use app unmodified? | +| ----------------------------------------- |:-----------------:|:---------------------------:|:-------------------:| +| [Queue with Pod Per Work Item] | ✓ | | sometimes | +| [Queue with Variable Pod Count] | ✓ | ✓ | | +| [Indexed Job with Static Work Assignment] | ✓ | | ✓ | +| [Job Template Expansion] | | | ✓ | When you specify completions with `.spec.completions`, each Pod created by the Job controller has an identical [`spec`](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). This means that @@ -364,16 +391,121 @@ are different ways to arrange for pods to work on different things. This table shows the required settings for `.spec.parallelism` and `.spec.completions` for each of the patterns. Here, `W` is the number of work items. -| Pattern | `.spec.completions` | `.spec.parallelism` | -| -------------------------------------------------------------------- |:-------------------:|:--------------------:| -| [Job Template Expansion](/docs/tasks/job/parallel-processing-expansion/) | 1 | should be 1 | -| [Queue with Pod Per Work Item](/docs/tasks/job/coarse-parallel-processing-work-queue/) | W | any | -| [Queue with Variable Pod Count](/docs/tasks/job/fine-parallel-processing-work-queue/) | 1 | any | -| Single Job with Static Work Assignment | W | any | +| Pattern | `.spec.completions` | `.spec.parallelism` | +| ----------------------------------------- |:-------------------:|:--------------------:| +| [Queue with Pod Per Work Item] | W | any | +| [Queue with Variable Pod Count] | null | any | +| [Indexed Job with Static Work Assignment] | W | any | +| [Job Template Expansion] | 1 | should be 1 | +[Queue with Pod Per Work Item]: /docs/tasks/job/coarse-parallel-processing-work-queue/ +[Queue with Variable Pod Count]: /docs/tasks/job/fine-parallel-processing-work-queue/ +[Indexed Job with Static Work Assignment]: /docs/tasks/job/indexed-parallel-processing-static/ +[Job Template Expansion]: /docs/tasks/job/parallel-processing-expansion/ ## Advanced usage +### Suspending a Job + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +{{< note >}} +Suspending Jobs is available in Kubernetes versions 1.21 and above. You must +enable the `SuspendJob` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +on the [API server](/docs/reference/command-line-tools-reference/kube-apiserver/) +and the [controller manager](/docs/reference/command-line-tools-reference/kube-controller-manager/) +in order to use this feature. +{{< /note >}} + +When a Job is created, the Job controller will immediately begin creating Pods +to satisfy the Job's requirements and will continue to do so until the Job is +complete. However, you may want to temporarily suspend a Job's execution and +resume it later. 
To suspend a Job, you can update the `.spec.suspend` field of +the Job to true; later, when you want to resume it again, update it to false. +Creating a Job with `.spec.suspend` set to true will create it in the suspended +state. + +When a Job is resumed from suspension, its `.status.startTime` field will be +reset to the current time. This means that the `.spec.activeDeadlineSeconds` +timer will be stopped and reset when a Job is suspended and resumed. + +Remember that suspending a Job will delete all active Pods. When the Job is +suspended, your [Pods will be terminated](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination) +with a SIGTERM signal. The Pod's graceful termination period will be honored and +your Pod must handle this signal in this period. This may involve saving +progress for later or undoing changes. Pods terminated this way will not count +towards the Job's `completions` count. + +An example Job definition in the suspended state looks like this: + +```shell +kubectl get job myjob -o yaml +``` + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: myjob +spec: + suspend: true + parallelism: 1 + completions: 5 + template: + spec: + ... +``` + +The Job's status can be used to determine if a Job is suspended or has been +suspended in the past: + +```shell +kubectl get jobs/myjob -o yaml +``` + +```yaml +apiVersion: batch/v1 +kind: Job +# .metadata and .spec omitted +status: + conditions: + - lastProbeTime: "2021-02-05T13:14:33Z" + lastTransitionTime: "2021-02-05T13:14:33Z" + status: "True" + type: Suspended + startTime: "2021-02-05T13:13:48Z" +``` + +The Job condition of type "Suspended" with status "True" means the Job is +suspended; the `lastTransitionTime` field can be used to determine how long the +Job has been suspended for. If the status of that condition is "False", then the +Job was previously suspended and is now running. If such a condition does not +exist in the Job's status, the Job has never been suspended. + +Events are also created when the Job is suspended and resumed: + +```shell +kubectl describe jobs/myjob +``` + +``` +Name: myjob +... +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 12m job-controller Created pod: myjob-hlrpl + Normal SuccessfulDelete 11m job-controller Deleted pod: myjob-hlrpl + Normal Suspended 11m job-controller Job suspended + Normal SuccessfulCreate 3s job-controller Created pod: myjob-jvb44 + Normal Resumed 3s job-controller Job resumed +``` + +The last four events, particularly the "Suspended" and "Resumed" events, are +directly a result of toggling the `.spec.suspend` field. In the time between +these two events, we see that no Pods were created, but Pod creation restarted +as soon as the Job was resumed. + ### Specifying your own Pod selector Normally, when you create a Job object, you do not specify `.spec.selector`. diff --git a/content/en/docs/concepts/workloads/controllers/replicaset.md b/content/en/docs/concepts/workloads/controllers/replicaset.md index e45d20c8f7d6c..f47f3be439c3d 100644 --- a/content/en/docs/concepts/workloads/controllers/replicaset.md +++ b/content/en/docs/concepts/workloads/controllers/replicaset.md @@ -222,7 +222,7 @@ In this manner, a ReplicaSet can own a non-homogenous set of Pods ## Writing a ReplicaSet manifest As with all other Kubernetes API objects, a ReplicaSet needs the `apiVersion`, `kind`, and `metadata` fields. -For ReplicaSets, the kind is always just ReplicaSet. +For ReplicaSets, the `kind` is always ReplicaSet.
In Kubernetes 1.9 the API version `apps/v1` on the ReplicaSet kind is the current version and is enabled by default. The API version `apps/v1beta2` is deprecated. Refer to the first lines of the `frontend.yaml` example for guidance. @@ -237,7 +237,7 @@ The `.spec.template` is a [pod template](/docs/concepts/workloads/pods/#pod-temp required to have labels in place. In our `frontend.yaml` example we had one label: `tier: frontend`. Be careful not to overlap with the selectors of other controllers, lest they try to adopt this Pod. -For the template's [restart policy](/docs/concepts/workloads/Pods/pod-lifecycle/#restart-policy) field, +For the template's [restart policy](/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) field, `.spec.template.spec.restartPolicy`, the only allowed value is `Always`, which is the default. ### Pod Selector @@ -310,6 +310,48 @@ assuming that the number of replicas is not also changed). A ReplicaSet can be easily scaled up or down by simply updating the `.spec.replicas` field. The ReplicaSet controller ensures that a desired number of Pods with a matching label selector are available and operational. +When scaling down, the ReplicaSet controller chooses which pods to delete by sorting the available pods to +prioritize scaling down pods based on the following general algorithm: + 1. Pending (and unschedulable) pods are scaled down first + 2. If the `controller.kubernetes.io/pod-deletion-cost` annotation is set, then + the pod with the lower value will come first. + 3. Pods on nodes with more replicas come before pods on nodes with fewer replicas. + 4. If the pods' creation times differ, the pod that was created more recently + comes before the older pod (the creation times are bucketed on an integer log scale + when the `LogarithmicScaleDown` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled) + +If all of the above match, then selection is random. + +### Pod deletion cost +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +Using the [`controller.kubernetes.io/pod-deletion-cost`](/docs/reference/labels-annotations-taints/#pod-deletion-cost) +annotation, users can set a preference regarding which pods to remove first when downscaling a ReplicaSet. + +The annotation should be set on the pod; the valid range is [-2147483647, 2147483647]. It represents the cost of +deleting a pod compared to other pods belonging to the same ReplicaSet. Pods with a lower deletion +cost are deleted before pods with a higher deletion cost. + +The implicit value for this annotation for pods that don't set it is 0; negative values are permitted. +Invalid values will be rejected by the API server. + +This feature is alpha and disabled by default. You can enable it by setting the +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +`PodDeletionCost` in both kube-apiserver and kube-controller-manager. + +{{< note >}} +- This is honored on a best-effort basis, so it does not offer any guarantees on pod deletion order. +- Users should avoid updating the annotation frequently, such as updating it based on a metric value, + because doing so will generate a significant number of pod updates on the apiserver. +{{< /note >}} + +#### Example Use Case +The different pods of an application could have different utilization levels. On scale down, the application +may prefer to remove the pods with lower utilization.
To avoid frequently updating the pods, the application +should update `controller.kubernetes.io/pod-deletion-cost` once before issuing a scale down (setting the +annotation to a value proportional to pod utilization level). This works if the application itself controls +the downscaling; for example, the driver pod of a Spark deployment. + ### ReplicaSet as a Horizontal Pod Autoscaler Target A ReplicaSet can also be a target for diff --git a/content/en/docs/concepts/workloads/controllers/replicationcontroller.md b/content/en/docs/concepts/workloads/controllers/replicationcontroller.md index 36ae4a880a018..2b06539fd677d 100644 --- a/content/en/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/content/en/docs/concepts/workloads/controllers/replicationcontroller.md @@ -54,7 +54,9 @@ Run the example job by downloading the example file and then running this comman ```shell kubectl apply -f https://k8s.io/examples/controllers/replication.yaml ``` + The output is similar to this: + ``` replicationcontroller/nginx created ``` @@ -64,7 +66,9 @@ Check on the status of the ReplicationController using this command: ```shell kubectl describe replicationcontrollers/nginx ``` + The output is similar to this: + ``` Name: nginx Namespace: default @@ -103,22 +107,23 @@ To list all the pods that belong to the ReplicationController in a machine reada pods=$(kubectl get pods --selector=app=nginx --output=jsonpath={.items..metadata.name}) echo $pods ``` + The output is similar to this: + ``` nginx-3ntk0 nginx-4ok8v nginx-qrm3m ``` Here, the selector is the same as the selector for the ReplicationController (seen in the `kubectl describe` output), and in a different form in `replication.yaml`. The `--output=jsonpath` option -specifies an expression that just gets the name from each pod in the returned list. - +specifies an expression that gets the name from each pod in the returned list. ## Writing a ReplicationController Spec As with all other Kubernetes config, a ReplicationController needs `apiVersion`, `kind`, and `metadata` fields. The name of a ReplicationController object must be a valid [DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). -For general information about working with config files, see [object management ](/docs/concepts/overview/working-with-objects/object-management/). +For general information about working with configuration files, see [object management](/docs/concepts/overview/working-with-objects/object-management/). A ReplicationController also needs a [`.spec` section](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). @@ -140,7 +145,7 @@ for example the [Kubelet](/docs/reference/command-line-tools-reference/kubelet/) The ReplicationController can itself have labels (`.metadata.labels`). Typically, you would set these the same as the `.spec.template.metadata.labels`; if `.metadata.labels` is not specified -then it defaults to `.spec.template.metadata.labels`. However, they are allowed to be +then it defaults to `.spec.template.metadata.labels`. However, they are allowed to be different, and the `.metadata.labels` do not affect the behavior of the ReplicationController. ### Pod Selector @@ -180,16 +185,16 @@ delete`](/docs/reference/generated/kubectl/kubectl-commands#delete). Kubectl wi for it to delete each pod before deleting the ReplicationController itself. If this kubectl command is interrupted, it can be restarted.
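For example, deleting the `nginx` ReplicationController created earlier on this page, along with its pods:

```shell
kubectl delete replicationcontrollers/nginx
```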
-When using the REST API or go client library, you need to do the steps explicitly (scale replicas to +When using the REST API or Go client library, you need to do the steps explicitly (scale replicas to 0, wait for pod deletions, then delete the ReplicationController). -### Deleting just a ReplicationController +### Deleting only a ReplicationController You can delete a ReplicationController without affecting any of its pods. Using kubectl, specify the `--cascade=false` option to [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands#delete). -When using the REST API or go client library, simply delete the ReplicationController object. +When using the REST API or Go client library, you can delete the ReplicationController object. Once the original is deleted, you can create a new ReplicationController to replace it. As long as the old and new `.spec.selector` are the same, then the new one will adopt the old pods. @@ -198,7 +203,7 @@ To update pods to a new spec in a controlled way, use a [rolling update](#rollin ### Isolating pods from a ReplicationController -Pods may be removed from a ReplicationController's target set by changing their labels. This technique may be used to remove pods from service for debugging, data recovery, etc. Pods that are removed in this way will be replaced automatically (assuming that the number of replicas is not also changed). +Pods may be removed from a ReplicationController's target set by changing their labels. This technique may be used to remove pods from service for debugging and data recovery. Pods that are removed in this way will be replaced automatically (assuming that the number of replicas is not also changed). ## Common usage patterns @@ -208,7 +213,7 @@ As mentioned above, whether you have 1 pod you want to keep running, or 1000, a ### Scaling -The ReplicationController makes it easy to scale the number of replicas up or down, either manually or by an auto-scaling control agent, by simply updating the `replicas` field. +The ReplicationController enables scaling the number of replicas up or down, either manually or by an auto-scaling control agent, by updating the `replicas` field. ### Rolling updates @@ -239,13 +244,12 @@ Pods created by a ReplicationController are intended to be fungible and semantic ## Responsibilities of the ReplicationController -The ReplicationController simply ensures that the desired number of pods matches its label selector and are operational. Currently, only terminated pods are excluded from its count. In the future, [readiness](https://issue.k8s.io/620) and other information available from the system may be taken into account, we may add more controls over the replacement policy, and we plan to emit events that could be used by external clients to implement arbitrarily sophisticated replacement and/or scale-down policies. +The ReplicationController ensures that the desired number of pods that match its label selector are operational. Currently, only terminated pods are excluded from its count. In the future, [readiness](https://issue.k8s.io/620) and other information available from the system may be taken into account, we may add more controls over the replacement policy, and we plan to emit events that could be used by external clients to implement arbitrarily sophisticated replacement and/or scale-down policies. The ReplicationController is forever constrained to this narrow responsibility. It itself will not perform readiness nor liveness probes.
Rather than performing auto-scaling, it is intended to be controlled by an external auto-scaler (as discussed in [#492](https://issue.k8s.io/492)), which would change its `replicas` field. We will not add scheduling policies (for example, [spreading](https://issue.k8s.io/367#issuecomment-48428019)) to the ReplicationController. Nor should it verify that the pods controlled match the currently specified template, as that would obstruct auto-sizing and other automated processes. Similarly, completion deadlines, ordering dependencies, configuration expansion, and other features belong elsewhere. We even plan to factor out the mechanism for bulk pod creation ([#170](https://issue.k8s.io/170)). The ReplicationController is intended to be a composable building-block primitive. We expect higher-level APIs and/or tools to be built on top of it and other complementary primitives for user convenience in the future. The "macro" operations currently supported by kubectl (run, scale) are proof-of-concept examples of this. For instance, we could imagine something like [Asgard](https://techblog.netflix.com/2012/06/asgard-web-based-cloud-management-and.html) managing ReplicationControllers, auto-scalers, services, scheduling policies, canaries, etc. - ## API Object Replication controller is a top-level resource in the Kubernetes REST API. More details about the @@ -260,7 +264,6 @@ API object can be found at: It's mainly used by [Deployment](/docs/concepts/workloads/controllers/deployment/) as a mechanism to orchestrate pod creation, deletion and updates. Note that we recommend using Deployments instead of directly using Replica Sets, unless you require custom update orchestration or don't require updates at all. - ### Deployment (Recommended) [`Deployment`](/docs/concepts/workloads/controllers/deployment/) is a higher-level API object that updates its underlying Replica Sets and their Pods. Deployments are recommended if you want this rolling update functionality because, they are declarative, server-side, and have additional features. @@ -284,5 +287,3 @@ safe to terminate when the machine is otherwise ready to be rebooted/shutdown. ## For more information Read [Run Stateless Application Deployment](/docs/tasks/run-application/run-stateless-application-deployment/). - - diff --git a/content/en/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/en/docs/concepts/workloads/controllers/ttlafterfinished.md index 65527e5ce2865..266e72a79fd0f 100644 --- a/content/en/docs/concepts/workloads/controllers/ttlafterfinished.md +++ b/content/en/docs/concepts/workloads/controllers/ttlafterfinished.md @@ -8,7 +8,7 @@ weight: 70 -{{< feature-state for_k8s_version="v1.12" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} The TTL controller provides a TTL (time to live) mechanism to limit the lifetime of resource objects that have finished execution. TTL controller only handles @@ -16,9 +16,9 @@ objects that have finished execution. TTL controller only handles and may be expanded to handle other resources that will finish execution, such as Pods and custom resources. -Alpha Disclaimer: this feature is currently alpha, and can be enabled with both kube-apiserver and kube-controller-manager +This feature is currently beta and enabled by default, and can be disabled via [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) -`TTLAfterFinished`. +`TTLAfterFinished` in both kube-apiserver and kube-controller-manager. 
diff --git a/content/en/docs/concepts/workloads/pods/disruptions.md b/content/en/docs/concepts/workloads/pods/disruptions.md index 3d4248443d34a..cf0346e9395d0 100644 --- a/content/en/docs/concepts/workloads/pods/disruptions.md +++ b/content/en/docs/concepts/workloads/pods/disruptions.md @@ -75,7 +75,7 @@ Here are some ways to mitigate involuntary disruptions: and [stateful](/docs/tasks/run-application/run-replicated-stateful-application/) applications.) - For even higher availability when running replicated applications, spread applications across racks (using - [anti-affinity](/docs/user-guide/node-selection/#inter-pod-affinity-and-anti-affinity-beta-feature)) + [anti-affinity](/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity)) or across zones (if using a [multi-zone cluster](/docs/setup/multiple-zones).) @@ -90,7 +90,7 @@ disruptions, if any, to expect. ## Pod disruption budgets -{{< feature-state for_k8s_version="v1.5" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} Kubernetes offers features to help you run highly available applications even when you introduce frequent voluntary disruptions. @@ -104,7 +104,7 @@ ensure that the number of replicas serving load never falls below a certain percentage of the total. Cluster managers and hosting providers should use tools which -respect PodDisruptionBudgets by calling the [Eviction API](/docs/tasks/administer-cluster/safely-drain-node/#the-eviction-api) +respect PodDisruptionBudgets by calling the [Eviction API](/docs/tasks/administer-cluster/safely-drain-node/#eviction-api) instead of directly deleting pods or deployments. For example, the `kubectl drain` subcommand lets you mark a node as going out of @@ -136,7 +136,7 @@ during application updates is configured in the spec for the specific workload r When a pod is evicted using the eviction API, it is gracefully [terminated](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination), honoring the -`terminationGracePeriodSeconds` setting in its [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core).) +`terminationGracePeriodSeconds` setting in its [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core). ## PodDisruptionBudget example {#pdb-example} diff --git a/content/en/docs/concepts/workloads/pods/ephemeral-containers.md b/content/en/docs/concepts/workloads/pods/ephemeral-containers.md index 153f2cf3ae938..011fa4adf4053 100644 --- a/content/en/docs/concepts/workloads/pods/ephemeral-containers.md +++ b/content/en/docs/concepts/workloads/pods/ephemeral-containers.md @@ -78,7 +78,7 @@ sharing](/docs/tasks/configure-pod-container/share-process-namespace/) so you can view processes in other containers. See [Debugging with Ephemeral Debug Container]( -/docs/tasks/debug-application-cluster/debug-running-pod/#debugging-with-ephemeral-debug-container) +/docs/tasks/debug-application-cluster/debug-running-pod/#ephemeral-container) for examples of troubleshooting using ephemeral containers. 
## Ephemeral containers API @@ -103,7 +103,7 @@ the ephemeral container to add as an `EphemeralContainers` list: "apiVersion": "v1", "kind": "EphemeralContainers", "metadata": { - "name": "example-pod" + "name": "example-pod" }, "ephemeralContainers": [{ "command": [ diff --git a/content/en/docs/concepts/workloads/pods/init-containers.md b/content/en/docs/concepts/workloads/pods/init-containers.md index 363c38a39b91d..1c67e357e50a6 100644 --- a/content/en/docs/concepts/workloads/pods/init-containers.md +++ b/content/en/docs/concepts/workloads/pods/init-containers.md @@ -313,16 +313,16 @@ limit, the same as the scheduler. A Pod can restart, causing re-execution of init containers, for the following reasons: -* A user updates the Pod specification, causing the init container image to change. - Any changes to the init container image restarts the Pod. App container image - changes only restart the app container. * The Pod infrastructure container is restarted. This is uncommon and would have to be done by someone with root access to nodes. * All containers in a Pod are terminated while `restartPolicy` is set to Always, forcing a restart, and the init container completion record has been lost due to garbage collection. - +The Pod will not be restarted when the init container image is changed, or the +init container completion record has been lost due to garbage collection. This +applies to Kubernetes v1.20 and later. If you are using an earlier version of +Kubernetes, consult the documentation for the version you are using. ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md index df83f7c5f33a4..778bee6c02d6e 100644 --- a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md @@ -38,8 +38,7 @@ If a {{< glossary_tooltip term_id="node" >}} dies, the Pods scheduled to that no are [scheduled for deletion](#pod-garbage-collection) after a timeout period. Pods do not, by themselves, self-heal. If a Pod is scheduled to a -{{< glossary_tooltip text="node" term_id="node" >}} that then fails, -or if the scheduling operation itself fails, the Pod is deleted; likewise, a Pod won't +{{< glossary_tooltip text="node" term_id="node" >}} that then fails, the Pod is deleted; likewise, a Pod won't survive an eviction due to a lack of resources or Node maintenance. Kubernetes uses a higher-level abstraction, called a {{< glossary_tooltip term_id="controller" text="controller" >}}, that handles the work of @@ -313,7 +312,7 @@ can specify a readiness probe that checks an endpoint specific to readiness that is different from the liveness probe. {{< note >}} -If you just want to be able to drain requests when the Pod is deleted, you do not +If you want to be able to drain requests when the Pod is deleted, you do not necessarily need a readiness probe; on deletion, the Pod automatically puts itself into an unready state regardless of whether the readiness probe exists.
The Pod remains in the unready state while it waits for the containers in the Pod diff --git a/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md b/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md index 2e8a915c627b3..8e588da111a23 100644 --- a/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md +++ b/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md @@ -58,7 +58,7 @@ graph TB class zoneA,zoneB cluster; {{< /mermaid >}} -Instead of manually applying labels, you can also reuse the [well-known labels](/docs/reference/kubernetes-api/labels-annotations-taints/) that are created and populated automatically on most clusters. +Instead of manually applying labels, you can also reuse the [well-known labels](/docs/reference/labels-annotations-taints/) that are created and populated automatically on most clusters. ## Spread Constraints for Pods diff --git a/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md b/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md index 251dfe2efed4f..acc55b2807992 100644 --- a/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md +++ b/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md @@ -83,16 +83,16 @@ This section shows how to generate the ### Setting build variables * Set `K8S_ROOT` to `<k8s-base>`. -* Set `WEB_ROOT` to `<web-base>`. +* Set `K8S_WEBROOT` to `<web-base>`. * Set `K8S_RELEASE` to the version of the docs you want to build. - For example, if you want to build docs for Kubernetes 1.17, set `K8S_RELEASE` to 1.17. + For example, if you want to build docs for Kubernetes 1.17.0, set `K8S_RELEASE` to 1.17.0. For example: ```shell -export WEB_ROOT=$(GOPATH)/src/github.com/<web-base>/website +export K8S_WEBROOT=$(GOPATH)/src/github.com/<web-base>/website export K8S_ROOT=$(GOPATH)/src/k8s.io/kubernetes -export K8S_RELEASE=1.17 +export K8S_RELEASE=1.17.0 ``` ### Creating versioned directory and fetching Open API spec @@ -124,8 +124,8 @@ make copyapi Verify that these two files have been generated: ```shell -[ -e "<rdocs-base>/gen-apidocs/generators/build/index.html" ] && echo "index.html built" || echo "no index.html" -[ -e "<rdocs-base>/gen-apidocs/generators/build/navData.js" ] && echo "navData.js built" || echo "no navData.js" +[ -e "<rdocs-base>/gen-apidocs/build/index.html" ] && echo "index.html built" || echo "no index.html" +[ -e "<rdocs-base>/gen-apidocs/build/navData.js" ] && echo "navData.js built" || echo "no navData.js" ``` Go to the base of your local `<web-base>`, and diff --git a/content/en/docs/contribute/localization.md b/content/en/docs/contribute/localization.md index 48c48b6ed3172..eafc241d40bca 100644 --- a/content/en/docs/contribute/localization.md +++ b/content/en/docs/contribute/localization.md @@ -4,7 +4,6 @@ content_type: concept approvers: - remyleone - rlenferink -- zacharysarah weight: 50 card: name: contribute @@ -61,7 +60,7 @@ Members of `@kubernetes/sig-docs-**-owners` can approve PRs that change content For each localization, The `@kubernetes/sig-docs-**-reviews` team automates review assignment for new PRs. -Members of `@kubernetes/website-maintainers` can create new development branches to coordinate translation efforts. +Members of `@kubernetes/website-maintainers` can create new localization branches to coordinate translation efforts. Members of `@kubernetes/website-milestone-maintainers` can use the `/milestone` [Prow command](https://prow.k8s.io/command-help) to assign a milestone to issues or PRs.
@@ -205,14 +204,20 @@ To ensure accuracy in grammar and meaning, members of your localization team sho ### Source files -Localizations must be based on the English files from the most recent release, {{< latest-version >}}. +Localizations must be based on the English files from a specific release targeted by the localization team. +Each localization team can decide which release to target, which is referred to as the _target version_ below. -To find source files for the most recent release: +To find source files for your target version: 1. Navigate to the Kubernetes website repository at https://github.com/kubernetes/website. -2. Select the `release-1.X` branch for the most recent version. +2. Select a branch for your target version from the following table: + Target version | Branch + -----|----- + Next version | [`dev-{{< skew nextMinorVersion >}}`](https://github.com/kubernetes/website/tree/dev-{{< skew nextMinorVersion >}}) + Latest version | [`master`](https://github.com/kubernetes/website/tree/master) + Previous version | `release-*.**` -The latest version is {{< latest-version >}}, so the most recent release branch is [`{{< release-branch >}}`](https://github.com/kubernetes/website/tree/{{< release-branch >}}). +The `master` branch holds content for the current release `{{< latest-version >}}`. The release team will create the `{{< release-branch >}}` branch shortly before the next release: v{{< skew nextMinorVersion >}}. ### Site strings in i18n @@ -239,11 +244,11 @@ Some language teams have their own language-specific style guide and glossary. F ## Branching strategy -Because localization projects are highly collaborative efforts, we encourage teams to work in shared development branches. +Because localization projects are highly collaborative efforts, we encourage teams to work in shared localization branches. -To collaborate on a development branch: +To collaborate on a localization branch: -1. A team member of [@kubernetes/website-maintainers](https://github.com/orgs/kubernetes/teams/website-maintainers) opens a development branch from a source branch on https://github.com/kubernetes/website. +1. A team member of [@kubernetes/website-maintainers](https://github.com/orgs/kubernetes/teams/website-maintainers) opens a localization branch from a source branch on https://github.com/kubernetes/website. Your team approvers joined the `@kubernetes/website-maintainers` team when you [added your localization team](#add-your-localization-team-in-github) to the [`kubernetes/org`](https://github.com/kubernetes/org) repository. `dev-<source version>-<language code>.<team milestone>` - For example, an approver on a German localization team opens the development branch `dev-1.12-de.1` directly against the k/website repository, based on the source branch for Kubernetes v1.12. + For example, an approver on a German localization team opens the localization branch `dev-1.12-de.1` directly against the k/website repository, based on the source branch for Kubernetes v1.12. -2. Individual contributors open feature branches based on the development branch. +2. Individual contributors open feature branches based on the localization branch. For example, a German contributor opens a pull request with changes to `kubernetes:dev-1.12-de.1` from `username:local-branch-name`. -3. Approvers review and merge feature branches into the development branch. +3. Approvers review and merge feature branches into the localization branch.
-4. Periodically, an approver merges the development branch to its source branch by opening and approving a new pull request. Be sure to squash the commits before approving the pull request. +4. Periodically, an approver merges the localization branch to its source branch by opening and approving a new pull request. Be sure to squash the commits before approving the pull request. -Repeat steps 1-4 as needed until the localization is complete. For example, subsequent German development branches would be: `dev-1.12-de.2`, `dev-1.12-de.3`, etc. +Repeat steps 1-4 as needed until the localization is complete. For example, subsequent German localization branches would be: `dev-1.12-de.2`, `dev-1.12-de.3`, etc. -Teams must merge localized content into the same release branch from which the content was sourced. For example, a development branch sourced from {{< release-branch >}} must be based on {{< release-branch >}}. +Teams must merge localized content into the same branch from which the content was sourced. -An approver must maintain a development branch by keeping it current with its source branch and resolving merge conflicts. The longer a development branch stays open, the more maintenance it typically requires. Consider periodically merging development branches and opening new ones, rather than maintaining one extremely long-running development branch. +For example: +- a localization branch sourced from `master` must be merged into `master`. +- a localization branch sourced from `release-1.19` must be merged into `release-1.19`. -At the beginning of every team milestone, it's helpful to open an issue [comparing upstream changes](https://github.com/kubernetes/website/blob/master/scripts/upstream_changes.py) between the previous development branch and the current development branch. +{{< note >}} +If your localization branch was created from the `master` branch but was not merged into `master` before the new release branch `{{< release-branch >}}` was created, merge it into both `master` and the new release branch `{{< release-branch >}}`. To merge your localization branch into the new release branch `{{< release-branch >}}`, switch the upstream branch of your localization branch to `{{< release-branch >}}`. +{{< /note >}} - While only approvers can open a new development branch and merge pull requests, anyone can open a pull request for a new development branch. No special permissions are required. +At the beginning of every team milestone, it's helpful to open an issue comparing upstream changes between the previous localization branch and the current localization branch. There are two scripts for comparing upstream changes. [`upstream_changes.py`](https://github.com/kubernetes/website/tree/master/scripts#upstream_changespy) is useful for checking the changes made to a specific file. And [`diff_l10n_branches.py`](https://github.com/kubernetes/website/tree/master/scripts#diff_l10n_branchespy) is useful for creating a list of outdated files for a specific localization branch. + +While only approvers can open a new localization branch and merge pull requests, anyone can open a pull request for a new localization branch. No special permissions are required. For more information about working from forks or directly from the repository, see ["fork and clone the repo"](#fork-and-clone-the-repo).
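As a sketch of the contributor side of this workflow, assuming a hypothetical localization branch named `dev-1.21-de.1`:

```shell
# fetch the shared localization branch and start a feature branch from it
git fetch upstream dev-1.21-de.1
git checkout -b my-feature-branch upstream/dev-1.21-de.1

# ...translate content and commit...

# push to your fork, then open a pull request against kubernetes:dev-1.21-de.1
git push origin my-feature-branch
```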
@@ -290,5 +301,3 @@ Once a localization meets requirements for workflow and minimum output, SIG docs - Enable language selection on the website - Publicize the localization's availability through [Cloud Native Computing Foundation](https://www.cncf.io/about/) (CNCF) channels, including the [Kubernetes blog](https://kubernetes.io/blog/). - - diff --git a/content/en/docs/contribute/new-content/blogs-case-studies.md b/content/en/docs/contribute/new-content/blogs-case-studies.md index 66289256946af..8f2f6baaf7f36 100644 --- a/content/en/docs/contribute/new-content/blogs-case-studies.md +++ b/content/en/docs/contribute/new-content/blogs-case-studies.md @@ -39,8 +39,8 @@ Anyone can write a blog post and submit it for review. - Posts about other CNCF projects may or may not be on topic. We recommend asking the blog team before submitting a draft. - Many CNCF projects have their own blog. These are often a better choice for posts. There are times of major feature or milestone for a CNCF project that users would be interested in reading on the Kubernetes blog. - Blog posts should be original content - - The official blog is not for repurposing existing content from a third party as new content. - - The [license](https://github.com/kubernetes/website/blob/master/LICENSE) for the blog does allow commercial use of the content for commercial purposes, just not the other way around. + - The official blog is not for repurposing existing content from a third party as new content. + - The [license](https://github.com/kubernetes/website/blob/master/LICENSE) for the blog allows commercial use of the content for commercial purposes, but not the other way around. - Blog posts should aim to be future proof - Given the development velocity of the project, we want evergreen content that won't require updates to stay accurate for the reader. - It can be a better choice to add a tutorial or update official documentation than to write a high level overview as a blog post. diff --git a/content/en/docs/contribute/new-content/new-features.md b/content/en/docs/contribute/new-content/new-features.md index a0e36005628a9..268c447402e43 100644 --- a/content/en/docs/contribute/new-content/new-features.md +++ b/content/en/docs/contribute/new-content/new-features.md @@ -77,9 +77,8 @@ merged. Keep the following in mind: Alpha features. - It's hard to test (and therefore to document) a feature that hasn't been merged, or is at least considered feature-complete in its PR. -- Determining whether a feature needs documentation is a manual process and - just because a feature is not marked as needing docs doesn't mean it doesn't - need them. +- Determining whether a feature needs documentation is a manual process. Even if + a feature is not marked as needing docs, you may need to document the feature. 
## For developers or other SIG members

diff --git a/content/en/docs/contribute/new-content/open-a-pr.md b/content/en/docs/contribute/new-content/open-a-pr.md
index d511360e2205f..a49bffb030843 100644
--- a/content/en/docs/contribute/new-content/open-a-pr.md
+++ b/content/en/docs/contribute/new-content/open-a-pr.md
@@ -123,8 +123,8 @@ Make sure you have [git](https://git-scm.com/book/en/v2/Getting-Started-Installi
    ```bash
    origin git@github.com:<github_username>/website.git (fetch)
    origin git@github.com:<github_username>/website.git (push)
-   upstream https://github.com/kubernetes/website (fetch)
-   upstream https://github.com/kubernetes/website (push)
+   upstream https://github.com/kubernetes/website.git (fetch)
+   upstream https://github.com/kubernetes/website.git (push)
    ```

6. Fetch commits from your fork's `origin/master` and `kubernetes/website`'s `upstream/master`:
diff --git a/content/en/docs/contribute/participate/roles-and-responsibilities.md b/content/en/docs/contribute/participate/roles-and-responsibilities.md
index 8ebe7a1303c98..4e8632ac0bb88 100644
--- a/content/en/docs/contribute/participate/roles-and-responsibilities.md
+++ b/content/en/docs/contribute/participate/roles-and-responsibilities.md
@@ -52,7 +52,7 @@ Members can:
 
     {{< note >}}
     Using `/lgtm` triggers automation. If you want to provide non-binding
-    approval, simply commenting "LGTM" works too!
+    approval, commenting "LGTM" works too!
     {{< /note >}}
 
 - Use the `/hold` comment to block merging for a pull request
diff --git a/content/en/docs/contribute/style/style-guide.md b/content/en/docs/contribute/style/style-guide.md
index b4864dbabf095..26df0a85ac4b6 100644
--- a/content/en/docs/contribute/style/style-guide.md
+++ b/content/en/docs/contribute/style/style-guide.md
@@ -17,8 +17,6 @@ Changes to the style guide are made by SIG Docs as a group. To propose a change
 or addition, [add it to the agenda](https://docs.google.com/document/d/1ddHwLK3kUMX1wVFIwlksjTk0MsqitBnWPe1LRa1Rx5A/edit) for an upcoming SIG Docs meeting, and attend the meeting to participate in the discussion.
 
-
-
 {{< note >}}
@@ -44,35 +42,38 @@ The English-language documentation uses U.S. English spelling and grammar.
 
 ### Use upper camel case for API objects
 
-When you refer specifically to interacting with an API object, use [UpperCamelCase](https://en.wikipedia.org/wiki/Camel_case), also known as Pascal Case. When you are generally discussing an API object, use [sentence-style capitalization](https://docs.microsoft.com/en-us/style-guide/text-formatting/using-type/use-sentence-style-capitalization).
+When you refer specifically to interacting with an API object, use [UpperCamelCase](https://en.wikipedia.org/wiki/Camel_case), also known as Pascal case. You may see different capitalization, such as "configMap", in the [API Reference](/docs/reference/kubernetes-api/). When writing general documentation, it's better to use upper camel case, calling it "ConfigMap" instead.
+
+When you are generally discussing an API object, use [sentence-style capitalization](https://docs.microsoft.com/en-us/style-guide/text-formatting/using-type/use-sentence-style-capitalization).
+
+You may use the word "resource", "API", or "object" to clarify a Kubernetes resource type in a sentence.
 
-Don't split the API object name into separate words. For example, use
-PodTemplateList, not Pod Template List.
+Don't split an API object name into separate words. For example, use PodTemplateList, not Pod Template List.
-Refer to API objects without saying "object," unless omitting "object"
-leads to an awkward construction.
+The following examples focus on capitalization. For more information about formatting API object names, review the related guidance on [Code Style](#code-style-inline-code).
 
-{{< table caption = "Do and Don't - API objects" >}}
+{{< table caption = "Do and Don't - Use Pascal case for API objects" >}}
 Do | Don't
 :--| :-----
-The pod has two containers. | The Pod has two containers.
-The HorizontalPodAutoscaler is responsible for ... | The HorizontalPodAutoscaler object is responsible for ...
-A PodList is a list of pods. | A Pod List is a list of pods.
-The two ContainerPorts ... | The two ContainerPort objects ...
-The two ContainerStateTerminated objects ... | The two ContainerStateTerminateds ...
+The HorizontalPodAutoscaler resource is responsible for ... | The Horizontal pod autoscaler is responsible for ...
+A PodList object is a list of pods. | A Pod List object is a list of pods.
+The Volume object contains a `hostPath` field. | The volume object contains a hostPath field.
+Every ConfigMap object is part of a namespace. | Every configMap object is part of a namespace.
+For managing confidential data, consider using the Secret API. | For managing confidential data, consider using the secret API.
 {{< /table >}}
 
-
 ### Use angle brackets for placeholders
 
 Use angle brackets for placeholders. Tell the reader what a placeholder
-represents.
+represents, for example:
 
-1. Display information about a pod:
+Display information about a pod:
 
-       kubectl describe pod <pod-name> -n <namespace>
+```shell
+kubectl describe pod <pod-name> -n <namespace>
+```
 
-   If the namespace of the pod is `default`, you can omit the '-n' parameter.
+If the namespace of the pod is `default`, you can omit the '-n' parameter.
 
 ### Use bold for user interface elements
 
@@ -113,12 +114,12 @@ The copy is called a "fork". | The copy is called a "fork."
 
 ## Inline code formatting
 
-### Use code style for inline code, commands, and API objects
+### Use code style for inline code, commands, and API objects {#code-style-inline-code}
 
 For inline code in an HTML document, use the `<code>` tag. In a Markdown
 document, use the backtick (`` ` ``).
 
-{{< table caption = "Do and Don't - Use code style for inline code and commands" >}}
+{{< table caption = "Do and Don't - Use code style for inline code, commands, and API objects" >}}
 Do | Don't
 :--| :-----
 The `kubectl run` command creates a `Pod`. | The "kubectl run" command creates a pod.
@@ -186,7 +187,6 @@ Set the value of `image` to nginx:1.16. | Set the value of `image` to `nginx:1.1
 Set the value of the `replicas` field to 2. | Set the value of the `replicas` field to `2`.
 {{< /table >}}
 
-
 ## Code snippet formatting
 
 ### Don't include the command prompt
 
@@ -197,17 +197,20 @@ Do | Don't
 :--| :-----
 kubectl get pods | $ kubectl get pods
 {{< /table >}}
 
-
 ### Separate commands from output
 
 Verify that the pod is running on your chosen node:
 
-    kubectl get pods --output=wide
+```shell
+kubectl get pods --output=wide
+```
 
 The output is similar to this:
 
-    NAME    READY   STATUS    RESTARTS   AGE   IP           NODE
-    nginx   1/1     Running   0          13s   10.200.0.4   worker0
+```console
+NAME    READY   STATUS    RESTARTS   AGE   IP           NODE
+nginx   1/1     Running   0          13s   10.200.0.4   worker0
+```
 
 ### Versioning Kubernetes examples
 
@@ -260,17 +263,17 @@ Hugo [Shortcodes](https://gohugo.io/content-management/shortcodes) help create d
 
 2. Use the following syntax to apply a style:
 
-   ```
-   {{</* note */>}}
-   No need to include a prefix; the shortcode automatically provides one. (Note:, Caution:, etc.)
-   {{</* /note */>}}
-   ```
+   ```none
+   {{</* note */>}}
+   No need to include a prefix; the shortcode automatically provides one. (Note:, Caution:, etc.)
+   {{</* /note */>}}
+   ```
 
-The output is:
+   The output is:
 
-{{< note >}}
-The prefix you choose is the same text for the tag.
-{{< /note >}}
+   {{< note >}}
+   The prefix you choose is the same text for the tag.
+   {{< /note >}}
 
 ### Note
 
@@ -400,7 +403,7 @@ The output is:
 
 1. Prepare the batter, and pour into springform pan.
 
-  {{< note >}}Grease the pan for best results.{{< /note >}}
+   {{< note >}}Grease the pan for best results.{{< /note >}}
 
 1. Bake for 20-25 minutes or until set.
 
@@ -414,13 +417,14 @@ Shortcodes inside include statements will break the build. You must insert them
 
 {{</* /note */>}}
 ```
 
-
 ## Markdown elements
 
 ### Line breaks
+
 Use a single newline to separate block-level content like headings, lists, images, code blocks, and others. The exception is second-level headings, where it should be two newlines. Second-level headings follow the first-level (or the title) without any preceding paragraphs or texts. A two line spacing helps visualize the overall structure of content in a code editor better.
 
 ### Headings
+
 People accessing this documentation may use a screen reader or other assistive technology (AT). [Screen readers](https://en.wikipedia.org/wiki/Screen_reader) are linear output devices, they output items on a page one at a time. If there is a lot of content on a page, you can use headings to give the page an internal structure. A good page structure helps all readers to easily navigate the page or filter topics of interest.
 
 {{< table caption = "Do and Don't - Headings" >}}
@@ -450,24 +454,24 @@ Write hyperlinks that give you context for the content they link to. For example
 Write Markdown-style links: `[link text](URL)`. For example: `[Hugo shortcodes](/docs/contribute/style/hugo-shortcodes/#table-captions)` and the output is [Hugo shortcodes](/docs/contribute/style/hugo-shortcodes/#table-captions). | Write HTML-style links: `Visit our tutorial!`, or create links that open in new tabs or windows. For example: `[example website](https://example.com){target="_blank"}`
 {{< /table >}}
 
-
 ### Lists
+
 Group items in a list that are related to each other and need to appear in a specific order or to indicate a correlation between multiple items. When a screen reader comes across a list—whether it is an ordered or unordered list—it will be announced to the user that there is a group of list items. The user can then use the arrow keys to move up and down between the various items in the list. Website navigation links can also be marked up as list items; after all they are nothing but a group of related links.
 
- - End each item in a list with a period if one or more items in the list are complete sentences. For the sake of consistency, normally either all items or none should be complete sentences.
+- End each item in a list with a period if one or more items in the list are complete sentences. For the sake of consistency, normally either all items or none should be complete sentences.
 
-   {{< note >}} Ordered lists that are part of an incomplete introductory sentence can be in lowercase and punctuated as if each item was a part of the introductory sentence.{{< /note >}}
+  {{< note >}} Ordered lists that are part of an incomplete introductory sentence can be in lowercase and punctuated as if each item was a part of the introductory sentence.{{< /note >}}
 
- - Use the number one (`1.`) for ordered lists.
+- Use the number one (`1.`) for ordered lists.
- - Use (`+`), (`*`), or (`-`) for unordered lists. +- Use (`+`), (`*`), or (`-`) for unordered lists. - - Leave a blank line after each list. +- Leave a blank line after each list. - - Indent nested lists with four spaces (for example, ⋅⋅⋅⋅). +- Indent nested lists with four spaces (for example, ⋅⋅⋅⋅). - - List items may consist of multiple paragraphs. Each subsequent paragraph in a list item must be indented by either four spaces or one tab. +- List items may consist of multiple paragraphs. Each subsequent paragraph in a list item must be indented by either four spaces or one tab. ### Tables @@ -487,7 +491,6 @@ Do | Don't This command starts a proxy. | This command will start a proxy. {{< /table >}} - Exception: Use future or past tense if it is required to convey the correct meaning. @@ -500,7 +503,6 @@ You can explore the API using a browser. | The API can be explored using a brows The YAML file specifies the replica count. | The replica count is specified in the YAML file. {{< /table >}} - Exception: Use passive voice if active voice leads to an awkward construction. ### Use simple and direct language @@ -524,7 +526,6 @@ You can create a Deployment by ... | We'll create a Deployment by ... In the preceding output, you can see... | In the preceding output, we can see ... {{< /table >}} - ### Avoid Latin phrases Prefer English terms over Latin abbreviations. @@ -536,7 +537,6 @@ For example, ... | e.g., ... That is, ...| i.e., ... {{< /table >}} - Exception: Use "etc." for et cetera. ## Patterns to avoid @@ -554,7 +554,6 @@ Kubernetes provides a new feature for ... | We provide a new feature ... This page teaches you how to use pods. | In this page, we are going to learn about pods. {{< /table >}} - ### Avoid jargon and idioms Some readers speak English as a second language. Avoid jargon and idioms to help them understand better. @@ -566,13 +565,16 @@ Internally, ... | Under the hood, ... Create a new cluster. | Turn up a new cluster. {{< /table >}} - ### Avoid statements about the future Avoid making promises or giving hints about the future. If you need to talk about an alpha feature, put the text under a heading that identifies it as alpha information. +An exception to this rule is documentation about announced deprecations +targeting removal in future versions. One example of documentation like this +is the [Deprecated API migration guide](/docs/reference/using-api/deprecation-guide/). + ### Avoid statements that will soon be out of date Avoid words like "currently" and "new." A feature that is new today might not be @@ -585,6 +587,18 @@ In version 1.4, ... | In the current version, ... The Federation feature provides ... | The new Federation feature provides ... {{< /table >}} +### Avoid words that assume a specific level of understanding + +Avoid words such as "just", "simply", "easy", "easily", or "simple". These words do not add value. + +{{< table caption = "Do and Don't - Avoid insensitive words" >}} +Do | Don't +:--| :----- +Include one command in ... | Include just one command in ... +Run the container ... | Simply run the container ... +You can remove ... | You can easily remove ... +These steps ... | These simple steps ... 
+{{< /table >}} ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/reference/_index.md b/content/en/docs/reference/_index.md index 588561810216b..a9d7ee3a9bdc2 100644 --- a/content/en/docs/reference/_index.md +++ b/content/en/docs/reference/_index.md @@ -6,8 +6,10 @@ linkTitle: "Reference" main_menu: true weight: 70 content_type: concept +no_list: true --- + This section of the Kubernetes documentation contains references. @@ -18,11 +20,15 @@ This section of the Kubernetes documentation contains references. ## API Reference +* [Glossary](/docs/reference/glossary/) - a comprehensive, standardized list of Kubernetes terminology + * [Kubernetes API Reference](/docs/reference/kubernetes-api/) * [One-page API Reference for Kubernetes {{< param "version" >}}](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) * [Using The Kubernetes API](/docs/reference/using-api/) - overview of the API for Kubernetes. +* [API access control](/docs/reference/access-authn-authz/) - details on how Kubernetes controls API access +* [Well-Known Labels, Annotations and Taints](/docs/reference/labels-annotations-taints/) -## API Client Libraries +## Officially supported client libraries To call the Kubernetes API from a programming language, you can use [client libraries](/docs/reference/using-api/client-libraries/). Officially supported @@ -32,22 +38,46 @@ client libraries: - [Kubernetes Python client library](https://github.com/kubernetes-client/python) - [Kubernetes Java client library](https://github.com/kubernetes-client/java) - [Kubernetes JavaScript client library](https://github.com/kubernetes-client/javascript) +- [Kubernetes Dotnet client library](https://github.com/kubernetes-client/csharp) +- [Kubernetes Haskell Client library](https://github.com/kubernetes-client/haskell) -## CLI Reference +## CLI * [kubectl](/docs/reference/kubectl/overview/) - Main CLI tool for running commands and managing Kubernetes clusters. * [JSONPath](/docs/reference/kubectl/jsonpath/) - Syntax guide for using [JSONPath expressions](https://goessner.net/articles/JsonPath/) with kubectl. * [kubeadm](/docs/reference/setup-tools/kubeadm/) - CLI tool to easily provision a secure Kubernetes cluster. -## Components Reference +## Components -* [kubelet](/docs/reference/command-line-tools-reference/kubelet/) - The primary *node agent* that runs on each node. The kubelet takes a set of PodSpecs and ensures that the described containers are running and healthy. -* [kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/) - REST API that validates and configures data for API objects such as pods, services, replication controllers. +* [kubelet](/docs/reference/command-line-tools-reference/kubelet/) - The + primary agent that runs on each node. The kubelet takes a set of PodSpecs + and ensures that the described containers are running and healthy. +* [kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/) - + REST API that validates and configures data for API objects such as pods, + services, replication controllers. * [kube-controller-manager](/docs/reference/command-line-tools-reference/kube-controller-manager/) - Daemon that embeds the core control loops shipped with Kubernetes. -* [kube-proxy](/docs/reference/command-line-tools-reference/kube-proxy/) - Can do simple TCP/UDP stream forwarding or round-robin TCP/UDP forwarding across a set of back-ends. 
+* [kube-proxy](/docs/reference/command-line-tools-reference/kube-proxy/) - Can
+  do simple TCP/UDP stream forwarding or round-robin TCP/UDP forwarding across
+  a set of back-ends.
 * [kube-scheduler](/docs/reference/command-line-tools-reference/kube-scheduler/) - Scheduler that manages availability, performance, and capacity.
-  * [kube-scheduler Policies](/docs/reference/scheduling/policies)
-  * [kube-scheduler Profiles](/docs/reference/scheduling/config#profiles)
+
+  * [Scheduler Policies](/docs/reference/scheduling/policies)
+  * [Scheduler Profiles](/docs/reference/scheduling/config#profiles)
+
+## Config APIs
+
+This section hosts the documentation for "unpublished" APIs which are used to
+configure Kubernetes components or tools. Most of these APIs are not exposed
+by the API server in a RESTful way, though they are essential for a user or an
+operator to use or manage a cluster.
+
+* [kubelet configuration (v1beta1)](/docs/reference/config-api/kubelet-config.v1beta1/)
+* [kube-scheduler configuration (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/)
+* [kube-scheduler policy reference (v1)](/docs/reference/config-api/kube-scheduler-policy-config.v1/)
+* [kube-proxy configuration (v1alpha1)](/docs/reference/config-api/kube-proxy-config.v1alpha1/)
+* [`audit.k8s.io/v1` API](/docs/reference/config-api/apiserver-audit.v1/)
+* [Client authentication API (v1beta1)](/docs/reference/config-api/client-authentication.v1beta1/)
+* [WebhookAdmission configuration (v1)](/docs/reference/config-api/apiserver-webhookadmission.v1/)
 
 ## Design Docs
 
diff --git a/content/en/docs/reference/access-authn-authz/_index.md b/content/en/docs/reference/access-authn-authz/_index.md
index d999e52bf5893..86d06488a8742 100644
--- a/content/en/docs/reference/access-authn-authz/_index.md
+++ b/content/en/docs/reference/access-authn-authz/_index.md
@@ -1,6 +1,6 @@
 ---
 title: API Access Control
-weight: 20
+weight: 15
 no_list: true
 ---
 
diff --git a/content/en/docs/reference/access-authn-authz/abac.md b/content/en/docs/reference/access-authn-authz/abac.md
index 99fce41aba80c..3e2aea6b3623b 100644
--- a/content/en/docs/reference/access-authn-authz/abac.md
+++ b/content/en/docs/reference/access-authn-authz/abac.md
@@ -19,7 +19,7 @@ Attribute-based access control (ABAC) defines an access control paradigm whereby
 To enable `ABAC` mode, specify `--authorization-policy-file=SOME_FILENAME` and
 `--authorization-mode=ABAC` on startup.
 
 The file format is [one JSON object per line](https://jsonlines.org/). There
-should be no enclosing list or map, just one map per line.
+should be no enclosing list or map, only one map per line.
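For illustration, one line of such a policy file might look like the following sketch; the user and namespace are placeholders, and the properties of each map are described below:

```json
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user": "bob", "namespace": "projectCaribou", "resource": "pods", "readonly": true}}
```

This single line grants one user read-only access to pods in one namespace; each additional rule is another standalone JSON object on its own line.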
Each line is a "policy object", where each such object is a map with the following properties: diff --git a/content/en/docs/reference/access-authn-authz/admission-controllers.md b/content/en/docs/reference/access-authn-authz/admission-controllers.md index 0cdcbf2f36dcc..581c218755750 100644 --- a/content/en/docs/reference/access-authn-authz/admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/admission-controllers.md @@ -94,7 +94,7 @@ kube-apiserver -h | grep enable-admission-plugins In the current version, the default ones are: ```shell -NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota +CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, LimitRanger, MutatingAdmissionWebhook, NamespaceLifecycle, PersistentVolumeClaimResize, Priority, ResourceQuota, RuntimeClass, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook ``` ## What does each admission controller do? @@ -105,22 +105,22 @@ NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority This admission controller allows all pods into the cluster. It is deprecated because its behavior is the same as if there were no admission controller at all. +### AlwaysDeny {#alwaysdeny} + +{{< feature-state for_k8s_version="v1.13" state="deprecated" >}} + +Rejects all requests. AlwaysDeny is DEPRECATED as it has no real meaning. + ### AlwaysPullImages {#alwayspullimages} This admission controller modifies every new Pod to force the image pull policy to Always. This is useful in a multitenant cluster so that users can be assured that their private images can only be used by those who have the credentials to pull them. Without this admission controller, once an image has been pulled to a -node, any pod from any user can use it simply by knowing the image's name (assuming the Pod is +node, any pod from any user can use it by knowing the image's name (assuming the Pod is scheduled onto the right node), without any authorization check against the image. When this admission controller is enabled, images are always pulled prior to starting containers, which means valid credentials are required. -### AlwaysDeny {#alwaysdeny} - -{{< feature-state for_k8s_version="v1.13" state="deprecated" >}} - -Rejects all requests. AlwaysDeny is DEPRECATED as no real meaning. - ### CertificateApproval {#certificateapproval} This admission controller observes requests to 'approve' CertificateSigningRequest resources and performs additional @@ -145,6 +145,22 @@ This admission controller observes creation of CertificateSigningRequest resourc of `kubernetes.io/kube-apiserver-client`. It rejects any request that specifies a 'group' (or 'organization attribute') of `system:masters`. +### DefaultIngressClass {#defaultingressclass} + +This admission controller observes creation of `Ingress` objects that do not request any specific +ingress class and automatically adds a default ingress class to them. This way, users that do not +request any special ingress class do not need to care about them at all and they will get the +default one. 
+ +This admission controller does not do anything when no default ingress class is configured. When more than one ingress +class is marked as default, it rejects any creation of `Ingress` with an error and an administrator +must revisit their `IngressClass` objects and mark only one as default (with the annotation +"ingressclass.kubernetes.io/is-default-class"). This admission controller ignores any `Ingress` +updates; it acts only on creation. + +See the [ingress](/docs/concepts/services-networking/ingress/) documentation for more about ingress +classes and how to mark one as default. + ### DefaultStorageClass {#defaultstorageclass} This admission controller observes creation of `PersistentVolumeClaim` objects that do not request any specific storage class @@ -169,33 +185,46 @@ have toleration for taints `node.kubernetes.io/not-ready:NoExecute` or `node.kubernetes.io/unreachable:NoExecute`. The default value for `default-not-ready-toleration-seconds` and `default-unreachable-toleration-seconds` is 5 minutes. -### DenyExecOnPrivileged {#denyexeconprivileged} +### DenyEscalatingExec {#denyescalatingexec} {{< feature-state for_k8s_version="v1.13" state="deprecated" >}} -This admission controller will intercept all requests to exec a command in a pod if that pod has a privileged container. +This admission controller will deny exec and attach commands to pods that run with escalated privileges that +allow host access. This includes pods that run as privileged, have access to the host IPC namespace, and +have access to the host PID namespace. -This functionality has been merged into [DenyEscalatingExec](#denyescalatingexec). -The DenyExecOnPrivileged admission plugin is deprecated and will be removed in v1.18. +The DenyEscalatingExec admission plugin is deprecated. Use of a policy-based admission plugin (like [PodSecurityPolicy](#podsecuritypolicy) or a custom admission plugin) which can be targeted at specific users or Namespaces and also protects against creation of overly privileged Pods is recommended instead. -### DenyEscalatingExec {#denyescalatingexec} +### DenyExecOnPrivileged {#denyexeconprivileged} {{< feature-state for_k8s_version="v1.13" state="deprecated" >}} -This admission controller will deny exec and attach commands to pods that run with escalated privileges that -allow host access. This includes pods that run as privileged, have access to the host IPC namespace, and -have access to the host PID namespace. +This admission controller will intercept all requests to exec a command in a pod if that pod has a privileged container. -The DenyEscalatingExec admission plugin is deprecated and will be removed in v1.18. +This functionality has been merged into [DenyEscalatingExec](#denyescalatingexec). +The DenyExecOnPrivileged admission plugin is deprecated. Use of a policy-based admission plugin (like [PodSecurityPolicy](#podsecuritypolicy) or a custom admission plugin) which can be targeted at specific users or Namespaces and also protects against creation of overly privileged Pods is recommended instead. +### DenyServiceExternalIPs + +This admission controller rejects all net-new usage of the `Service` field `externalIPs`. This +feature is very powerful (allows network traffic interception) and not well +controlled by policy. When enabled, users of the cluster may not create new +Services which use `externalIPs` and may not add new values to `externalIPs` on +existing `Service` objects. 
Existing uses of `externalIPs` are not affected, +and users may remove values from `externalIPs` on existing `Service` objects. + +Most users do not need this feature at all, and cluster admins should consider disabling it. +Clusters that do need to use this feature should consider using some custom policy to manage usage +of it. + ### EventRateLimit {#eventratelimit} {{< feature-state for_k8s_version="v1.13" state="alpha" >}} @@ -462,8 +491,6 @@ and the [example of Limit Range](/docs/tasks/administer-cluster/manage-resources ### MutatingAdmissionWebhook {#mutatingadmissionwebhook} -{{< feature-state for_k8s_version="v1.13" state="beta" >}} - This admission controller calls any mutating webhooks which match the request. Matching webhooks are called in serial; each one may modify the object if it desires. @@ -474,7 +501,7 @@ If a webhook called by this has side effects (for example, decrementing quota) i webhooks or validating admission controllers will permit the request to finish. If you disable the MutatingAdmissionWebhook, you must also disable the -`MutatingWebhookConfiguration` object in the `admissionregistration.k8s.io/v1beta1` +`MutatingWebhookConfiguration` object in the `admissionregistration.k8s.io/v1` group/version via the `--runtime-config` flag (both are on by default in versions >= 1.9). @@ -486,8 +513,6 @@ versions >= 1.9). different when read back. * Setting originally unset fields is less likely to cause problems than overwriting fields set in the original request. Avoid doing the latter. - * This is a beta feature. Future versions of Kubernetes may restrict the types of - mutations these webhooks can make. * Future changes to control loops for built-in resources or third-party resources may break webhooks that work well today. Even when the webhook installation API is finalized, not all possible webhook behaviors will be guaranteed to be supported @@ -553,6 +578,37 @@ This admission controller also protects the access to `metadata.ownerReferences[ of an object, so that only users with "update" permission to the `finalizers` subresource of the referenced *owner* can change it. +### PersistentVolumeClaimResize {#persistentvolumeclaimresize} + +This admission controller implements additional validations for checking incoming `PersistentVolumeClaim` resize requests. + +{{< note >}} +Support for volume resizing is available as an alpha feature. Admins must set the feature gate `ExpandPersistentVolumes` +to `true` to enable resizing. +{{< /note >}} + +After enabling the `ExpandPersistentVolumes` feature gate, enabling the `PersistentVolumeClaimResize` admission +controller is recommended, too. This admission controller prevents resizing of all claims by default unless a claim's `StorageClass` + explicitly enables resizing by setting `allowVolumeExpansion` to `true`. + +For example: all `PersistentVolumeClaim`s created from the following `StorageClass` support volume expansion: + +```yaml +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: gluster-vol-default +provisioner: kubernetes.io/glusterfs +parameters: + resturl: "http://192.168.10.100:8080" + restuser: "" + secretNamespace: "" + secretName: "" +allowVolumeExpansion: true +``` + +For more information about persistent volume claims, see [PersistentVolumeClaims](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims). 
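As a sketch of what this admission controller evaluates: a resize request is an update to the claim's requested storage, for example against a hypothetical claim named `myclaim`:

```shell
# Request a larger size for an existing PersistentVolumeClaim
kubectl patch pvc myclaim -p '{"spec": {"resources": {"requests": {"storage": "10Gi"}}}}'
```

If the StorageClass of `myclaim` does not set `allowVolumeExpansion: true`, the admission controller rejects this update.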
+ ### PersistentVolumeLabel {#persistentvolumelabel} {{< feature-state for_k8s_version="v1.13" state="deprecated" >}} @@ -569,6 +625,8 @@ Starting from 1.11, this admission controller is disabled by default. ### PodNodeSelector {#podnodeselector} +{{< feature-state for_k8s_version="v1.5" state="alpha" >}} + This admission controller defaults and limits what node selectors may be used within a namespace by reading a namespace annotation and a global configuration. #### Configuration File Format @@ -638,37 +696,6 @@ PodNodeSelector allows forcing pods to run on specifically labeled nodes. Also s admission plugin, which allows preventing pods from running on specifically tainted nodes. {{< /note >}} -### PersistentVolumeClaimResize {#persistentvolumeclaimresize} - -This admission controller implements additional validations for checking incoming `PersistentVolumeClaim` resize requests. - -{{< note >}} -Support for volume resizing is available as an alpha feature. Admins must set the feature gate `ExpandPersistentVolumes` -to `true` to enable resizing. -{{< /note >}} - -After enabling the `ExpandPersistentVolumes` feature gate, enabling the `PersistentVolumeClaimResize` admission -controller is recommended, too. This admission controller prevents resizing of all claims by default unless a claim's `StorageClass` - explicitly enables resizing by setting `allowVolumeExpansion` to `true`. - -For example: all `PersistentVolumeClaim`s created from the following `StorageClass` support volume expansion: - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: gluster-vol-default -provisioner: kubernetes.io/glusterfs -parameters: - resturl: "http://192.168.10.100:8080" - restuser: "" - secretNamespace: "" - secretName: "" -allowVolumeExpansion: true -``` - -For more information about persistent volume claims, see [PersistentVolumeClaims](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims). - ### PodSecurityPolicy {#podsecuritypolicy} This admission controller acts on creation and modification of the pod and determines if it should be admitted @@ -679,6 +706,8 @@ for more information. ### PodTolerationRestriction {#podtolerationrestriction} +{{< feature-state for_k8s_version="v1.7" state="alpha" >}} + The PodTolerationRestriction admission controller verifies any conflict between tolerations of a pod and the tolerations of its namespace. It rejects the pod request if there is a conflict. It then merges the tolerations annotated on the namespace into the tolerations of the pod. @@ -766,8 +795,6 @@ This admission controller {{< glossary_tooltip text="taints" term_id="taint" >}} ### ValidatingAdmissionWebhook {#validatingadmissionwebhook} -{{< feature-state for_k8s_version="v1.13" state="beta" >}} - This admission controller calls any validating webhooks which match the request. Matching webhooks are called in parallel; if any of them rejects the request, the request fails. This admission controller only runs in the validation phase; the webhooks it calls may not @@ -778,7 +805,7 @@ If a webhook called by this has side effects (for example, decrementing quota) i webhooks or other validating admission controllers will permit the request to finish. 
If you disable the ValidatingAdmissionWebhook, you must also disable the
-`ValidatingWebhookConfiguration` object in the `admissionregistration.k8s.io/v1beta1`
+`ValidatingWebhookConfiguration` object in the `admissionregistration.k8s.io/v1`
 group/version via the `--runtime-config` flag (both are on by default in
 versions 1.9 and later).
 
diff --git a/content/en/docs/reference/access-authn-authz/authentication.md b/content/en/docs/reference/access-authn-authz/authentication.md
index c385a15fda8b6..d09ffa23a2395 100644
--- a/content/en/docs/reference/access-authn-authz/authentication.md
+++ b/content/en/docs/reference/access-authn-authz/authentication.md
@@ -68,8 +68,8 @@ when interpreted by an [authorizer](/docs/reference/access-authn-authz/authoriza
 
 You can enable multiple authentication methods at once. You should usually use at least two methods:
 
-  - service account tokens for service accounts
-  - at least one other method for user authentication.
+- service account tokens for service accounts
+- at least one other method for user authentication.
 
 When multiple authenticator modules are enabled, the first module
 to successfully authenticate the request short-circuits evaluation.
@@ -99,7 +99,7 @@ openssl req -new -key jbeda.pem -out jbeda-csr.pem -subj "/CN=jbeda/O=app1/O=app
 
 This would create a CSR for the username "jbeda", belonging to two groups, "app1" and "app2".
 
-See [Managing Certificates](/docs/concepts/cluster-administration/certificates/) for how to generate a client cert.
+See [Managing Certificates](/docs/tasks/administer-cluster/certificates/) for how to generate a client cert.
 
 ### Static Token File
 
@@ -205,8 +205,10 @@ spec:
 ```
 
 Service account bearer tokens are perfectly valid to use outside the cluster and
 can be used to create identities for long standing jobs that wish to talk to the
-Kubernetes API. To manually create a service account, simply use the `kubectl
+Kubernetes API. To manually create a service account, use the `kubectl
 create serviceaccount (NAME)` command. This creates a service account in the
 current namespace and an associated secret.
 
@@ -320,14 +322,13 @@ sequenceDiagram
 8. Once authorized the API server returns a response to `kubectl`
 9. `kubectl` provides feedback to the user
 
-Since all of the data needed to validate who you are is in the `id_token`, Kubernetes doesn't need to
-"phone home" to the identity provider. In a model where every request is stateless this provides a very scalable
-solution for authentication. It does offer a few challenges:
-1. Kubernetes has no "web interface" to trigger the authentication process. There is no browser or interface to collect credentials which is why you need to authenticate to your identity provider first.
-2. The `id_token` can't be revoked, it's like a certificate so it should be short-lived (only a few minutes) so it can be very annoying to have to get a new token every few minutes.
-3. There's no easy way to authenticate to the Kubernetes dashboard without using the `kubectl proxy` command or a reverse proxy that injects the `id_token`.
+Since all of the data needed to validate who you are is in the `id_token`, Kubernetes doesn't need to
+"phone home" to the identity provider. In a model where every request is stateless this provides a very scalable solution for authentication. It does offer a few challenges:
+1. Kubernetes has no "web interface" to trigger the authentication process.
There is no browser or interface to collect credentials which is why you need to authenticate to your identity provider first. +2. The `id_token` can't be revoked, it's like a certificate so it should be short-lived (only a few minutes) so it can be very annoying to have to get a new token every few minutes. +3. To authenticate to the Kubernetes dashboard, you must use the `kubectl proxy` command or a reverse proxy that injects the `id_token`. #### Configuring the API Server @@ -422,12 +423,12 @@ users: refresh-token: q1bKLFOyUiosTfawzA93TzZIDzH2TNa2SMm0zEiPKTUwME6BkEo6Sql5yUWVBSWpKUGphaWpxSVAfekBOZbBhaEW+VlFUeVRGcluyVF5JT4+haZmPsluFoFu5XkpXk5BXq name: oidc ``` -Once your `id_token` expires, `kubectl` will attempt to refresh your `id_token` using your `refresh_token` and `client_secret` storing the new values for the `refresh_token` and `id_token` in your `.kube/config`. +Once your `id_token` expires, `kubectl` will attempt to refresh your `id_token` using your `refresh_token` and `client_secret` storing the new values for the `refresh_token` and `id_token` in your `.kube/config`. ##### Option 2 - Use the `--token` Option -The `kubectl` command lets you pass in a token using the `--token` option. Simply copy and paste the `id_token` into this option: +The `kubectl` command lets you pass in a token using the `--token` option. Copy and paste the `id_token` into this option: ```bash kubectl --token=eyJhbGciOiJSUzI1NiJ9.eyJpc3MiOiJodHRwczovL21sYi50cmVtb2xvLmxhbjo4MDQzL2F1dGgvaWRwL29pZGMiLCJhdWQiOiJrdWJlcm5ldGVzIiwiZXhwIjoxNDc0NTk2NjY5LCJqdGkiOiI2RDUzNXoxUEpFNjJOR3QxaWVyYm9RIiwiaWF0IjoxNDc0NTk2MzY5LCJuYmYiOjE0NzQ1OTYyNDksInN1YiI6Im13aW5kdSIsInVzZXJfcm9sZSI6WyJ1c2VycyIsIm5ldy1uYW1lc3BhY2Utdmlld2VyIl0sImVtYWlsIjoibXdpbmR1QG5vbW9yZWplZGkuY29tIn0.f2As579n9VNoaKzoF-dOQGmXkFKf1FMyNV0-va_B63jn-_n9LGSCca_6IVMP8pO-Zb4KvRqGyTP0r3HkHxYy5c81AnIh8ijarruczl-TK_yF5akjSTHFZD-0gRzlevBDiH8Q79NAr-ky0P4iIXS8lY9Vnjch5MF74Zx0c3alKJHJUnnpjIACByfF2SCaYzbWFMUNat-K1PaUk5-ujMBG7yYnr95xD-63n8CO8teGUAAEMx6zRjzfhnhbzX-ajwZLGwGUBT4WqjMs70-6a7_8gZmLZb2az1cZynkFRj2BaCkVT3A2RrjeEwZEtGXlMqKJ1_I2ulrOVsYx01_yD35-rw get nodes @@ -457,7 +458,7 @@ clusters: - name: name-of-remote-authn-service cluster: certificate-authority: /path/to/ca.pem # CA for verifying the remote service. - server: https://authn.example.com/authenticate # URL of remote service to query. Must use 'https'. + server: https://authn.example.com/authenticate # URL of remote service to query. 'https' recommended for production. # users refers to the API server's webhook configuration. users: @@ -733,7 +734,7 @@ to the impersonated user info. The following HTTP headers can be used to performing an impersonation request: * `Impersonate-User`: The username to act as. -* `Impersonate-Group`: A group name to act as. Can be provided multiple times to set multiple groups. Optional. Requires "Impersonate-User" +* `Impersonate-Group`: A group name to act as. Can be provided multiple times to set multiple groups. Optional. Requires "Impersonate-User". * `Impersonate-Extra-( extra name )`: A dynamic header used to associate extra fields with the user. Optional. Requires "Impersonate-User". In order to be preserved consistently, `( extra name )` should be lower-case, and any characters which aren't [legal in HTTP header labels](https://tools.ietf.org/html/rfc7230#section-3.2.6) MUST be utf8 and [percent-encoded](https://tools.ietf.org/html/rfc3986#section-2.1). {{< note >}} @@ -954,7 +955,8 @@ When run from an interactive session, `stdin` is exposed directly to the plugin. 
[TTY check](https://godoc.org/golang.org/x/crypto/ssh/terminal#IsTerminal) to determine if it's appropriate to prompt a user interactively. -To use bearer token credentials, the plugin returns a token in the status of the `ExecCredential`. +To use bearer token credentials, the plugin returns a token in the status of the +[`ExecCredential`](/docs/reference/config-api/client-authentication.v1beta1/#client-authentication-k8s-io-v1beta1-ExecCredential) ```json { @@ -1005,13 +1007,12 @@ RFC3339 timestamp. Presence or absence of an expiry has the following impact: } ``` -The plugin can optionally be called with an environment variable, `KUBERNETES_EXEC_INFO`, -that contains information about the cluster for which this plugin is obtaining -credentials. This information can be used to perform cluster-specific credential -acquisition logic. In order to enable this behavior, the `provideClusterInfo` field must -be set on the exec user field in the -[kubeconfig](/docs/concepts/configuration/organize-cluster-access-kubeconfig/). Here is an -example of the aforementioned `KUBERNETES_EXEC_INFO` environment variable. +To enable the exec plugin to obtain cluster-specific information, set `provideClusterInfo` on the `user.exec` +field in the [kubeconfig](/docs/concepts/configuration/organize-cluster-access-kubeconfig/). +The plugin will then be supplied with an environment variable, `KUBERNETES_EXEC_INFO`. +Information from this environment variable can be used to perform cluster-specific +credential acquisition logic. +The following `ExecCredential` manifest describes a cluster information sample. ```json { @@ -1030,3 +1031,8 @@ example of the aforementioned `KUBERNETES_EXEC_INFO` environment variable. } } ``` + +## {{% heading "whatsnext" %}} + +* Read the [client authentication reference (v1beta1)](/docs/reference/config-api/client-authentication.v1beta1/) + diff --git a/content/en/docs/reference/access-authn-authz/authorization.md b/content/en/docs/reference/access-authn-authz/authorization.md index 04963e10eebee..af73a23350601 100644 --- a/content/en/docs/reference/access-authn-authz/authorization.md +++ b/content/en/docs/reference/access-authn-authz/authorization.md @@ -138,7 +138,7 @@ no exposes the API server authorization to external services. Other resources in this group include: -* `SubjectAccessReview` - Access review for any user, not just the current one. Useful for delegating authorization decisions to the API server. For example, the kubelet and extension API servers use this to determine user access to their own APIs. +* `SubjectAccessReview` - Access review for any user, not only the current one. Useful for delegating authorization decisions to the API server. For example, the kubelet and extension API servers use this to determine user access to their own APIs. * `LocalSubjectAccessReview` - Like `SubjectAccessReview` but restricted to a specific namespace. * `SelfSubjectRulesReview` - A review which returns the set of actions a user can perform within a namespace. Useful for users to quickly summarize their own access, or for UIs to hide/show actions. 
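To make the delegation pattern above concrete, here is a sketch of a `SubjectAccessReview`; the user, verb, resource, and namespace shown are illustrative placeholders:

```yaml
apiVersion: authorization.k8s.io/v1
kind: SubjectAccessReview
spec:
  # Ask the API server: may this user list pods in this namespace?
  user: jane
  resourceAttributes:
    verb: list
    resource: pods
    namespace: dev
```

Submitting this object (for example, with `kubectl create -f sar.yaml -o yaml`) returns it with `status.allowed` set to `true` or `false`; `kubectl auth can-i` offers the same kind of check for your own identity via `SelfSubjectAccessReview`.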
diff --git a/content/en/docs/reference/access-authn-authz/bootstrap-tokens.md b/content/en/docs/reference/access-authn-authz/bootstrap-tokens.md
index 856669a5d8914..f128c14a7ab34 100644
--- a/content/en/docs/reference/access-authn-authz/bootstrap-tokens.md
+++ b/content/en/docs/reference/access-authn-authz/bootstrap-tokens.md
@@ -167,7 +167,7 @@ data:
 users: []
 ```
 
-The `kubeconfig` member of the ConfigMap is a config file with just the cluster
+The `kubeconfig` member of the ConfigMap is a config file with only the cluster
 information filled out. The key thing being communicated here is the
 `certificate-authority-data`. This may be expanded in the future.
 
diff --git a/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md b/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md
index 6d05d0436ad1d..450bedf5413fe 100644
--- a/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md
+++ b/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md
@@ -196,8 +196,8 @@ O is the group that this user will belong to. You can refer to
 [RBAC](/docs/reference/access-authn-authz/rbac/) for standard groups.
 
 ```shell
-openssl genrsa -out john.key 2048
-openssl req -new -key john.key -out john.csr
+openssl genrsa -out myuser.key 2048
+openssl req -new -key myuser.key -out myuser.csr
 ```
 
 ### Create CertificateSigningRequest
 
@@ -209,7 +209,7 @@ cat < myuser.crt
+```
+
 ### Create Role and RoleBinding
 
 With the certificate created, it is time to define the Role and RoleBinding for
 
@@ -266,31 +272,30 @@ kubectl create role developer --verb=create --verb=get --verb=list --verb=update
 
 This is a sample command to create a RoleBinding for this new user:
 
 ```shell
-kubectl create rolebinding developer-binding-john --role=developer --user=john
+kubectl create rolebinding developer-binding-myuser --role=developer --user=myuser
 ```
 
 ### Add to kubeconfig
 
 The last step is to add this user into the kubeconfig file.
-This example assumes the key and certificate files are located at "/home/vagrant/work/".
 
 First, you need to add new credentials:
 
 ```
-kubectl config set-credentials john --client-key=/home/vagrant/work/john.key --client-certificate=/home/vagrant/work/john.crt --embed-certs=true
+kubectl config set-credentials myuser --client-key=myuser.key --client-certificate=myuser.crt --embed-certs=true
 ```
 
 Then, you need to add the context:
 
 ```
-kubectl config set-context john --cluster=kubernetes --user=john
+kubectl config set-context myuser --cluster=kubernetes --user=myuser
 ```
 
-To test it, change the context to `john`:
+To test it, change the context to `myuser`:
 
 ```
-kubectl config use-context john
+kubectl config use-context myuser
 ```
 
 ## Approval or rejection {#approval-rejection}
 
@@ -363,7 +368,7 @@ status:
 
 It's usual to set `status.conditions.reason` to a machine-friendly reason code
 using TitleCase; this is a convention but you can set it to anything
-you like. If you want to add a note just for human consumption, use the
+you like. If you want to add a note for human consumption, use the
 `status.conditions.message` field.
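As a brief sketch of the approval step, using the `myuser` request from the example above, a user with approval permissions can inspect and then approve (or deny) the CSR from the command line:

```shell
# List pending requests and their current condition
kubectl get csr

# Approve the request; this sets an Approved condition on its status
kubectl certificate approve myuser

# Or reject it instead, which sets a Denied condition
kubectl certificate deny myuser
```

kubectl fills in a standard reason and message for conditions set this way; API clients that need custom `status.conditions.reason` or `status.conditions.message` values can update the `approval` subresource directly.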
## Signing @@ -438,4 +443,3 @@ status: * View the source code for the kube-controller-manager built in [approver](https://github.com/kubernetes/kubernetes/blob/32ec6c212ec9415f604ffc1f4c1f29b782968ff1/pkg/controller/certificates/approver/sarapprove.go) * For details of X.509 itself, refer to [RFC 5280](https://tools.ietf.org/html/rfc5280#section-3.1) section 3.1 * For information on the syntax of PKCS#10 certificate signing requests, refer to [RFC 2986](https://tools.ietf.org/html/rfc2986) - diff --git a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md index a3f4f9c5b9a70..26a7634c2a9b0 100644 --- a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md @@ -57,7 +57,7 @@ In the following, we describe how to quickly experiment with admission webhooks. ### Write an admission webhook server Please refer to the implementation of the [admission webhook -server](https://github.com/kubernetes/kubernetes/blob/v1.13.0/test/images/webhook/main.go) +server](https://github.com/kubernetes/kubernetes/blob/release-1.21/test/images/agnhost/webhook/main.go) that is validated in a Kubernetes e2e test. The webhook handles the `AdmissionReview` request sent by the apiservers, and sends back its decision as an `AdmissionReview` object in the same version it received. @@ -147,7 +147,7 @@ webhooks: {{< /tabs >}} The scope field specifies if only cluster-scoped resources ("Cluster") or namespace-scoped -resources ("Namespaced") will match this rule. "*" means that there are no scope restrictions. +resources ("Namespaced") will match this rule. "∗" means that there are no scope restrictions. {{< note >}} When using `clientConfig.service`, the server cert must be valid for @@ -225,7 +225,7 @@ plugins: {{< /tabs >}} For more information about `AdmissionConfiguration`, see the -[AdmissionConfiguration schema](https://github.com/kubernetes/kubernetes/blob/v1.17.0/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go#L27). +[AdmissionConfiguration (v1) reference](/docs/reference/config-api/apiserver-webhookadmission.v1/). See the [webhook configuration](#webhook-configuration) section for details about each config field. * In the kubeConfig file, provide the credentials: @@ -1093,8 +1093,8 @@ be a layering violation). `host` may also be an IP address. Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this -webhook. Such installs are likely to be non-portable, i.e., not easy -to turn up in a new cluster. +webhook. Such installations are likely to be non-portable or not readily +run in a new cluster. The scheme must be "https"; the URL must begin with "https://". diff --git a/content/en/docs/reference/access-authn-authz/rbac.md b/content/en/docs/reference/access-authn-authz/rbac.md index 4bc2b86dd692d..bd9aba1aa803e 100644 --- a/content/en/docs/reference/access-authn-authz/rbac.md +++ b/content/en/docs/reference/access-authn-authz/rbac.md @@ -219,7 +219,7 @@ the role that is granted to those subjects. 1. A binding to a different role is a fundamentally different binding. 
Requiring a binding to be deleted/recreated in order to change the `roleRef` ensures the full list of subjects in the binding is intended to be granted -the new role (as opposed to enabling accidentally modifying just the roleRef +the new role (as opposed to enabling or accidentally modifying only the roleRef without verifying all of the existing subjects should be given the new role's permissions). @@ -333,7 +333,7 @@ as a cluster administrator, include rules for custom resources, such as those se or aggregated API servers, to extend the default roles. For example: the following ClusterRoles let the "admin" and "edit" default roles manage the custom resource -named CronTab, whereas the "view" role can perform just read actions on CronTab resources. +named CronTab, whereas the "view" role can perform only read actions on CronTab resources. You can assume that CronTab objects are named `"crontabs"` in URLs as seen by the API server. ```yaml diff --git a/content/en/docs/reference/access-authn-authz/service-accounts-admin.md b/content/en/docs/reference/access-authn-authz/service-accounts-admin.md index dd13dfeecc865..ea04f462b1a4a 100644 --- a/content/en/docs/reference/access-authn-authz/service-accounts-admin.md +++ b/content/en/docs/reference/access-authn-authz/service-accounts-admin.md @@ -1,23 +1,24 @@ --- reviewers: -- bprashanth -- davidopp -- lavalamp -- liggitt + - bprashanth + - davidopp + - lavalamp + - liggitt title: Managing Service Accounts content_type: concept weight: 50 --- + This is a Cluster Administrator guide to service accounts. You should be familiar with [configuring Kubernetes service accounts](/docs/tasks/configure-pod-container/configure-service-account/). -Support for authorization and user accounts is planned but incomplete. Sometimes +Support for authorization and user accounts is planned but incomplete. Sometimes incomplete features are referred to in order to better describe service accounts. - + ## User accounts versus service accounts Kubernetes distinguishes between the concept of a user account and a service account @@ -53,37 +54,51 @@ It is part of the API server. It acts synchronously to modify pods as they are created or updated. When this plugin is active (and it is by default on most distributions), then it does the following when a pod is created or modified: - 1. If the pod does not have a `ServiceAccount` set, it sets the `ServiceAccount` to `default`. - 1. It ensures that the `ServiceAccount` referenced by the pod exists, and otherwise rejects it. - 1. If the pod does not contain any `ImagePullSecrets`, then `ImagePullSecrets` of the `ServiceAccount` are added to the pod. - 1. It adds a `volume` to the pod which contains a token for API access. - 1. It adds a `volumeSource` to each container of the pod mounted at `/var/run/secrets/kubernetes.io/serviceaccount`. +1. If the pod does not have a `ServiceAccount` set, it sets the `ServiceAccount` to `default`. +1. It ensures that the `ServiceAccount` referenced by the pod exists, and otherwise rejects it. +1. It adds a `volume` to the pod which contains a token for API access if neither the ServiceAccount `automountServiceAccountToken` nor the Pod's `automountServiceAccountToken` is set to `false`. +1. It adds a `volumeSource` to each container of the pod mounted at `/var/run/secrets/kubernetes.io/serviceaccount`, if the previous step has created a volume for ServiceAccount token. +1. If the pod does not contain any `ImagePullSecrets`, then `ImagePullSecrets` of the `ServiceAccount` are added to the pod. 
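To illustrate the opt-out mentioned in the steps above, here is a sketch of a ServiceAccount (with a hypothetical name) that declines automatic token mounting; pods that use it get no token volume unless they set their own `automountServiceAccountToken: true`:

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: build-robot
  namespace: default
# Skip step 3 of the admission controller: don't mount an API token
automountServiceAccountToken: false
```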
#### Bound Service Account Token Volume -{{< feature-state for_k8s_version="v1.13" state="alpha" >}} - -When the `BoundServiceAccountTokenVolume` feature gate is enabled, the service account admission controller will -add a projected service account token volume instead of a secret volume. The service account token will expire after 1 hour by default or the pod is deleted. See more details about [projected volume](/docs/tasks/configure-pod-container/configure-projected-volume-storage/). - -This feature depends on the `RootCAConfigMap` feature gate enabled which publish a "kube-root-ca.crt" ConfigMap to every namespace. This ConfigMap contains a CA bundle used for verifying connections to the kube-apiserver. -1. If the pod does not have a `serviceAccountName` set, it sets the - `serviceAccountName` to `default`. -1. It ensures that the `serviceAccountName` referenced by the pod exists, and - otherwise rejects it. -1. If the pod does not contain any `imagePullSecrets`, then `imagePullSecrets` - of the ServiceAccount referenced by `serviceAccountName` are added to the pod. -1. It adds a `volume` to the pod which contains a token for API access - if neither the ServiceAccount `automountServiceAccountToken` nor the Pod's - `automountServiceAccountToken` is set to `false`. -1. It adds a `volumeSource` to each container of the pod mounted at - `/var/run/secrets/kubernetes.io/serviceaccount`, if the previous step has - created a volume for ServiceAccount token. - -You can migrate a service account volume to a projected volume when -the `BoundServiceAccountTokenVolume` feature gate is enabled. -The service account token will expire after 1 hour or the pod is deleted. See -more details about -[projected volume](/docs/tasks/configure-pod-container/configure-projected-volume-storage/). + +{{< feature-state for_k8s_version="v1.21" state="beta" >}} + +When the `BoundServiceAccountTokenVolume` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled, the service account admission controller will +add the following projected volume instead of a Secret-based volume for the non-expiring service account token created by Token Controller. + +```yaml +- name: kube-api-access- + projected: + defaultMode: 420 # 0644 + sources: + - serviceAccountToken: + expirationSeconds: 3600 + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + path: namespace +``` + +This projected volume consists of three sources: + +1. A ServiceAccountToken acquired from kube-apiserver via TokenRequest API. It will expire after 1 hour by default or when the pod is deleted. It is bound to the pod and has kube-apiserver as the audience. +1. A ConfigMap containing a CA bundle used for verifying connections to the kube-apiserver. This feature depends on the `RootCAConfigMap` feature gate being enabled, which publishes a "kube-root-ca.crt" ConfigMap to every namespace. `RootCAConfigMap` is enabled by default in 1.20, and always enabled in 1.21+. +1. A DownwardAPI that references the namespace of the pod. + +See more details about [projected volumes](/docs/tasks/configure-pod-container/configure-projected-volume-storage/). + +You can manually migrate a secret-based service account volume to a projected volume when +the `BoundServiceAccountTokenVolume` feature gate is not enabled by adding the above +projected volume to the pod spec. However, `RootCAConfigMap` needs to be enabled. 
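One way to see the resulting projection from inside a running workload (assuming a pod named `mypod` with token automounting enabled) is to list the mount path named in the steps above:

```shell
kubectl exec mypod -- ls /var/run/secrets/kubernetes.io/serviceaccount/
# ca.crt  namespace  token
kubectl exec mypod -- cat /var/run/secrets/kubernetes.io/serviceaccount/token
```

With the projected volume, the `token` file is refreshed by the kubelet and expires, so applications should re-read it periodically rather than caching it for the life of the process.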
### Token Controller @@ -140,4 +155,3 @@ kubectl delete secret mysecretname A ServiceAccount controller manages the ServiceAccounts inside namespaces, and ensures a ServiceAccount named "default" exists in every active namespace. - diff --git a/content/en/docs/reference/command-line-tools-reference/_index.md b/content/en/docs/reference/command-line-tools-reference/_index.md index 6698fe66c00a7..8f9cf74a0ee9b 100644 --- a/content/en/docs/reference/command-line-tools-reference/_index.md +++ b/content/en/docs/reference/command-line-tools-reference/_index.md @@ -1,4 +1,4 @@ --- -title: Command line tools reference +title: Component tools weight: 60 --- diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates.md b/content/en/docs/reference/command-line-tools-reference/feature-gates.md index d9754afb5660c..849af9f00bf29 100644 --- a/content/en/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates.md @@ -53,17 +53,16 @@ different Kubernetes components. | `APIPriorityAndFairness` | `false` | Alpha | 1.17 | 1.19 | | `APIPriorityAndFairness` | `true` | Beta | 1.20 | | | `APIResponseCompression` | `false` | Alpha | 1.7 | 1.15 | -| `APIResponseCompression` | `false` | Beta | 1.16 | | +| `APIResponseCompression` | `true` | Beta | 1.16 | | | `APIServerIdentity` | `false` | Alpha | 1.20 | | | `AllowInsecureBackendProxy` | `true` | Beta | 1.17 | | | `AnyVolumeDataSource` | `false` | Alpha | 1.18 | | | `AppArmor` | `true` | Beta | 1.4 | | | `BalanceAttachedNodeVolumes` | `false` | Alpha | 1.11 | | -| `BoundServiceAccountTokenVolume` | `false` | Alpha | 1.13 | | +| `BoundServiceAccountTokenVolume` | `false` | Alpha | 1.13 | 1.20 | +| `BoundServiceAccountTokenVolume` | `true` | Beta | 1.21 | | | `CPUManager` | `false` | Alpha | 1.8 | 1.9 | | `CPUManager` | `true` | Beta | 1.10 | | -| `CRIContainerLogRotation` | `false` | Alpha | 1.10 | 1.10 | -| `CRIContainerLogRotation` | `true` | Beta| 1.11 | | | `CSIInlineVolume` | `false` | Alpha | 1.15 | 1.15 | | `CSIInlineVolume` | `true` | Beta | 1.16 | - | | `CSIMigration` | `false` | Alpha | 1.14 | 1.16 | @@ -74,7 +73,8 @@ different Kubernetes components. | `CSIMigrationAzureDisk` | `false` | Alpha | 1.15 | 1.18 | | `CSIMigrationAzureDisk` | `false` | Beta | 1.19 | | | `CSIMigrationAzureDiskComplete` | `false` | Alpha | 1.17 | | -| `CSIMigrationAzureFile` | `false` | Alpha | 1.15 | | +| `CSIMigrationAzureFile` | `false` | Alpha | 1.15 | 1.19 | +| `CSIMigrationAzureFile` | `false` | Beta | 1.21 | | | `CSIMigrationAzureFileComplete` | `false` | Alpha | 1.17 | | | `CSIMigrationGCE` | `false` | Alpha | 1.14 | 1.16 | | `CSIMigrationGCE` | `false` | Beta | 1.17 | | @@ -84,13 +84,17 @@ different Kubernetes components. 
| `CSIMigrationOpenStackComplete` | `false` | Alpha | 1.17 | | | `CSIMigrationvSphere` | `false` | Beta | 1.19 | | | `CSIMigrationvSphereComplete` | `false` | Beta | 1.19 | | -| `CSIServiceAccountToken` | `false` | Alpha | 1.20 | | -| `CSIStorageCapacity` | `false` | Alpha | 1.19 | | +| `CSIServiceAccountToken` | `false` | Alpha | 1.20 | 1.20 | +| `CSIServiceAccountToken` | `true` | Beta | 1.21 | | +| `CSIStorageCapacity` | `false` | Alpha | 1.19 | 1.20 | +| `CSIStorageCapacity` | `true` | Beta | 1.21 | | | `CSIVolumeFSGroupPolicy` | `false` | Alpha | 1.19 | 1.19 | | `CSIVolumeFSGroupPolicy` | `true` | Beta | 1.20 | | +| `CSIVolumeHealth` | `false` | Alpha | 1.21 | | | `ConfigurableFSGroupPolicy` | `false` | Alpha | 1.18 | 1.19 | | `ConfigurableFSGroupPolicy` | `true` | Beta | 1.20 | | -| `CronJobControllerV2` | `false` | Alpha | 1.20 | | +| `CronJobControllerV2` | `false` | Alpha | 1.20 | 1.20 | +| `CronJobControllerV2` | `true` | Beta | 1.21 | | | `CustomCPUCFSQuotaPeriod` | `false` | Alpha | 1.12 | | | `DefaultPodTopologySpread` | `false` | Alpha | 1.19 | 1.19 | | `DefaultPodTopologySpread` | `true` | Beta | 1.20 | | @@ -98,14 +102,11 @@ different Kubernetes components. | `DevicePlugins` | `true` | Beta | 1.10 | | | `DisableAcceleratorUsageMetrics` | `false` | Alpha | 1.19 | 1.19 | | `DisableAcceleratorUsageMetrics` | `true` | Beta | 1.20 | | -| `DownwardAPIHugePages` | `false` | Alpha | 1.20 | | +| `DownwardAPIHugePages` | `false` | Alpha | 1.20 | 1.20 | +| `DownwardAPIHugePages` | `false` | Beta | 1.21 | | | `DynamicKubeletConfig` | `false` | Alpha | 1.4 | 1.10 | | `DynamicKubeletConfig` | `true` | Beta | 1.11 | | | `EfficientWatchResumption` | `false` | Alpha | 1.20 | | -| `EndpointSlice` | `false` | Alpha | 1.16 | 1.16 | -| `EndpointSlice` | `false` | Beta | 1.17 | | -| `EndpointSlice` | `true` | Beta | 1.18 | | -| `EndpointSliceNodeName` | `false` | Alpha | 1.20 | | | `EndpointSliceProxying` | `false` | Alpha | 1.18 | 1.18 | | `EndpointSliceProxying` | `true` | Beta | 1.19 | | | `EndpointSliceTerminatingCondition` | `false` | Alpha | 1.20 | | @@ -117,49 +118,51 @@ different Kubernetes components. 
| `ExpandPersistentVolumes` | `false` | Alpha | 1.8 | 1.10 | | `ExpandPersistentVolumes` | `true` | Beta | 1.11 | | | `ExperimentalHostUserNamespaceDefaulting` | `false` | Beta | 1.5 | | -| `GenericEphemeralVolume` | `false` | Alpha | 1.19 | | -| `GracefulNodeShutdown` | `false` | Alpha | 1.20 | | +| `GenericEphemeralVolume` | `false` | Alpha | 1.19 | 1.20 | +| `GenericEphemeralVolume` | `true` | Beta | 1.21 | | +| `GracefulNodeShutdown` | `false` | Alpha | 1.20 | 1.20 | +| `GracefulNodeShutdown` | `true` | Beta | 1.21 | | | `HPAContainerMetrics` | `false` | Alpha | 1.20 | | | `HPAScaleToZero` | `false` | Alpha | 1.16 | | | `HugePageStorageMediumSize` | `false` | Alpha | 1.18 | 1.18 | | `HugePageStorageMediumSize` | `true` | Beta | 1.19 | | -| `IPv6DualStack` | `false` | Alpha | 1.15 | | -| `ImmutableEphemeralVolumes` | `false` | Alpha | 1.18 | 1.18 | -| `ImmutableEphemeralVolumes` | `true` | Beta | 1.19 | | +| `IndexedJob` | `false` | Alpha | 1.21 | | +| `IngressClassNamespacedParams` | `false` | Alpha | 1.21 | | +| `IPv6DualStack` | `false` | Alpha | 1.15 | 1.20 | +| `IPv6DualStack` | `true` | Beta | 1.21 | | | `KubeletCredentialProviders` | `false` | Alpha | 1.20 | | +| `KubeletPodResourcesGetAllocatable` | `false` | Alpha | 1.21 | | -| `KubeletPodResources` | `true` | Alpha | 1.13 | 1.14 | -| `KubeletPodResources` | `true` | Beta | 1.15 | | | `LegacyNodeRoleBehavior` | `false` | Alpha | 1.16 | 1.18 | -| `LegacyNodeRoleBehavior` | `true` | True | 1.19 | | +| `LegacyNodeRoleBehavior` | `true` | Beta | 1.19 | | | `LocalStorageCapacityIsolation` | `false` | Alpha | 1.7 | 1.9 | | `LocalStorageCapacityIsolation` | `true` | Beta | 1.10 | | | `LocalStorageCapacityIsolationFSQuotaMonitoring` | `false` | Alpha | 1.15 | | +| `LogarithmicScaleDown` | `false` | Alpha | 1.21 | | | `MixedProtocolLBService` | `false` | Alpha | 1.20 | | +| `NamespaceDefaultLabelName` | `true` | Beta | 1.21 | | +| `NetworkPolicyEndPort` | `false` | Alpha | 1.21 | | | `NodeDisruptionExclusion` | `false` | Alpha | 1.16 | 1.18 | | `NodeDisruptionExclusion` | `true` | Beta | 1.19 | | | `NonPreemptingPriority` | `false` | Alpha | 1.15 | 1.18 | | `NonPreemptingPriority` | `true` | Beta | 1.19 | | -| `PodDisruptionBudget` | `false` | Alpha | 1.3 | 1.4 | -| `PodDisruptionBudget` | `true` | Beta | 1.5 | | +| `PodAffinityNamespaceSelector` | `false` | Alpha | 1.21 | | +| `PodDeletionCost` | `false` | Alpha | 1.21 | | | `PodOverhead` | `false` | Alpha | 1.16 | 1.17 | | `PodOverhead` | `true` | Beta | 1.18 | | +| `ProbeTerminationGracePeriod` | `false` | Alpha | 1.21 | | | `ProcMountType` | `false` | Alpha | 1.12 | | | `QOSReserved` | `false` | Alpha | 1.11 | | | `RemainingItemCount` | `false` | Alpha | 1.15 | | | `RemoveSelfLink` | `false` | Alpha | 1.16 | 1.19 | | `RemoveSelfLink` | `true` | Beta | 1.20 | | -| `RootCAConfigMap` | `false` | Alpha | 1.13 | 1.19 | -| `RootCAConfigMap` | `true` | Beta | 1.20 | | | `RotateKubeletServerCertificate` | `false` | Alpha | 1.7 | 1.11 | | `RotateKubeletServerCertificate` | `true` | Beta | 1.12 | | | `RunAsGroup` | `true` | Beta | 1.14 | | -| `SCTPSupport` | `false` | Alpha | 1.12 | 1.18 | -| `SCTPSupport` | `true` | Beta | 1.19 | | | `ServerSideApply` | `false` | Alpha | 1.14 | 1.15 | | `ServerSideApply` | `true` | Beta | 1.16 | | -| `ServiceAccountIssuerDiscovery` | `false` | Alpha | 1.18 | 1.19 | -| `ServiceAccountIssuerDiscovery` | `true` | Beta | 1.20 | | +| `ServiceInternalTrafficPolicy` | `false` | Alpha | 1.21 | | | `ServiceLBNodePortControl` | `false` | Alpha | 1.20 | | +| 
`ServiceLoadBalancerClass` | `false` | Alpha | 1.21 | | | `ServiceNodeExclusion` | `false` | Alpha | 1.8 | 1.18 | | `ServiceNodeExclusion` | `true` | Beta | 1.19 | | | `ServiceTopology` | `false` | Alpha | 1.17 | | @@ -169,17 +172,20 @@ different Kubernetes components. | `StorageVersionAPI` | `false` | Alpha | 1.20 | | | `StorageVersionHash` | `false` | Alpha | 1.14 | 1.14 | | `StorageVersionHash` | `true` | Beta | 1.15 | | -| `Sysctls` | `true` | Beta | 1.11 | | +| `SuspendJob` | `false` | Alpha | 1.21 | | | `TTLAfterFinished` | `false` | Alpha | 1.12 | | +| `TopologyAwareHints` | `false` | Alpha | 1.21 | | | `TopologyManager` | `false` | Alpha | 1.16 | 1.17 | | `TopologyManager` | `true` | Beta | 1.18 | | | `ValidateProxyRedirects` | `false` | Alpha | 1.12 | 1.13 | | `ValidateProxyRedirects` | `true` | Beta | 1.14 | | +| `VolumeCapacityPriority` | `false` | Alpha | 1.21 | - | | `WarningHeaders` | `true` | Beta | 1.19 | | | `WinDSR` | `false` | Alpha | 1.14 | | | `WinOverlay` | `false` | Alpha | 1.14 | 1.19 | | `WinOverlay` | `true` | Beta | 1.20 | | -| `WindowsEndpointSliceProxying` | `false` | Alpha | 1.19 | | +| `WindowsEndpointSliceProxying` | `false` | Alpha | 1.19 | 1.20 | +| `WindowsEndpointSliceProxying` | `true` | Beta | 1.21 | | {{< /table >}} ### Feature gates for graduated or deprecated features @@ -200,6 +206,9 @@ different Kubernetes components. | `BlockVolume` | `false` | Alpha | 1.9 | 1.12 | | `BlockVolume` | `true` | Beta | 1.13 | 1.17 | | `BlockVolume` | `true` | GA | 1.18 | - | +| `CRIContainerLogRotation` | `false` | Alpha | 1.10 | 1.10 | +| `CRIContainerLogRotation` | `true` | Beta | 1.11 | 1.20 | +| `CRIContainerLogRotation` | `true` | GA | 1.21 | - | | `CSIBlockVolume` | `false` | Alpha | 1.11 | 1.13 | | `CSIBlockVolume` | `true` | Beta | 1.14 | 1.17 | | `CSIBlockVolume` | `true` | GA | 1.18 | - | @@ -242,8 +251,15 @@ different Kubernetes components. | `DynamicProvisioningScheduling` | - | Deprecated| 1.12 | - | | `DynamicVolumeProvisioning` | `true` | Alpha | 1.3 | 1.7 | | `DynamicVolumeProvisioning` | `true` | GA | 1.8 | - | +| `EnableAggregatedDiscoveryTimeout` | `true` | Deprecated | 1.16 | - | | `EnableEquivalenceClassCache` | `false` | Alpha | 1.8 | 1.14 | | `EnableEquivalenceClassCache` | - | Deprecated | 1.15 | - | +| `EndpointSlice` | `false` | Alpha | 1.16 | 1.16 | +| `EndpointSlice` | `false` | Beta | 1.17 | 1.17 | +| `EndpointSlice` | `true` | Beta | 1.18 | 1.20 | +| `EndpointSlice` | `true` | GA | 1.21 | - | +| `EndpointSliceNodeName` | `false` | Alpha | 1.20 | 1.20 | +| `EndpointSliceNodeName` | `true` | GA | 1.21 | - | | `ExperimentalCriticalPodAnnotation` | `false` | Alpha | 1.5 | 1.12 | | `ExperimentalCriticalPodAnnotation` | `false` | Deprecated | 1.13 | - | | `EvenPodsSpread` | `false` | Alpha | 1.16 | 1.17 | @@ -257,6 +273,9 @@ different Kubernetes components. | `HugePages` | `true` | GA | 1.14 | - | | `HyperVContainer` | `false` | Alpha | 1.10 | 1.19 | | `HyperVContainer` | `false` | Deprecated | 1.20 | - | +| `ImmutableEphemeralVolumes` | `false` | Alpha | 1.18 | 1.18 | +| `ImmutableEphemeralVolumes` | `true` | Beta | 1.19 | 1.20 | +| `ImmutableEphemeralVolumes` | `true` | GA | 1.21 | | | `Initializers` | `false` | Alpha | 1.7 | 1.13 | | `Initializers` | - | Deprecated | 1.14 | - | | `KubeletConfigFile` | `false` | Alpha | 1.8 | 1.9 | @@ -280,6 +299,9 @@ different Kubernetes components. 
| `PersistentLocalVolumes` | `false` | Alpha | 1.7 | 1.9 | | `PersistentLocalVolumes` | `true` | Beta | 1.10 | 1.13 | | `PersistentLocalVolumes` | `true` | GA | 1.14 | - | +| `PodDisruptionBudget` | `false` | Alpha | 1.3 | 1.4 | +| `PodDisruptionBudget` | `true` | Beta | 1.5 | 1.20 | +| `PodDisruptionBudget` | `true` | GA | 1.21 | - | | `PodPriority` | `false` | Alpha | 1.8 | 1.10 | | `PodPriority` | `true` | Beta | 1.11 | 1.13 | | `PodPriority` | `true` | GA | 1.14 | - | @@ -290,11 +312,15 @@ | `PodShareProcessNamespace` | `true` | Beta | 1.12 | 1.16 | | `PodShareProcessNamespace` | `true` | GA | 1.17 | - | | `RequestManagement` | `false` | Alpha | 1.15 | 1.16 | +| `RequestManagement` | - | Deprecated | 1.17 | - | | `ResourceLimitsPriorityFunction` | `false` | Alpha | 1.9 | 1.18 | | `ResourceLimitsPriorityFunction` | - | Deprecated | 1.19 | - | | `ResourceQuotaScopeSelectors` | `false` | Alpha | 1.11 | 1.11 | | `ResourceQuotaScopeSelectors` | `true` | Beta | 1.12 | 1.16 | | `ResourceQuotaScopeSelectors` | `true` | GA | 1.17 | - | +| `RootCAConfigMap` | `false` | Alpha | 1.13 | 1.19 | +| `RootCAConfigMap` | `true` | Beta | 1.20 | 1.20 | +| `RootCAConfigMap` | `true` | GA | 1.21 | - | | `RotateKubeletClientCertificate` | `true` | Beta | 1.8 | 1.18 | | `RotateKubeletClientCertificate` | `true` | GA | 1.19 | - | | `RuntimeClass` | `false` | Alpha | 1.12 | 1.13 | @@ -306,8 +332,11 @@ | `SCTPSupport` | `false` | Alpha | 1.12 | 1.18 | | `SCTPSupport` | `true` | Beta | 1.19 | 1.19 | | `SCTPSupport` | `true` | GA | 1.20 | - | +| `ServiceAccountIssuerDiscovery` | `false` | Alpha | 1.18 | 1.19 | +| `ServiceAccountIssuerDiscovery` | `true` | Beta | 1.20 | 1.20 | +| `ServiceAccountIssuerDiscovery` | `true` | GA | 1.21 | - | | `ServiceAppProtocol` | `false` | Alpha | 1.18 | 1.18 | -| `ServiceAppProtocol` | `true` | Beta | 1.19 | | +| `ServiceAppProtocol` | `true` | Beta | 1.19 | 1.19 | | `ServiceAppProtocol` | `true` | GA | 1.20 | - | | `ServiceLoadBalancerFinalizer` | `false` | Alpha | 1.15 | 1.15 | | `ServiceLoadBalancerFinalizer` | `true` | Beta | 1.16 | 1.16 | @@ -319,7 +348,7 @@ | `StorageObjectInUseProtection` | `true` | GA | 1.11 | - | | `StreamingProxyRedirects` | `false` | Beta | 1.5 | 1.5 | | `StreamingProxyRedirects` | `true` | Beta | 1.6 | 1.18 | -| `StreamingProxyRedirects` | - | Deprecated| 1.19 | - | +| `StreamingProxyRedirects` | `true` | Deprecated | 1.19 | - | | `SupportIPVSProxyMode` | `false` | Alpha | 1.8 | 1.8 | | `SupportIPVSProxyMode` | `false` | Beta | 1.9 | 1.9 | | `SupportIPVSProxyMode` | `true` | Beta | 1.10 | 1.10 | @@ -330,6 +359,8 @@ | `SupportPodPidsLimit` | `false` | Alpha | 1.10 | 1.13 | | `SupportPodPidsLimit` | `true` | Beta | 1.14 | 1.19 | | `SupportPodPidsLimit` | `true` | GA | 1.20 | - | +| `Sysctls` | `true` | Beta | 1.11 | 1.20 | +| `Sysctls` | `true` | GA | 1.21 | | | `TaintBasedEvictions` | `false` | Alpha | 1.6 | 1.12 | | `TaintBasedEvictions` | `true` | Beta | 1.13 | 1.17 | | `TaintBasedEvictions` | `true` | GA | 1.18 | - | @@ -342,16 +373,16 @@ 
| `TokenRequestProjection` | `false` | Alpha | 1.11 | 1.11 | | `TokenRequestProjection` | `true` | Beta | 1.12 | 1.19 | | `TokenRequestProjection` | `true` | GA | 1.20 | - | -| `VolumeSnapshotDataSource` | `false` | Alpha | 1.12 | 1.16 | -| `VolumeSnapshotDataSource` | `true` | Beta | 1.17 | 1.19 | -| `VolumeSnapshotDataSource` | `true` | GA | 1.20 | - | | `VolumePVCDataSource` | `false` | Alpha | 1.15 | 1.15 | | `VolumePVCDataSource` | `true` | Beta | 1.16 | 1.17 | | `VolumePVCDataSource` | `true` | GA | 1.18 | - | | `VolumeScheduling` | `false` | Alpha | 1.9 | 1.9 | | `VolumeScheduling` | `true` | Beta | 1.10 | 1.12 | | `VolumeScheduling` | `true` | GA | 1.13 | - | -| `VolumeSubpath` | `true` | GA | 1.13 | - | +| `VolumeSnapshotDataSource` | `false` | Alpha | 1.12 | 1.16 | +| `VolumeSnapshotDataSource` | `true` | Beta | 1.17 | 1.19 | +| `VolumeSnapshotDataSource` | `true` | GA | 1.20 | - | +| `VolumeSubpath` | `true` | GA | 1.10 | - | | `VolumeSubpathEnvExpansion` | `false` | Alpha | 1.14 | 1.14 | | `VolumeSubpathEnvExpansion` | `true` | Beta | 1.15 | 1.16 | | `VolumeSubpathEnvExpansion` | `true` | GA | 1.17 | - | @@ -417,7 +448,7 @@ Each feature gate is designed for enabling/disabling a specific feature: - `APIServerIdentity`: Assign each API server an ID in a cluster. - `Accelerators`: Enable Nvidia GPU support when using Docker - `AdvancedAuditing`: Enable [advanced auditing](/docs/tasks/debug-application-cluster/audit/#advanced-audit) -- `AffinityInAnnotations`(*deprecated*): Enable setting +- `AffinityInAnnotations`: Enable setting [Pod affinity or anti-affinity](/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity). - `AllowExtTrafficLocalEndpoints`: Enable a service to route external requests to node local endpoints. - `AllowInsecureBackendProxy`: Enable the users to skip TLS verification of @@ -443,7 +474,9 @@ Each feature gate is designed for enabling/disabling a specific feature: for more details. - `CPUManager`: Enable container level CPU affinity support, see [CPU Management Policies](/docs/tasks/administer-cluster/cpu-management-policies/). -- `CRIContainerLogRotation`: Enable container log rotation for cri container runtime. +- `CRIContainerLogRotation`: Enable container log rotation for CRI container runtime. The default max size of a log file is 10MB and the + default max number of log files allowed for a container is 5. These values can be configured in the kubelet config. + See the [logging at node level](/docs/concepts/cluster-administration/logging/#logging-at-the-node-level) documentation for more details. - `CSIBlockVolume`: Enable external CSI volume drivers to support block storage. See the [`csi` raw block volume support](/docs/concepts/storage/volumes/#csi-raw-block-volume-support) documentation for more details. @@ -524,6 +557,7 @@ Each feature gate is designed for enabling/disabling a specific feature: - `CSIVolumeFSGroupPolicy`: Allows CSIDrivers to use the `fsGroupPolicy` field. This field controls whether volumes created by a CSIDriver support volume ownership and permission modifications when these volumes are mounted. +- `CSIVolumeHealth`: Enable support for CSI volume health monitoring on node. - `ConfigurableFSGroupPolicy`: Allows user to configure volume permission change policy for fsGroups when mounting a volume in a Pod. 
See [Configure volume permission and ownership change policy for Pods](/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods) @@ -545,10 +579,9 @@ Each feature gate is designed for enabling/disabling a specific feature: [CustomResourceDefinition](/docs/concepts/extend-kubernetes/api-extension/custom-resources/). - `CustomResourceWebhookConversion`: Enable webhook-based conversion on resources created from [CustomResourceDefinition](/docs/concepts/extend-kubernetes/api-extension/custom-resources/). - troubleshoot a running Pod. - `DefaultPodTopologySpread`: Enables the use of `PodTopologySpread` scheduling plugin to do [default spreading](/docs/concepts/workloads/pods/pod-topology-spread-constraints/#internal-default-constraints). -- `DevicePlugins`: Enable the [device-plugins](/docs/concepts/cluster-administration/device-plugins/) +- `DevicePlugins`: Enable the [device-plugins](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) based resource provisioning on nodes. - `DisableAcceleratorUsageMetrics`: [Disable accelerator metrics collected by the kubelet](/docs/concepts/cluster-administration/system-metrics/#disable-accelerator-metrics). @@ -556,18 +589,18 @@ Each feature gate is designed for enabling/disabling a specific feature: [downward API](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information). - `DryRun`: Enable server-side [dry run](/docs/reference/using-api/api-concepts/#dry-run) requests so that validation, merging, and mutation can be tested without committing. -- `DynamicAuditing`(*deprecated*): Used to enable dynamic auditing before v1.19. +- `DynamicAuditing`: Used to enable dynamic auditing before v1.19. - `DynamicKubeletConfig`: Enable the dynamic configuration of kubelet. See [Reconfigure kubelet](/docs/tasks/administer-cluster/reconfigure-kubelet/). - `DynamicProvisioningScheduling`: Extend the default scheduler to be aware of volume topology and handle PV provisioning. This feature is superseded by the `VolumeScheduling` feature completely in v1.12. -- `DynamicVolumeProvisioning`(*deprecated*): Enable the +- `DynamicVolumeProvisioning`: Enable the [dynamic provisioning](/docs/concepts/storage/dynamic-provisioning/) of persistent volumes to Pods. - `EfficientWatchResumption`: Allows for storage-originated bookmark (progress notify) events to be delivered to the users. This is only applied to watch operations. -- `EnableAggregatedDiscoveryTimeout` (*deprecated*): Enable the five second +- `EnableAggregatedDiscoveryTimeout`: Enable the five second timeout on aggregated discovery calls. - `EnableEquivalenceClassCache`: Enable the scheduler to cache equivalence of nodes when scheduling Pods. @@ -623,20 +656,30 @@ Each feature gate is designed for enabling/disabling a specific feature: - `HyperVContainer`: Enable [Hyper-V isolation](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container) for Windows containers. -- `IPv6DualStack`: Enable [dual stack](/docs/concepts/services-networking/dual-stack/) - support for IPv6. - `ImmutableEphemeralVolumes`: Allows for marking individual Secrets and ConfigMaps as immutable for better safety and performance. -- `KubeletConfigFile` (*deprecated*): Enable loading kubelet configuration from +- `IndexedJob`: Allows the [Job](/docs/concepts/workloads/controllers/job/) + controller to manage Pod completions per completion index. 
+- `IngressClassNamespacedParams`: Allow referencing namespace-scoped parameters in the + `IngressClass` resource. This feature adds two fields, `Scope` and `Namespace`, + to `IngressClass.spec.parameters`. +- `Initializers`: Allow asynchronous coordination of object creation using the + Initializers admission plugin. +- `IPv6DualStack`: Enable [dual stack](/docs/concepts/services-networking/dual-stack/) + support for IPv6. +- `KubeletConfigFile`: Enable loading kubelet configuration from + a file specified using a config file. See [setting kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file/) for more details. - `KubeletCredentialProviders`: Enable kubelet exec credential providers for image pull credentials. - `KubeletPluginsWatcher`: Enable probe-based plugin watcher utility to enable kubelet to discover plugins such as [CSI volume drivers](/docs/concepts/storage/volumes/#csi). -- `KubeletPodResources`: Enable the kubelet's pod resources gRPC endpoint. See - [Support Device Monitoring](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/606-compute-device-assignment/README.md) for more details. +- `KubeletPodResourcesGetAllocatable`: Enable the kubelet's pod resources `GetAllocatableResources` functionality. + This API augments the [resource allocation reporting](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#monitoring-device-plugin-resources) + with information about the allocatable resources, enabling clients to properly track the free compute resources on a node. - `LegacyNodeRoleBehavior`: When disabled, legacy behavior in service load balancers and node disruption will ignore the `node-role.kubernetes.io/master` label in favor of the feature-specific labels provided by `NodeDisruptionExclusion` and `ServiceNodeExclusion`. @@ -651,21 +694,29 @@ Each feature gate is designed for enabling/disabling a specific feature: supports project quotas and they are enabled, use project quotas to monitor [emptyDir volume](/docs/concepts/storage/volumes/#emptydir) storage consumption rather than filesystem walk for better performance and accuracy. +- `LogarithmicScaleDown`: Enable semi-random selection of pods to evict on controller scaledown + based on logarithmic bucketing of pod timestamps. - `MixedProtocolLBService`: Enable using different protocols in the same `LoadBalancer` type Service instance. -- `MountContainers` (*deprecated*): Enable using utility containers on host as - the volume mounter. +- `MountContainers`: Enable using utility containers on host as the volume mounter. - `MountPropagation`: Enable sharing volume mounted by one container to other containers or pods. For more details, please see [mount propagation](/docs/concepts/storage/volumes/#mount-propagation). +- `NamespaceDefaultLabelName`: Configure the API Server to set an immutable {{< glossary_tooltip text="label" term_id="label" >}} + `kubernetes.io/metadata.name` on all namespaces, containing the namespace name. +- `NetworkPolicyEndPort`: Enable use of the field `endPort` in NetworkPolicy objects, allowing the selection of a port range instead of a single port. - `NodeDisruptionExclusion`: Enable use of the Node label `node.kubernetes.io/exclude-disruption` which prevents nodes from being evacuated during zone failures. 
- `NodeLease`: Enable the new Lease API to report node heartbeats, which could be used as a node health signal. - `NonPreemptingPriority`: Enable `preemptionPolicy` field for PriorityClass and Pod. - `PVCProtection`: Enable the prevention of a PersistentVolumeClaim (PVC) from being deleted when it is still used by any Pod. +- `PodDeletionCost`: Enable the [Pod Deletion Cost](/docs/concepts/workloads/controllers/replicaset/#pod-deletion-cost) + feature which allows users to influence ReplicaSet downscaling order. - `PersistentLocalVolumes`: Enable the usage of `local` volume type in Pods. Pod affinity has to be specified if requesting a `local` volume. - `PodDisruptionBudget`: Enable the [PodDisruptionBudget](/docs/tasks/run-application/configure-pdb/) feature. +- `PodAffinityNamespaceSelector`: Enable the [Pod Affinity Namespace Selector](/docs/concepts/scheduling-eviction/assign-pod-node/#namespace-selector) + and [CrossNamespacePodAffinity](/docs/concepts/policy/resource-quotas/#cross-namespace-pod-affinity-quota) quota scope features. - `PodOverhead`: Enable the [PodOverhead](/docs/concepts/scheduling-eviction/pod-overhead/) feature to account for pod overheads. - `PodPriority`: Enable the descheduling and preemption of Pods based on their @@ -676,6 +727,9 @@ Each feature gate is designed for enabling/disabling a specific feature: - `PodShareProcessNamespace`: Enable the setting of `shareProcessNamespace` in a Pod for sharing a single process namespace between containers running in a pod. More details can be found in [Share Process Namespace between Containers in a Pod](/docs/tasks/configure-pod-container/share-process-namespace/). +- `ProbeTerminationGracePeriod`: Enable [setting probe-level + `terminationGracePeriodSeconds`](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#probe-level-terminationgraceperiodseconds) + on pods. See the [enhancement proposal](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2238-liveness-probe-grace-period) for more details. - `ProcMountType`: Enables control over the type proc mounts for containers by setting the `procMount` field of a SecurityContext. - `QOSReserved`: Allows resource reservations at the QoS level preventing pods @@ -686,7 +740,9 @@ Each feature gate is designed for enabling/disabling a specific feature: [chunking list request](/docs/reference/using-api/api-concepts/#retrieving-large-results-sets-in-chunks). - `RemoveSelfLink`: Deprecates and removes `selfLink` from ObjectMeta and ListMeta. -- `ResourceLimitsPriorityFunction` (*deprecated*): Enable a scheduler priority function that +- `RequestManagement`: Enables managing request concurrency with prioritization and fairness + at each API server. Deprecated by `APIPriorityAndFairness` since 1.17. +- `ResourceLimitsPriorityFunction`: Enable a scheduler priority function that assigns a lowest possible score of 1 to a node that satisfies at least one of the input Pod's cpu and memory limits. The intent is to break ties between nodes with same scores. @@ -716,9 +772,11 @@ Each feature gate is designed for enabling/disabling a specific feature: JWKS URLs) for the service account issuer in the API server. See [Configure Service Accounts for Pods](/docs/tasks/configure-pod-container/configure-service-account/#service-account-issuer-discovery) for more details. -- `ServiceAppProtocol`: Enables the `AppProtocol` field on Services and Endpoints. 
-`ServiceLBNodePortControl`: Enables the `spec.allocateLoadBalancerNodePorts` - field on Services. +`ServiceAppProtocol`: Enables the `appProtocol` field on Services and Endpoints. +`ServiceInternalTrafficPolicy`: Enables the `internalTrafficPolicy` field on Services. +`ServiceLBNodePortControl`: Enables the `allocateLoadBalancerNodePorts` field on Services. +`ServiceLoadBalancerClass`: Enables the `loadBalancerClass` field on Services. See + [Specifying class of load balancer implementation](/docs/concepts/services-networking/service/#load-balancer-class) for more details. - `ServiceLoadBalancerFinalizer`: Enable finalizer protection for Service load balancers. - `ServiceNodeExclusion`: Enable the exclusion of nodes from load balancers created by a cloud provider. A node is eligible for exclusion if labelled with @@ -727,8 +785,6 @@ topology of the cluster. See [ServiceTopology](/docs/concepts/services-networking/service-topology/) for more details. -- `SizeMemoryBackedVolumes`: Enables kubelet support to size memory backed volumes. - See [volumes](docs/concepts/storage/volumes) for more details. - `SetHostnameAsFQDN`: Enable the ability to set a Fully Qualified Domain Name (FQDN) as the hostname of a pod. See [Pod's `setHostnameAsFQDN` field](/docs/concepts/services-networking/dns-pod-service/#pod-sethostnameasfqdn-field). @@ -748,12 +804,15 @@ Examples of streaming requests include the `exec`, `attach` and `port-forward` requests. - `SupportIPVSProxyMode`: Enable providing in-cluster service load balancing using IPVS. See [service proxies](/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies) for more details. -- `SupportPodPidsLimit`: Enable the support to limiting PIDs in Pods. - `SupportNodePidsLimit`: Enable support for limiting PIDs on the Node. The parameter `pid=` in the `--system-reserved` and `--kube-reserved` options can be specified to ensure that the specified number of process IDs will be reserved for the system as a whole and for Kubernetes system daemons respectively. +- `SupportPodPidsLimit`: Enable support for limiting PIDs in Pods. +- `SuspendJob`: Enable support to suspend and resume Jobs. See + [the Jobs docs](/docs/concepts/workloads/controllers/job/) for + more details. - `Sysctls`: Enable support for namespaced kernel parameters (sysctls) that can be set for each pod. See [sysctls](/docs/tasks/administer-cluster/sysctl-cluster/) for more details. @@ -769,15 +828,25 @@ - `TokenRequest`: Enable the `TokenRequest` endpoint on service account resources. - `TokenRequestProjection`: Enable the injection of service account tokens into a Pod through a [`projected` volume](/docs/concepts/storage/volumes/#projected). +- `TopologyAwareHints`: Enables topology aware routing based on topology hints + in EndpointSlices. See [Topology Aware + Hints](/docs/concepts/services-networking/topology-aware-hints/) for more + details. - `TopologyManager`: Enable a mechanism to coordinate fine-grained hardware resource assignments for different components in Kubernetes. See [Control Topology Management Policies on a node](/docs/tasks/administer-cluster/topology-manager/). +- `ValidateProxyRedirects`: This flag controls whether the API server should + validate that redirects are only followed to the same host. 
Only used if the `StreamingProxyRedirects` flag is enabled. +- `VolumeCapacityPriority`: Enable support for prioritizing nodes in different + topologies based on available PV capacity. - `VolumePVCDataSource`: Enable support for specifying an existing PVC as a DataSource. - `VolumeScheduling`: Enable volume topology aware scheduling and make the PersistentVolumeClaim (PVC) binding aware of scheduling decisions. It also enables the usage of [`local`](/docs/concepts/storage/volumes/#local) volume type when used together with the `PersistentLocalVolumes` feature gate. - `VolumeSnapshotDataSource`: Enable volume snapshot data source support. +- `VolumeSubpath`: Allow mounting a subpath of a volume in a container. - `VolumeSubpathEnvExpansion`: Enable `subPathExpr` field for expanding environment variables into a `subPath`. - `WarningHeaders`: Allow sending warning headers in API responses. diff --git a/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md index 6e87bff44c986..77fe552879043 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md @@ -2,8 +2,22 @@ title: kube-apiserver content_type: tool-reference weight: 30 +auto_generated: true --- ## {{% heading "synopsis" %}}
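Before the flag-by-flag reference, a static Pod manifest that wires together a few of the flags documented below may help orient the reader. This is a minimal sketch only; the image tag, addresses, and flag values are illustrative and not recommendations:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
    - name: kube-apiserver
      image: k8s.gcr.io/kube-apiserver:v1.21.0     # illustrative tag
      command:
        - kube-apiserver
        - --advertise-address=192.0.2.10           # example address (TEST-NET-1 range)
        - --etcd-servers=https://127.0.0.1:2379
        - --authorization-mode=Node,RBAC
        - --enable-admission-plugins=NodeRestriction
        - --allow-privileged=true
        - --audit-log-path=/var/log/kubernetes/audit.log
        - --audit-log-maxage=30
```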

If true, adds the file directory to the header of the log messages

--admission-control-config-file string -File with admission control configuration. +

File with admission control configuration.

+ + + +--advertise-address string + + +

The IP address on which to advertise the apiserver to members of the cluster. This address must be reachable by the rest of the cluster. If blank, the --bind-address will be used. If --bind-address is unspecified, the host's default interface will be used.

---advertise-address ip +--allow-metric-labels stringToString     Default: [] -The IP address on which to advertise the apiserver to members of the cluster. This address must be reachable by the rest of the cluster. If blank, the --bind-address will be used. If --bind-address is unspecified, the host's default interface will be used. +

The map from metric-label to value allow-list of this label. The key's format is ,. The value's format is <allowed_value>,<allowed_value>...e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.

--allow-privileged -If true, allow privileged containers. [default=false] +

If true, allow privileged containers. [default=false]

--alsologtostderr -log to standard error as well as files +

log to standard error as well as files

--anonymous-auth     Default: true -Enables anonymous requests to the secure port of the API server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. +

Enables anonymous requests to the secure port of the API server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated.

---api-audiences stringSlice +--api-audiences strings -Identifiers of the API. The service account token authenticator will validate that tokens used against the API are bound to at least one of these audiences. If the --service-account-issuer flag is configured and this flag is not, this field defaults to a single element list containing the issuer URL. +

Identifiers of the API. The service account token authenticator will validate that tokens used against the API are bound to at least one of these audiences. If the --service-account-issuer flag is configured and this flag is not, this field defaults to a single element list containing the issuer URL.

--apiserver-count int     Default: 1 -The number of apiservers running in the cluster, must be a positive number. (In use when --endpoint-reconciler-type=master-count is enabled.) +

The number of apiservers running in the cluster, must be a positive number. (In use when --endpoint-reconciler-type=master-count is enabled.)

--audit-log-batch-buffer-size int     Default: 10000 -The size of the buffer to store events before batching and writing. Only used in batch mode. +

The size of the buffer to store events before batching and writing. Only used in batch mode.

--audit-log-batch-max-size int     Default: 1 -The maximum size of a batch. Only used in batch mode. +

The maximum size of a batch. Only used in batch mode.

--audit-log-batch-max-wait duration -The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode. +

The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode.

--audit-log-batch-throttle-burst int -Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. +

Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode.

--audit-log-batch-throttle-enable -Whether batching throttling is enabled. Only used in batch mode. +

Whether batching throttling is enabled. Only used in batch mode.

---audit-log-batch-throttle-qps float32 +--audit-log-batch-throttle-qps float -Maximum average number of batches per second. Only used in batch mode. +

Maximum average number of batches per second. Only used in batch mode.

--audit-log-compress -If set, the rotated log files will be compressed using gzip. +

If set, the rotated log files will be compressed using gzip.

--audit-log-format string     Default: "json" -Format of saved audits. "legacy" indicates 1-line text format for each event. "json" indicates structured json format. Known formats are legacy,json. +

Format of saved audits. "legacy" indicates 1-line text format for each event. "json" indicates structured json format. Known formats are legacy,json.

--audit-log-maxage int -The maximum number of days to retain old audit log files based on the timestamp encoded in their filename. +

The maximum number of days to retain old audit log files based on the timestamp encoded in their filename.

--audit-log-maxbackup int -The maximum number of old audit log files to retain. +

The maximum number of old audit log files to retain.

--audit-log-maxsize int -The maximum size in megabytes of the audit log file before it gets rotated. +

The maximum size in megabytes of the audit log file before it gets rotated.

--audit-log-mode string     Default: "blocking" -Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict. +

Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict.

--audit-log-path string -If set, all requests coming to the apiserver will be logged to this file. '-' means standard out. +

If set, all requests coming to the apiserver will be logged to this file. '-' means standard out.

--audit-log-truncate-enabled -Whether event and batch truncating is enabled. +

Whether event and batch truncating is enabled.

--audit-log-truncate-max-batch-size int     Default: 10485760 -Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. +

Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size.

--audit-log-truncate-max-event-size int     Default: 102400 -Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded. +

Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded.

--audit-log-version string     Default: "audit.k8s.io/v1" -API group and version used for serializing audit events written to log. +

API group and version used for serializing audit events written to log.

--audit-policy-file string -Path to the file that defines the audit policy configuration. +

Path to the file that defines the audit policy configuration.

--audit-webhook-batch-buffer-size int     Default: 10000 -The size of the buffer to store events before batching and writing. Only used in batch mode. +

The size of the buffer to store events before batching and writing. Only used in batch mode.

--audit-webhook-batch-max-size int     Default: 400 -The maximum size of a batch. Only used in batch mode. +

The maximum size of a batch. Only used in batch mode.

--audit-webhook-batch-max-wait duration     Default: 30s -The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode. +

The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode.

--audit-webhook-batch-throttle-burst int     Default: 15 -Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. +

Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode.

--audit-webhook-batch-throttle-enable     Default: true -Whether batching throttling is enabled. Only used in batch mode. +

Whether batching throttling is enabled. Only used in batch mode.

---audit-webhook-batch-throttle-qps float32     Default: 10 +--audit-webhook-batch-throttle-qps float     Default: 10 -Maximum average number of batches per second. Only used in batch mode. +

Maximum average number of batches per second. Only used in batch mode.

--audit-webhook-config-file string -Path to a kubeconfig formatted file that defines the audit webhook configuration. +

Path to a kubeconfig formatted file that defines the audit webhook configuration.

--audit-webhook-initial-backoff duration     Default: 10s -The amount of time to wait before retrying the first failed request. +

The amount of time to wait before retrying the first failed request.

--audit-webhook-mode string     Default: "batch" -Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict. +

Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict.

--audit-webhook-truncate-enabled -Whether event and batch truncating is enabled. +

Whether event and batch truncating is enabled.

--audit-webhook-truncate-max-batch-size int     Default: 10485760 -Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. +

Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size.

--audit-webhook-truncate-max-event-size int     Default: 102400 -Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded. +

Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded.

--audit-webhook-version string     Default: "audit.k8s.io/v1" -API group and version used for serializing audit events written to webhook. +

API group and version used for serializing audit events written to webhook.

--authentication-token-webhook-cache-ttl duration     Default: 2m0s -The duration to cache responses from the webhook token authenticator. +

The duration to cache responses from the webhook token authenticator.

--authentication-token-webhook-config-file string -File with webhook configuration for token authentication in kubeconfig format. The API server will query the remote service to determine authentication for bearer tokens. +

File with webhook configuration for token authentication in kubeconfig format. The API server will query the remote service to determine authentication for bearer tokens.

--authentication-token-webhook-version string     Default: "v1beta1" -The API version of the authentication.k8s.io TokenReview to send to and expect from the webhook. +

The API version of the authentication.k8s.io TokenReview to send to and expect from the webhook.

---authorization-mode stringSlice     Default: [AlwaysAllow] +--authorization-mode strings     Default: "AlwaysAllow" -Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node. +

Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node.

--authorization-policy-file string -File with authorization policy in json line by line format, used with --authorization-mode=ABAC, on the secure port. +

File with authorization policy in json line by line format, used with --authorization-mode=ABAC, on the secure port.

--authorization-webhook-cache-authorized-ttl duration     Default: 5m0s -The duration to cache 'authorized' responses from the webhook authorizer. +

The duration to cache 'authorized' responses from the webhook authorizer.

--authorization-webhook-cache-unauthorized-ttl duration     Default: 30s -The duration to cache 'unauthorized' responses from the webhook authorizer. +

The duration to cache 'unauthorized' responses from the webhook authorizer.

--authorization-webhook-config-file string -File with webhook configuration in kubeconfig format, used with --authorization-mode=Webhook. The API server will query the remote service to determine access on the API server's secure port. +

File with webhook configuration in kubeconfig format, used with --authorization-mode=Webhook. The API server will query the remote service to determine access on the API server's secure port.

--authorization-webhook-version string     Default: "v1beta1" -The API version of the authorization.k8s.io SubjectAccessReview to send to and expect from the webhook. +

The API version of the authorization.k8s.io SubjectAccessReview to send to and expect from the webhook.

--azure-container-registry-config string -Path to the file containing Azure container registry configuration information. +

Path to the file containing Azure container registry configuration information.

---bind-address ip     Default: 0.0.0.0 +--bind-address string     Default: 0.0.0.0 -The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. +

The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used.

--cert-dir string     Default: "/var/run/kubernetes" -The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. +

The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.

--client-ca-file string -If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate. +

If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.

--cloud-config string -The path to the cloud provider configuration file. Empty string for no configuration file. +

The path to the cloud provider configuration file. Empty string for no configuration file.

--cloud-provider string -The provider for cloud services. Empty string for no provider. +

The provider for cloud services. Empty string for no provider.

--cloud-provider-gce-l7lb-src-cidrs cidrs     Default: 130.211.0.0/22,35.191.0.0/16 -CIDRs opened in GCE firewall for L7 LB traffic proxy & health checks +

CIDRs opened in GCE firewall for L7 LB traffic proxy & health checks

--contention-profiling -Enable lock contention profiling, if profiling is enabled +

Enable lock contention profiling, if profiling is enabled

---cors-allowed-origins stringSlice +--cors-allowed-origins strings -List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled. +

List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled.

--default-not-ready-toleration-seconds int     Default: 300 -Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration. +

Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration.

--default-unreachable-toleration-seconds int     Default: 300 -Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration. +

Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration.

--default-watch-cache-size int     Default: 100 -Default watch cache size. If zero, watch cache will be disabled for resources that do not have a default watch size set. +

Default watch cache size. If zero, watch cache will be disabled for resources that do not have a default watch size set.

--delete-collection-workers int     Default: 1 -Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup. +

Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup.

---disable-admission-plugins stringSlice +--disable-admission-plugins strings -admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. +

admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyServiceExternalIPs, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter.

+ + + +--disabled-metrics strings + + +

This flag provides an escape hatch for misbehaving metrics. You must provide the fully qualified metric name in order to disable it. Disclaimer: disabling metrics is higher in precedence than showing hidden metrics.

--egress-selector-config-file string -File with apiserver egress selector configuration. +

File with apiserver egress selector configuration.

---enable-admission-plugins stringSlice +--enable-admission-plugins strings -admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. +

admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyServiceExternalIPs, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter.

--enable-aggregator-routing

Turns on aggregator routing requests to endpoints IP rather than cluster IP.

--enable-bootstrap-token-auth

Enable to allow secrets of type 'bootstrap.kubernetes.io/token' in the 'kube-system' namespace to be used for TLS bootstrapping authentication.

--enable-garbage-collector     Default: true

Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-controller-manager.

--enable-priority-and-fairness     Default: true

If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness

--encryption-provider-config string

The file containing configuration for encryption providers to be used for storing secrets in etcd

--endpoint-reconciler-type string     Default: "lease"

Use an endpoint reconciler (master-count, lease, none)

--etcd-cafile string

SSL Certificate Authority file used to secure etcd communication.

--etcd-certfile string

SSL certification file used to secure etcd communication.

--etcd-compaction-interval duration     Default: 5m0s

The interval of compaction requests. If 0, the compaction request from apiserver is disabled.

--etcd-count-metric-poll-period duration     Default: 1m0s

Frequency of polling etcd for number of resources per type. 0 disables the metric collection.

--etcd-db-metric-poll-interval duration     Default: 30s

The interval of requests to poll etcd and update metric. 0 disables the metric collection

--etcd-healthcheck-timeout duration     Default: 2s

The timeout to use when checking etcd health.

--etcd-keyfile string

SSL key file used to secure etcd communication.

--etcd-prefix string     Default: "/registry"

The prefix to prepend to all resource paths in etcd.

--etcd-servers strings

List of etcd servers to connect with (scheme://ip:port), comma separated.

--etcd-servers-overrides strings

Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are URLs, semicolon separated. Note that this applies only to resources compiled into this server binary.
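
A sketch combining both flags (all endpoints are placeholders): events in the core group are routed to a dedicated etcd cluster while everything else uses the main servers:

    kube-apiserver \
      --etcd-servers=https://10.0.0.1:2379,https://10.0.0.2:2379 \
      --etcd-servers-overrides=/events#https://10.0.1.1:2379;https://10.0.1.2:2379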

--event-ttl duration     Default: 1h0m0s

Amount of time to retain events.

--experimental-logging-sanitization

[Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens).
Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.

--external-hostname string

The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs or OpenID Discovery).

--feature-gates <comma-separated 'key=True|False' pairs>

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
CSIServiceAccountToken=true|false (BETA - default=true)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (ALPHA - default=false)
CronJobControllerV2=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (ALPHA - default=false)
DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DownwardAPIHugePages=true|false (BETA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceProxying=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (ALPHA - default=false)
IngressClassNamespacedParams=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (ALPHA - default=false)
MemoryManager=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NamespaceDefaultLabelName=true|false (BETA - default=true)
NetworkPolicyEndPort=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (ALPHA - default=false)
PodDeletionCost=true|false (ALPHA - default=false)
PodOverhead=true|false (BETA - default=true)
PreferNominatedNode=true|false (ALPHA - default=false)
ProbeTerminationGracePeriod=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
ServiceInternalTrafficPolicy=true|false (ALPHA - default=false)
ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceLoadBalancerClass=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
SetHostnameAsFQDN=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (ALPHA - default=false)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (BETA - default=true)
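
For instance, a hypothetical invocation flipping two of the gates listed above (the gate values are illustrative only):

    kube-apiserver --feature-gates=EphemeralContainers=true,TTLAfterFinished=false ...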

--goaway-chance float

To prevent HTTP/2 clients from getting stuck on a single apiserver, randomly close a connection (GOAWAY). The client's other in-flight requests won't be affected, and the client will reconnect, likely landing on a different apiserver after going through the load balancer again. This argument sets the fraction of requests that will be sent a GOAWAY. Clusters with single apiservers, or which don't use a load balancer, should NOT enable this. Min is 0 (off), Max is .02 (1/50 requests); .001 (1/1000) is a recommended starting point.

-h, --help

help for kube-apiserver

--http2-max-streams-per-connection int

The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.

--identity-lease-duration-seconds int     Default: 3600

The duration of kube-apiserver lease in seconds, must be a positive number. (In use when the APIServerIdentity feature gate is enabled.)

--identity-lease-renew-interval-seconds int     Default: 10

The interval of kube-apiserver renewing its lease in seconds, must be a positive number. (In use when the APIServerIdentity feature gate is enabled.)

--kubelet-certificate-authority string

Path to a cert file for the certificate authority.

--kubelet-client-certificate string

Path to a client cert file for TLS.

--kubelet-client-key string

Path to a client key file for TLS.

--kubelet-preferred-address-types strings     Default: "Hostname,InternalDNS,InternalIP,ExternalDNS,ExternalIP"

List of the preferred NodeAddressTypes to use for kubelet connections.

--kubelet-timeout duration     Default: 5s

Timeout for kubelet operations.

--kubernetes-service-node-port int

If non-zero, the Kubernetes master service (which apiserver creates/maintains) will be of type NodePort, using this as the value of the port. If zero, the Kubernetes master service will be of type ClusterIP.

--lease-reuse-duration-seconds int     Default: 60

The time in seconds that each lease is reused. A lower value could avoid a large number of objects reusing the same lease. Note that a value that is too small may cause performance problems at the storage layer.

--livez-grace-period duration

This option represents the maximum amount of time it should take for apiserver to complete its startup sequence and become live. From apiserver's start time to when this amount of time has elapsed, /livez will assume that unfinished post-start hooks will complete successfully and therefore return true.

--log-flush-frequency duration     Default: 5s

Maximum number of seconds between log flushes

--log_backtrace_at <a string in the form 'file:N'>     Default: :0

when logging hits line file:N, emit a stack trace

--log_dir string

If non-empty, write log files in this directory

--log_file string

If non-empty, use this log file

--log_file_max_size uint     Default: 1800

Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited.

--logging-format string     Default: "text"

Sets the log format. Permitted formats: "json", "text".
Non-default formats don't honor these flags: --add_dir_header, --alsologtostderr, --log_backtrace_at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --one_output, --skip_headers, --skip_log_headers, --stderrthreshold, --vmodule, --log-flush-frequency.
Non-default choices are currently alpha and subject to change without warning.

--logtostderr     Default: true

log to standard error instead of files

--master-service-namespace string     Default: "default"

DEPRECATED: the namespace from which the Kubernetes master services should be injected into pods.

--max-connection-bytes-per-sec int

If non-zero, throttle each user connection to this number of bytes/sec. Currently only applies to long-running requests.

--max-mutating-requests-inflight int     Default: 200

The maximum number of mutating requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit.

--max-requests-inflight int     Default: 400

The maximum number of non-mutating requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit.

--min-request-timeout int     Default: 1800

An optional field indicating the minimum number of seconds a handler must keep a request open before timing it out. Currently only honored by the watch request handler, which picks a randomized value above this number as the connection timeout, to spread out load.

--oidc-ca-file string

If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used.

--oidc-client-id string

The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set.

--oidc-groups-claim string

If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This flag is experimental, please see the authentication documentation for further details.

--oidc-groups-prefix string

If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.

--oidc-issuer-url string

The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).

--oidc-required-claim <comma-separated 'key=value' pairs>

A key=value pair that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value. Repeat this flag to specify multiple claims.
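
A sketch with the flag repeated for two claims (both claim names and values are hypothetical):

    kube-apiserver --oidc-required-claim=aud=kubernetes --oidc-required-claim=hd=example.com ...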

--oidc-signing-algs strings     Default: "RS256"

Comma-separated list of allowed JOSE asymmetric signing algorithms. JWTs with a 'alg' header value not in this list will be rejected. Values are defined by RFC 7518 https://tools.ietf.org/html/rfc7518#section-3.1.

--oidc-username-claim string     Default: "sub"

The OpenID claim to use as the user name. Note that claims other than the default ('sub') are not guaranteed to be unique and immutable. This flag is experimental; please see the authentication documentation for further details.

--oidc-username-prefix string

If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'.

--one_output

If true, only write logs to their native severity level (vs also writing to each lower severity level)

--permit-address-sharing

If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]

--permit-port-sharing

If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]

--profiling     Default: true

Enable profiling via web interface host:port/debug/pprof/

--proxy-client-cert-file string

Client certificate used to prove the identity of the aggregator or kube-apiserver when it must call out during a request. This includes proxying requests to a user api-server and calling out to webhook admission plugins. It is expected that this cert includes a signature from the CA in the --requestheader-client-ca-file flag. That CA is published in the 'extension-apiserver-authentication' configmap in the kube-system namespace. Components receiving calls from kube-aggregator should use that CA to perform their half of the mutual TLS verification.

--proxy-client-key-file string

Private key for the client certificate used to prove the identity of the aggregator or kube-apiserver when it must call out during a request. This includes proxying requests to a user api-server and calling out to webhook admission plugins.

--request-timeout duration     Default: 1m0s

An optional field indicating the duration a handler must keep a request open before timing it out. This is the default request timeout for requests but may be overridden by flags such as --min-request-timeout for specific types of requests.

--requestheader-allowed-names strings

List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed.

--requestheader-client-ca-file string

Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers. WARNING: generally do not depend on authorization being already done for incoming requests.

--requestheader-extra-headers-prefix strings

List of request header prefixes to inspect. X-Remote-Extra- is suggested.

--requestheader-group-headers strings

List of request headers to inspect for groups. X-Remote-Group is suggested.

--requestheader-username-headers strings

List of request headers to inspect for usernames. X-Remote-User is common.

--runtime-config <comma-separated 'key=value' pairs>

A set of key=value pairs that enable or disable built-in APIs. Supported options are:
v1=true|false for the core API group
<group>/<version>=true|false for a specific API group and version (e.g. apps/v1=true)
api/all=true|false controls all API versions
api/ga=true|false controls all API versions of the form v[0-9]+
api/beta=true|false controls all API versions of the form v[0-9]+beta[0-9]+
api/alpha=true|false controls all API versions of the form v[0-9]+alpha[0-9]+
api/legacy is deprecated, and will be removed in a future version
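
For example, a sketch that enables one alpha API group version while leaving everything else at its default (the group/version shown is illustrative):

    kube-apiserver --runtime-config=storage.k8s.io/v1alpha1=true ...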

--secure-port int     Default: 6443

The port on which to serve HTTPS with authentication and authorization. It cannot be switched off with 0.

--service-account-extend-token-expiration     Default: true

Turns on projected service account expiration extension during token generation, which helps ensure a safe transition from legacy tokens to the bound service account token feature. If this flag is enabled, admission-injected tokens are extended up to 1 year to prevent unexpected failure during transition, ignoring the value of service-account-max-token-expiration.

--service-account-issuer string

Identifier of the service account token issuer. The issuer will assert this identifier in "iss" claim of issued tokens. This value is a string or URI. If this option is not a valid URI per the OpenID Discovery 1.0 spec, the ServiceAccountIssuerDiscovery feature will remain disabled, even if the feature gate is set to true. It is highly recommended that this value comply with the OpenID spec: https://openid.net/specs/openid-connect-discovery-1_0.html. In practice, this means that service-account-issuer must be an https URL. It is also highly recommended that this URL be capable of serving OpenID discovery documents at {service-account-issuer}/.well-known/openid-configuration.

--service-account-jwks-uri string

Overrides the URI for the JSON Web Key Set in the discovery doc served at /.well-known/openid-configuration. This flag is useful if the discovery doc and key set are served to relying parties from a URL other than the API server's external address (as auto-detected or overridden with external-hostname). Only valid if the ServiceAccountIssuerDiscovery feature gate is enabled.

--service-account-key-file strings

File containing PEM-encoded x509 RSA or ECDSA private or public keys, used to verify ServiceAccount tokens. The specified file can contain multiple keys, and the flag can be specified multiple times with different files. If unspecified, --tls-private-key-file is used. Must be specified when --service-account-signing-key is provided

--service-account-lookup     Default: true

If true, validate ServiceAccount tokens exist in etcd as part of authentication.

--service-account-max-token-expiration duration

The maximum validity duration of a token created by the service account token issuer. If an otherwise valid TokenRequest with a validity duration larger than this value is requested, a token will be issued with a validity duration of this value.

--service-account-signing-key-file string

Path to the file that contains the current private key of the service account token issuer. The issuer will sign issued ID tokens with this private key.

--service-cluster-ip-range string

A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes or pods. Max of two dual-stack CIDRs is allowed.

--service-node-port-range <a string in the form 'N1-N2'>     Default: 30000-32767

A port range to reserve for services with NodePort visibility. Example: '30000-32767'. Inclusive at both ends of the range.
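
A minimal sketch narrowing the range (the values are illustrative):

    kube-apiserver --service-node-port-range=30000-30999 ...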

--show-hidden-metrics-for-version string

The previous version for which you want to show hidden metrics. Only the previous minor version is meaningful, other values will not be allowed. The format is <major>.<minor>, e.g. '1.16'. The purpose of this format is to make sure you have the opportunity to notice if the next release hides additional metrics, rather than being surprised when they are permanently removed in the release after that.

--shutdown-delay-duration duration

Time to delay the termination. During that time the server keeps serving requests normally. The endpoints /healthz and /livez will return success, but /readyz immediately returns failure. Graceful termination starts after this delay has elapsed. This can be used to allow load balancer to stop sending traffic to this server.

--skip_headers

If true, avoid header prefixes in the log messages

--skip_log_headers

If true, avoid headers when opening log files

--stderrthreshold int     Default: 2

logs at or above this threshold go to stderr

--storage-backend string

The storage backend for persistence. Options: 'etcd3' (default).

--storage-media-type string     Default: "application/vnd.kubernetes.protobuf"

The media type to use to store objects in storage. Some resources or storage backends may only support a specific media type and will ignore this setting.

--strict-transport-security-directives strings

List of directives for HSTS, comma separated. If this list is empty, then HSTS directives will not be added. Example: 'max-age=31536000,includeSubDomains,preload'

--tls-cert-file string

File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.

--tls-cipher-suites strings

Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA.
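
A sketch restricting the server to two of the preferred suites listed above (the selection is illustrative, not a hardening recommendation):

    kube-apiserver --tls-min-version=VersionTLS12 \
      --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_AES_256_GCM_SHA384 ...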

--tls-min-version string

Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13

--tls-private-key-file string

File containing the default x509 private key matching --tls-cert-file.

--tls-sni-cert-key string

A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com".

--token-auth-file string

If set, the file that will be used to secure the secure port of the API server via token authentication.

-v, --v int

number for the log level verbosity

--version version[=true]

Print version information and quit

--vmodule <comma-separated 'pattern=N' settings>

comma-separated list of pattern=N settings for file-filtered logging
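
A sketch assuming two source-file patterns of interest (the patterns and verbosity levels are illustrative only):

    kube-apiserver --vmodule=reflector*=6,watch_cache*=4 ...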

--watch-cache     Default: true

Enable watch caching in the apiserver

--watch-cache-sizes strings

Watch cache size settings for some resources (pods, nodes, etc.), comma separated. The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, and size is a number. It takes effect when watch-cache is enabled. Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices.apiregistration.k8s.io) have system defaults set by heuristics, others default to default-watch-cache-size
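
For example, a sketch of the resource[.group]#size format, one core-group resource and one grouped resource (sizes are illustrative):

    kube-apiserver --watch-cache-sizes=pods#1000,apiservices.apiregistration.k8s.io#10 ...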

diff --git a/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md index 79af1bc81a354..ae6df0502d306 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md @@ -2,8 +2,22 @@ title: kube-controller-manager content_type: tool-reference weight: 30 +auto_generated: true --- ## {{% heading "synopsis" %}} @@ -30,983 +44,1018 @@ kube-controller-manager [flags]

--add_dir_header

If true, adds the file directory to the header of the log messages

--allocate-node-cidrs

Should CIDRs for Pods be allocated and set on the cloud provider.

--allow-metric-labels stringToString     Default: []

The map from metric-label to value allow-list of this label. The key's format is <MetricName>,<LabelName>. The value's format is <allowed_value>,<allowed_value>... e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3', metric2,label1='v1,v2,v3'.

--alsologtostderr

log to standard error as well as files

--attach-detach-reconcile-sync-period duration     Default: 1m0s

The reconciler sync wait time between volume attach detach. This duration must be larger than one second, and increasing this value from the default may allow for volumes to be mismatched with pods.

--authentication-kubeconfig string

kubeconfig file pointing at the 'core' kubernetes server with enough rights to create tokenreviews.authentication.k8s.io. This is optional. If empty, all token requests are considered to be anonymous and no client CA is looked up in the cluster.

--authentication-skip-lookup

If false, the authentication-kubeconfig will be used to lookup missing authentication configuration from the cluster.

--authentication-token-webhook-cache-ttl duration     Default: 10s

The duration to cache responses from the webhook token authenticator.

--authentication-tolerate-lookup-failure

If true, failures to look up missing authentication configuration from the cluster are not considered fatal. Note that this can result in authentication that treats all requests as anonymous.

--authorization-always-allow-paths strings     Default: "/healthz,/readyz,/livez"

A list of HTTP paths to skip during authorization, i.e. these are authorized without contacting the 'core' kubernetes server.

--authorization-kubeconfig string

kubeconfig file pointing at the 'core' kubernetes server with enough rights to create subjectaccessreviews.authorization.k8s.io. This is optional. If empty, all requests not skipped by authorization are forbidden.

--authorization-webhook-cache-authorized-ttl duration     Default: 10s

The duration to cache 'authorized' responses from the webhook authorizer.

--authorization-webhook-cache-unauthorized-ttl duration     Default: 10s

The duration to cache 'unauthorized' responses from the webhook authorizer.

--azure-container-registry-config string

Path to the file containing Azure container registry configuration information.

--bind-address string     Default: 0.0.0.0

The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used.

--cert-dir string

The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.

--cidr-allocator-type string     Default: "RangeAllocator"

Type of CIDR allocator to use

--client-ca-file string

If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.

--cloud-config string

The path to the cloud provider configuration file. Empty string for no configuration file.

--cloud-provider string

The provider for cloud services. Empty string for no provider.

--cluster-cidr string

CIDR Range for Pods in cluster. Requires --allocate-node-cidrs to be true

--cluster-name string     Default: "kubernetes"

The instance prefix for the cluster.

--cluster-signing-cert-file string

Filename containing a PEM-encoded X509 CA certificate used to issue cluster-scoped certificates. If specified, no more specific --cluster-signing-* flag may be specified.

--cluster-signing-duration duration     Default: 8760h0m0s

The length of duration signed certificates will be given.

--cluster-signing-key-file string

Filename containing a PEM-encoded RSA or ECDSA private key used to sign cluster-scoped certificates. If specified, no more specific --cluster-signing-* flag may be specified.

--cluster-signing-kube-apiserver-client-cert-file string

Filename containing a PEM-encoded X509 CA certificate used to issue certificates for the kubernetes.io/kube-apiserver-client signer. If specified, --cluster-signing-{cert,key}-file must not be set.

--cluster-signing-kube-apiserver-client-key-file string

Filename containing a PEM-encoded RSA or ECDSA private key used to sign certificates for the kubernetes.io/kube-apiserver-client signer. If specified, --cluster-signing-{cert,key}-file must not be set.

--cluster-signing-kubelet-client-cert-file string -Filename containing a PEM-encoded X509 CA certificate used to issue certificates for the kubernetes.io/kube-apiserver-client-kubelet signer. If specified, --cluster-signing-{cert,key}-file must not be set. +

Filename containing a PEM-encoded X509 CA certificate used to issue certificates for the kubernetes.io/kube-apiserver-client-kubelet signer. If specified, --cluster-signing-{cert,key}-file must not be set.

--cluster-signing-kubelet-client-key-file string -Filename containing a PEM-encoded RSA or ECDSA private key used to sign certificates for the kubernetes.io/kube-apiserver-client-kubelet signer. If specified, --cluster-signing-{cert,key}-file must not be set. +

Filename containing a PEM-encoded RSA or ECDSA private key used to sign certificates for the kubernetes.io/kube-apiserver-client-kubelet signer. If specified, --cluster-signing-{cert,key}-file must not be set.

--cluster-signing-kubelet-serving-cert-file string -Filename containing a PEM-encoded X509 CA certificate used to issue certificates for the kubernetes.io/kubelet-serving signer. If specified, --cluster-signing-{cert,key}-file must not be set. +

Filename containing a PEM-encoded X509 CA certificate used to issue certificates for the kubernetes.io/kubelet-serving signer. If specified, --cluster-signing-{cert,key}-file must not be set.

--cluster-signing-kubelet-serving-key-file string -Filename containing a PEM-encoded RSA or ECDSA private key used to sign certificates for the kubernetes.io/kubelet-serving signer. If specified, --cluster-signing-{cert,key}-file must not be set. +

Filename containing a PEM-encoded RSA or ECDSA private key used to sign certificates for the kubernetes.io/kubelet-serving signer. If specified, --cluster-signing-{cert,key}-file must not be set.

--cluster-signing-legacy-unknown-cert-file string -Filename containing a PEM-encoded X509 CA certificate used to issue certificates for the kubernetes.io/legacy-unknown signer. If specified, --cluster-signing-{cert,key}-file must not be set. +

Filename containing a PEM-encoded X509 CA certificate used to issue certificates for the kubernetes.io/legacy-unknown signer. If specified, --cluster-signing-{cert,key}-file must not be set.

--cluster-signing-legacy-unknown-key-file string -Filename containing a PEM-encoded RSA or ECDSA private key used to sign certificates for the kubernetes.io/legacy-unknown signer. If specified, --cluster-signing-{cert,key}-file must not be set. +

Filename containing a PEM-encoded RSA or ECDSA private key used to sign certificates for the kubernetes.io/legacy-unknown signer. If specified, --cluster-signing-{cert,key}-file must not be set.
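For example, a minimal sketch of per-signer signing configuration (the CA file paths below are hypothetical placeholders):

    kube-controller-manager \
      --cluster-signing-kubelet-serving-cert-file=/etc/kubernetes/pki/kubelet-serving-ca.crt \
      --cluster-signing-kubelet-serving-key-file=/etc/kubernetes/pki/kubelet-serving-ca.key \
      --cluster-signing-kube-apiserver-client-cert-file=/etc/kubernetes/pki/client-ca.crt \
      --cluster-signing-kube-apiserver-client-key-file=/etc/kubernetes/pki/client-ca.key

Note that --cluster-signing-cert-file and --cluster-signing-key-file must be left unset in this mode.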

--concurrent-deployment-syncs int32     Default: 5
The number of deployment objects that are allowed to sync concurrently. Larger number = more responsive deployments, but more CPU (and network) load

--concurrent-endpoint-syncs int32     Default: 5
The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load

--concurrent-gc-syncs int32     Default: 20
The number of garbage collector workers that are allowed to sync concurrently.

--concurrent-namespace-syncs int32     Default: 10
The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load

--concurrent-replicaset-syncs int32     Default: 5
The number of replica sets that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load

--concurrent-resource-quota-syncs int32     Default: 5
The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load

--concurrent-service-endpoint-syncs int32     Default: 5
The number of service endpoint syncing operations that will be done concurrently. Larger number = faster endpoint slice updating, but more CPU (and network) load. Defaults to 5.

--concurrent-service-syncs int32     Default: 1
The number of services that are allowed to sync concurrently. Larger number = more responsive service management, but more CPU (and network) load

--concurrent-serviceaccount-token-syncs int32     Default: 5
The number of service account token objects that are allowed to sync concurrently. Larger number = more responsive token generation, but more CPU (and network) load

--concurrent-statefulset-syncs int32     Default: 5
The number of statefulset objects that are allowed to sync concurrently. Larger number = more responsive statefulsets, but more CPU (and network) load

--concurrent-ttl-after-finished-syncs int32     Default: 5
The number of TTL-after-finished controller workers that are allowed to sync concurrently.

--concurrent_rc_syncs int32     Default: 5
The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load
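As an illustrative sketch only (the values are placeholders, not tuning advice), a large cluster might raise several of these limits together:

    kube-controller-manager \
      --concurrent-deployment-syncs=10 \
      --concurrent-namespace-syncs=20 \
      --concurrent-gc-syncs=40

Each increase trades CPU and network load on the controller manager and API server for faster convergence.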

--configure-cloud-routes     Default: true
Should CIDRs allocated by allocate-node-cidrs be configured on the cloud provider.

--contention-profiling
Enable lock contention profiling, if profiling is enabled

--controller-start-interval duration
Interval between starting controller managers.

--controllers strings     Default: "*"
A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'.
All controllers: attachdetach, bootstrapsigner, cloud-node-lifecycle, clusterrole-aggregation, cronjob, csrapproving, csrcleaner, csrsigning, daemonset, deployment, disruption, endpoint, endpointslice, endpointslicemirroring, ephemeral-volume, garbagecollector, horizontalpodautoscaling, job, namespace, nodeipam, nodelifecycle, persistentvolume-binder, persistentvolume-expander, podgc, pv-protection, pvc-protection, replicaset, replicationcontroller, resourcequota, root-ca-cert-publisher, route, service, serviceaccount, serviceaccount-token, statefulset, tokencleaner, ttl, ttl-after-finished
Disabled-by-default controllers: bootstrapsigner, tokencleaner
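For example, the following hypothetical invocation keeps all on-by-default controllers, additionally enables the disabled-by-default tokencleaner, and turns off the ttl controller:

    kube-controller-manager --controllers=*,tokencleaner,-ttl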

--deployment-controller-sync-period duration     Default: 30s
Period for syncing the deployments.

--disable-attach-detach-reconcile-sync
Disable volume attach detach reconciler sync. Disabling this may cause volumes to be mismatched with pods. Use wisely.

--disabled-metrics strings
This flag provides an escape hatch for misbehaving metrics. You must provide the fully qualified metric name in order to disable it. Disclaimer: disabling metrics is higher in precedence than showing hidden metrics.

--enable-dynamic-provisioning     Default: true
Enable dynamic provisioning for environments that support it.

--enable-garbage-collector     Default: true
Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-apiserver.

--enable-hostpath-provisioner
Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.

--enable-leader-migration
Whether to enable controller leader migration.

--enable-taint-manager     Default: true
WARNING: Beta feature. If set to true enables NoExecute Taints and will evict all not-tolerating Pods running on Nodes tainted with this kind of Taints.

--endpoint-updates-batch-period duration
The length of endpoint updates batching period. Processing of pod changes will be delayed by this duration to join them with potential upcoming updates and reduce the overall number of endpoints updates. Larger number = higher endpoint programming latency, but lower number of endpoints revision generated

--endpointslice-updates-batch-period duration
The length of endpoint slice updates batching period. Processing of pod changes will be delayed by this duration to join them with potential upcoming updates and reduce the overall number of endpoints updates. Larger number = higher endpoint programming latency, but lower number of endpoints revision generated

--experimental-logging-sanitization
[Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens).
Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.

--external-cloud-volume-plugin string
The plugin to use when cloud provider is set to external. Can be empty, should only be set when cloud-provider is external. Currently used to allow node and volume controllers to work for in tree cloud providers.

--feature-gates <comma-separated 'key=True|False' pairs>
A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
CSIServiceAccountToken=true|false (BETA - default=true)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (ALPHA - default=false)
CronJobControllerV2=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (ALPHA - default=false)
DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DownwardAPIHugePages=true|false (BETA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceProxying=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (ALPHA - default=false)
IngressClassNamespacedParams=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (ALPHA - default=false)
MemoryManager=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NamespaceDefaultLabelName=true|false (BETA - default=true)
NetworkPolicyEndPort=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (ALPHA - default=false)
PodDeletionCost=true|false (ALPHA - default=false)
PodOverhead=true|false (BETA - default=true)
PreferNominatedNode=true|false (ALPHA - default=false)
ProbeTerminationGracePeriod=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
ServiceInternalTrafficPolicy=true|false (ALPHA - default=false)
ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceLoadBalancerClass=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
SetHostnameAsFQDN=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (ALPHA - default=false)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (BETA - default=true)
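For example, a sketch that turns one alpha gate on and one beta gate off (the gates chosen are purely illustrative):

    kube-controller-manager --feature-gates=TopologyAwareHints=true,TTLAfterFinished=false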

--flex-volume-plugin-dir string     Default: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/"
Full path of the directory in which the flex volume plugin should search for additional third party volume plugins.

-h, --help
help for kube-controller-manager

--horizontal-pod-autoscaler-cpu-initialization-period duration     Default: 5m0s
The period after pod start when CPU samples might be skipped.

--horizontal-pod-autoscaler-downscale-stabilization duration     Default: 5m0s
The period for which autoscaler will look backwards and not scale down below any recommendation it made during that period.

--horizontal-pod-autoscaler-initial-readiness-delay duration     Default: 30s
The period after pod start during which readiness changes will be treated as initial readiness.

--horizontal-pod-autoscaler-sync-period duration     Default: 15s
The period for syncing the number of pods in horizontal pod autoscaler.

--horizontal-pod-autoscaler-tolerance float     Default: 0.1
The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling.

--http2-max-streams-per-connection int
The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.

--kube-api-burst int32     Default: 30
Burst to use while talking with kubernetes apiserver.

--kube-api-content-type string     Default: "application/vnd.kubernetes.protobuf"
Content type of requests sent to apiserver.

--kube-api-qps float     Default: 20
QPS to use while talking with kubernetes apiserver.

--kubeconfig string
Path to kubeconfig file with authorization and master location information.

--large-cluster-size-threshold int32     Default: 50
Number of nodes from which NodeController treats the cluster as large for the eviction logic purposes. --secondary-node-eviction-rate is implicitly overridden to 0 for clusters this size or smaller.

--leader-elect     Default: true
Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.

--leader-elect-lease-duration duration     Default: 15s
The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.

--leader-elect-renew-deadline duration     Default: 10s
The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.

--leader-elect-resource-lock string     Default: "leases"
The type of resource object that is used for locking during leader election. Supported options are 'endpoints', 'configmaps', 'leases', 'endpointsleases' and 'configmapsleases'.

--leader-elect-resource-name string     Default: "kube-controller-manager"
The name of resource object that is used for locking during leader election.

--leader-elect-resource-namespace string     Default: "kube-system"
The namespace of resource object that is used for locking during leader election.

--leader-elect-retry-period duration     Default: 2s
The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.
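Taken together, a replicated deployment might spell out the leader-election settings explicitly; the sketch below simply restates the defaults:

    kube-controller-manager \
      --leader-elect=true \
      --leader-elect-resource-lock=leases \
      --leader-elect-lease-duration=15s \
      --leader-elect-renew-deadline=10s \
      --leader-elect-retry-period=2s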

--leader-migration-config string
Path to the config file for controller leader migration, or empty to use the value that reflects default configuration of the controller manager. The config file should be of type LeaderMigrationConfiguration, group controllermanager.config.k8s.io, version v1alpha1.

--log-flush-frequency duration     Default: 5s
Maximum number of seconds between log flushes

--log_backtrace_at <a string in the form 'file:N'>     Default: :0
when logging hits line file:N, emit a stack trace

--log_dir string
If non-empty, write log files in this directory

--log_file string
If non-empty, use this log file

--log_file_max_size uint     Default: 1800
Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited.

--logging-format string     Default: "text"
Sets the log format. Permitted formats: "json", "text".
Non-default formats don't honor these flags: --add_dir_header, --alsologtostderr, --log_backtrace_at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --one_output, --skip_headers, --skip_log_headers, --stderrthreshold, --vmodule, --log-flush-frequency.
Non-default choices are currently alpha and subject to change without warning.
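For example, to emit structured logs (bearing in mind the alpha caveat above):

    kube-controller-manager --logging-format=json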

--logtostderr     Default: true
log to standard error instead of files

--master string
The address of the Kubernetes API server (overrides any value in kubeconfig).

--max-endpoints-per-slice int32     Default: 100
The maximum number of endpoints that will be added to an EndpointSlice. More endpoints per slice will result in fewer endpoint slices, but larger resources. Defaults to 100.

--min-resync-period duration     Default: 12h0m0s
The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod.

--mirroring-concurrent-service-endpoint-syncs int32     Default: 5
The number of service endpoint syncing operations that will be done concurrently by the EndpointSliceMirroring controller. Larger number = faster endpoint slice updating, but more CPU (and network) load. Defaults to 5.

--mirroring-endpointslice-updates-batch-period duration
The length of EndpointSlice updates batching period for EndpointSliceMirroring controller. Processing of EndpointSlice changes will be delayed by this duration to join them with potential upcoming updates and reduce the overall number of EndpointSlice updates. Larger number = higher endpoint programming latency, but lower number of endpoints revision generated

--mirroring-max-endpoints-per-subset int32     Default: 1000
The maximum number of endpoints that will be added to an EndpointSlice by the EndpointSliceMirroring controller. More endpoints per slice will result in fewer endpoint slices, but larger resources.

--namespace-sync-period duration     Default: 5m0s
The period for syncing namespace life-cycle updates

--node-cidr-mask-size int32
Mask size for node cidr in cluster. Default is 24 for IPv4 and 64 for IPv6.

--node-cidr-mask-size-ipv4 int32
Mask size for IPv4 node cidr in dual-stack cluster. Default is 24.

--node-cidr-mask-size-ipv6 int32
Mask size for IPv6 node cidr in dual-stack cluster. Default is 64.
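For example, in a dual-stack cluster the per-family mask sizes can be set explicitly; this is a sketch and the CIDRs below are placeholders:

    kube-controller-manager \
      --allocate-node-cidrs=true \
      --cluster-cidr=10.244.0.0/16,fd00:10:244::/56 \
      --node-cidr-mask-size-ipv4=24 \
      --node-cidr-mask-size-ipv6=64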

--node-eviction-rate float     Default: 0.1
Number of nodes per second on which pods are deleted in case of node failure when a zone is healthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters.

--node-monitor-grace-period duration     Default: 40s
Amount of time which we allow running Node to be unresponsive before marking it unhealthy. Must be N times more than kubelet's nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet to post node status.

--node-monitor-period duration     Default: 5s
The period for syncing NodeStatus in NodeController.

--node-startup-grace-period duration     Default: 1m0s
Amount of time which we allow starting Node to be unresponsive before marking it unhealthy.

--one_output
If true, only write logs to their native severity level (vs also writing to each lower severity level)

--permit-address-sharing
If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]

--permit-port-sharing
If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]

--pod-eviction-timeout duration     Default: 5m0s
The grace period for deleting pods on failed nodes.

--profiling     Default: true
Enable profiling via web interface host:port/debug/pprof/

--pv-recycler-increment-timeout-nfs int32     Default: 30
the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod

--pv-recycler-minimum-timeout-hostpath int32     Default: 60
The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.

--pv-recycler-minimum-timeout-nfs int32     Default: 300
The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod

--pv-recycler-pod-template-filepath-hostpath string
The file path to a pod definition used as a template for HostPath persistent volume recycling. This is for development and testing only and will not work in a multi-node cluster.

--pv-recycler-pod-template-filepath-nfs string
The file path to a pod definition used as a template for NFS persistent volume recycling

--pv-recycler-timeout-increment-hostpath int32     Default: 30
the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.

--pvclaimbinder-sync-period duration     Default: 15s
The period for syncing persistent volumes and persistent volume claims

--requestheader-allowed-names strings
List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed.

--requestheader-client-ca-file string
Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers. WARNING: generally do not depend on authorization being already done for incoming requests.

--requestheader-extra-headers-prefix strings     Default: "x-remote-extra-"
List of request header prefixes to inspect. X-Remote-Extra- is suggested.

--requestheader-group-headers strings     Default: "x-remote-group"
List of request headers to inspect for groups. X-Remote-Group is suggested.

--requestheader-username-headers strings     Default: "x-remote-user"
List of request headers to inspect for usernames. X-Remote-User is common.
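A sketch of wiring these together for front-proxy authentication, assuming a hypothetical CA bundle path and the suggested header names:

    kube-controller-manager \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt \
      --requestheader-allowed-names=front-proxy-client \
      --requestheader-username-headers=X-Remote-User \
      --requestheader-group-headers=X-Remote-Group \
      --requestheader-extra-headers-prefix=X-Remote-Extra-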

--resource-quota-sync-period duration     Default: 5m0s
The period for syncing quota usage status in the system

--root-ca-file string
If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.

--route-reconciliation-period duration     Default: 10s
The period for reconciling routes created for Nodes by cloud provider.

--secondary-node-eviction-rate float     Default: 0.01
Number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters. This value is implicitly overridden to 0 if the cluster size is smaller than --large-cluster-size-threshold.

--secure-port int     Default: 10257
The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all.

--service-account-private-key-file string
Filename containing a PEM-encoded private RSA or ECDSA key used to sign service account tokens.

--service-cluster-ip-range string
CIDR Range for Services in cluster. Requires --allocate-node-cidrs to be true

--show-hidden-metrics-for-version string
The previous version for which you want to show hidden metrics. Only the previous minor version is meaningful, other values will not be allowed. The format is <major>.<minor>, e.g.: '1.16'. The purpose of this format is to make sure you have the opportunity to notice if the next release hides additional metrics, rather than being surprised when they are permanently removed in the release after that.

--skip_headers
If true, avoid header prefixes in the log messages

--skip_log_headers
If true, avoid headers when opening log files

--stderrthreshold int     Default: 2
logs at or above this threshold go to stderr

--terminated-pod-gc-threshold int32     Default: 12500
Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.

--tls-cert-file string
File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.

--tls-cipher-suites strings
Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA.

--tls-min-version string
Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13

--tls-private-key-file string
File containing the default x509 private key matching --tls-cert-file.
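For example, a hardened serving configuration might pin the certificate pair and a TLS floor (paths are hypothetical; the cipher list is an abbreviated selection from the preferred values above):

    kube-controller-manager \
      --tls-cert-file=/etc/kubernetes/pki/kube-controller-manager.crt \
      --tls-private-key-file=/etc/kubernetes/pki/kube-controller-manager.key \
      --tls-min-version=VersionTLS12 \
      --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384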

--tls-sni-cert-key string
A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com".
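To serve different certificates for different host names, repeat the flag, as in this sketch (file names hypothetical):

    kube-controller-manager \
      --tls-sni-cert-key=default.crt,default.key \
      --tls-sni-cert-key=foo.crt,foo.key:*.foo.com,foo.com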

--unhealthy-zone-threshold float     Default: 0.55
Fraction of Nodes in a zone which needs to be not Ready (minimum 3) for zone to be treated as unhealthy.

--use-service-account-credentials
If true, use individual service account credentials for each controller.

-v, --v int
number for the log level verbosity

--version version[=true]
Print version information and quit

--vmodule <comma-separated 'pattern=N' settings>
comma-separated list of pattern=N settings for file-filtered logging
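For example, to raise verbosity globally and even further for one source file pattern (the pattern is illustrative):

    kube-controller-manager --v=2 --vmodule=node_lifecycle_controller=4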

--volume-host-allow-local-loopback     Default: true
If false, deny local loopback IPs in addition to any CIDR ranges in --volume-host-cidr-denylist

--volume-host-cidr-denylist strings
A comma-separated list of CIDR ranges to avoid from volume plugins.

diff --git a/content/en/docs/reference/command-line-tools-reference/kube-proxy.md b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md
index a13368ed013b0..ffe104c22e694 100644
--- a/content/en/docs/reference/command-line-tools-reference/kube-proxy.md
+++ b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md
@@ -2,8 +2,22 @@
 title: kube-proxy
 content_type: tool-reference
 weight: 30
+auto_generated: true
 ---

## {{% heading "synopsis" %}}

@@ -32,308 +46,308 @@
kube-proxy [flags]

--azure-container-registry-config string
Path to the file containing Azure container registry configuration information.

--bind-address string     Default: 0.0.0.0
The IP address for the proxy server to serve on (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces)

--bind-address-hard-fail
If true kube-proxy will treat failure to bind to a port as fatal and exit

--cleanup
If true cleanup iptables and ipvs rules and exit.
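For example, a sketch of flushing the rules kube-proxy created (say, before switching proxy modes) and exiting:

    kube-proxy --cleanup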

--cluster-cidr string
The CIDR range of pods in the cluster. When configured, traffic sent to a Service cluster IP from outside this range will be masqueraded and traffic sent from pods to an external LoadBalancer IP will be directed to the respective cluster IP instead

--config string
The path to the configuration file.

--config-sync-period duration     Default: 15m0s
How often configuration from the apiserver is refreshed. Must be greater than 0.

--conntrack-max-per-core int32     Default: 32768
Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min).

--conntrack-min int32     Default: 131072
Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is).

--conntrack-tcp-timeout-close-wait duration     Default: 1h0m0s
NAT timeout for TCP connections in the CLOSE_WAIT state

--conntrack-tcp-timeout-established duration     Default: 24h0m0s
Idle timeout for established TCP connections (0 to leave as-is)
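As a purely illustrative tuning sketch for a connection-heavy node (the numbers are placeholders, not recommendations):

    kube-proxy \
      --conntrack-max-per-core=65536 \
      --conntrack-min=262144 \
      --conntrack-tcp-timeout-established=12h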

--detect-local-mode LocalMode
Mode to use to detect local traffic

--feature-gates <comma-separated 'key=True|False' pairs>
A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AllowInsecureBackendProxy=true|false (BETA - default=true)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAWSComplete=true|false (ALPHA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureDiskComplete=true|false (ALPHA - default=false)
CSIMigrationAzureFile=true|false (ALPHA - default=false)
CSIMigrationAzureFileComplete=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationGCEComplete=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=false)
CSIMigrationOpenStackComplete=true|false (ALPHA - default=false)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
CSIServiceAccountToken=true|false (ALPHA - default=false)
CSIStorageCapacity=true|false (ALPHA - default=false)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
CronJobControllerV2=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DownwardAPIHugePages=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EfficientWatchResumption=true|false (ALPHA - default=false)
EndpointSlice=true|false (BETA - default=true)
EndpointSliceNodeName=true|false (ALPHA - default=false)
EndpointSliceProxying=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (ALPHA - default=false)
GracefulNodeShutdown=true|false (ALPHA - default=false)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
IPv6DualStack=true|false (ALPHA - default=false)
ImmutableEphemeralVolumes=true|false (BETA - default=true)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
LegacyNodeRoleBehavior=true|false (BETA - default=true)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NodeDisruptionExclusion=true|false (BETA - default=true)
NonPreemptingPriority=true|false (BETA - default=true)
PodDisruptionBudget=true|false (BETA - default=true)
PodOverhead=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RootCAConfigMap=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
ServiceAccountIssuerDiscovery=true|false (BETA - default=true)
ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (BETA - default=true)
ServiceTopology=true|false (ALPHA - default=false)
SetHostnameAsFQDN=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (ALPHA - default=false) +

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
CSIServiceAccountToken=true|false (BETA - default=true)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (ALPHA - default=false)
CronJobControllerV2=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (ALPHA - default=false)
DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DownwardAPIHugePages=true|false (BETA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceProxying=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (ALPHA - default=false)
IngressClassNamespacedParams=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (ALPHA - default=false)
MemoryManager=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NamespaceDefaultLabelName=true|false (BETA - default=true)
NetworkPolicyEndPort=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (ALPHA - default=false)
PodDeletionCost=true|false (ALPHA - default=false)
PodOverhead=true|false (BETA - default=true)
PreferNominatedNode=true|false (ALPHA - default=false)
ProbeTerminationGracePeriod=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
ServiceInternalTrafficPolicy=true|false (ALPHA - default=false)
ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceLoadBalancerClass=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
SetHostnameAsFQDN=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (ALPHA - default=false)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (BETA - default=true)
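
Feature gates are passed as a single comma-separated flag value. A minimal, illustrative invocation (the gate names come from the list above; the kubeconfig path is an assumption):

```shell
# Enable one gate and explicitly disable another on kube-proxy.
kube-proxy \
  --kubeconfig=/var/lib/kube-proxy/kubeconfig \
  --feature-gates=EndpointSliceProxying=true,WinOverlay=false
```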

--healthz-bind-address ipport     Default: 0.0.0.0:10256

The IP address with port for the health check server to serve on (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable.

-h, --help

help for kube-proxy

--hostname-override string

If non-empty, will use this string as identification instead of the actual hostname.

--iptables-masquerade-bit int32     Default: 14

If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31].

--iptables-min-sync-period duration     Default: 1s

The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m').

--iptables-sync-period duration     Default: 30s

The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.

--ipvs-exclude-cidrs strings

A comma-separated list of CIDRs which the ipvs proxier should not touch when cleaning up IPVS rules.

--ipvs-min-sync-period duration

The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m').

--ipvs-scheduler string

The ipvs scheduler type when proxy mode is ipvs

--ipvs-strict-arp

Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2
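
The IPVS-related flags are typically used together; a hedged sketch (the scheduler choice and flag combination are illustrative, not a recommendation):

```shell
# Run the ipvs proxier with round-robin scheduling and strict ARP.
kube-proxy --proxy-mode=ipvs --ipvs-scheduler=rr --ipvs-strict-arp
```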

--ipvs-sync-period duration     Default: 30s

The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.

--ipvs-tcp-timeout duration

The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').

--ipvs-tcpfin-timeout duration

The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').

--ipvs-udp-timeout duration

The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').

--kube-api-burst int32     Default: 10

Burst to use while talking with kubernetes apiserver

--kube-api-content-type string     Default: "application/vnd.kubernetes.protobuf"

Content type of requests sent to apiserver.

--kube-api-qps float     Default: 5

QPS to use while talking with kubernetes apiserver

--kubeconfig string

Path to kubeconfig file with authorization information (the master location can be overridden by the master flag).

--log-flush-frequency duration     Default: 5s

Maximum number of seconds between log flushes

--masquerade-all

If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this is not commonly needed).

--master string

The address of the Kubernetes API server (overrides any value in kubeconfig)

--metrics-bind-address ipport     Default: 127.0.0.1:10249

The IP address with port for the metrics server to serve on (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable.
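
With the default bind addresses above, both endpoints can be probed locally; a quick illustrative check:

```shell
# Probe kube-proxy's health and metrics endpoints on their default ports.
curl -s http://127.0.0.1:10256/healthz
curl -s http://127.0.0.1:10249/metrics | head
```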

--nodeport-addresses strings

A string slice of values which specify the addresses to use for NodePorts. Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses.
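
A short illustrative example (the CIDR blocks are placeholders):

```shell
# Only open NodePorts on addresses that fall inside these blocks.
kube-proxy --nodeport-addresses=10.0.0.0/8,192.168.1.0/24
```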

--oom-score-adj int32     Default: -999

The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]

--profiling

If true enables profiling via web interface on /debug/pprof handler.

--proxy-mode ProxyMode

Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs' or 'kernelspace' (windows). If blank, use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, and the system's kernel or iptables version is insufficient, this always falls back to the userspace proxy.

--proxy-port-range port-range

Range of host ports (beginPort-endPort, single port, or beginPort+offset, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0, or 0-0), ports will be chosen randomly.

--show-hidden-metrics-for-version string

The previous version for which you want to show hidden metrics. Only the previous minor version is meaningful, other values will not be allowed. The format is <major>.<minor>, e.g. '1.16'. The purpose of this format is to make sure you have the opportunity to notice if the next release hides additional metrics, rather than being surprised when they are permanently removed in the release after that.

--udp-timeout duration     Default: 250ms

How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace

--version version[=true]

Print version information and quit

--write-config-to string

If set, write the default configuration values to this file and exit.
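
A common workflow, sketched with illustrative paths: dump the defaults, edit a copy, then start kube-proxy from the resulting file via --config.

```shell
# Write the built-in defaults out, then run from an edited copy.
kube-proxy --write-config-to=/tmp/kube-proxy-defaults.yaml
kube-proxy --config=/etc/kubernetes/kube-proxy-config.yaml
```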

diff --git a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md
index 7aa8ccf5d0a46..811dc417e0245 100644
--- a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md
+++ b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md
@@ -2,8 +2,22 @@
 title: kube-scheduler
 content_type: tool-reference
 weight: 30
+auto_generated: true
 ---

 ## {{% heading "synopsis" %}}

@@ -30,507 +44,528 @@ kube-scheduler [flags]

--add_dir_header

If true, adds the file directory to the header of the log messages

--address string     Default: "0.0.0.0"

DEPRECATED: the IP address on which to listen for the --port port (set to 0.0.0.0 or :: for listening in all interfaces and IP families). See --bind-address instead. This parameter is ignored if a config file is specified in --config.

--algorithm-provider string

DEPRECATED: the scheduling algorithm provider to use, this sets the default plugins for component config profiles. Choose one of: ClusterAutoscalerProvider | DefaultProvider

--allow-metric-labels stringToString     Default: []

The map from metric-label to the allow-list of values for that label. The key's format is <MetricName>,<LabelName>. The value's format is <allowed_value>,<allowed_value>..., e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3', metric2,label1='v1,v2,v3'.
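
A hedged example of the key/value syntax (the metric and label names are made up for illustration):

```shell
# Restrict the hypothetical label 'label1' of 'metric1' to three values.
kube-scheduler --allow-metric-labels="metric1,label1='v1,v2,v3'"
```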

--alsologtostderr

log to standard error as well as files

--authentication-kubeconfig string

kubeconfig file pointing at the 'core' kubernetes server with enough rights to create tokenreviews.authentication.k8s.io. This is optional. If empty, all token requests are considered to be anonymous and no client CA is looked up in the cluster.

--authentication-skip-lookup

If false, the authentication-kubeconfig will be used to lookup missing authentication configuration from the cluster.

--authentication-token-webhook-cache-ttl duration     Default: 10s

The duration to cache responses from the webhook token authenticator.

--authentication-tolerate-lookup-failure     Default: true

If true, failures to look up missing authentication configuration from the cluster are not considered fatal. Note that this can result in authentication that treats all requests as anonymous.

--authorization-always-allow-paths strings     Default: "/healthz,/readyz,/livez"

A list of HTTP paths to skip during authorization, i.e. these are authorized without contacting the 'core' kubernetes server.

--authorization-kubeconfig string

kubeconfig file pointing at the 'core' kubernetes server with enough rights to create subjectaccessreviews.authorization.k8s.io. This is optional. If empty, all requests not skipped by authorization are forbidden.

--authorization-webhook-cache-authorized-ttl duration     Default: 10s

The duration to cache 'authorized' responses from the webhook authorizer.

--authorization-webhook-cache-unauthorized-ttl duration     Default: 10s

The duration to cache 'unauthorized' responses from the webhook authorizer.

--azure-container-registry-config string

Path to the file containing Azure container registry configuration information.

--bind-address string     Default: 0.0.0.0

The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used.

--cert-dir string

The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.

--client-ca-file string

If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.

--config string

The path to the configuration file. The following flags can overwrite fields in this file:
--algorithm-provider
--policy-config-file
--policy-configmap
--policy-configmap-namespace

--contention-profiling     Default: true

DEPRECATED: enable lock contention profiling, if profiling is enabled. This parameter is ignored if a config file is specified in --config.

--disabled-metrics strings

This flag provides an escape hatch for misbehaving metrics. You must provide the fully qualified metric name in order to disable it. Disclaimer: disabling metrics is higher in precedence than showing hidden metrics.

--experimental-logging-sanitization

[Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens).
Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.

--feature-gates <comma-separated 'key=True|False' pairs>

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
CSIServiceAccountToken=true|false (BETA - default=true)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (ALPHA - default=false)
CronJobControllerV2=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (ALPHA - default=false)
DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DownwardAPIHugePages=true|false (BETA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceProxying=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (ALPHA - default=false)
IngressClassNamespacedParams=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (ALPHA - default=false)
MemoryManager=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NamespaceDefaultLabelName=true|false (BETA - default=true)
NetworkPolicyEndPort=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (ALPHA - default=false)
PodDeletionCost=true|false (ALPHA - default=false)
PodOverhead=true|false (BETA - default=true)
PreferNominatedNode=true|false (ALPHA - default=false)
ProbeTerminationGracePeriod=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
ServiceInternalTrafficPolicy=true|false (ALPHA - default=false)
ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceLoadBalancerClass=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
SetHostnameAsFQDN=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (ALPHA - default=false)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (BETA - default=true)

--hard-pod-affinity-symmetric-weight int32     Default: 1

DEPRECATED: RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding to every RequiredDuringScheduling affinity rule. --hard-pod-affinity-symmetric-weight represents the weight of the implicit PreferredDuringScheduling affinity rule. Must be in the range 0-100. This parameter is ignored if a config file is specified in --config.

-h, --help

help for kube-scheduler

--http2-max-streams-per-connection int

The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.

--kube-api-burst int32     Default: 100

DEPRECATED: burst to use while talking with kubernetes apiserver. This parameter is ignored if a config file is specified in --config.

--kube-api-content-type string     Default: "application/vnd.kubernetes.protobuf"

DEPRECATED: content type of requests sent to apiserver. This parameter is ignored if a config file is specified in --config.

--kube-api-qps float     Default: 50

DEPRECATED: QPS to use while talking with kubernetes apiserver. This parameter is ignored if a config file is specified in --config.

--kubeconfig string

DEPRECATED: path to kubeconfig file with authorization and master location information. This parameter is ignored if a config file is specified in --config.

--leader-elect     Default: true

Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.

--leader-elect-lease-duration duration     Default: 15s

The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.

--leader-elect-renew-deadline duration     Default: 10s

The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.

--leader-elect-resource-lock string     Default: "leases"

The type of resource object that is used for locking during leader election. Supported options are 'endpoints', 'configmaps', 'leases', 'endpointsleases' and 'configmapsleases'.

--leader-elect-resource-name string     Default: "kube-scheduler"

The name of resource object that is used for locking during leader election.

--leader-elect-resource-namespace string     Default: "kube-system"

The namespace of resource object that is used for locking during leader election.

--leader-elect-retry-period duration     Default: 2s

The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.
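
Taken together, the leader-election flags describe one lease cycle; a sketch of a replicated scheduler that spells out the documented defaults:

```shell
# High-availability scheduler: compete for a Lease named kube-scheduler
# in kube-system; the timings shown are the documented defaults.
kube-scheduler \
  --leader-elect=true \
  --leader-elect-resource-lock=leases \
  --leader-elect-lease-duration=15s \
  --leader-elect-renew-deadline=10s \
  --leader-elect-retry-period=2s
```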

--lock-object-name string     Default: "kube-scheduler"

DEPRECATED: define the name of the lock object. Will be removed in favor of leader-elect-resource-name. This parameter is ignored if a config file is specified in --config.

--lock-object-namespace string     Default: "kube-system"

DEPRECATED: define the namespace of the lock object. Will be removed in favor of leader-elect-resource-namespace. This parameter is ignored if a config file is specified in --config.

--log-flush-frequency duration     Default: 5s

Maximum number of seconds between log flushes

--log_backtrace_at <a string in the form 'file:N'>     Default: :0

when logging hits line file:N, emit a stack trace

--log_dir string

If non-empty, write log files in this directory

--log_file string

If non-empty, use this log file

--log_file_max_size uint     Default: 1800

Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited.

--logging-format string     Default: "text"

Sets the log format. Permitted formats: "json", "text".
Non-default formats don't honor these flags: --add_dir_header, --alsologtostderr, --log_backtrace_at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --one_output, --skip_headers, --skip_log_headers, --stderrthreshold, --vmodule, --log-flush-frequency.
Non-default choices are currently alpha and subject to change without warning.

--logtostderr     Default: true

log to standard error instead of files

--master string

The address of the Kubernetes API server (overrides any value in kubeconfig)

--one_output

If true, only write logs to their native severity level (vs also writing to each lower severity level)

--permit-address-sharing

If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]

--permit-port-sharing

If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]

--policy-config-file string

DEPRECATED: file with scheduler policy configuration. This file is used if policy ConfigMap is not provided or --use-legacy-policy-config=true. Note: The scheduler will fail if this is combined with Plugin configs

--policy-configmap string

DEPRECATED: name of the ConfigMap object that contains scheduler's policy configuration. It must exist in the system namespace before scheduler initialization if --use-legacy-policy-config=false. The config must be provided as the value of an element in 'Data' map with the key='policy.cfg'. Note: The scheduler will fail if this is combined with Plugin configs

--policy-configmap-namespace string     Default: "kube-system"

DEPRECATED: the namespace where policy ConfigMap is located. The kube-system namespace will be used if this is not provided or is empty. Note: The scheduler will fail if this is combined with Plugin configs

--port int     Default: 10251

DEPRECATED: the port on which to serve HTTP insecurely without authentication and authorization. If 0, don't serve plain HTTP at all. See --secure-port instead. This parameter is ignored if a config file is specified in --config.

--profiling     Default: true

DEPRECATED: enable profiling via web interface host:port/debug/pprof/. This parameter is ignored if a config file is specified in --config.

--requestheader-allowed-names strings

List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed.

--requestheader-client-ca-file string

Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers. WARNING: generally do not depend on authorization being already done for incoming requests.

--requestheader-extra-headers-prefix strings     Default: "x-remote-extra-"

List of request header prefixes to inspect. X-Remote-Extra- is suggested.

--requestheader-group-headers strings     Default: "x-remote-group"

List of request headers to inspect for groups. X-Remote-Group is suggested.

--requestheader-username-headers strings     Default: "x-remote-user"

List of request headers to inspect for usernames. X-Remote-User is common.
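
The request-header flags are normally configured as a set; an illustrative wiring (the file path and allowed CN are assumptions):

```shell
# Trust the front-proxy CA and accept identities from X-Remote-* headers.
kube-scheduler \
  --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt \
  --requestheader-allowed-names=front-proxy-client \
  --requestheader-username-headers=X-Remote-User \
  --requestheader-group-headers=X-Remote-Group \
  --requestheader-extra-headers-prefix=X-Remote-Extra-
```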

--scheduler-name string     Default: "default-scheduler"

DEPRECATED: name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's "spec.schedulerName". This parameter is ignored if a config file is specified in --config.

--secure-port int     Default: 10259

The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all.

--show-hidden-metrics-for-version string

The previous version for which you want to show hidden metrics. Only the previous minor version is meaningful, other values will not be allowed. The format is <major>.<minor>, e.g. '1.16'. The purpose of this format is to make sure you have the opportunity to notice if the next release hides additional metrics, rather than being surprised when they are permanently removed in the release after that.

--skip_headers

If true, avoid header prefixes in the log messages

--skip_log_headers

If true, avoid headers when opening log files

--stderrthreshold int     Default: 2

logs at or above this threshold go to stderr

--tls-cert-file string

File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.

--tls-cipher-suites strings

Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA.

--tls-min-version string

Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13

--tls-private-key-file string

File containing the default x509 private key matching --tls-cert-file.

--tls-sni-cert-key string

A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com".

--use-legacy-policy-config

DEPRECATED: when set to true, scheduler will ignore policy ConfigMap and uses policy config file. Note: The scheduler will fail if this is combined with Plugin configs

-v, --v int

number for the log level verbosity

--version version[=true]

Print version information and quit

--vmodule <comma-separated 'pattern=N' settings>

comma-separated list of pattern=N settings for file-filtered logging
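
A small illustrative combination with the global verbosity flag (the file pattern is an assumption):

```shell
# Log files matching 'scheduler*' at v=5, everything else at v=2.
kube-scheduler -v=2 --vmodule=scheduler=5
```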

--write-config-to string

If set, write the configuration values to this file and exit.

diff --git a/content/en/docs/reference/command-line-tools-reference/kubelet-config.v1beta1.md b/content/en/docs/reference/command-line-tools-reference/kubelet-config.v1beta1.md
new file mode 100644
index 0000000000000..a5e28a1043018
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/kubelet-config.v1beta1.md
@@ -0,0 +1,1501 @@
---
title: kubelet.config.k8s.io/v1beta1
content_type: tool-reference
---

## Resource Types

- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration)
- [SerializedNodeConfigSource](#kubelet-config-k8s-io-v1beta1-SerializedNodeConfigSource)

### `KubeletConfiguration` {#kubelet-config-k8s-io-v1beta1-KubeletConfiguration}

KubeletConfiguration contains the configuration for the Kubelet
Field    Description
apiVersion
string
kubelet.config.k8s.io/v1beta1
kind
string
KubeletConfiguration
enableServer*
+bool +
enableServer enables Kubelet's secured server. +Note: Kubelet's insecure port is controlled by the readOnlyPort option. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: true
staticPodPath
+string +
staticPodPath is the path to the directory containing local (static) pods to +run, or the path to a single static pod file. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +the set of static pods specified at the new path may be different than the +ones the Kubelet initially started with, and this may disrupt your node. +Default: ""
syncFrequency
+meta/v1.Duration +
syncFrequency is the max period between synchronizing running +containers and config. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +shortening this duration may have a negative performance impact, especially +as the number of Pods on the node increases. Alternatively, increasing this +duration will result in longer refresh times for ConfigMaps and Secrets. +Default: "1m"
fileCheckFrequency
+meta/v1.Duration +
fileCheckFrequency is the duration between checking config files for +new data +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +shortening the duration will cause the Kubelet to reload local Static Pod +configurations more frequently, which may have a negative performance impact. +Default: "20s"
httpCheckFrequency
+meta/v1.Duration +
httpCheckFrequency is the duration between checking http for new data +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +shortening the duration will cause the Kubelet to poll staticPodURL more +frequently, which may have a negative performance impact. +Default: "20s"
staticPodURL
+string +
staticPodURL is the URL for accessing static pods to run +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +the set of static pods specified at the new URL may be different than the +ones the Kubelet initially started with, and this may disrupt your node. +Default: ""
staticPodURLHeader
+map[string][]string +
staticPodURLHeader is a map of slices with HTTP headers to use when accessing the podURL +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt the ability to read the latest set of static pods from StaticPodURL. +Default: nil
address
+string +
address is the IP address for the Kubelet to serve on (set to 0.0.0.0 +for all interfaces). +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: "0.0.0.0"
port
+int32 +
port is the port for the Kubelet to serve on. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: 10250
readOnlyPort
+int32 +
readOnlyPort is the read-only port for the Kubelet to serve on with +no authentication/authorization. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: 0 (disabled)
tlsCertFile
+string +
tlsCertFile is the file containing x509 Certificate for HTTPS. (CA cert, +if any, concatenated after server cert). If tlsCertFile and +tlsPrivateKeyFile are not provided, a self-signed certificate +and key are generated for the public address and saved to the directory +passed to the Kubelet's --cert-dir flag. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: ""
tlsPrivateKeyFile
+string +
tlsPrivateKeyFile is the file containing x509 private key matching tlsCertFile +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: ""
tlsCipherSuites
+[]string +
TLSCipherSuites is the list of allowed cipher suites for the server. +Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: nil
tlsMinVersion
+string +
TLSMinVersion is the minimum TLS version supported. +Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: ""
rotateCertificates
+bool +
rotateCertificates enables client certificate rotation. The Kubelet will request a +new certificate from the certificates.k8s.io API. This requires an approver to approve the +certificate signing requests. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +disabling it may disrupt the Kubelet's ability to authenticate with the API server +after the current certificate expires. +Default: false
serverTLSBootstrap
+bool +
serverTLSBootstrap enables server certificate bootstrap. Instead of self +signing a serving certificate, the Kubelet will request a certificate from +the certificates.k8s.io API. This requires an approver to approve the +certificate signing requests. The RotateKubeletServerCertificate feature +must be enabled. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +disabling it will stop the renewal of Kubelet server certificates, which can +disrupt components that interact with the Kubelet server in the long term, +due to certificate expiration. +Default: false
authentication
+KubeletAuthentication +
authentication specifies how requests to the Kubelet's server are authenticated +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Defaults: + anonymous: + enabled: false + webhook: + enabled: true + cacheTTL: "2m"
authorization
+KubeletAuthorization +
authorization specifies how requests to the Kubelet's server are authorized +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Defaults: + mode: Webhook + webhook: + cacheAuthorizedTTL: "5m" + cacheUnauthorizedTTL: "30s"
registryPullQPS
+int32 +
registryPullQPS is the limit of registry pulls per second. +Set to 0 for no limit. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact scalability by changing the amount of traffic produced +by image pulls. +Default: 5
registryBurst
+int32 +
registryBurst is the maximum size of bursty pulls, temporarily allows +pulls to burst to this number, while still not exceeding registryPullQPS. +Only used if registryPullQPS > 0. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact scalability by changing the amount of traffic produced +by image pulls. +Default: 10
eventRecordQPS
+int32 +
eventRecordQPS is the maximum event creations per second. If 0, there +is no limit enforced. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact scalability by changing the amount of traffic produced by +event creations. +Default: 5
eventBurst
+int32 +
eventBurst is the maximum size of a burst of event creations, temporarily +allows event creations to burst to this number, while still not exceeding +eventRecordQPS. Only used if eventRecordQPS > 0. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact scalability by changing the amount of traffic produced by +event creations. +Default: 10
enableDebuggingHandlers
+bool +
enableDebuggingHandlers enables server endpoints for log access +and local running of containers and commands, including the exec, +attach, logs, and portforward features. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +disabling it may disrupt components that interact with the Kubelet server. +Default: true
enableContentionProfiling
+bool +
enableContentionProfiling enables lock contention profiling, if enableDebuggingHandlers is true. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +enabling it may carry a performance impact. +Default: false
healthzPort
+int32 +
healthzPort is the port of the localhost healthz endpoint (set to 0 to disable) +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that monitor Kubelet health. +Default: 10248
healthzBindAddress
+string +
healthzBindAddress is the IP address for the healthz server to serve on +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that monitor Kubelet health. +Default: "127.0.0.1"
oomScoreAdj
+int32 +
oomScoreAdj is the oom-score-adj value for the kubelet process. Values +must be within the range [-1000, 1000]. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact the stability of nodes under memory pressure. +Default: -999
clusterDomain
+string +
clusterDomain is the DNS domain for this cluster. If set, kubelet will +configure all containers to search this domain in addition to the +host's search domains. +Dynamic Kubelet Config (beta): Dynamically updating this field is not recommended, +as it should be kept in sync with the rest of the cluster. +Default: ""
clusterDNS
+[]string +
clusterDNS is a list of IP addresses for the cluster DNS server. If set, +kubelet will configure all containers to use this for DNS resolution +instead of the host's DNS servers. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +changes will only take effect on Pods created after the update. Draining +the node is recommended before changing this field. +Default: nil
streamingConnectionIdleTimeout
+meta/v1.Duration +
streamingConnectionIdleTimeout is the maximum time a streaming connection +can be idle before the connection is automatically closed. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact components that rely on infrequent updates over streaming +connections to the Kubelet server. +Default: "4h"
nodeStatusUpdateFrequency
+meta/v1.Duration +
nodeStatusUpdateFrequency is the frequency that kubelet computes node +status. If node lease feature is not enabled, it is also the frequency that +kubelet posts node status to master. +Note: When node lease feature is not enabled, be cautious when changing the +constant, it must work with nodeMonitorGracePeriod in nodecontroller. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact node scalability, and also that the node controller's +nodeMonitorGracePeriod must be set to N∗NodeStatusUpdateFrequency, +where N is the number of retries before the node controller marks +the node unhealthy. +Default: "10s"
nodeStatusReportFrequency
+meta/v1.Duration +
nodeStatusReportFrequency is the frequency that kubelet posts node +status to master if node status does not change. Kubelet will ignore this +frequency and post node status immediately if any change is detected. It is +only used when node lease feature is enabled. nodeStatusReportFrequency's +default value is 1m. But if nodeStatusUpdateFrequency is set explicitly, +nodeStatusReportFrequency's default value will be set to +nodeStatusUpdateFrequency for backward compatibility. +Default: "1m"
nodeLeaseDurationSeconds
+int32 +
nodeLeaseDurationSeconds is the duration the Kubelet will set on its corresponding Lease, +when the NodeLease feature is enabled. This feature provides an indicator of node +health by having the Kubelet create and periodically renew a lease, named after the node, +in the kube-node-lease namespace. If the lease expires, the node can be considered unhealthy. +The lease is currently renewed every 10s, per KEP-0009. In the future, the lease renewal interval +may be set based on the lease duration. +Requires the NodeLease feature gate to be enabled. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +decreasing the duration may reduce tolerance for issues that temporarily prevent +the Kubelet from renewing the lease (e.g. a short-lived network issue). +Default: 40
imageMinimumGCAge
+meta/v1.Duration +
imageMinimumGCAge is the minimum age for an unused image before it is +garbage collected. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may trigger or delay garbage collection, and may change the image overhead +on the node. +Default: "2m"
imageGCHighThresholdPercent
+int32 +
imageGCHighThresholdPercent is the percent of disk usage after which +image garbage collection is always run. The percent is calculated as +this field value out of 100. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may trigger or delay garbage collection, and may change the image overhead +on the node. +Default: 85
imageGCLowThresholdPercent
+int32 +
imageGCLowThresholdPercent is the percent of disk usage before which +image garbage collection is never run. Lowest disk usage to garbage +collect to. The percent is calculated as this field value out of 100. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may trigger or delay garbage collection, and may change the image overhead +on the node. +Default: 80
volumeStatsAggPeriod
+meta/v1.Duration +
How frequently to calculate and cache volume disk usage for all pods +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +shortening the period may carry a performance impact. +Default: "1m"
kubeletCgroups
+string +
kubeletCgroups is the absolute name of cgroups to isolate the kubelet in +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: ""
systemCgroups
+string +
systemCgroups is the absolute name of cgroups in which to place +all non-kernel processes that are not already in a container. Empty +for no container. Rolling back the flag requires a reboot. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: ""
cgroupRoot
+string +
cgroupRoot is the root cgroup to use for pods. This is handled by the +container runtime on a best effort basis. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: ""
cgroupsPerQOS
+bool +
Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes, +and all Burstable and BestEffort pods are brought up under their +specific top level QoS cgroup. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: true
cgroupDriver
+string +
driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd) +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: "cgroupfs"
cpuManagerPolicy
+string +
CPUManagerPolicy is the name of the policy to use. +Requires the CPUManager feature gate to be enabled. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: "none"
cpuManagerReconcilePeriod
+meta/v1.Duration +
CPU Manager reconciliation period. +Requires the CPUManager feature gate to be enabled. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +shortening the period may carry a performance impact. +Default: "10s"
topologyManagerPolicy
+string +
TopologyManagerPolicy is the name of the policy to use. +Policies other than "none" require the TopologyManager feature gate to be enabled. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: "none"
topologyManagerScope
+string +
TopologyManagerScope represents the scope of topology hint generation +that topology manager requests and hint providers generate. +"pod" scope requires the TopologyManager feature gate to be enabled. +Default: "container"
qosReserved
+map[string]string +
qosReserved is a set of resource name to percentage pairs that specify +the minimum percentage of a resource reserved for exclusive use by the +guaranteed QoS tier. +Currently supported resources: "memory" +Requires the QOSReserved feature gate to be enabled. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: nil
runtimeRequestTimeout
+meta/v1.Duration +
runtimeRequestTimeout is the timeout for all runtime requests except long running +requests - pull, logs, exec and attach. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: "2m"
hairpinMode
+string +
hairpinMode specifies how the Kubelet should configure the container +bridge for hairpin packets. +Setting this flag allows endpoints in a Service to load-balance back to +themselves if they should try to access their own Service. Values: + "promiscuous-bridge": make the container bridge promiscuous. + "hairpin-veth": set the hairpin flag on container veth interfaces. + "none": do nothing. +Generally, one must set --hairpin-mode=hairpin-veth to achieve hairpin NAT, +because promiscuous-bridge assumes the existence of a container bridge named cbr0. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may require a node reboot, depending on the network plugin. +Default: "promiscuous-bridge"
maxPods
+int32 +
maxPods is the number of pods that can run on this Kubelet. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +changes may cause Pods to fail admission on Kubelet restart, and may change +the value reported in Node.Status.Capacity[v1.ResourcePods], thus affecting +future scheduling decisions. Increasing this value may also decrease performance, +as more Pods can be packed into a single node. +Default: 110
podCIDR
+string +
The CIDR to use for pod IP addresses, only used in standalone mode. +In cluster mode, this is obtained from the master. +Dynamic Kubelet Config (beta): This field should always be set to the empty default. +It should only be set for standalone Kubelets, which cannot use Dynamic Kubelet Config. +Default: ""
podPidsLimit
+int64 +
PodPidsLimit is the maximum number of pids in any pod. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +lowering it may prevent container processes from forking after the change. +Default: -1
resolvConf
+string +
ResolverConfig is the resolver configuration file used as the basis +for the container DNS resolution configuration. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +changes will only take effect on Pods created after the update. Draining +the node is recommended before changing this field. +Default: "/etc/resolv.conf"
runOnce
+bool +
RunOnce causes the Kubelet to check the API server once for pods, +run those in addition to the pods specified by static pod files, and exit. +Default: false
cpuCFSQuota
+bool +
cpuCFSQuota enables CPU CFS quota enforcement for containers that +specify CPU limits. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +disabling it may reduce node stability. +Default: true
cpuCFSQuotaPeriod
+meta/v1.Duration +
CPUCFSQuotaPeriod is the CPU CFS quota period value, cpu.cfs_period_us. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +limits set for containers will result in different cpu.cfs_quota settings. This +will trigger container restarts on the node being reconfigured. +Default: "100ms"
nodeStatusMaxImages
+int32 +
nodeStatusMaxImages caps the number of images reported in Node.Status.Images. +Note: If -1 is specified, no cap will be applied. If 0 is specified, no image is returned. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +different values can be reported on node status. +Default: 50
maxOpenFiles
+int64 +
maxOpenFiles is the number of files that can be opened by the Kubelet process. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact the ability of the Kubelet to interact with the node's filesystem. +Default: 1000000
contentType
+string +
contentType is the content type of requests sent to the apiserver. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact the ability of the Kubelet to communicate with the API server. +If the Kubelet loses contact with the API server due to a change to this field, +the change cannot be reverted via dynamic Kubelet config. +Default: "application/vnd.kubernetes.protobuf"
kubeAPIQPS
+int32 +
kubeAPIQPS is the QPS to use while talking with kubernetes apiserver +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact scalability by changing the amount of traffic the Kubelet +sends to the API server. +Default: 5
kubeAPIBurst
+int32 +
kubeAPIBurst is the burst to allow while talking with kubernetes apiserver +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact scalability by changing the amount of traffic the Kubelet +sends to the API server. +Default: 10
serializeImagePulls
+bool +
serializeImagePulls when enabled, tells the Kubelet to pull images one +at a time. We recommend ∗not∗ changing the default value on nodes that +run docker daemon with version < 1.9 or an Aufs storage backend. +Issue #10959 has more details. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact the performance of image pulls. +Default: true
evictionHard
+map[string]string +
Map of signal names to quantities that defines hard eviction thresholds. For example: {"memory.available": "300Mi"}. +To explicitly disable, pass a 0% or 100% threshold on an arbitrary resource. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may trigger or delay Pod evictions. +Default: + memory.available: "100Mi" + nodefs.available: "10%" + nodefs.inodesFree: "5%" + imagefs.available: "15%"
evictionSoft
+map[string]string +
Map of signal names to quantities that defines soft eviction thresholds. +For example: {"memory.available": "300Mi"}. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may trigger or delay Pod evictions, and may change the allocatable reported +by the node. +Default: nil
evictionSoftGracePeriod
+map[string]string +
Map of signal names to quantities that defines grace periods for each soft eviction signal. +For example: {"memory.available": "30s"}. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may trigger or delay Pod evictions. +Default: nil
evictionPressureTransitionPeriod
+meta/v1.Duration +
Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +lowering it may decrease the stability of the node when the node is overcommitted. +Default: "5m"
evictionMaxPodGracePeriod
+int32 +
Maximum allowed grace period (in seconds) to use when terminating pods in +response to a soft eviction threshold being met. This value effectively caps +the Pod's TerminationGracePeriodSeconds value during soft evictions. +Note: Due to issue #64530, the behavior has a bug where this value currently just +overrides the grace period during soft eviction, which can increase the grace +period from what is set on the Pod. This bug will be fixed in a future release. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +lowering it decreases the amount of time Pods will have to gracefully clean +up before being killed during a soft eviction. +Default: 0
evictionMinimumReclaim
+map[string]string +
Map of signal names to quantities that defines minimum reclaims, which describe the minimum +amount of a given resource the kubelet will reclaim when performing a pod eviction while +that resource is under pressure. For example: {"imagefs.available": "2Gi"} +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may change how well eviction can manage resource pressure. +Default: nil
podsPerCore
+int32 +
podsPerCore is the maximum number of pods per core. Cannot exceed MaxPods. +If 0, this field is ignored. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +changes may cause Pods to fail admission on Kubelet restart, and may change +the value reported in Node.Status.Capacity[v1.ResourcePods], thus affecting +future scheduling decisions. Increasing this value may also decrease performance, +as more Pods can be packed into a single node. +Default: 0
enableControllerAttachDetach
+bool +
enableControllerAttachDetach enables the Attach/Detach controller to +manage attachment/detachment of volumes scheduled to this node, and +prevents the kubelet from executing any attach/detach operations. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +changing which component is responsible for volume management on a live node +may result in volumes refusing to detach if the node is not drained prior to +the update, and if Pods are scheduled to the node before the +volumes.kubernetes.io/controller-managed-attach-detach annotation is updated by the +Kubelet. In general, it is safest to leave this value set the same as local config. +Default: true
protectKernelDefaults
+bool +
protectKernelDefaults, if true, causes the Kubelet to error if kernel +flags are not as it expects. Otherwise the Kubelet will attempt to modify +kernel flags to match its expectation. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +enabling it may cause the Kubelet to crash-loop if the Kernel is not configured as +Kubelet expects. +Default: false
makeIPTablesUtilChains
+bool +
If true, Kubelet ensures a set of iptables rules are present on host. +These rules will serve as utility rules for various components, e.g. KubeProxy. +The rules will be created based on IPTablesMasqueradeBit and IPTablesDropBit. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +disabling it will prevent the Kubelet from healing locally misconfigured iptables rules. +Default: true
iptablesMasqueradeBit
+int32 +
iptablesMasqueradeBit is the bit of the iptables fwmark space to mark for SNAT +Values must be within the range [0, 31]. Must be different from other mark bits. +Warning: Please match the value of the corresponding parameter in kube-proxy. +TODO: clean up IPTablesMasqueradeBit in kube-proxy +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it needs to be coordinated with other components, like kube-proxy, and the update +will only be effective if MakeIPTablesUtilChains is enabled. +Default: 14
iptablesDropBit
+int32 +
iptablesDropBit is the bit of the iptables fwmark space to mark for dropping packets. +Values must be within the range [0, 31]. Must be different from other mark bits. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it needs to be coordinated with other components, like kube-proxy, and the update +will only be effective if MakeIPTablesUtilChains is enabled. +Default: 15
featureGates
+map[string]bool +
featureGates is a map of feature names to bools that enable or disable alpha/experimental +features. This field modifies piecemeal the built-in default values from +"k8s.io/kubernetes/pkg/features/kube_features.go". +Dynamic Kubelet Config (beta): If dynamically updating this field, consider the +documentation for the features you are enabling or disabling. While we +encourage feature developers to make it possible to dynamically enable +and disable features, some changes may require node reboots, and some +features may require careful coordination to retroactively disable. +Default: nil
failSwapOn
+bool +
failSwapOn tells the Kubelet to fail to start if swap is enabled on the node. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +setting it to true will cause the Kubelet to crash-loop if swap is enabled. +Default: true
containerLogMaxSize
+string +
A quantity that defines the maximum size of the container log file before it is rotated. +For example: "5Mi" or "256Ki". +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may trigger log rotation. +Default: "10Mi"
containerLogMaxFiles
+int32 +
Maximum number of container log files that can be present for a container. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +lowering it may cause log files to be deleted. +Default: 5
configMapAndSecretChangeDetectionStrategy
+ResourceChangeDetectionStrategy +
ConfigMapAndSecretChangeDetectionStrategy is a mode in which +config map and secret managers are running. +Default: "Watch"
systemReserved
+map[string]string +
systemReserved is a set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) +pairs that describe resources reserved for non-kubernetes components. +Currently only cpu and memory are supported. +See http://kubernetes.io/docs/user-guide/compute-resources for more detail. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may not be possible to increase the reserved resources, because this +requires resizing cgroups. Always look for a NodeAllocatableEnforced event +after updating this field to ensure that the update was successful. +Default: nil
kubeReserved
+map[string]string +
A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs +that describe resources reserved for kubernetes system components. +Currently cpu, memory and local storage for root file system are supported. +See http://kubernetes.io/docs/user-guide/compute-resources for more detail. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may not be possible to increase the reserved resources, because this +requires resizing cgroups. Always look for a NodeAllocatableEnforced event +after updating this field to ensure that the update was successful. +Default: nil
reservedSystemCPUs*
+string +
This ReservedSystemCPUs option specifies the CPU list reserved for host-level system threads and Kubernetes-related threads. +This provides a "static" CPU list rather than the "dynamic" list provided by system-reserved and kube-reserved. +This option overwrites the CPUs provided by system-reserved and kube-reserved.
showHiddenMetricsForVersion
+string +
The previous version for which you want to show hidden metrics. +Only the previous minor version is meaningful, other values will not be allowed. +The format is `<major>.<minor>`, e.g.: '1.16'. +The purpose of this format is to make sure you have the opportunity to notice if the next release hides additional metrics, +rather than being surprised when they are permanently removed in the release after that. +Default: ""
systemReservedCgroup
+string +
This flag helps the kubelet identify the absolute name of the top level cgroup used to enforce `SystemReserved` compute resource reservation for OS system daemons. +Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: ""
kubeReservedCgroup
+string +
This flag helps the kubelet identify the absolute name of the top level cgroup used to enforce `KubeReserved` compute resource reservation for Kubernetes node system daemons. +Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: ""
enforceNodeAllocatable
+[]string +
This flag specifies the various Node Allocatable enforcements that Kubelet needs to perform. +This flag accepts a list of options. Acceptable options are `none`, `pods`, `system-reserved` & `kube-reserved`. +If `none` is specified, no other options may be specified. +Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +removing enforcements may reduce the stability of the node. Alternatively, adding +enforcements may reduce the stability of components which were using more than +the reserved amount of resources; for example, enforcing kube-reserved may cause +the Kubelet to OOM if it uses more than the reserved resources, and enforcing system-reserved +may cause system daemons to OOM if they use more than the reserved resources. +Default: ["pods"]
allowedUnsafeSysctls
+[]string +
A comma-separated whitelist of unsafe sysctls or sysctl patterns (ending in ∗). +Unsafe sysctl groups are kernel.shm∗, kernel.msg∗, kernel.sem, fs.mqueue.∗, and net.∗. +These sysctls are namespaced but not allowed by default. For example: "kernel.msg∗,net.ipv4.route.min_pmtu" +Default: []
volumePluginDir
+string +
volumePluginDir is the full path of the directory in which to search +for additional third party volume plugins. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that changing +the volumePluginDir may disrupt workloads relying on third party volume plugins. +Default: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/"
providerID
+string +
providerID, if set, sets the unique id of the instance that an external provider (i.e. cloudprovider) +can use to identify a specific node. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact the ability of the Kubelet to interact with cloud providers. +Default: ""
kernelMemcgNotification
+bool +
kernelMemcgNotification, if set, instructs the kubelet to integrate with the kernel memcg notification +to determine whether memory eviction thresholds are crossed rather than polling. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact the way Kubelet interacts with the kernel. +Default: false
logging*
+LoggingConfiguration +
Logging specifies the logging options. +Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information. +Defaults: + Format: text
enableSystemLogHandler
+bool +
enableSystemLogHandler enables system logs via web interface host:port/logs/ +Default: true
shutdownGracePeriod
+meta/v1.Duration +
ShutdownGracePeriod specifies the total duration that the node should delay the shutdown and total grace period for pod termination during a node shutdown. +Default: "30s"
shutdownGracePeriodCriticalPods
+meta/v1.Duration +
ShutdownGracePeriodCriticalPods specifies the duration used to terminate critical pods during a node shutdown. This should be less than ShutdownGracePeriod. +For example, if ShutdownGracePeriod=30s, and ShutdownGracePeriodCriticalPods=10s, during a node shutdown the first 20 seconds would be reserved for gracefully terminating normal pods, and the last 10 seconds would be reserved for terminating critical pods. +Default: "10s"
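To show how these fields fit together in practice, here is a minimal, illustrative KubeletConfiguration manifest; every value below is an example drawn from the field descriptions above, not a recommendation.

```yaml
# Illustrative KubeletConfiguration; values are examples, not recommendations.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
staticPodPath: /etc/kubernetes/manifests
clusterDomain: cluster.local
clusterDNS:
  - 10.96.0.10
maxPods: 110
failSwapOn: true
# Override two of the default hard eviction thresholds:
evictionHard:
  memory.available: 200Mi
  nodefs.available: "10%"
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
```

A file like this is what you would point the kubelet's `--config` flag at.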
+ + + +### `SerializedNodeConfigSource` {#kubelet-config-k8s-io-v1beta1-SerializedNodeConfigSource} + + + + + +SerializedNodeConfigSource allows us to serialize v1.NodeConfigSource. +This type is used internally by the Kubelet for tracking checkpointed dynamic configs. +It exists in the kubeletconfig API group because it is classified as a versioned input to the Kubelet. + + + + + + + + + + + + + + + + + +
Field    Description
apiVersion
string
kubelet.config.k8s.io/v1beta1
kind
string
SerializedNodeConfigSource
source
+core/v1.NodeConfigSource +
Source is the source that we are serializing
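For orientation, a hedged sketch of what a serialized checkpoint might look like; the ConfigMap name, namespace, and key are invented for illustration.

```yaml
# Hypothetical checkpointed dynamic config source; the ConfigMap
# coordinates below are invented for illustration.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: SerializedNodeConfigSource
source:
  configMap:
    name: node-config-1.21
    namespace: kube-system
    kubeletConfigKey: kubelet
```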
+ + + +### `HairpinMode` {#kubelet-config-k8s-io-v1beta1-HairpinMode} + +(Alias of `string`) + + + +HairpinMode denotes how the kubelet should configure networking to handle +hairpin packets. + + + + + +### `KubeletAnonymousAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletAnonymousAuthentication} + + + + +**Appears in:** + +- [KubeletAuthentication](#kubelet-config-k8s-io-v1beta1-KubeletAuthentication) + + + + + + + + + + + + + + + + +
Field    Description
enabled
+bool +
enabled allows anonymous requests to the kubelet server. +Requests that are not rejected by another authentication method are treated as anonymous requests. +Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated.
+ + + +### `KubeletAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletAuthentication} + + + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field    Description
x509
+KubeletX509Authentication +
x509 contains settings related to x509 client certificate authentication
webhook
+KubeletWebhookAuthentication +
webhook contains settings related to webhook bearer token authentication
anonymous
+KubeletAnonymousAuthentication +
anonymous contains settings related to anonymous authentication
+ + + +### `KubeletAuthorization` {#kubelet-config-k8s-io-v1beta1-KubeletAuthorization} + + + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + + + + + + + + + + + + + + + + + + + + + +
Field    Description
mode
+KubeletAuthorizationMode +
mode is the authorization mode to apply to requests to the kubelet server. +Valid values are AlwaysAllow and Webhook. +Webhook mode uses the SubjectAccessReview API to determine authorization.
webhook
+KubeletWebhookAuthorization +
webhook contains settings related to Webhook authorization.
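Taken together, the authentication and authorization blocks might appear in a KubeletConfiguration as in this sketch, which mirrors the defaults listed earlier; the clientCAFile path is an assumed example, not a required location.

```yaml
# Authentication/authorization stanzas mirroring the documented defaults;
# the clientCAFile path is an example only.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
  webhook:
    enabled: true
    cacheTTL: 2m
  anonymous:
    enabled: false
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m
    cacheUnauthorizedTTL: 30s
```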
+ + + +### `KubeletAuthorizationMode` {#kubelet-config-k8s-io-v1beta1-KubeletAuthorizationMode} + +(Alias of `string`) + + +**Appears in:** + +- [KubeletAuthorization](#kubelet-config-k8s-io-v1beta1-KubeletAuthorization) + + + + + + + + +### `KubeletWebhookAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletWebhookAuthentication} + + + + +**Appears in:** + +- [KubeletAuthentication](#kubelet-config-k8s-io-v1beta1-KubeletAuthentication) + + + + + + + + + + + + + + + + + + + + + +
Field    Description
enabled
+bool +
enabled allows bearer token authentication backed by the tokenreviews.authentication.k8s.io API
cacheTTL
+meta/v1.Duration +
cacheTTL enables caching of authentication results
+ + + +### `KubeletWebhookAuthorization` {#kubelet-config-k8s-io-v1beta1-KubeletWebhookAuthorization} + + + + +**Appears in:** + +- [KubeletAuthorization](#kubelet-config-k8s-io-v1beta1-KubeletAuthorization) + + + + + + + + + + + + + + + + + + + + + +
Field    Description
cacheAuthorizedTTL
+meta/v1.Duration +
cacheAuthorizedTTL is the duration to cache 'authorized' responses from the webhook authorizer.
cacheUnauthorizedTTL
+meta/v1.Duration +
cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from the webhook authorizer.
+ + + +### `KubeletX509Authentication` {#kubelet-config-k8s-io-v1beta1-KubeletX509Authentication} + + + + +**Appears in:** + +- [KubeletAuthentication](#kubelet-config-k8s-io-v1beta1-KubeletAuthentication) + + + + + + + + + + + + + + + + +
Field    Description
clientCAFile
+string +
clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request presenting a client certificate +signed by one of the authorities in the bundle is authenticated with a username corresponding to the CommonName, +and groups corresponding to the Organization in the client certificate.
+ + + +### `ResourceChangeDetectionStrategy` {#kubelet-config-k8s-io-v1beta1-ResourceChangeDetectionStrategy} + +(Alias of `string`) + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + + +ResourceChangeDetectionStrategy denotes a mode in which internal +managers (secret, configmap) are discovering object changes. + + + + + + + +### `LoggingConfiguration` {#LoggingConfiguration} + + + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + + +LoggingConfiguration contains logging options +Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information. + + + + + + + + + + + + + + + + + + +
Field    Description
format*
+string +
Format flag specifies the structure of log messages. +The default value of format is `text`.
sanitization*
+bool +
[Experimental] When enabled, prevents logging of fields tagged as sensitive (passwords, keys, tokens). +Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.
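As a small illustration, the logging stanza from above would sit inside a KubeletConfiguration like this; `sanitization` is left disabled given the overhead caveat.

```yaml
# Logging stanza inside a KubeletConfiguration; sanitization is experimental
# and carries runtime overhead, so it is left disabled in this sketch.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
logging:
  format: text
  sanitization: false
```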
diff --git a/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md b/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md index 89ad711a56bec..1b1142913f909 100644 --- a/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md +++ b/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md @@ -24,11 +24,10 @@ found [here](https://github.com/kubernetes/kubernetes/pull/20439). This document describes the process of node initialization, how to set up TLS client certificate bootstrapping for kubelets, and how it works. - - ## Initialization Process + When a worker node starts up, the kubelet does the following: 1. Look for its `kubeconfig` file @@ -54,6 +53,7 @@ The TLS Bootstrapping described in this document is intended to simplify, and pa a cluster. ### Bootstrap Initialization + In the bootstrap initialization process, the following occurs: 1. kubelet begins @@ -77,6 +77,7 @@ In the bootstrap initialization process, the following occurs: The rest of this document describes the necessary steps to configure TLS Bootstrapping, and its limitations. ## Configuration + To configure for TLS bootstrapping and optional automatic approval, you must configure options on the following components: * kube-apiserver @@ -87,6 +88,7 @@ To configure for TLS bootstrapping and optional automatic approval, you must con In addition, you need your Kubernetes Certificate Authority (CA). ## Certificate Authority + As without bootstrapping, you will need a Certificate Authority (CA) key and certificate. As without bootstrapping, these will be used to sign the kubelet certificate. As before, it is your responsibility to distribute them to master nodes. @@ -96,6 +98,7 @@ We will refer to these as "Kubernetes CA certificate and key". All Kubernetes components that use these certificates - kubelet, kube-apiserver, kube-controller-manager - assume the key and certificate to be PEM-encoded. ## kube-apiserver configuration + The kube-apiserver has several requirements to enable TLS bootstrapping: * Recognizing CA that signs the client certificate @@ -103,6 +106,7 @@ The kube-apiserver has several requirements to enable TLS bootstrapping: * Authorize the bootstrapping kubelet to create a certificate signing request (CSR) ### Recognizing client certificates + This is normal for all client certificate authentication. If not already set, add the `--client-ca-file=FILENAME` flag to the kube-apiserver command to enable client certificate authentication, referencing a certificate authority bundle @@ -110,6 +114,7 @@ containing the signing certificate, for example `--client-ca-file=/var/lib/kubernetes/ca.pem`. ### Initial bootstrap authentication + In order for the bootstrapping kubelet to connect to kube-apiserver and request a certificate, it must first authenticate to the server. You can use any [authenticator](/docs/reference/access-authn-authz/authentication/) that can authenticate the kubelet. @@ -132,13 +137,13 @@ A kubelet authenticating using bootstrap tokens is authenticated as a user in th As this feature matures, you should ensure tokens are bound to a Role Based Access Control (RBAC) policy -which limits requests (using the [bootstrap -token](/docs/reference/access-authn-authz/bootstrap-tokens/)) strictly to client +which limits requests (using the [bootstrap token](/docs/reference/access-authn-authz/bootstrap-tokens/)) strictly to client requests related to certificate provisioning. 
With RBAC in place, scoping the tokens to a group allows for great flexibility. For example, you could disable a particular bootstrap group's access when you are done provisioning the nodes. #### Bootstrap tokens + Bootstrap tokens are described in detail [here](/docs/reference/access-authn-authz/bootstrap-tokens/). These are tokens that are stored as secrets in the Kubernetes cluster, and then issued to the individual kubelet. You can use a single token for an entire cluster, or issue one per worker node. @@ -148,7 +153,7 @@ The process is two-fold: 2. Issue the token to the kubelet From the kubelet's perspective, one token is like another and has no special meaning. -From the kube-apiserver's perspective, however, the bootstrap token is special. Due to its `Type`, `namespace` and `name`, kube-apiserver recognizes it as a special token, +From the kube-apiserver's perspective, however, the bootstrap token is special. Due to its `type`, `namespace` and `name`, kube-apiserver recognizes it as a special token, and grants anyone authenticating with that token special bootstrap rights, notably treating them as a member of the `system:bootstrappers` group. This fulfills a basic requirement for TLS bootstrapping. @@ -156,17 +161,18 @@ The details for creating the secret are available [here](/docs/reference/access- If you want to use bootstrap tokens, you must enable it on kube-apiserver with the flag: -``` +```console --enable-bootstrap-token-auth=true ``` #### Token authentication file + kube-apiserver has an ability to accept tokens as authentication. These tokens are arbitrary but should represent at least 128 bits of entropy derived from a secure random number generator (such as `/dev/urandom` on most modern Linux systems). There are multiple ways you can generate a token. For example: -``` +```shell head -c 16 /dev/urandom | od -An -t x | tr -d ' ' ``` @@ -175,7 +181,7 @@ will generate tokens that look like `02b50b05283e98dd0fd71db496ef01e8`. The token file should look like the following example, where the first three values can be anything and the quoted group name should be as depicted: -``` +```console 02b50b05283e98dd0fd71db496ef01e8,kubelet-bootstrap,10001,"system:bootstrappers" ``` @@ -185,11 +191,16 @@ systemd unit file perhaps) to enable the token file. See docs further details. ### Authorize kubelet to create CSR -Now that the bootstrapping node is _authenticated_ as part of the `system:bootstrappers` group, it needs to be _authorized_ to create a certificate signing request (CSR) as well as retrieve it when done. Fortunately, Kubernetes ships with a `ClusterRole` with precisely these (and just these) permissions, `system:node-bootstrapper`. -To do this, you just need to create a `ClusterRoleBinding` that binds the `system:bootstrappers` group to the cluster role `system:node-bootstrapper`. +Now that the bootstrapping node is _authenticated_ as part of the +`system:bootstrappers` group, it needs to be _authorized_ to create a +certificate signing request (CSR) as well as retrieve it when done. +Fortunately, Kubernetes ships with a `ClusterRole` with precisely these (and +only these) permissions, `system:node-bootstrapper`. -``` +To do this, you only need to create a `ClusterRoleBinding` that binds the `system:bootstrappers` group to the cluster role `system:node-bootstrapper`. 
+ +```yaml # enable bootstrapping nodes to create CSR apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -206,6 +217,7 @@ roleRef: ``` ## kube-controller-manager configuration + While the apiserver receives the requests for certificates from the kubelet and authenticates those requests, the controller-manager is responsible for issuing actual signed certificates. @@ -221,6 +233,7 @@ In order for the controller-manager to sign certificates, it needs the following * enabling CSR signing ### Access to key and certificate + As described earlier, you need to create a Kubernetes CA key and certificate, and distribute it to the master nodes. These will be used by the controller-manager to sign the kubelet certificates. @@ -230,23 +243,24 @@ with the flag `--client-ca-file=FILENAME` (for example, `--client-ca-file=/var/l To provide the Kubernetes CA key and certificate to kube-controller-manager, use the following flags: -``` +```shell --cluster-signing-cert-file="/etc/path/to/kubernetes/ca/ca.crt" --cluster-signing-key-file="/etc/path/to/kubernetes/ca/ca.key" ``` for example: -``` +```shell --cluster-signing-cert-file="/var/lib/kubernetes/ca.pem" --cluster-signing-key-file="/var/lib/kubernetes/ca-key.pem" ``` The validity duration of signed certificates can be configured with flag: -``` +```shell --cluster-signing-duration ``` ### Approval + In order to approve CSRs, you need to tell the controller-manager that it is acceptable to approve them. This is done by granting RBAC permissions to the correct group. @@ -257,7 +271,7 @@ There are two distinct sets of permissions: To enable the kubelet to request and receive a new certificate, create a `ClusterRoleBinding` that binds the group in which the bootstrapping node is a member `system:bootstrappers` to the `ClusterRole` that grants it permission, `system:certificates.k8s.io:certificatesigningrequests:nodeclient`: -```yml +```yaml # Approve all CSRs for the group "system:bootstrappers" apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -276,7 +290,7 @@ roleRef: To enable the kubelet to renew its own client certificate, create a `ClusterRoleBinding` that binds the group in which the fully functioning node is a member `system:nodes` to the `ClusterRole` that grants it permission, `system:certificates.k8s.io:certificatesigningrequests:selfnodeclient`: -```yml +```yaml # Approve renewal CSRs for the group "system:nodes" apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -294,8 +308,8 @@ roleRef: The `csrapproving` controller that ships as part of [kube-controller-manager](/docs/admin/kube-controller-manager/) and is enabled -by default. The controller uses the [`SubjectAccessReview` -API](/docs/reference/access-authn-authz/authorization/#checking-api-access) to +by default. The controller uses the +[`SubjectAccessReview` API](/docs/reference/access-authn-authz/authorization/#checking-api-access) to determine if a given user is authorized to request a CSR, then approves based on the authorization outcome. To prevent conflicts with other approvers, the builtin approver doesn't explicitly deny CSRs. It only ignores unauthorized @@ -304,6 +318,7 @@ collection. ## kubelet configuration + Finally, with the master nodes properly set up and all of the necessary authentication and authorization in place, we can configure the kubelet. 
The kubelet requires the following configuration to bootstrap: @@ -317,7 +332,7 @@ The bootstrap `kubeconfig` should be in a path available to the kubelet, for exa Its format is identical to a normal `kubeconfig` file. A sample file might look as follows: -```yml +```yaml apiVersion: v1 kind: Config clusters: @@ -345,7 +360,7 @@ The important elements to note are: * `token`: the token to use The format of the token does not matter, as long as it matches what kube-apiserver expects. In the above example, we used a bootstrap token. -As stated earlier, _any_ valid authentication method can be used, not just tokens. +As stated earlier, _any_ valid authentication method can be used, not only tokens. Because the bootstrap `kubeconfig` _is_ a standard `kubeconfig`, you can use `kubectl` to generate it. To create the above example file: @@ -371,6 +386,7 @@ specified by `--kubeconfig`. The certificate and key file will be placed in the directory specified by `--cert-dir`. ### Client and Serving Certificates + All of the above relate to kubelet _client_ certificates, specifically, the certificates a kubelet uses to authenticate to kube-apiserver. @@ -387,6 +403,7 @@ be used as serving certificates, or `server auth`. However, you _can_ enable its server certificate, at least partially, via certificate rotation. ### Certificate Rotation + Kubernetes v1.8 and higher kubelet implements __beta__ features for enabling rotation of its client and/or serving certificates. These can be enabled through the respective `RotateKubeletClientCertificate` and @@ -429,6 +446,7 @@ A deployment-specific approval process for kubelet serving certificates should t {{< /note >}} ## Other authenticating components + All of TLS bootstrapping described in this document relates to the kubelet. However, other components may need to communicate directly with kube-apiserver. Notable is kube-proxy, which is part of the Kubernetes control plane and runs on every node, but may also include other components such as monitoring or networking. diff --git a/content/en/docs/reference/command-line-tools-reference/kubelet.md b/content/en/docs/reference/command-line-tools-reference/kubelet.md index b569177dda5d0..66eb5785de302 100644 --- a/content/en/docs/reference/command-line-tools-reference/kubelet.md +++ b/content/en/docs/reference/command-line-tools-reference/kubelet.md @@ -224,14 +224,14 @@ kubelet [flags] --container-log-max-files int32     Default: 5 -<Warning: Beta feature> Set the maximum number of container log files that can be present for a container. The number must be ≥ 2. This flag can only be used with `--container-runtime=remote`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) +Set the maximum number of container log files that can be present for a container. The number must be ≥ 2. This flag can only be used with `--container-runtime=remote`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) --container-log-max-size string     Default: `10Mi` -<Warning: Beta feature> Set the maximum size (e.g. 10Mi) of container log file before it is rotated. This flag can only be used with `--container-runtime=remote`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. 
See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) +Set the maximum size (e.g. 10Mi) of container log file before it is rotated. This flag can only be used with `--container-runtime=remote`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) @@ -298,13 +298,6 @@ kubelet [flags] The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. The Kubelet will create this directory if it does not already exist. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Providing this flag enables dynamic Kubelet configuration. The `DynamicKubeletConfig` feature gate must be enabled to pass this flag; this gate currently defaults to `true` because the feature is beta. - ---enable-cadvisor-json-endpoints     Default: `false` - - -Enable cAdvisor json `/spec` and `/stats/*` endpoints. (DEPRECATED: will be removed in a future version) - - --enable-controller-attach-detach     Default: `true` @@ -462,7 +455,6 @@ AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
-CRIContainerLogRotation=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
@@ -917,7 +909,7 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
--pod-infra-container-image string     Default: `k8s.gcr.io/pause:3.2` -The image whose network/IPC namespaces containers in each pod will use. This docker-specific flag only works when container-runtime is set to `docker`. + Specified image will not be pruned by the image garbage collector. When container-runtime is set to `docker`, all containers in each pod will use the network/ipc namespaces from this image. Other CRI implementations have their own configuration to set this image. diff --git a/content/en/docs/reference/config-api/_index.md b/content/en/docs/reference/config-api/_index.md new file mode 100644 index 0000000000000..a053b77d60235 --- /dev/null +++ b/content/en/docs/reference/config-api/_index.md @@ -0,0 +1,5 @@ +--- +title: Configuration APIs +weight: 65 +--- + diff --git a/content/en/docs/reference/config-api/apiserver-audit.v1.md b/content/en/docs/reference/config-api/apiserver-audit.v1.md new file mode 100644 index 0000000000000..f0f36c2344f41 --- /dev/null +++ b/content/en/docs/reference/config-api/apiserver-audit.v1.md @@ -0,0 +1,620 @@ +--- +title: kube-apiserver Audit Configuration (v1) +content_type: tool-reference +package: audit.k8s.io/v1 +auto_generated: true +--- + + +## Resource Types + + +- [Event](#audit-k8s-io-v1-Event) +- [EventList](#audit-k8s-io-v1-EventList) +- [Policy](#audit-k8s-io-v1-Policy) +- [PolicyList](#audit-k8s-io-v1-PolicyList) + + + + +## `Event` {#audit-k8s-io-v1-Event} + + + + +**Appears in:** + +- [EventList](#audit-k8s-io-v1-EventList) + + +Event captures all the information that can be included in an API audit log. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
audit.k8s.io/v1
kind
string
Event
level [Required]
+Level +
+ AuditLevel at which event was generated
auditID [Required]
+k8s.io/apimachinery/pkg/types.UID +
+ Unique audit ID, generated for each request.
stage [Required]
+Stage +
+ Stage of the request handling when this event instance was generated.
requestURI [Required]
+string +
+ RequestURI is the request URI as sent by the client to a server.
verb [Required]
+string +
+ Verb is the kubernetes verb associated with the request. +For non-resource requests, this is the lower-cased HTTP method.
user [Required]
+authentication/v1.UserInfo +
+ Authenticated user information.
impersonatedUser
+authentication/v1.UserInfo +
+ Impersonated user information.
sourceIPs
+[]string +
+ Source IPs, from where the request originated and intermediate proxies.
userAgent
+string +
+ UserAgent records the user agent string reported by the client. +Note that the UserAgent is provided by the client, and must not be trusted.
objectRef
+ObjectReference +
+ Object reference this request is targeted at. +Does not apply for List-type requests, or non-resource requests.
responseStatus
+meta/v1.Status +
+ The response status, populated even when the ResponseObject is not a Status type. +For successful responses, this will only include the Code and StatusSuccess. +For non-status type error responses, this will be auto-populated with the error Message.
requestObject
+k8s.io/apimachinery/pkg/runtime.Unknown +
+ API object from the request, in JSON format. The RequestObject is recorded as-is in the request +(possibly re-encoded as JSON), prior to version conversion, defaulting, admission or +merging. It is an external versioned object type, and may not be a valid object on its own. +Omitted for non-resource requests. Only logged at Request Level and higher.
responseObject
+k8s.io/apimachinery/pkg/runtime.Unknown +
+ API object returned in the response, in JSON. The ResponseObject is recorded after conversion +to the external type, and serialized as JSON. Omitted for non-resource requests. Only logged +at Response Level.
requestReceivedTimestamp
+meta/v1.MicroTime +
+ Time the request reached the apiserver.
stageTimestamp
+meta/v1.MicroTime +
+ Time the request reached current audit stage.
annotations
+map[string]string +
+ Annotations is an unstructured key value map stored with an audit event that may be set by +plugins invoked in the request serving chain, including authentication, authorization and +admission plugins. Note that these annotations are for the audit event, and do not correspond +to the metadata.annotations of the submitted object. Keys should uniquely identify the informing +component to avoid name collisions (e.g. podsecuritypolicy.admission.k8s.io/policy). Values +should be short. Annotations are included in the Metadata level.
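To see how these fields fit together, here is a sketch of a single audit event rendered as YAML (audit backends typically serialize events as JSON lines; every value below is hypothetical):

```yaml
apiVersion: audit.k8s.io/v1
kind: Event
level: Metadata
auditID: 7e5a3f2b-1d44-4bb8-90f2-ae8f1f1c2d9e   # hypothetical UID
stage: ResponseComplete
requestURI: /api/v1/namespaces/default/pods
verb: list
user:
  username: system:admin
  groups: ["system:masters"]
sourceIPs: ["10.0.0.15"]
responseStatus:
  code: 200
requestReceivedTimestamp: "2021-04-08T12:00:00.000000Z"
stageTimestamp: "2021-04-08T12:00:00.015000Z"
```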
+ + + +## `EventList` {#audit-k8s-io-v1-EventList} + + + + + +EventList is a list of audit Events. + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
audit.k8s.io/v1
kind
string
EventList
metadata
+meta/v1.ListMeta +
+ No description provided. +
items [Required]
+[]Event +
+ No description provided. +
+ + + +## `Policy` {#audit-k8s-io-v1-Policy} + + + + +**Appears in:** + +- [PolicyList](#audit-k8s-io-v1-PolicyList) + + +Policy defines the configuration of audit logging, and the rules for how different request +categories are logged. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
audit.k8s.io/v1
kind
string
Policy
metadata
+meta/v1.ObjectMeta +
+ ObjectMeta is included for interoperability with API infrastructure. Refer to the Kubernetes API documentation for the fields of the `metadata` field.
rules [Required]
+[]PolicyRule +
+ Rules specify the audit Level a request should be recorded at. +A request may match multiple rules, in which case the FIRST matching rule is used. +The default audit level is None, but can be overridden by a catch-all rule at the end of the list. +PolicyRules are strictly ordered.
omitStages
+[]Stage +
+ OmitStages is a list of stages for which no events are created. Note that this can also +be specified per rule in which case the union of both are omitted.
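To illustrate, a minimal audit Policy using these fields might look as follows (the rule set is a sketch, not a recommended production policy):

```yaml
apiVersion: audit.k8s.io/v1
kind: Policy
# Do not generate events for the RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Record requests for pods at the most verbose level.
  - level: RequestResponse
    resources:
      - group: ""          # the core API group
        resources: ["pods"]
  # Catch-all rule: record everything else at Metadata level.
  - level: Metadata
```

Because the first matching rule wins, the catch-all rule must come last.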
+ + + +## `PolicyList` {#audit-k8s-io-v1-PolicyList} + + + + + +PolicyList is a list of audit Policies. + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
audit.k8s.io/v1
kind
string
PolicyList
metadata
+meta/v1.ListMeta +
+ No description provided. +
items [Required]
+[]Policy +
+ No description provided. +
+ + + +## `GroupResources` {#audit-k8s-io-v1-GroupResources} + + + + +**Appears in:** + +- [PolicyRule](#audit-k8s-io-v1-PolicyRule) + + +GroupResources represents resource kinds in an API group. + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
group
+string +
+ Group is the name of the API group that contains the resources. +The empty string represents the core API group.
resources
+[]string +
+ Resources is a list of resources this rule applies to. + +For example: +'pods' matches pods. +'pods/log' matches the log subresource of pods. +'∗' matches all resources and their subresources. +'pods/∗' matches all subresources of pods. +'∗/scale' matches all scale subresources. + +If a wildcard is present, the validation rule will ensure resources do not +overlap with each other. + +An empty list implies that all resources and subresources in this API group apply.
resourceNames
+[]string +
+ ResourceNames is a list of resource instance names that the policy matches. +Using this field requires Resources to be specified. +An empty list implies that every instance of the resource is matched.
+ + + +## `Level` {#audit-k8s-io-v1-Level} + +(Alias of `string`) + + +**Appears in:** + +- [Event](#audit-k8s-io-v1-Event) + +- [PolicyRule](#audit-k8s-io-v1-PolicyRule) + + +Level defines the amount of information logged during auditing + + + + + +## `ObjectReference` {#audit-k8s-io-v1-ObjectReference} + + + + +**Appears in:** + +- [Event](#audit-k8s-io-v1-Event) + + +ObjectReference contains enough information to let you inspect or modify the referred object. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
resource
+string +
+ No description provided. +
namespace
+string +
+ No description provided. +
name
+string +
+ No description provided. +
uid
+k8s.io/apimachinery/pkg/types.UID +
+ No description provided. +
apiGroup
+string +
+ APIGroup is the name of the API group that contains the referred object. +The empty string represents the core API group.
apiVersion
+string +
+ APIVersion is the version of the API group that contains the referred object.
resourceVersion
+string +
+ No description provided. +
subresource
+string +
+ No description provided. +
+ + + +## `PolicyRule` {#audit-k8s-io-v1-PolicyRule} + + + + +**Appears in:** + +- [Policy](#audit-k8s-io-v1-Policy) + + +PolicyRule maps requests based off metadata to an audit Level. +Requests must match the rules of every field (an intersection of rules). + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
level [Required]
+Level +
+ The Level that requests matching this rule are recorded at.
users
+[]string +
+ The users (by authenticated user name) this rule applies to. +An empty list implies every user.
userGroups
+[]string +
+ The user groups this rule applies to. A user is considered matching +if it is a member of any of the UserGroups. +An empty list implies every user group.
verbs
+[]string +
+ The verbs that match this rule. +An empty list implies every verb.
resources
+[]GroupResources +
+ Resources that this rule matches. An empty list implies all kinds in all API groups.
namespaces
+[]string +
+ Namespaces that this rule matches. +The empty string "" matches non-namespaced resources. +An empty list implies every namespace.
nonResourceURLs
+[]string +
+ NonResourceURLs is a set of URL paths that should be audited. +∗s are allowed, but only as the full, final step in the path. +Examples: + "/metrics" - Log requests for apiserver metrics + "/healthz∗" - Log all health checks
omitStages
+[]Stage +
+ OmitStages is a list of stages for which no events are created. Note that this can also +be specified policy wide in which case the union of both are omitted. +An empty list means no restrictions will apply.
+ + + +## `Stage` {#audit-k8s-io-v1-Stage} + +(Alias of `string`) + + +**Appears in:** + +- [Event](#audit-k8s-io-v1-Event) + +- [Policy](#audit-k8s-io-v1-Policy) + +- [PolicyRule](#audit-k8s-io-v1-PolicyRule) + + +Stage defines the stages in request handling that audit events may be generated. + + + + diff --git a/content/en/docs/reference/config-api/apiserver-webhookadmission.v1.md b/content/en/docs/reference/config-api/apiserver-webhookadmission.v1.md new file mode 100644 index 0000000000000..fb45ca7b1adec --- /dev/null +++ b/content/en/docs/reference/config-api/apiserver-webhookadmission.v1.md @@ -0,0 +1,46 @@ +--- +title: WebhookAdmission Configuration (v1) +content_type: tool-reference +package: apiserver.config.k8s.io/v1 +auto_generated: true +--- +Package v1 is the v1 version of the API. + +## Resource Types + + +- [WebhookAdmission](#apiserver-config-k8s-io-v1-WebhookAdmission) + + + + +## `WebhookAdmission` {#apiserver-config-k8s-io-v1-WebhookAdmission} + + + + + +WebhookAdmission provides configuration for the webhook admission controller. + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
apiserver.config.k8s.io/v1
kind
string
WebhookAdmission
kubeConfigFile [Required]
+string +
+ KubeConfigFile is the path to the kubeconfig file.
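A complete configuration file for this type is short; a sketch (with a hypothetical path) might look like the following, and is typically referenced from the admission configuration passed to kube-apiserver:

```yaml
apiVersion: apiserver.config.k8s.io/v1
kind: WebhookAdmission
# Hypothetical location of the kubeconfig the webhook admission
# controllers should use when calling out to webhooks.
kubeConfigFile: /etc/kubernetes/webhook-admission.kubeconfig
```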
+ + diff --git a/content/en/docs/reference/config-api/client-authentication.v1beta1.md b/content/en/docs/reference/config-api/client-authentication.v1beta1.md new file mode 100644 index 0000000000000..e78edd23f65cc --- /dev/null +++ b/content/en/docs/reference/config-api/client-authentication.v1beta1.md @@ -0,0 +1,252 @@ +--- +title: Client Authentication (v1beta1) +content_type: tool-reference +package: client.authentication.k8s.io/v1beta1 +auto_generated: true +--- + + +## Resource Types + + +- [ExecCredential](#client-authentication-k8s-io-v1beta1-ExecCredential) + + + + +## `ExecCredential` {#client-authentication-k8s-io-v1beta1-ExecCredential} + + + + + +ExecCredential is used by exec-based plugins to communicate credentials to +HTTP transports. + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
client.authentication.k8s.io/v1beta1
kind
string
ExecCredential
spec [Required]
+ExecCredentialSpec +
+ Spec holds information passed to the plugin by the transport.
status
+ExecCredentialStatus +
+ Status is filled in by the plugin and holds the credentials that the transport +should use to contact the API.
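As a sketch, the object a credential plugin writes to stdout (usually serialized as JSON; shown here as YAML, with illustrative values) might look like:

```yaml
apiVersion: client.authentication.k8s.io/v1beta1
kind: ExecCredential
status:
  # Bearer token the transport should present to the API server (illustrative).
  token: "example-bearer-token"
  # After this time the client should re-invoke the plugin.
  expirationTimestamp: "2021-05-01T00:00:00Z"
```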
+ + + +## `Cluster` {#client-authentication-k8s-io-v1beta1-Cluster} + + + + +**Appears in:** + +- [ExecCredentialSpec](#client-authentication-k8s-io-v1beta1-ExecCredentialSpec) + + +Cluster contains information to allow an exec plugin to communicate +with the kubernetes cluster being authenticated to. + +To ensure that this struct contains everything someone would need to communicate +with a kubernetes cluster (just like they would via a kubeconfig), the fields +should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception +of CertificateAuthority, since CA data will always be passed to the plugin as bytes. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
server [Required]
+string +
+ Server is the address of the kubernetes cluster (https://hostname:port).
tls-server-name
+string +
+ TLSServerName is passed to the server for SNI and is used in the client to +check server certificates against. If ServerName is empty, the hostname +used to contact the server is used.
insecure-skip-tls-verify
+bool +
+ InsecureSkipTLSVerify skips the validity check for the server's certificate. +This will make your HTTPS connections insecure.
certificate-authority-data
+[]byte +
+ CAData contains PEM-encoded certificate authority certificates. +If empty, system roots should be used.
proxy-url
+string +
+ ProxyURL is the URL to the proxy to be used for all requests to this +cluster.
config
+k8s.io/apimachinery/pkg/runtime.RawExtension +
+ Config holds additional config data that is specific to the exec +plugin with regards to the cluster being authenticated to. + +This data is sourced from the clientcmd Cluster object's +extensions[client.authentication.k8s.io/exec] field: + +clusters: +- name: my-cluster + cluster: + ... + extensions: + - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config + extension: + audience: 06e3fbd18de8 # arbitrary config + +In some environments, the user config may be exactly the same across many clusters +(i.e. call this exec plugin) minus some details that are specific to each cluster +such as the audience. This field allows the per cluster config to be directly +specified with the cluster info. Using this field to store secret data is not +recommended as one of the prime benefits of exec plugins is that no secrets need +to be stored directly in the kubeconfig.
+ + + +## `ExecCredentialSpec` {#client-authentication-k8s-io-v1beta1-ExecCredentialSpec} + + + + +**Appears in:** + +- [ExecCredential](#client-authentication-k8s-io-v1beta1-ExecCredential) + + +ExecCredentialSpec holds request and runtime specific information provided by +the transport. + + + + + + + + + + + + + +
Field | Description
cluster
+Cluster +
+ Cluster contains information to allow an exec plugin to communicate with the +kubernetes cluster being authenticated to. Note that Cluster is non-nil only +when provideClusterInfo is set to true in the exec provider config (i.e., +ExecConfig.ProvideClusterInfo).
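For example, a kubeconfig user entry that opts in to receiving this cluster information might be sketched as follows (the plugin command name is hypothetical):

```yaml
users:
  - name: my-user
    user:
      exec:
        apiVersion: client.authentication.k8s.io/v1beta1
        # Hypothetical credential plugin binary on the client's PATH.
        command: example-credential-plugin
        # Causes spec.cluster to be populated in the plugin's input.
        provideClusterInfo: true
```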
+ + + +## `ExecCredentialStatus` {#client-authentication-k8s-io-v1beta1-ExecCredentialStatus} + + + + +**Appears in:** + +- [ExecCredential](#client-authentication-k8s-io-v1beta1-ExecCredential) + + +ExecCredentialStatus holds credentials for the transport to use. + +Token and ClientKeyData are sensitive fields. This data should only be +transmitted in-memory between client and exec plugin process. Exec plugin +itself should at least be protected via file permissions. + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
expirationTimestamp
+meta/v1.Time +
+ ExpirationTimestamp indicates a time when the provided credentials expire.
token [Required]
+string +
+ Token is a bearer token used by the client for request authentication.
clientCertificateData [Required]
+string +
+ PEM-encoded client TLS certificates (including intermediates, if any).
clientKeyData [Required]
+string +
+ PEM-encoded private key for the above certificate.
+ + diff --git a/content/en/docs/reference/config-api/kube-proxy-config.v1alpha1.md b/content/en/docs/reference/config-api/kube-proxy-config.v1alpha1.md new file mode 100644 index 0000000000000..86315856b2f87 --- /dev/null +++ b/content/en/docs/reference/config-api/kube-proxy-config.v1alpha1.md @@ -0,0 +1,601 @@ +--- +title: kube-proxy Configuration (v1alpha1) +content_type: tool-reference +package: kubeproxy.config.k8s.io/v1alpha1 +auto_generated: true +--- + + +## Resource Types + + +- [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration) + + + + +## `KubeProxyConfiguration` {#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration} + + + + + +KubeProxyConfiguration contains everything necessary to configure the +Kubernetes proxy server. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
kubeproxy.config.k8s.io/v1alpha1
kind
string
KubeProxyConfiguration
featureGates [Required]
+map[string]bool +
+ featureGates is a map of feature names to bools that enable or disable alpha/experimental features.
bindAddress [Required]
+string +
+ bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 +for all interfaces)
healthzBindAddress [Required]
+string +
+ healthzBindAddress is the IP address and port for the health check server to serve on, +defaulting to 0.0.0.0:10256
metricsBindAddress [Required]
+string +
+ metricsBindAddress is the IP address and port for the metrics server to serve on, +defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces)
bindAddressHardFail [Required]
+bool +
+ bindAddressHardFail, if true, kube-proxy will treat failure to bind to a port as fatal and exit
enableProfiling [Required]
+bool +
+ enableProfiling enables profiling via web interface on /debug/pprof handler. +Profiling handlers will be handled by metrics server.
clusterCIDR [Required]
+string +
+ clusterCIDR is the CIDR range of the pods in the cluster. It is used to +bridge traffic coming from outside of the cluster. If not provided, +no off-cluster bridging will be performed.
hostnameOverride [Required]
+string +
+ hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
clientConnection [Required]
+ClientConnectionConfiguration +
+ clientConnection specifies the kubeconfig file and client connection settings for the proxy +server to use when communicating with the apiserver.
iptables [Required]
+KubeProxyIPTablesConfiguration +
+ iptables contains iptables-related configuration options.
ipvs [Required]
+KubeProxyIPVSConfiguration +
+ ipvs contains ipvs-related configuration options.
oomScoreAdj [Required]
+int32 +
+ oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within +the range [-1000, 1000]
mode [Required]
+ProxyMode +
+ mode specifies which proxy mode to use.
portRange [Required]
+string +
+ portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed +in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
udpIdleTimeout [Required]
+meta/v1.Duration +
+ udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +Must be greater than 0. Only applicable for proxyMode=userspace.
conntrack [Required]
+KubeProxyConntrackConfiguration +
+ conntrack contains conntrack-related configuration options.
configSyncPeriod [Required]
+meta/v1.Duration +
+ configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater +than 0.
nodePortAddresses [Required]
+[]string +
+ nodePortAddresses is the --nodeport-addresses value for the kube-proxy process. Values must be valid +IP blocks. These values are used as a parameter to select the interfaces where NodePort traffic is served. +For example, to expose a service on localhost for local access as well as on some other interfaces for a +particular purpose, supply a list of IP blocks. +If set to "127.0.0.0/8", kube-proxy will select only the loopback interface for NodePort. +If set to a non-zero IP block, kube-proxy will filter that down to just the IPs that apply to the node. +An empty string slice selects all network interfaces.
winkernel [Required]
+KubeProxyWinkernelConfiguration +
+ winkernel contains winkernel-related configuration options.
showHiddenMetricsForVersion [Required]
+string +
+ ShowHiddenMetricsForVersion is the version for which you want to show hidden metrics.
detectLocalMode [Required]
+LocalMode +
+ DetectLocalMode determines mode to use for detecting local traffic, defaults to LocalModeClusterCIDR
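Putting several of these fields together, a minimal kube-proxy configuration file might look like the following sketch (the CIDR and timing values are illustrative):

```yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
bindAddress: 0.0.0.0
clusterCIDR: 10.244.0.0/16      # illustrative pod CIDR
mode: "iptables"
iptables:
  masqueradeAll: false
  syncPeriod: 30s
conntrack:
  maxPerCore: 32768
```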
+ + + +## `KubeProxyConntrackConfiguration` {#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConntrackConfiguration} + + + + +**Appears in:** + +- [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration) + + +KubeProxyConntrackConfiguration contains conntrack settings for +the Kubernetes proxy server. + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
maxPerCore [Required]
+int32 +
+ maxPerCore is the maximum number of NAT connections to track +per CPU core (0 to leave the limit as-is and ignore min).
min [Required]
+int32 +
+ min is the minimum number of connection-tracking records to allocate, +regardless of conntrackMaxPerCore (set maxPerCore=0 to leave the limit as-is).
tcpEstablishedTimeout [Required]
+meta/v1.Duration +
+ tcpEstablishedTimeout is how long an idle TCP connection will be kept open +(e.g. '2s'). Must be greater than 0 to set.
tcpCloseWaitTimeout [Required]
+meta/v1.Duration +
+ tcpCloseWaitTimeout is how long an idle conntrack entry +in CLOSE_WAIT state will remain in the conntrack +table. (e.g. '60s'). Must be greater than 0 to set.
+ + + +## `KubeProxyIPTablesConfiguration` {#kubeproxy-config-k8s-io-v1alpha1-KubeProxyIPTablesConfiguration} + + + + +**Appears in:** + +- [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration) + + +KubeProxyIPTablesConfiguration contains iptables-related configuration +details for the Kubernetes proxy server. + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
masqueradeBit [Required]
+int32 +
+ masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using +the pure iptables proxy mode. Values must be within the range [0, 31].
masqueradeAll [Required]
+bool +
+ masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
syncPeriod [Required]
+meta/v1.Duration +
+ syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', +'2h22m'). Must be greater than 0.
minSyncPeriod [Required]
+meta/v1.Duration +
+ minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m', +'2h22m').
+ + + +## `KubeProxyIPVSConfiguration` {#kubeproxy-config-k8s-io-v1alpha1-KubeProxyIPVSConfiguration} + + + + +**Appears in:** + +- [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration) + + +KubeProxyIPVSConfiguration contains ipvs-related configuration +details for the Kubernetes proxy server. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
syncPeriod [Required]
+meta/v1.Duration +
+ syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m', +'2h22m'). Must be greater than 0.
minSyncPeriod [Required]
+meta/v1.Duration +
+ minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m', +'2h22m').
scheduler [Required]
+string +
+ ipvs scheduler
excludeCIDRs [Required]
+[]string +
+ excludeCIDRs is a list of CIDRs which the ipvs proxier should not touch +when cleaning up ipvs services.
strictARP [Required]
+bool +
+ strictARP configures arp_ignore and arp_announce to avoid answering ARP queries +from the kube-ipvs0 interface
tcpTimeout [Required]
+meta/v1.Duration +
+ tcpTimeout is the timeout value used for idle IPVS TCP sessions. +The default value is 0, which preserves the current timeout value on the system.
tcpFinTimeout [Required]
+meta/v1.Duration +
+ tcpFinTimeout is the timeout value used for IPVS TCP sessions after receiving a FIN. +The default value is 0, which preserves the current timeout value on the system.
udpTimeout [Required]
+meta/v1.Duration +
+ udpTimeout is the timeout value used for IPVS UDP packets. +The default value is 0, which preserves the current timeout value on the system.
+ + + +## `KubeProxyWinkernelConfiguration` {#kubeproxy-config-k8s-io-v1alpha1-KubeProxyWinkernelConfiguration} + + + + +**Appears in:** + +- [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration) + + +KubeProxyWinkernelConfiguration contains Windows/HNS settings for +the Kubernetes proxy server. + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
networkName [Required]
+string +
+ networkName is the name of the network kube-proxy will use +to create endpoints and policies
sourceVip [Required]
+string +
+ sourceVip is the IP address of the source VIP endpoint used for +NAT when load balancing
enableDSR [Required]
+bool +
+ enableDSR tells kube-proxy whether HNS policies should be created +with DSR
+ + + +## `LocalMode` {#kubeproxy-config-k8s-io-v1alpha1-LocalMode} + +(Alias of `string`) + + +**Appears in:** + +- [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration) + + +LocalMode represents modes to detect local traffic from the node + + + + + +## `ProxyMode` {#kubeproxy-config-k8s-io-v1alpha1-ProxyMode} + +(Alias of `string`) + + +**Appears in:** + +- [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration) + + +ProxyMode represents modes used by the Kubernetes proxy server. + +Currently, three modes of proxy are available in Linux platform: 'userspace' (older, going to be EOL), 'iptables' +(newer, faster), 'ipvs'(newest, better in performance and scalability). + +Two modes of proxy are available in Windows platform: 'userspace'(older, stable) and 'kernelspace' (newer, faster). + +In Linux platform, if proxy mode is blank, use the best-available proxy (currently iptables, but may change in the +future). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are +insufficient, this always falls back to the userspace proxy. IPVS mode will be enabled when proxy mode is set to 'ipvs', +and the fall back path is firstly iptables and then userspace. + +In Windows platform, if proxy mode is blank, use the best-available proxy (currently userspace, but may change in the +future). If winkernel proxy is selected, regardless of how, but the Windows kernel can't support this mode of proxy, +this always falls back to the userspace proxy. + + + + + + + +## `ClientConnectionConfiguration` {#ClientConnectionConfiguration} + + + + +**Appears in:** + +- [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration) + + +ClientConnectionConfiguration contains details for constructing a client. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
kubeconfig [Required]
+string +
+ kubeconfig is the path to a KubeConfig file.
acceptContentTypes [Required]
+string +
+ acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the +default value of 'application/json'. This field will control all connections to the server used by a particular +client.
contentType [Required]
+string +
+ contentType is the content type used when sending data to the server from this client.
qps [Required]
+float32 +
+ qps controls the number of queries per second allowed for this connection.
burst [Required]
+int32 +
+ burst allows extra queries to accumulate when a client is exceeding its rate.
diff --git a/content/en/docs/reference/config-api/kube-scheduler-config.v1beta1.md b/content/en/docs/reference/config-api/kube-scheduler-config.v1beta1.md new file mode 100644 index 0000000000000..ac32e65674cb5 --- /dev/null +++ b/content/en/docs/reference/config-api/kube-scheduler-config.v1beta1.md @@ -0,0 +1,2156 @@ +--- +title: kube-scheduler Configuration (v1beta1) +content_type: tool-reference +package: kubescheduler.config.k8s.io/v1 +auto_generated: true +--- + + +## Resource Types + + +- [Policy](#kubescheduler-config-k8s-io-v1-Policy) +- [DefaultPreemptionArgs](#kubescheduler-config-k8s-io-v1beta1-DefaultPreemptionArgs) +- [InterPodAffinityArgs](#kubescheduler-config-k8s-io-v1beta1-InterPodAffinityArgs) +- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta1-KubeSchedulerConfiguration) +- [NodeAffinityArgs](#kubescheduler-config-k8s-io-v1beta1-NodeAffinityArgs) +- [NodeLabelArgs](#kubescheduler-config-k8s-io-v1beta1-NodeLabelArgs) +- [NodeResourcesFitArgs](#kubescheduler-config-k8s-io-v1beta1-NodeResourcesFitArgs) +- [NodeResourcesLeastAllocatedArgs](#kubescheduler-config-k8s-io-v1beta1-NodeResourcesLeastAllocatedArgs) +- [NodeResourcesMostAllocatedArgs](#kubescheduler-config-k8s-io-v1beta1-NodeResourcesMostAllocatedArgs) +- [PodTopologySpreadArgs](#kubescheduler-config-k8s-io-v1beta1-PodTopologySpreadArgs) +- [RequestedToCapacityRatioArgs](#kubescheduler-config-k8s-io-v1beta1-RequestedToCapacityRatioArgs) +- [ServiceAffinityArgs](#kubescheduler-config-k8s-io-v1beta1-ServiceAffinityArgs) +- [VolumeBindingArgs](#kubescheduler-config-k8s-io-v1beta1-VolumeBindingArgs) + + + + +## `Policy` {#kubescheduler-config-k8s-io-v1-Policy} + + + + + +Policy describes a struct for a policy resource used in api. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
kubescheduler.config.k8s.io/v1
kind
string
Policy
predicates [Required]
+[]PredicatePolicy +
+ Holds the information to configure the fit predicate functions
priorities [Required]
+[]PriorityPolicy +
+ Holds the information to configure the priority functions
extenders [Required]
+[]LegacyExtender +
+ Holds the information to communicate with the extender(s)
hardPodAffinitySymmetricWeight [Required]
+int32 +
+ RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule +corresponding to every RequiredDuringScheduling affinity rule. +HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100.
alwaysCheckAllPredicates [Required]
+bool +
+ When AlwaysCheckAllPredicates is set to true, scheduler checks all +the configured predicates even after one or more of them fails. +When the flag is set to false, scheduler skips checking the rest +of the predicates after it finds one predicate that failed.
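A small policy of this shape, using one pre-defined predicate and one pre-defined priority function as an illustrative choice, might look like:

```yaml
apiVersion: kubescheduler.config.k8s.io/v1
kind: Policy
predicates:
  - name: PodFitsResources        # pre-defined predicate (illustrative)
priorities:
  - name: LeastRequestedPriority  # pre-defined priority function (illustrative)
    weight: 1
```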
+ + + +## `ExtenderManagedResource` {#kubescheduler-config-k8s-io-v1-ExtenderManagedResource} + + + + +**Appears in:** + +- [Extender](#kubescheduler-config-k8s-io-v1beta1-Extender) + +- [LegacyExtender](#kubescheduler-config-k8s-io-v1-LegacyExtender) + + +ExtenderManagedResource describes the arguments of extended resources +managed by an extender. + + + + + + + + + + + + + + + + + + +
Field | Description
name [Required]
+string +
+ Name is the extended resource name.
ignoredByScheduler [Required]
+bool +
+ IgnoredByScheduler indicates whether kube-scheduler should ignore this +resource when applying predicates.
+ + + +## `ExtenderTLSConfig` {#kubescheduler-config-k8s-io-v1-ExtenderTLSConfig} + + + + +**Appears in:** + +- [Extender](#kubescheduler-config-k8s-io-v1beta1-Extender) + +- [LegacyExtender](#kubescheduler-config-k8s-io-v1-LegacyExtender) + + +ExtenderTLSConfig contains settings to enable TLS with extender + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
insecure [Required]
+bool +
+ Server should be accessed without verifying the TLS certificate. For testing only.
serverName [Required]
+string +
+ ServerName is passed to the server for SNI and is used in the client to check server +certificates against. If ServerName is empty, the hostname used to contact the +server is used.
certFile [Required]
+string +
+ Server requires TLS client certificate authentication
keyFile [Required]
+string +
+ Server requires TLS client certificate authentication
caFile [Required]
+string +
+ Trusted root certificates for server
certData [Required]
+[]byte +
+ CertData holds PEM-encoded bytes (typically read from a client certificate file). +CertData takes precedence over CertFile
keyData [Required]
+[]byte +
+ KeyData holds PEM-encoded bytes (typically read from a client certificate key file). +KeyData takes precedence over KeyFile
caData [Required]
+[]byte +
+ CAData holds PEM-encoded bytes (typically read from a root certificates bundle). +CAData takes precedence over CAFile
+ + + +## `LabelPreference` {#kubescheduler-config-k8s-io-v1-LabelPreference} + + + + +**Appears in:** + +- [PriorityArgument](#kubescheduler-config-k8s-io-v1-PriorityArgument) + + +LabelPreference holds the parameters that are used to configure the corresponding priority function + + + + + + + + + + + + + + + + + + +
Field | Description
label [Required]
+string +
+ Used to identify node "groups"
presence [Required]
+bool +
+ This is a boolean flag. +If true, higher priority is given to nodes that have the label. +If false, higher priority is given to nodes that do not have the label.
+ + + +## `LabelsPresence` {#kubescheduler-config-k8s-io-v1-LabelsPresence} + + + + +**Appears in:** + +- [PredicateArgument](#kubescheduler-config-k8s-io-v1-PredicateArgument) + + +LabelsPresence holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration. + + + + + + + + + + + + + + + + + + +
Field | Description
labels [Required]
+[]string +
+ The list of labels that identify node "groups". +All of the labels should be either present (or absent) for the node to be considered a fit for hosting the pod.
presence [Required]
+bool +
+ The boolean flag that indicates whether the labels should be present or absent from the node
+ + + +## `LegacyExtender` {#kubescheduler-config-k8s-io-v1-LegacyExtender} + + + + +**Appears in:** + +- [Policy](#kubescheduler-config-k8s-io-v1-Policy) + + +LegacyExtender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, +it is assumed that the extender chose not to provide that extension. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
urlPrefix [Required]
+string +
+ URLPrefix at which the extender is available
filterVerb [Required]
+string +
+ Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender.
preemptVerb [Required]
+string +
+ Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender.
prioritizeVerb [Required]
+string +
+ Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender.
weight [Required]
+int64 +
+ The numeric multiplier for the node scores that the prioritize call generates. +The weight should be a positive integer
bindVerb [Required]
+string +
+ Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. +If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender +can implement this function.
enableHttps [Required]
+bool +
+ EnableHTTPS specifies whether https should be used to communicate with the extender
tlsConfig [Required]
+ExtenderTLSConfig +
+ TLSConfig specifies the transport layer security config
httpTimeout [Required]
+time.Duration +
+ HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize +timeout is ignored, k8s/other extenders priorities are used to select the node.
nodeCacheCapable [Required]
+bool +
+ NodeCacheCapable specifies that the extender is capable of caching node information, +so the scheduler should only send minimal information about the eligible nodes +assuming that the extender already cached full details of all nodes in the cluster
managedResources
+[]ExtenderManagedResource +
+ ManagedResources is a list of extended resources that are managed by +this extender. +- A pod will be sent to the extender on the Filter, Prioritize and Bind + (if the extender is the binder) phases iff the pod requests at least + one of the extended resources in this list. If empty or unspecified, + all pods will be sent to this extender. +- If IgnoredByScheduler is set to true for a resource, kube-scheduler + will skip checking the resource in predicates.
ignorable [Required]
+bool +
+ Ignorable specifies if the extender is ignorable, i.e. scheduling should not +fail when the extender returns an error or is not reachable.
+ + + +## `PredicateArgument` {#kubescheduler-config-k8s-io-v1-PredicateArgument} + + + + +**Appears in:** + +- [PredicatePolicy](#kubescheduler-config-k8s-io-v1-PredicatePolicy) + + +PredicateArgument represents the arguments to configure predicate functions in scheduler policy configuration. +Only one of its members may be specified + + + + + + + + + + + + + + + + + + +
Field | Description
serviceAffinity [Required]
+ServiceAffinity +
+ The predicate that provides affinity for pods belonging to a service. +It uses a label to identify nodes that belong to the same "group".
labelsPresence [Required]
+LabelsPresence +
+ The predicate that checks whether a particular node has a certain label +defined or not, regardless of value
+ + + +## `PredicatePolicy` {#kubescheduler-config-k8s-io-v1-PredicatePolicy} + + + + +**Appears in:** + +- [Policy](#kubescheduler-config-k8s-io-v1-Policy) + + +PredicatePolicy describes a struct of a predicate policy. + + + + + + + + + + + + + + + + + + +
Field | Description
name [Required]
+string +
+ Identifier of the predicate policy. +For a custom predicate, the name can be user-defined. +For the Kubernetes-provided predicates, the name is the identifier of the pre-defined predicate.
argument [Required]
+PredicateArgument +
+ Holds the parameters to configure the given predicate
+ + + +## `PriorityArgument` {#kubescheduler-config-k8s-io-v1-PriorityArgument} + + + + +**Appears in:** + +- [PriorityPolicy](#kubescheduler-config-k8s-io-v1-PriorityPolicy) + + +PriorityArgument represents the arguments to configure priority functions in scheduler policy configuration. +Only one of its members may be specified + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
serviceAntiAffinity [Required]
+ServiceAntiAffinity +
+ The priority function that ensures a good spread (anti-affinity) for pods belonging to a service. +It uses a label to identify nodes that belong to the same "group".
labelPreference [Required]
+LabelPreference +
+ The priority function that checks whether a particular node has a certain label +defined or not, regardless of value
requestedToCapacityRatioArguments [Required]
+RequestedToCapacityRatioArguments +
+ The RequestedToCapacityRatio priority function is parametrized with function shape.
+ + + +## `PriorityPolicy` {#kubescheduler-config-k8s-io-v1-PriorityPolicy} + + + + +**Appears in:** + +- [Policy](#kubescheduler-config-k8s-io-v1-Policy) + + +PriorityPolicy describes a struct of a priority policy. + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
name [Required]
+string +
+ Identifier of the priority policy. +For a custom priority, the name can be user-defined. +For the Kubernetes-provided priority functions, the name is the identifier of the pre-defined priority function.
weight [Required]
+int64 +
+ The numeric multiplier for the node scores that the priority function generates. +The weight should be non-zero and can be a positive or a negative integer.
argument [Required]
+PriorityArgument +
+ Holds the parameters to configure the given priority function
+ + + +## `RequestedToCapacityRatioArguments` {#kubescheduler-config-k8s-io-v1-RequestedToCapacityRatioArguments} + + + + +**Appears in:** + +- [PriorityArgument](#kubescheduler-config-k8s-io-v1-PriorityArgument) + + +RequestedToCapacityRatioArguments holds arguments specific to RequestedToCapacityRatio priority function. + + + + + + + + + + + + + + + + + + +
Field | Description
shape [Required]
+[]UtilizationShapePoint +
+ Array of points defining the priority function shape.
resources [Required]
+[]ResourceSpec +
+ No description provided. +
+ + + +## `ResourceSpec` {#kubescheduler-config-k8s-io-v1-ResourceSpec} + + + + +**Appears in:** + +- [RequestedToCapacityRatioArguments](#kubescheduler-config-k8s-io-v1-RequestedToCapacityRatioArguments) + + +ResourceSpec represents single resource and weight for bin packing of priority RequestedToCapacityRatioArguments. + + + + + + + + + + + + + + + + + + +
Field | Description
name [Required]
+string +
+ Name of the resource to be managed by RequestedToCapacityRatio function.
weight [Required]
+int64 +
+ Weight of the resource.
+ + + +## `ServiceAffinity` {#kubescheduler-config-k8s-io-v1-ServiceAffinity} + + + + +**Appears in:** + +- [PredicateArgument](#kubescheduler-config-k8s-io-v1-PredicateArgument) + + +ServiceAffinity holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration. + + + + + + + + + + + + + +
Field | Description
labels [Required]
+[]string +
+ The list of labels that identify node "groups". +All of the labels should match for the node to be considered a fit for hosting the pod.
+ + + +## `ServiceAntiAffinity` {#kubescheduler-config-k8s-io-v1-ServiceAntiAffinity} + + + + +**Appears in:** + +- [PriorityArgument](#kubescheduler-config-k8s-io-v1-PriorityArgument) + + +ServiceAntiAffinity holds the parameters that are used to configure the corresponding priority function + + + + + + + + + + + + + +
Field | Description
label [Required]
+string +
+ Used to identify node "groups"
+ + + +## `UtilizationShapePoint` {#kubescheduler-config-k8s-io-v1-UtilizationShapePoint} + + + + +**Appears in:** + +- [RequestedToCapacityRatioArguments](#kubescheduler-config-k8s-io-v1-RequestedToCapacityRatioArguments) + + +UtilizationShapePoint represents single point of priority function shape. + + + + + + + + + + + + + + + + + + +
Field | Description
utilization [Required]
+int32 +
+ Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100.
score [Required]
+int32 +
+ Score assigned to given utilization (y axis). Valid values are 0 to 10.
+ + + + + +## `ClientConnectionConfiguration` {#ClientConnectionConfiguration} + + + + +**Appears in:** + +- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta1-KubeSchedulerConfiguration) + + +ClientConnectionConfiguration contains details for constructing a client. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
kubeconfig [Required]
+string +
+ kubeconfig is the path to a KubeConfig file.
acceptContentTypes [Required]
+string +
+ acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the +default value of 'application/json'. This field will control all connections to the server used by a particular +client.
contentType [Required]
+string +
+ contentType is the content type used when sending data to the server from this client.
qps [Required]
+float32 +
+ qps controls the number of queries per second allowed for this connection.
burst [Required]
+int32 +
+ burst allows extra queries to accumulate when a client is exceeding its rate.
+ +## `DebuggingConfiguration` {#DebuggingConfiguration} + + + + +**Appears in:** + +- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta1-KubeSchedulerConfiguration) + + +DebuggingConfiguration holds configuration for Debugging related features. + + + + + + + + + + + + + + + + + + +
Field | Description
enableProfiling [Required]
+bool +
+ enableProfiling enables profiling via web interface host:port/debug/pprof/
enableContentionProfiling [Required]
+bool +
+ enableContentionProfiling enables lock contention profiling, if +enableProfiling is true.
+ +## `LeaderElectionConfiguration` {#LeaderElectionConfiguration} + + + + +**Appears in:** + +- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta1-KubeSchedulerConfiguration) + + +LeaderElectionConfiguration defines the configuration of leader election +clients for components that can run with leader election enabled. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
leaderElect [Required]
+bool +
+ leaderElect enables a leader election client to gain leadership +before executing the main loop. Enable this when running replicated +components for high availability.
leaseDuration [Required]
+meta/v1.Duration +
+ leaseDuration is the duration that non-leader candidates will wait +after observing a leadership renewal until attempting to acquire +leadership of a led but unrenewed leader slot. This is effectively the +maximum duration that a leader can be stopped before it is replaced +by another candidate. This is only applicable if leader election is +enabled.
renewDeadline [Required]
+meta/v1.Duration +
+ renewDeadline is the interval between attempts by the acting master to +renew a leadership slot before it stops leading. This must be less +than or equal to the lease duration. This is only applicable if leader +election is enabled.
retryPeriod [Required]
+meta/v1.Duration +
+ retryPeriod is the duration the clients should wait between attempting +acquisition and renewal of a leadership. This is only applicable if +leader election is enabled.
resourceLock [Required]
+string +
+ resourceLock indicates the resource object type that will be used to lock +during leader election cycles.
resourceName [Required]
+string +
+ resourceName indicates the name of resource object that will be used to lock +during leader election cycles.
resourceNamespace [Required]
+string +
+ resourceNamespace indicates the namespace of the resource object that will be used to lock +during leader election cycles.
+ +## `LoggingConfiguration` {#LoggingConfiguration} + + + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + + +LoggingConfiguration contains logging options +Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information. + + + + + + + + + + + + + + + + + + +
Field | Description
format [Required]
+string +
+ Format flag specifies the structure of log messages. +The default value of format is `text`.
sanitization [Required]
+bool +
+ [Experimental] When enabled, prevents logging of fields tagged as sensitive (passwords, keys, tokens). +Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.
+ + + + +## `DefaultPreemptionArgs` {#kubescheduler-config-k8s-io-v1beta1-DefaultPreemptionArgs} + + + + + +DefaultPreemptionArgs holds arguments used to configure the +DefaultPreemption plugin. + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
kubescheduler.config.k8s.io/v1beta1
kind
string
DefaultPreemptionArgs
minCandidateNodesPercentage [Required]
+int32 +
+ MinCandidateNodesPercentage is the minimum number of candidates to +shortlist when dry running preemption as a percentage of number of nodes. +Must be in the range [0, 100]. Defaults to 10% of the cluster size if +unspecified.
minCandidateNodesAbsolute [Required]
+int32 +
+ MinCandidateNodesAbsolute is the absolute minimum number of candidates to +shortlist. The likely number of candidates enumerated for dry running +preemption is given by the formula: +numCandidates = max(numNodes ∗ minCandidateNodesPercentage, minCandidateNodesAbsolute) +We say "likely" because there are other factors such as PDB violations +that play a role in the number of candidates shortlisted. Must be at least +0 nodes. Defaults to 100 nodes if unspecified.
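As a sketch, these arguments are normally embedded under a scheduler profile's pluginConfig entry for the DefaultPreemption plugin; expressed directly, they could look like:

```yaml
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: DefaultPreemptionArgs
# Shortlist at least 10% of nodes as preemption candidates...
minCandidateNodesPercentage: 10
# ...but never fewer than 100 nodes (both values are the documented defaults).
minCandidateNodesAbsolute: 100
```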
+ + + +## `InterPodAffinityArgs` {#kubescheduler-config-k8s-io-v1beta1-InterPodAffinityArgs} + + + + + +InterPodAffinityArgs holds arguments used to configure the InterPodAffinity plugin. + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
kubescheduler.config.k8s.io/v1beta1
kind
string
InterPodAffinityArgs
hardPodAffinityWeight [Required]
+int32 +
+ HardPodAffinityWeight is the scoring weight for existing pods with a +matching hard affinity to the incoming pod.
+ + + +## `KubeSchedulerConfiguration` {#kubescheduler-config-k8s-io-v1beta1-KubeSchedulerConfiguration} + + + + + +KubeSchedulerConfiguration configures a scheduler + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
kubescheduler.config.k8s.io/v1beta1
kind
string
KubeSchedulerConfiguration
parallelism [Required]
+int32 +
+ Parallelism defines the amount of parallelism in algorithms for scheduling Pods. Must be greater than 0. Defaults to 16.
leaderElection [Required]
+LeaderElectionConfiguration +
+ LeaderElection defines the configuration of leader election client.
clientConnection [Required]
+ClientConnectionConfiguration +
+ ClientConnection specifies the kubeconfig file and client connection +settings for the proxy server to use when communicating with the apiserver.
healthzBindAddress [Required]
+string +
+ HealthzBindAddress is the IP address and port for the health check server to serve on, +defaulting to 0.0.0.0:10251
metricsBindAddress [Required]
+string +
+ MetricsBindAddress is the IP address and port for the metrics server to +serve on, defaulting to 0.0.0.0:10251.
DebuggingConfiguration [Required]
+DebuggingConfiguration +
(Members of DebuggingConfiguration are embedded into this type.) + DebuggingConfiguration holds configuration for Debugging related features.
percentageOfNodesToScore [Required]
+int32 +
+ PercentageOfNodesToScore is the percentage of all nodes in the cluster that, once found feasible +for running a pod, causes the scheduler to stop searching for more feasible nodes. This helps improve the scheduler's performance. The scheduler always tries to find +at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is. +Example: if the cluster size is 500 nodes and the value of this flag is 30, +the scheduler stops looking for further feasible nodes once it finds 150 feasible ones. +When the value is 0, a default percentage (5%--50%, based on the size of the cluster) of the +nodes will be scored.
podInitialBackoffSeconds [Required]
+int64 +
+ PodInitialBackoffSeconds is the initial backoff for unschedulable pods. +If specified, it must be greater than 0. If this value is null, the default value (1s) +will be used.
podMaxBackoffSeconds [Required]
+int64 +
+ PodMaxBackoffSeconds is the max backoff for unschedulable pods. +If specified, it must be greater than podInitialBackoffSeconds. If this value is null, +the default value (10s) will be used.
profiles [Required]
+[]KubeSchedulerProfile +
+ Profiles are scheduling profiles that kube-scheduler supports. Pods can +choose to be scheduled under a particular profile by setting its associated +scheduler name. Pods that don't specify any scheduler name are scheduled +with the "default-scheduler" profile, if present here.
extenders [Required]
+[]Extender +
+ Extenders are the list of scheduler extenders, each holding the values of how to communicate +with the extender. These extenders are shared by all scheduler profiles.
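Tying these fields together, a minimal scheduler configuration might look like the following sketch (the kubeconfig path is a placeholder):

```yaml
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: /etc/kubernetes/scheduler.conf   # hypothetical path
leaderElection:
  leaderElect: true
profiles:
  - schedulerName: default-scheduler
```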
+ + + +## `NodeAffinityArgs` {#kubescheduler-config-k8s-io-v1beta1-NodeAffinityArgs} + + + + + +NodeAffinityArgs holds arguments to configure the NodeAffinity plugin. + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
kubescheduler.config.k8s.io/v1beta1
kind
string
NodeAffinityArgs
addedAffinity
+core/v1.NodeAffinity +
+ AddedAffinity is applied to all Pods additionally to the NodeAffinity +specified in the PodSpec. That is, Nodes need to satisfy AddedAffinity +AND .spec.NodeAffinity. AddedAffinity is empty by default (all Nodes +match). +When AddedAffinity is used, some Pods with affinity requirements that match +a specific Node (such as Daemonset Pods) might remain unschedulable.
+ + + +## `NodeLabelArgs` {#kubescheduler-config-k8s-io-v1beta1-NodeLabelArgs} + + + + + +NodeLabelArgs holds arguments used to configure the NodeLabel plugin. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
kubescheduler.config.k8s.io/v1beta1
kind
string
NodeLabelArgs
presentLabels [Required]
+[]string +
+ PresentLabels should be present for the node to be considered a fit for hosting the pod
absentLabels [Required]
+[]string +
+ AbsentLabels should be absent for the node to be considered a fit for hosting the pod
presentLabelsPreference [Required]
+[]string +
+ Nodes that have labels in the list will get a higher score.
absentLabelsPreference [Required]
+[]string +
+ Nodes that don't have labels in the list will get a higher score.
+ + + +## `NodeResourcesFitArgs` {#kubescheduler-config-k8s-io-v1beta1-NodeResourcesFitArgs} + + + + + +NodeResourcesFitArgs holds arguments used to configure the NodeResourcesFit plugin. + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
kubescheduler.config.k8s.io/v1beta1
kind
string
NodeResourcesFitArgs
ignoredResources [Required]
+[]string +
+ IgnoredResources is the list of resources that NodeResources fit filter +should ignore.
ignoredResourceGroups [Required]
+[]string +
+ IgnoredResourceGroups defines the list of resource groups that NodeResources fit filter should ignore. +e.g. if group is ["example.com"], it will ignore all resource names that begin +with "example.com", such as "example.com/aaa" and "example.com/bbb". +A resource group name can't contain '/'.
+ + + +## `NodeResourcesLeastAllocatedArgs` {#kubescheduler-config-k8s-io-v1beta1-NodeResourcesLeastAllocatedArgs} + + + + + +NodeResourcesLeastAllocatedArgs holds arguments used to configure NodeResourcesLeastAllocated plugin. + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
kubescheduler.config.k8s.io/v1beta1
kind
string
NodeResourcesLeastAllocatedArgs
resources [Required]
+[]ResourceSpec +
+ Resources to be managed. If no resource is provided, the default resource set, with both +the weight of "cpu" and "memory" set to "1", will be applied. +A resource with "0" weight will not count toward the final score.
+ + + +## `NodeResourcesMostAllocatedArgs` {#kubescheduler-config-k8s-io-v1beta1-NodeResourcesMostAllocatedArgs} + + + + + +NodeResourcesMostAllocatedArgs holds arguments used to configure NodeResourcesMostAllocated plugin. + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
kubescheduler.config.k8s.io/v1beta1
kind
string
NodeResourcesMostAllocatedArgs
resources [Required]
+[]ResourceSpec +
+ Resources to be managed. If no resource is provided, the default resource set, with both +the weight of "cpu" and "memory" set to "1", will be applied. +A resource with "0" weight will not count toward the final score.
+ + + +## `PodTopologySpreadArgs` {#kubescheduler-config-k8s-io-v1beta1-PodTopologySpreadArgs} + + + + + +PodTopologySpreadArgs holds arguments used to configure the PodTopologySpread plugin. + + + + + + + + + + + + + + + + + + + + + + +
Field | Description
apiVersion
string
kubescheduler.config.k8s.io/v1beta1
kind
string
PodTopologySpreadArgs
defaultConstraints
+[]core/v1.TopologySpreadConstraint +
+ + DefaultConstraints defines topology spread constraints to be applied to +Pods that don't define any in `pod.spec.topologySpreadConstraints`. +`.defaultConstraints[∗].labelSelectors` must be empty, as they are +deduced from the Pod's membership in Services, ReplicationControllers, +ReplicaSets or StatefulSets. +When not empty, .defaultingType must be "List".
defaultingType
+PodTopologySpreadConstraintsDefaulting +
+ DefaultingType determines how .defaultConstraints are deduced. Can be one +of "System" or "List". + +- "System": Use kubernetes defined constraints that spread Pods among + Nodes and Zones. +- "List": Use constraints defined in .defaultConstraints. + +Defaults to "List" if feature gate DefaultPodTopologySpread is disabled +and to "System" if enabled.
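+
+As a sketch of how the two fields fit together (the constraint values below
+are illustrative, not defaults):
+
+```yaml
+pluginConfig:
+  - name: PodTopologySpread
+    args:
+      defaultingType: List   # required whenever defaultConstraints is non-empty
+      defaultConstraints:
+        - maxSkew: 1
+          topologyKey: topology.kubernetes.io/zone
+          whenUnsatisfiable: ScheduleAnyway
+          # labelSelector is intentionally omitted; per the field description it
+          # is deduced from the Pod's Services, ReplicationControllers,
+          # ReplicaSets or StatefulSets.
+```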
+ + + +## `RequestedToCapacityRatioArgs` {#kubescheduler-config-k8s-io-v1beta1-RequestedToCapacityRatioArgs} + + + + + +RequestedToCapacityRatioArgs holds arguments used to configure RequestedToCapacityRatio plugin. + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubescheduler.config.k8s.io/v1beta1
kind
string
RequestedToCapacityRatioArgs
shape [Required]
+[]UtilizationShapePoint +
+ + Points defining the priority function shape.
resources [Required]
+[]ResourceSpec +
+ Resources to be managed
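+
+As a sketch, a shape that scores nodes higher as they fill up (a bin-packing
+preference), using the valid ranges noted under UtilizationShapePoint below:
+
+```yaml
+pluginConfig:
+  - name: RequestedToCapacityRatio
+    args:
+      shape:
+        - utilization: 0     # an empty node...
+          score: 0           # ...gets the lowest score
+        - utilization: 100   # a fully utilized node...
+          score: 10          # ...gets the highest score
+      resources:
+        - name: cpu
+          weight: 1
+        - name: memory
+          weight: 1
+```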
+ + + +## `ServiceAffinityArgs` {#kubescheduler-config-k8s-io-v1beta1-ServiceAffinityArgs} + + + + + +ServiceAffinityArgs holds arguments used to configure the ServiceAffinity plugin. + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubescheduler.config.k8s.io/v1beta1
kind
string
ServiceAffinityArgs
affinityLabels [Required]
+[]string +
+ + AffinityLabels are the labels that must be homogeneous for pods that are scheduled to a node +(i.e. the predicate returns true if and only if this pod can be added to this node such that all other pods in +the same service are running on nodes with exactly the same values for these labels).
antiAffinityLabelsPreference [Required]
+[]string +
+ + AntiAffinityLabelsPreference are the labels to consider for service anti-affinity scoring.
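+
+A minimal sketch; the label keys are placeholders:
+
+```yaml
+pluginConfig:
+  - name: ServiceAffinity
+    args:
+      affinityLabels: ["region"]              # keep a service's pods within one "region"
+      antiAffinityLabelsPreference: ["zone"]  # prefer spreading the service across "zone" values
+```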
+ + + +## `VolumeBindingArgs` {#kubescheduler-config-k8s-io-v1beta1-VolumeBindingArgs} + + + + + +VolumeBindingArgs holds arguments used to configure the VolumeBinding plugin. + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubescheduler.config.k8s.io/v1beta1
kind
string
VolumeBindingArgs
bindTimeoutSeconds [Required]
+int64 +
+ + BindTimeoutSeconds is the timeout, in seconds, for volume binding operations. +The value must be a non-negative integer. The value zero indicates no waiting. +If this value is nil, the default value (600) will be used.
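+
+For illustration, restating the documented default as explicit configuration:
+
+```yaml
+pluginConfig:
+  - name: VolumeBinding
+    args:
+      bindTimeoutSeconds: 600   # the documented default; 0 means no waiting
+```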
+ + + +## `Extender` {#kubescheduler-config-k8s-io-v1beta1-Extender} + + + + +**Appears in:** + +- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta1-KubeSchedulerConfiguration) + + +Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, +it is assumed that the extender chose not to provide that extension. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
urlPrefix [Required]
+string +
+ URLPrefix at which the extender is available
filterVerb [Required]
+string +
+ Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender.
preemptVerb [Required]
+string +
+ Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender.
prioritizeVerb [Required]
+string +
+ Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender.
weight [Required]
+int64 +
+ The numeric multiplier for the node scores that the prioritize call generates. +The weight should be a positive integer
bindVerb [Required]
+string +
+ Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. +If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender +can implement this function.
enableHTTPS [Required]
+bool +
+ EnableHTTPS specifies whether https should be used to communicate with the extender
tlsConfig [Required]
+ExtenderTLSConfig +
+ TLSConfig specifies the transport layer security config
httpTimeout [Required]
+meta/v1.Duration +
+ HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize +timeout is ignored, k8s/other extenders priorities are used to select the node.
nodeCacheCapable [Required]
+bool +
+ NodeCacheCapable specifies that the extender is capable of caching node information, +so the scheduler should only send minimal information about the eligible nodes +assuming that the extender already cached full details of all nodes in the cluster
managedResources
+[]ExtenderManagedResource +
+ ManagedResources is a list of extended resources that are managed by +this extender. +- A pod will be sent to the extender on the Filter, Prioritize and Bind + (if the extender is the binder) phases iff the pod requests at least + one of the extended resources in this list. If empty or unspecified, + all pods will be sent to this extender. +- If IgnoredByScheduler is set to true for a resource, kube-scheduler + will skip checking the resource in predicates.
ignorable [Required]
+bool +
+ Ignorable specifies if the extender is ignorable, i.e. scheduling should not +fail when the extender returns an error or is not reachable.
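+
+A sketch of an extenders entry; the URL, resource name, and timeout are
+placeholders:
+
+```yaml
+apiVersion: kubescheduler.config.k8s.io/v1beta1
+kind: KubeSchedulerConfiguration
+extenders:
+  - urlPrefix: https://extender.example.com/scheduler  # placeholder endpoint
+    filterVerb: filter            # appended to urlPrefix for filter calls
+    prioritizeVerb: prioritize    # appended to urlPrefix for prioritize calls
+    weight: 1                     # multiplier for scores from prioritize
+    enableHTTPS: true
+    httpTimeout: 30s              # meta/v1.Duration, expressed as a string
+    nodeCacheCapable: false
+    ignorable: true               # scheduling keeps working if the extender is down
+    managedResources:
+      - name: example.com/foo     # only pods requesting this reach the extender
+        ignoredByScheduler: true
+```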
+ + + +## `KubeSchedulerProfile` {#kubescheduler-config-k8s-io-v1beta1-KubeSchedulerProfile} + + + + +**Appears in:** + +- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta1-KubeSchedulerConfiguration) + + +KubeSchedulerProfile is a scheduling profile. + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
schedulerName [Required]
+string +
+ + SchedulerName is the name of the scheduler associated with this profile. +If SchedulerName matches the pod's "spec.schedulerName", then the pod +is scheduled with this profile.
plugins [Required]
+Plugins +
+ Plugins specify the set of plugins that should be enabled or disabled. +Enabled plugins are the ones that should be enabled in addition to the +default plugins. Disabled plugins are any of the default plugins that +should be disabled. +When no enabled or disabled plugin is specified for an extension point, +default plugins for that extension point will be used if there is any. +If a QueueSort plugin is specified, the same QueueSort Plugin and +PluginConfig must be specified for all profiles.
pluginConfig [Required]
+[]PluginConfig +
+ PluginConfig is an optional set of custom plugin arguments for each plugin. +Omitting config args for a plugin is equivalent to using the default config +for that plugin.
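+
+A sketch of two profiles; the second scheduler name is hypothetical. A Pod
+opts into a profile through its "spec.schedulerName" field:
+
+```yaml
+apiVersion: kubescheduler.config.k8s.io/v1beta1
+kind: KubeSchedulerConfiguration
+profiles:
+  - schedulerName: default-scheduler      # keeps the default plugin set
+  - schedulerName: bin-packing-scheduler  # hypothetical second profile
+    plugins:
+      score:
+        disabled:
+          - name: NodeResourcesLeastAllocated
+        enabled:
+          - name: NodeResourcesMostAllocated
+```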
+ + + +## `Plugin` {#kubescheduler-config-k8s-io-v1beta1-Plugin} + + + + +**Appears in:** + +- [PluginSet](#kubescheduler-config-k8s-io-v1beta1-PluginSet) + + +Plugin specifies a plugin name and its weight when applicable. Weight is used only for Score plugins. + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ + Name defines the name of the plugin.
weight [Required]
+int32 +
+ + Weight defines the weight of the plugin; only used for Score plugins.
+ + + +## `PluginConfig` {#kubescheduler-config-k8s-io-v1beta1-PluginConfig} + + + + +**Appears in:** + +- [KubeSchedulerProfile](#kubescheduler-config-k8s-io-v1beta1-KubeSchedulerProfile) + + +PluginConfig specifies arguments that should be passed to a plugin at the time of initialization. +A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure. +It is up to the plugin to process these Args. + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ + Name defines the name of the plugin being configured.
args [Required]
+k8s.io/apimachinery/pkg/runtime.RawExtension +
+ Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure.
+ + + +## `PluginSet` {#kubescheduler-config-k8s-io-v1beta1-PluginSet} + + + + +**Appears in:** + +- [Plugins](#kubescheduler-config-k8s-io-v1beta1-Plugins) + + +PluginSet specifies enabled and disabled plugins for an extension point. +If an array is empty, missing, or nil, default plugins at that extension point will be used. + + + + + + + + + + + + + + + + + + +
FieldDescription
enabled [Required]
+[]Plugin +
+ Enabled specifies plugins that should be enabled in addition to default plugins. +These are called after default plugins and in the same order specified here.
disabled [Required]
+[]Plugin +
+ Disabled specifies default plugins that should be disabled. +When all default plugins need to be disabled, an array containing only one "∗" should be provided.
+ + + +## `Plugins` {#kubescheduler-config-k8s-io-v1beta1-Plugins} + + + + +**Appears in:** + +- [KubeSchedulerProfile](#kubescheduler-config-k8s-io-v1beta1-KubeSchedulerProfile) + + +Plugins include multiple extension points. When specified, the list of plugins for +a particular extension point are the only ones enabled. If an extension point is +omitted from the config, then the default set of plugins is used for that extension point. +Enabled plugins are called in the order specified here, after default plugins. If they need to +be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
queueSort [Required]
+PluginSet +
+ QueueSort is a list of plugins that should be invoked when sorting pods in the scheduling queue.
preFilter [Required]
+PluginSet +
+ PreFilter is a list of plugins that should be invoked at "PreFilter" extension point of the scheduling framework.
filter [Required]
+PluginSet +
+ Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod.
postFilter [Required]
+PluginSet +
+ PostFilter is a list of plugins that are invoked after filtering phase, no matter whether filtering succeeds or not.
preScore [Required]
+PluginSet +
+ PreScore is a list of plugins that are invoked before scoring.
score [Required]
+PluginSet +
+ Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase.
reserve [Required]
+PluginSet +
+ Reserve is a list of plugins invoked when reserving/unreserving resources +after a node is assigned to run the pod.
permit [Required]
+PluginSet +
+ Permit is a list of plugins that control binding of a Pod. These plugins can prevent or delay binding of a Pod.
preBind [Required]
+PluginSet +
+ PreBind is a list of plugins that should be invoked before a pod is bound.
bind [Required]
+PluginSet +
+ + Bind is a list of plugins that should be invoked at the "Bind" extension point of the scheduling framework. +The scheduler calls these plugins in order. The scheduler skips the rest of these plugins as soon as one returns success.
postBind [Required]
+PluginSet +
+ PostBind is a list of plugins that should be invoked after a pod is successfully bound.
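+
+A sketch of per-extension-point overrides; "MyCustomFilter" is a hypothetical
+out-of-tree plugin:
+
+```yaml
+plugins:
+  filter:
+    enabled:
+      - name: MyCustomFilter    # runs after the default filter plugins
+  score:
+    disabled:
+      - name: "*"               # disable all default score plugins
+    enabled:
+      - name: RequestedToCapacityRatio
+        weight: 2               # weight applies because Score is a scoring point
+```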
+ + + +## `PodTopologySpreadConstraintsDefaulting` {#kubescheduler-config-k8s-io-v1beta1-PodTopologySpreadConstraintsDefaulting} + +(Alias of `string`) + + +**Appears in:** + +- [PodTopologySpreadArgs](#kubescheduler-config-k8s-io-v1beta1-PodTopologySpreadArgs) + + +PodTopologySpreadConstraintsDefaulting defines how to set default constraints +for the PodTopologySpread plugin. + + + + + +## `ResourceSpec` {#kubescheduler-config-k8s-io-v1beta1-ResourceSpec} + + + + +**Appears in:** + +- [NodeResourcesLeastAllocatedArgs](#kubescheduler-config-k8s-io-v1beta1-NodeResourcesLeastAllocatedArgs) + +- [NodeResourcesMostAllocatedArgs](#kubescheduler-config-k8s-io-v1beta1-NodeResourcesMostAllocatedArgs) + +- [RequestedToCapacityRatioArgs](#kubescheduler-config-k8s-io-v1beta1-RequestedToCapacityRatioArgs) + + +ResourceSpec represents single resource and weight for bin packing of priority RequestedToCapacityRatioArguments. + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ Name of the resource to be managed by RequestedToCapacityRatio function.
weight [Required]
+int64 +
+ Weight of the resource.
+ + + +## `UtilizationShapePoint` {#kubescheduler-config-k8s-io-v1beta1-UtilizationShapePoint} + + + + +**Appears in:** + +- [RequestedToCapacityRatioArgs](#kubescheduler-config-k8s-io-v1beta1-RequestedToCapacityRatioArgs) + + +UtilizationShapePoint represents single point of priority function shape. + + + + + + + + + + + + + + + + + + +
FieldDescription
utilization [Required]
+int32 +
+ Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100.
score [Required]
+int32 +
+ Score assigned to given utilization (y axis). Valid values are 0 to 10.
+ + diff --git a/content/en/docs/reference/config-api/kube-scheduler-policy-config.v1.md b/content/en/docs/reference/config-api/kube-scheduler-policy-config.v1.md new file mode 100644 index 0000000000000..e694f7ecbc6ae --- /dev/null +++ b/content/en/docs/reference/config-api/kube-scheduler-policy-config.v1.md @@ -0,0 +1,799 @@ +--- +title: kube-scheduler Policy Configuration (v1) +content_type: tool-reference +package: kubescheduler.config.k8s.io/v1 +auto_generated: true +--- + + +## Resource Types + + +- [Policy](#kubescheduler-config-k8s-io-v1-Policy) + + + + +## `Policy` {#kubescheduler-config-k8s-io-v1-Policy} + + + + + +Policy describes a struct for a policy resource used in api. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubescheduler.config.k8s.io/v1
kind
string
Policy
predicates [Required]
+[]PredicatePolicy +
+ Holds the information to configure the fit predicate functions
priorities [Required]
+[]PriorityPolicy +
+ Holds the information to configure the priority functions
extenders [Required]
+[]LegacyExtender +
+ Holds the information to communicate with the extender(s)
hardPodAffinitySymmetricWeight [Required]
+int32 +
+ RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule +corresponding to every RequiredDuringScheduling affinity rule. +HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100.
alwaysCheckAllPredicates [Required]
+bool +
+ When AlwaysCheckAllPredicates is set to true, scheduler checks all +the configured predicates even after one or more of them fails. +When the flag is set to false, scheduler skips checking the rest +of the predicates after it finds one predicate that failed.
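+
+A sketch of a Policy object (YAML shown for readability; policy files are
+often written as JSON). "NodeHasDedicatedLabel" and the label key are
+hypothetical:
+
+```yaml
+apiVersion: kubescheduler.config.k8s.io/v1
+kind: Policy
+predicates:
+  - name: PodFitsResources            # pre-defined predicate, no argument
+  - name: NodeHasDedicatedLabel       # custom predicate via labelsPresence
+    argument:
+      labelsPresence:
+        labels: ["example.com/dedicated"]
+        presence: true
+priorities:
+  - name: LeastRequestedPriority
+    weight: 1
+hardPodAffinitySymmetricWeight: 1
+alwaysCheckAllPredicates: false
+```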
+ + + +## `ExtenderManagedResource` {#kubescheduler-config-k8s-io-v1-ExtenderManagedResource} + + + + +**Appears in:** + +- [Extender](#kubescheduler-config-k8s-io-v1beta1-Extender) + +- [LegacyExtender](#kubescheduler-config-k8s-io-v1-LegacyExtender) + + +ExtenderManagedResource describes the arguments of extended resources +managed by an extender. + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ Name is the extended resource name.
ignoredByScheduler [Required]
+bool +
+ IgnoredByScheduler indicates whether kube-scheduler should ignore this +resource when applying predicates.
+ + + +## `ExtenderTLSConfig` {#kubescheduler-config-k8s-io-v1-ExtenderTLSConfig} + + + + +**Appears in:** + +- [Extender](#kubescheduler-config-k8s-io-v1beta1-Extender) + +- [LegacyExtender](#kubescheduler-config-k8s-io-v1-LegacyExtender) + + +ExtenderTLSConfig contains settings to enable TLS with extender + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
insecure [Required]
+bool +
+ Server should be accessed without verifying the TLS certificate. For testing only.
serverName [Required]
+string +
+ ServerName is passed to the server for SNI and is used in the client to check server +certificates against. If ServerName is empty, the hostname used to contact the +server is used.
certFile [Required]
+string +
+ Server requires TLS client certificate authentication
keyFile [Required]
+string +
+ Server requires TLS client certificate authentication
caFile [Required]
+string +
+ Trusted root certificates for server
certData [Required]
+[]byte +
+ CertData holds PEM-encoded bytes (typically read from a client certificate file). +CertData takes precedence over CertFile
keyData [Required]
+[]byte +
+ KeyData holds PEM-encoded bytes (typically read from a client certificate key file). +KeyData takes precedence over KeyFile
caData [Required]
+[]byte +
+ CAData holds PEM-encoded bytes (typically read from a root certificates bundle). +CAData takes precedence over CAFile
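+
+A sketch of the tlsConfig block inside an extender entry; all file paths are
+placeholders. Note that certData, keyData, and caData, when set, take
+precedence over their ∗File counterparts:
+
+```yaml
+tlsConfig:
+  insecure: false                                  # verify the server certificate
+  serverName: extender.example.com                 # used for SNI and cert checks
+  certFile: /etc/kubernetes/extender/client.crt
+  keyFile: /etc/kubernetes/extender/client.key
+  caFile: /etc/kubernetes/extender/ca.crt
+```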
+ + + +## `LabelPreference` {#kubescheduler-config-k8s-io-v1-LabelPreference} + + + + +**Appears in:** + +- [PriorityArgument](#kubescheduler-config-k8s-io-v1-PriorityArgument) + + +LabelPreference holds the parameters that are used to configure the corresponding priority function + + + + + + + + + + + + + + + + + + +
FieldDescription
label [Required]
+string +
+ Used to identify node "groups"
presence [Required]
+bool +
+ + This is a boolean flag. +If true, higher priority is given to nodes that have the label. +If false, higher priority is given to nodes that do not have the label.
+ + + +## `LabelsPresence` {#kubescheduler-config-k8s-io-v1-LabelsPresence} + + + + +**Appears in:** + +- [PredicateArgument](#kubescheduler-config-k8s-io-v1-PredicateArgument) + + +LabelsPresence holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration. + + + + + + + + + + + + + + + + + + +
FieldDescription
labels [Required]
+[]string +
+ + The list of labels that identify node "groups". +All of the labels should be either present (or absent) for the node to be considered a fit for hosting the pod.
presence [Required]
+bool +
+ The boolean flag that indicates whether the labels should be present or absent from the node
+ + + +## `LegacyExtender` {#kubescheduler-config-k8s-io-v1-LegacyExtender} + + + + +**Appears in:** + +- [Policy](#kubescheduler-config-k8s-io-v1-Policy) + + +LegacyExtender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, +it is assumed that the extender chose not to provide that extension. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
urlPrefix [Required]
+string +
+ URLPrefix at which the extender is available
filterVerb [Required]
+string +
+ Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender.
preemptVerb [Required]
+string +
+ Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender.
prioritizeVerb [Required]
+string +
+ Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender.
weight [Required]
+int64 +
+ The numeric multiplier for the node scores that the prioritize call generates. +The weight should be a positive integer
bindVerb [Required]
+string +
+ Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. +If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender +can implement this function.
enableHttps [Required]
+bool +
+ EnableHTTPS specifies whether https should be used to communicate with the extender
tlsConfig [Required]
+ExtenderTLSConfig +
+ TLSConfig specifies the transport layer security config
httpTimeout [Required]
+time.Duration +
+ HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize +timeout is ignored, k8s/other extenders priorities are used to select the node.
nodeCacheCapable [Required]
+bool +
+ NodeCacheCapable specifies that the extender is capable of caching node information, +so the scheduler should only send minimal information about the eligible nodes +assuming that the extender already cached full details of all nodes in the cluster
managedResources
+[]ExtenderManagedResource +
+ ManagedResources is a list of extended resources that are managed by +this extender. +- A pod will be sent to the extender on the Filter, Prioritize and Bind + (if the extender is the binder) phases iff the pod requests at least + one of the extended resources in this list. If empty or unspecified, + all pods will be sent to this extender. +- If IgnoredByScheduler is set to true for a resource, kube-scheduler + will skip checking the resource in predicates.
ignorable [Required]
+bool +
+ Ignorable specifies if the extender is ignorable, i.e. scheduling should not +fail when the extender returns an error or is not reachable.
+ + + +## `PredicateArgument` {#kubescheduler-config-k8s-io-v1-PredicateArgument} + + + + +**Appears in:** + +- [PredicatePolicy](#kubescheduler-config-k8s-io-v1-PredicatePolicy) + + +PredicateArgument represents the arguments to configure predicate functions in scheduler policy configuration. +Only one of its members may be specified + + + + + + + + + + + + + + + + + + +
FieldDescription
serviceAffinity [Required]
+ServiceAffinity +
+ + The predicate that provides affinity for pods belonging to a service. +It uses a label to identify nodes that belong to the same "group".
labelsPresence [Required]
+LabelsPresence +
+ The predicate that checks whether a particular node has a certain label +defined or not, regardless of value
+ + + +## `PredicatePolicy` {#kubescheduler-config-k8s-io-v1-PredicatePolicy} + + + + +**Appears in:** + +- [Policy](#kubescheduler-config-k8s-io-v1-Policy) + + +PredicatePolicy describes a struct of a predicate policy. + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ + Identifier of the predicate policy. +For a custom predicate, the name can be user-defined. +For the Kubernetes provided predicates, the name is the identifier of the pre-defined predicate.
argument [Required]
+PredicateArgument +
+ Holds the parameters to configure the given predicate
+ + + +## `PriorityArgument` {#kubescheduler-config-k8s-io-v1-PriorityArgument} + + + + +**Appears in:** + +- [PriorityPolicy](#kubescheduler-config-k8s-io-v1-PriorityPolicy) + + +PriorityArgument represents the arguments to configure priority functions in scheduler policy configuration. +Only one of its members may be specified + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
serviceAntiAffinity [Required]
+ServiceAntiAffinity +
+ + The priority function that ensures a good spread (anti-affinity) for pods belonging to a service. +It uses a label to identify nodes that belong to the same "group".
labelPreference [Required]
+LabelPreference +
+ The priority function that checks whether a particular node has a certain label +defined or not, regardless of value
requestedToCapacityRatioArguments [Required]
+RequestedToCapacityRatioArguments +
+ + The RequestedToCapacityRatio priority function is parametrized with the function shape.
+ + + +## `PriorityPolicy` {#kubescheduler-config-k8s-io-v1-PriorityPolicy} + + + + +**Appears in:** + +- [Policy](#kubescheduler-config-k8s-io-v1-Policy) + + +PriorityPolicy describes a struct of a priority policy. + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ + Identifier of the priority policy. +For a custom priority, the name can be user-defined. +For the Kubernetes provided priority functions, the name is the identifier of the pre-defined priority function.
weight [Required]
+int64 +
+ + The numeric multiplier for the node scores that the priority function generates. +The weight should be non-zero and can be a positive or a negative integer.
argument [Required]
+PriorityArgument +
+ Holds the parameters to configure the given priority function
+ + + +## `RequestedToCapacityRatioArguments` {#kubescheduler-config-k8s-io-v1-RequestedToCapacityRatioArguments} + + + + +**Appears in:** + +- [PriorityArgument](#kubescheduler-config-k8s-io-v1-PriorityArgument) + + +RequestedToCapacityRatioArguments holds arguments specific to RequestedToCapacityRatio priority function. + + + + + + + + + + + + + + + + + + +
FieldDescription
shape [Required]
+[]UtilizationShapePoint +
+ + Array of points defining the priority function shape.
resources [Required]
+[]ResourceSpec +
+ No description provided. +
+ + + +## `ResourceSpec` {#kubescheduler-config-k8s-io-v1-ResourceSpec} + + + + +**Appears in:** + +- [RequestedToCapacityRatioArguments](#kubescheduler-config-k8s-io-v1-RequestedToCapacityRatioArguments) + + +ResourceSpec represents single resource and weight for bin packing of priority RequestedToCapacityRatioArguments. + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ Name of the resource to be managed by RequestedToCapacityRatio function.
weight [Required]
+int64 +
+ Weight of the resource.
+ + + +## `ServiceAffinity` {#kubescheduler-config-k8s-io-v1-ServiceAffinity} + + + + +**Appears in:** + +- [PredicateArgument](#kubescheduler-config-k8s-io-v1-PredicateArgument) + + +ServiceAffinity holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration. + + + + + + + + + + + + + +
FieldDescription
labels [Required]
+[]string +
+ + The list of labels that identify node "groups". +All of the labels should match for the node to be considered a fit for hosting the pod.
+ + + +## `ServiceAntiAffinity` {#kubescheduler-config-k8s-io-v1-ServiceAntiAffinity} + + + + +**Appears in:** + +- [PriorityArgument](#kubescheduler-config-k8s-io-v1-PriorityArgument) + + +ServiceAntiAffinity holds the parameters that are used to configure the corresponding priority function + + + + + + + + + + + + + +
FieldDescription
label [Required]
+string +
+ Used to identify node "groups"
+ + + +## `UtilizationShapePoint` {#kubescheduler-config-k8s-io-v1-UtilizationShapePoint} + + + + +**Appears in:** + +- [RequestedToCapacityRatioArguments](#kubescheduler-config-k8s-io-v1-RequestedToCapacityRatioArguments) + + +UtilizationShapePoint represents single point of priority function shape. + + + + + + + + + + + + + + + + + + +
FieldDescription
utilization [Required]
+int32 +
+ Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100.
score [Required]
+int32 +
+ Score assigned to given utilization (y axis). Valid values are 0 to 10.
+ + diff --git a/content/en/docs/reference/config-api/kubelet-config.v1beta1.md b/content/en/docs/reference/config-api/kubelet-config.v1beta1.md new file mode 100644 index 0000000000000..bee05b68db403 --- /dev/null +++ b/content/en/docs/reference/config-api/kubelet-config.v1beta1.md @@ -0,0 +1,1604 @@ +--- +title: Kubelet Configuration (v1beta1) +content_type: tool-reference +package: kubelet.config.k8s.io/v1beta1 +auto_generated: true +--- + + +## Resource Types + + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) +- [SerializedNodeConfigSource](#kubelet-config-k8s-io-v1beta1-SerializedNodeConfigSource) + + + + +## `KubeletConfiguration` {#kubelet-config-k8s-io-v1beta1-KubeletConfiguration} + + + + + +KubeletConfiguration contains the configuration for the Kubelet + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubelet.config.k8s.io/v1beta1
kind
string
KubeletConfiguration
enableServer [Required]
+bool +
+ enableServer enables Kubelet's secured server. +Note: Kubelet's insecure port is controlled by the readOnlyPort option. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: true
staticPodPath
+string +
+ staticPodPath is the path to the directory containing local (static) pods to +run, or the path to a single static pod file. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +the set of static pods specified at the new path may be different than the +ones the Kubelet initially started with, and this may disrupt your node. +Default: ""
syncFrequency
+meta/v1.Duration +
+ syncFrequency is the max period between synchronizing running +containers and config. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +shortening this duration may have a negative performance impact, especially +as the number of Pods on the node increases. Alternatively, increasing this +duration will result in longer refresh times for ConfigMaps and Secrets. +Default: "1m"
fileCheckFrequency
+meta/v1.Duration +
+ fileCheckFrequency is the duration between checking config files for +new data +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +shortening the duration will cause the Kubelet to reload local Static Pod +configurations more frequently, which may have a negative performance impact. +Default: "20s"
httpCheckFrequency
+meta/v1.Duration +
+ httpCheckFrequency is the duration between checking http for new data +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +shortening the duration will cause the Kubelet to poll staticPodURL more +frequently, which may have a negative performance impact. +Default: "20s"
staticPodURL
+string +
+ staticPodURL is the URL for accessing static pods to run +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +the set of static pods specified at the new URL may be different than the +ones the Kubelet initially started with, and this may disrupt your node. +Default: ""
staticPodURLHeader
+map[string][]string +
+ staticPodURLHeader is a map of slices with HTTP headers to use when accessing the podURL +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt the ability to read the latest set of static pods from StaticPodURL. +Default: nil
address
+string +
+ address is the IP address for the Kubelet to serve on (set to 0.0.0.0 +for all interfaces). +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: "0.0.0.0"
port
+int32 +
+ port is the port for the Kubelet to serve on. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: 10250
readOnlyPort
+int32 +
+ readOnlyPort is the read-only port for the Kubelet to serve on with +no authentication/authorization. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: 0 (disabled)
tlsCertFile
+string +
+ tlsCertFile is the file containing x509 Certificate for HTTPS. (CA cert, +if any, concatenated after server cert). If tlsCertFile and +tlsPrivateKeyFile are not provided, a self-signed certificate +and key are generated for the public address and saved to the directory +passed to the Kubelet's --cert-dir flag. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: ""
tlsPrivateKeyFile
+string +
+ tlsPrivateKeyFile is the file containing x509 private key matching tlsCertFile +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: ""
tlsCipherSuites
+[]string +
+ TLSCipherSuites is the list of allowed cipher suites for the server. +Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: nil
tlsMinVersion
+string +
+ TLSMinVersion is the minimum TLS version supported. +Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: ""
rotateCertificates
+bool +
+ rotateCertificates enables client certificate rotation. The Kubelet will request a +new certificate from the certificates.k8s.io API. This requires an approver to approve the +certificate signing requests. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +disabling it may disrupt the Kubelet's ability to authenticate with the API server +after the current certificate expires. +Default: false
serverTLSBootstrap
+bool +
+ serverTLSBootstrap enables server certificate bootstrap. Instead of self +signing a serving certificate, the Kubelet will request a certificate from +the certificates.k8s.io API. This requires an approver to approve the +certificate signing requests. The RotateKubeletServerCertificate feature +must be enabled. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +disabling it will stop the renewal of Kubelet server certificates, which can +disrupt components that interact with the Kubelet server in the long term, +due to certificate expiration. +Default: false
authentication
+KubeletAuthentication +
+ authentication specifies how requests to the Kubelet's server are authenticated +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Defaults: + anonymous: + enabled: false + webhook: + enabled: true + cacheTTL: "2m"
authorization
+KubeletAuthorization +
+ authorization specifies how requests to the Kubelet's server are authorized +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Defaults: + mode: Webhook + webhook: + cacheAuthorizedTTL: "5m" + cacheUnauthorizedTTL: "30s"
registryPullQPS
+int32 +
+ registryPullQPS is the limit of registry pulls per second. +Set to 0 for no limit. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact scalability by changing the amount of traffic produced +by image pulls. +Default: 5
registryBurst
+int32 +
+ registryBurst is the maximum size of bursty pulls, temporarily allows +pulls to burst to this number, while still not exceeding registryPullQPS. +Only used if registryPullQPS > 0. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact scalability by changing the amount of traffic produced +by image pulls. +Default: 10
eventRecordQPS
+int32 +
+ eventRecordQPS is the maximum event creations per second. If 0, there +is no limit enforced. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact scalability by changing the amount of traffic produced by +event creations. +Default: 5
eventBurst
+int32 +
+ eventBurst is the maximum size of a burst of event creations, temporarily +allows event creations to burst to this number, while still not exceeding +eventRecordQPS. Only used if eventRecordQPS > 0. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact scalability by changing the amount of traffic produced by +event creations. +Default: 10
enableDebuggingHandlers
+bool +
+ enableDebuggingHandlers enables server endpoints for log access +and local running of containers and commands, including the exec, +attach, logs, and portforward features. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +disabling it may disrupt components that interact with the Kubelet server. +Default: true
enableContentionProfiling
+bool +
+ enableContentionProfiling enables lock contention profiling, if enableDebuggingHandlers is true. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +enabling it may carry a performance impact. +Default: false
healthzPort
+int32 +
+ healthzPort is the port of the localhost healthz endpoint (set to 0 to disable) +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that monitor Kubelet health. +Default: 10248
healthzBindAddress
+string +
+ healthzBindAddress is the IP address for the healthz server to serve on +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that monitor Kubelet health. +Default: "127.0.0.1"
oomScoreAdj
+int32 +
+ + oomScoreAdj is the oom-score-adj value for the kubelet process. Values +must be within the range [-1000, 1000]. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact the stability of nodes under memory pressure. +Default: -999
clusterDomain
+string +
+ clusterDomain is the DNS domain for this cluster. If set, kubelet will +configure all containers to search this domain in addition to the +host's search domains. +Dynamic Kubelet Config (beta): Dynamically updating this field is not recommended, +as it should be kept in sync with the rest of the cluster. +Default: ""
clusterDNS
+[]string +
+ clusterDNS is a list of IP addresses for the cluster DNS server. If set, +kubelet will configure all containers to use this for DNS resolution +instead of the host's DNS servers. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +changes will only take effect on Pods created after the update. Draining +the node is recommended before changing this field. +Default: nil
streamingConnectionIdleTimeout
+meta/v1.Duration +
+ streamingConnectionIdleTimeout is the maximum time a streaming connection +can be idle before the connection is automatically closed. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact components that rely on infrequent updates over streaming +connections to the Kubelet server. +Default: "4h"
nodeStatusUpdateFrequency
+meta/v1.Duration +
+ nodeStatusUpdateFrequency is the frequency that kubelet computes node +status. If node lease feature is not enabled, it is also the frequency that +kubelet posts node status to master. +Note: When node lease feature is not enabled, be cautious when changing the +constant, it must work with nodeMonitorGracePeriod in nodecontroller. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact node scalability, and also that the node controller's +nodeMonitorGracePeriod must be set to N∗NodeStatusUpdateFrequency, +where N is the number of retries before the node controller marks +the node unhealthy. +Default: "10s"
nodeStatusReportFrequency
+meta/v1.Duration +
+ nodeStatusReportFrequency is the frequency that kubelet posts node +status to master if node status does not change. Kubelet will ignore this +frequency and post node status immediately if any change is detected. It is +only used when node lease feature is enabled. nodeStatusReportFrequency's +default value is 1m. But if nodeStatusUpdateFrequency is set explicitly, +nodeStatusReportFrequency's default value will be set to +nodeStatusUpdateFrequency for backward compatibility. +Default: "1m"
nodeLeaseDurationSeconds
+int32 +
+ nodeLeaseDurationSeconds is the duration the Kubelet will set on its corresponding Lease, +when the NodeLease feature is enabled. This feature provides an indicator of node +health by having the Kubelet create and periodically renew a lease, named after the node, +in the kube-node-lease namespace. If the lease expires, the node can be considered unhealthy. +The lease is currently renewed every 10s, per KEP-0009. In the future, the lease renewal interval +may be set based on the lease duration. +Requires the NodeLease feature gate to be enabled. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +decreasing the duration may reduce tolerance for issues that temporarily prevent +the Kubelet from renewing the lease (e.g. a short-lived network issue). +Default: 40
imageMinimumGCAge
+meta/v1.Duration +
+ imageMinimumGCAge is the minimum age for an unused image before it is +garbage collected. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may trigger or delay garbage collection, and may change the image overhead +on the node. +Default: "2m"
imageGCHighThresholdPercent
+int32 +
+ imageGCHighThresholdPercent is the percent of disk usage after which +image garbage collection is always run. The percent is calculated as +this field value out of 100. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may trigger or delay garbage collection, and may change the image overhead +on the node. +Default: 85
imageGCLowThresholdPercent
+int32 +
+ imageGCLowThresholdPercent is the percent of disk usage before which +image garbage collection is never run. Lowest disk usage to garbage +collect to. The percent is calculated as this field value out of 100. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may trigger or delay garbage collection, and may change the image overhead +on the node. +Default: 80
volumeStatsAggPeriod
+meta/v1.Duration +
+ How frequently to calculate and cache volume disk usage for all pods +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +shortening the period may carry a performance impact. +Default: "1m"
kubeletCgroups
+string +
+ kubeletCgroups is the absolute name of cgroups to isolate the kubelet in +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: ""
systemCgroups
+string +
+ systemCgroups is absolute name of cgroups in which to place +all non-kernel processes that are not already in a container. Empty +for no container. Rolling back the flag requires a reboot. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: ""
cgroupRoot
+string +
+ cgroupRoot is the root cgroup to use for pods. This is handled by the +container runtime on a best effort basis. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: ""
cgroupsPerQOS
+bool +
+ Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes +And all Burstable and BestEffort pods are brought up under their +specific top level QoS cgroup. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: true
cgroupDriver
+string +
+ driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd) +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: "cgroupfs"
cpuManagerPolicy
+string +
+ CPUManagerPolicy is the name of the policy to use. +Requires the CPUManager feature gate to be enabled. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: "none"
cpuManagerReconcilePeriod
+meta/v1.Duration +
+ CPU Manager reconciliation period. +Requires the CPUManager feature gate to be enabled. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +shortening the period may carry a performance impact. +Default: "10s"
topologyManagerPolicy
+string +
+ TopologyManagerPolicy is the name of the policy to use. +Policies other than "none" require the TopologyManager feature gate to be enabled. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: "none"
topologyManagerScope
+string +
+ TopologyManagerScope represents the scope of topology hint generation +that topology manager requests and hint providers generate. +"pod" scope requires the TopologyManager feature gate to be enabled. +Default: "container"
qosReserved
+map[string]string +
+ qosReserved is a set of resource name to percentage pairs that specify +the minimum percentage of a resource reserved for exclusive use by the +guaranteed QoS tier. +Currently supported resources: "memory" +Requires the QOSReserved feature gate to be enabled. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: nil
runtimeRequestTimeout
+meta/v1.Duration +
+ runtimeRequestTimeout is the timeout for all runtime requests except long running +requests - pull, logs, exec and attach. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may disrupt components that interact with the Kubelet server. +Default: "2m"
hairpinMode
+string +
+ hairpinMode specifies how the Kubelet should configure the container +bridge for hairpin packets. +Setting this flag allows endpoints in a Service to loadbalance back to +themselves if they should try to access their own Service. Values: + "promiscuous-bridge": make the container bridge promiscuous. + "hairpin-veth": set the hairpin flag on container veth interfaces. + "none": do nothing. +Generally, one must set --hairpin-mode=hairpin-veth to achieve hairpin NAT, +because promiscuous-bridge assumes the existence of a container bridge named cbr0. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may require a node reboot, depending on the network plugin. +Default: "promiscuous-bridge"
maxPods
+int32 +
+ maxPods is the number of pods that can run on this Kubelet. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +changes may cause Pods to fail admission on Kubelet restart, and may change +the value reported in Node.Status.Capacity[v1.ResourcePods], thus affecting +future scheduling decisions. Increasing this value may also decrease performance, +as more Pods can be packed into a single node. +Default: 110
podCIDR
+string +
+ + The CIDR to use for pod IP addresses, only used in standalone mode. +In cluster mode, this is obtained from the master. +Dynamic Kubelet Config (beta): This field should always be set to the empty default. +It should only be set for standalone Kubelets, which cannot use Dynamic Kubelet Config. +Default: ""
podPidsLimit
+int64 +
+ PodPidsLimit is the maximum number of pids in any pod. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +lowering it may prevent container processes from forking after the change. +Default: -1
resolvConf
+string +
+ ResolverConfig is the resolver configuration file used as the basis +for the container DNS resolution configuration. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +changes will only take effect on Pods created after the update. Draining +the node is recommended before changing this field. +Default: "/etc/resolv.conf"
runOnce
+bool +
+ RunOnce causes the Kubelet to check the API server once for pods, +run those in addition to the pods specified by static pod files, and exit. +Default: false
cpuCFSQuota
+bool +
+ cpuCFSQuota enables CPU CFS quota enforcement for containers that +specify CPU limits. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +disabling it may reduce node stability. +Default: true
cpuCFSQuotaPeriod
+meta/v1.Duration +
+ CPUCFSQuotaPeriod is the CPU CFS quota period value, cpu.cfs_period_us. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +limits set for containers will result in different cpu.cfs_quota settings. This +will trigger container restarts on the node being reconfigured. +Default: "100ms"
nodeStatusMaxImages
+int32 +
+ nodeStatusMaxImages caps the number of images reported in Node.Status.Images. +Note: If -1 is specified, no cap will be applied. If 0 is specified, no image is returned. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +different values can be reported on node status. +Default: 50
maxOpenFiles
+int64 +
+ + maxOpenFiles is the number of files that can be opened by the Kubelet process. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact the ability of the Kubelet to interact with the node's filesystem. +Default: 1000000
contentType
+string +
+ + contentType is the content type of requests sent to the apiserver. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact the ability for the Kubelet to communicate with the API server. +If the Kubelet loses contact with the API server due to a change to this field, +the change cannot be reverted via dynamic Kubelet config. +Default: "application/vnd.kubernetes.protobuf"
kubeAPIQPS
+int32 +
+ kubeAPIQPS is the QPS to use while talking with kubernetes apiserver +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact scalability by changing the amount of traffic the Kubelet +sends to the API server. +Default: 5
kubeAPIBurst
+int32 +
+ kubeAPIBurst is the burst to allow while talking with kubernetes apiserver +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact scalability by changing the amount of traffic the Kubelet +sends to the API server. +Default: 10
serializeImagePulls
+bool +
+ + serializeImagePulls, when enabled, tells the Kubelet to pull images one +at a time. We recommend ∗not∗ changing the default value on nodes that +run a docker daemon with version < 1.9 or with an Aufs storage backend. +Issue #10959 has more details. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact the performance of image pulls. +Default: true
evictionHard
+map[string]string +
+ Map of signal names to quantities that defines hard eviction thresholds. For example: {"memory.available": "300Mi"}. +To explicitly disable, pass a 0% or 100% threshold on an arbitrary resource. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may trigger or delay Pod evictions. +Default: + memory.available: "100Mi" + nodefs.available: "10%" + nodefs.inodesFree: "5%" + imagefs.available: "15%"
evictionSoft
+map[string]string +
+ Map of signal names to quantities that defines soft eviction thresholds. +For example: {"memory.available": "300Mi"}. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may trigger or delay Pod evictions, and may change the allocatable reported +by the node. +Default: nil
evictionSoftGracePeriod
+map[string]string +
+ Map of signal names to quantities that defines grace periods for each soft eviction signal. +For example: {"memory.available": "30s"}. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may trigger or delay Pod evictions. +Default: nil
evictionPressureTransitionPeriod
+meta/v1.Duration +
+ Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +lowering it may decrease the stability of the node when the node is overcommitted. +Default: "5m"
evictionMaxPodGracePeriod
+int32 +
+ Maximum allowed grace period (in seconds) to use when terminating pods in +response to a soft eviction threshold being met. This value effectively caps +the Pod's TerminationGracePeriodSeconds value during soft evictions. +Note: Due to issue #64530, the behavior has a bug where this value currently just +overrides the grace period during soft eviction, which can increase the grace +period from what is set on the Pod. This bug will be fixed in a future release. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +lowering it decreases the amount of time Pods will have to gracefully clean +up before being killed during a soft eviction. +Default: 0
evictionMinimumReclaim
+map[string]string +
+ Map of signal names to quantities that defines minimum reclaims, which describe the minimum +amount of a given resource the kubelet will reclaim when performing a pod eviction while +that resource is under pressure. For example: {"imagefs.available": "2Gi"} +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may change how well eviction can manage resource pressure. +Default: nil
podsPerCore
+int32 +
+ podsPerCore is the maximum number of pods per core. Cannot exceed MaxPods. +If 0, this field is ignored. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +changes may cause Pods to fail admission on Kubelet restart, and may change +the value reported in Node.Status.Capacity[v1.ResourcePods], thus affecting +future scheduling decisions. Increasing this value may also decrease performance, +as more Pods can be packed into a single node. +Default: 0
enableControllerAttachDetach
+bool +
+ enableControllerAttachDetach enables the Attach/Detach controller to +manage attachment/detachment of volumes scheduled to this node, and +disables kubelet from executing any attach/detach operations +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +changing which component is responsible for volume management on a live node +may result in volumes refusing to detach if the node is not drained prior to +the update, and if Pods are scheduled to the node before the +volumes.kubernetes.io/controller-managed-attach-detach annotation is updated by the +Kubelet. In general, it is safest to leave this value set the same as local config. +Default: true
protectKernelDefaults
+bool +
+ protectKernelDefaults, if true, causes the Kubelet to error if kernel +flags are not as it expects. Otherwise the Kubelet will attempt to modify +kernel flags to match its expectation. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +enabling it may cause the Kubelet to crash-loop if the Kernel is not configured as +Kubelet expects. +Default: false
makeIPTablesUtilChains
+bool +
+ If true, Kubelet ensures a set of iptables rules are present on host. +These rules will serve as utility rules for various components, e.g. KubeProxy. +The rules will be created based on IPTablesMasqueradeBit and IPTablesDropBit. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +disabling it will prevent the Kubelet from healing locally misconfigured iptables rules. +Default: true
iptablesMasqueradeBit
+int32 +
+ iptablesMasqueradeBit is the bit of the iptables fwmark space to mark for SNAT +Values must be within the range [0, 31]. Must be different from other mark bits. +Warning: Please match the value of the corresponding parameter in kube-proxy. +TODO: clean up IPTablesMasqueradeBit in kube-proxy +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it needs to be coordinated with other components, like kube-proxy, and the update +will only be effective if MakeIPTablesUtilChains is enabled. +Default: 14
iptablesDropBit
+int32 +
+ iptablesDropBit is the bit of the iptables fwmark space to mark for dropping packets. +Values must be within the range [0, 31]. Must be different from other mark bits. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it needs to be coordinated with other components, like kube-proxy, and the update +will only be effective if MakeIPTablesUtilChains is enabled. +Default: 15
featureGates
+map[string]bool +
+ featureGates is a map of feature names to bools that enable or disable alpha/experimental +features. This field modifies piecemeal the built-in default values from +"k8s.io/kubernetes/pkg/features/kube_features.go". +Dynamic Kubelet Config (beta): If dynamically updating this field, consider the +documentation for the features you are enabling or disabling. While we +encourage feature developers to make it possible to dynamically enable +and disable features, some changes may require node reboots, and some +features may require careful coordination to retroactively disable. +Default: nil
failSwapOn
+bool +
+ failSwapOn tells the Kubelet to fail to start if swap is enabled on the node. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +setting it to true will cause the Kubelet to crash-loop if swap is enabled. +Default: true
containerLogMaxSize
+string +
+ A quantity that defines the maximum size of the container log file before it is rotated. +For example: "5Mi" or "256Ki". +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may trigger log rotation. +Default: "10Mi"
containerLogMaxFiles
+int32 +
+ Maximum number of container log files that can be present for a container. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +lowering it may cause log files to be deleted. +Default: 5
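As a minimal sketch of how the two log-rotation fields above fit together in a kubelet configuration file (the values here are illustrative, not recommendations):

```yaml
# Fragment of a KubeletConfiguration showing container log rotation.
# 100Mi / 5 are example values; tune them to your node's disk capacity.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
containerLogMaxSize: "100Mi"   # rotate a container's log once it reaches 100Mi
containerLogMaxFiles: 5        # keep at most 5 rotated log files per container
```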
configMapAndSecretChangeDetectionStrategy
+ResourceChangeDetectionStrategy +
+ ConfigMapAndSecretChangeDetectionStrategy is the mode in which +the ConfigMap and Secret managers run. +Default: "Watch"
systemReserved
+map[string]string +
+ systemReserved is a set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) +pairs that describe resources reserved for non-kubernetes components. +Currently only cpu and memory are supported. +See http://kubernetes.io/docs/user-guide/compute-resources for more detail. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may not be possible to increase the reserved resources, because this +requires resizing cgroups. Always look for a NodeAllocatableEnforced event +after updating this field to ensure that the update was successful. +Default: nil
kubeReserved
+map[string]string +
+ A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs +that describe resources reserved for kubernetes system components. +Currently cpu, memory and local storage for root file system are supported. +See http://kubernetes.io/docs/user-guide/compute-resources for more detail. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may not be possible to increase the reserved resources, because this +requires resizing cgroups. Always look for a NodeAllocatableEnforced event +after updating this field to ensure that the update was successful. +Default: nil
reservedSystemCPUs [Required]
+string +
+ This ReservedSystemCPUs option specifies the CPU list reserved for host-level system threads and Kubernetes-related threads. +This provides a "static" CPU list rather than the "dynamic" list derived from system-reserved and kube-reserved. +This option overwrites the CPUs provided by system-reserved and kube-reserved.
showHiddenMetricsForVersion
+string +
+ The previous version for which you want to show hidden metrics. +Only the previous minor version is meaningful; other values will not be allowed. +The format is `<major>.<minor>`, e.g.: '1.16'. +The purpose of this format is to make sure you have the opportunity to notice if the next release hides additional metrics, +rather than being surprised when they are permanently removed in the release after that. +Default: ""
systemReservedCgroup
+string +
+ This flag helps the kubelet identify the absolute name of the top-level cgroup used to enforce the `SystemReserved` compute resource reservation for OS system daemons. +Refer to the [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: ""
kubeReservedCgroup
+string +
+ This flag helps the kubelet identify the absolute name of the top-level cgroup used to enforce the `KubeReserved` compute resource reservation for Kubernetes node system daemons. +Refer to the [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. +Dynamic Kubelet Config (beta): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: ""
enforceNodeAllocatable
+[]string +
+ This flag specifies the various Node Allocatable enforcements that the Kubelet needs to perform. +This flag accepts a list of options. Acceptable options are `none`, `pods`, `system-reserved` & `kube-reserved`. +If `none` is specified, no other options may be specified. +Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +removing enforcements may reduce the stability of the node. Alternatively, adding +enforcements may reduce the stability of components which were using more than +the reserved amount of resources; for example, enforcing kube-reserved may cause +the Kubelet to OOM if it uses more than the reserved resources, and enforcing system-reserved +may cause system daemons to OOM if they use more than the reserved resources. +Default: ["pods"]
allowedUnsafeSysctls
+[]string +
+ A comma-separated whitelist of unsafe sysctls or sysctl patterns (ending in *). +Unsafe sysctl groups are kernel.shm*, kernel.msg*, kernel.sem, fs.mqueue.*, and net.*. +These sysctls are namespaced but not allowed by default. For example: "kernel.msg*,net.ipv4.route.min_pmtu" +Default: []
volumePluginDir
+string +
+ volumePluginDir is the full path of the directory in which to search +for additional third party volume plugins. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that changing +the volumePluginDir may disrupt workloads relying on third party volume plugins. +Default: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/"
providerID
+string +
+ providerID, if set, sets the unique id of the instance that an external provider (i.e. cloudprovider) +can use to identify a specific node. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact the ability of the Kubelet to interact with cloud providers. +Default: ""
kernelMemcgNotification
+bool +
+ If kernelMemcgNotification is set, the kubelet will integrate with the kernel memcg notification +to determine whether memory eviction thresholds are crossed, rather than polling. +Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +it may impact the way the Kubelet interacts with the kernel. +Default: false
logging [Required]
+LoggingConfiguration +
+ Logging specifies the options of logging. +Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information. +Defaults: + Format: text
enableSystemLogHandler
+bool +
+ enableSystemLogHandler enables serving system logs via the web interface at host:port/logs/ +Default: true
shutdownGracePeriod
+meta/v1.Duration +
+ ShutdownGracePeriod specifies the total duration that the node should delay the shutdown and total grace period for pod termination during a node shutdown. +Default: "30s"
shutdownGracePeriodCriticalPods
+meta/v1.Duration +
+ ShutdownGracePeriodCriticalPods specifies the duration used to terminate critical pods during a node shutdown. This should be less than ShutdownGracePeriod. +For example, if ShutdownGracePeriod=30s, and ShutdownGracePeriodCriticalPods=10s, during a node shutdown the first 20 seconds would be reserved for gracefully terminating normal pods, and the last 10 seconds would be reserved for terminating critical pods. +Default: "10s"
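Several of the fields documented above are typically set together in a single kubelet configuration file. The following is a minimal, illustrative sketch; every value is an example rather than a recommendation, and the reservation sizes are assumptions:

```yaml
# Illustrative KubeletConfiguration combining fields described in this table.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
failSwapOn: true                 # refuse to start if swap is enabled
makeIPTablesUtilChains: true
iptablesMasqueradeBit: 14        # must match the corresponding kube-proxy setting
iptablesDropBit: 15
systemReserved:                  # reserved for non-Kubernetes OS daemons
  cpu: "500m"
  memory: "1Gi"
kubeReserved:                    # reserved for Kubernetes system components
  cpu: "250m"
  memory: "500Mi"
enforceNodeAllocatable:
- pods
evictionMinimumReclaim:
  imagefs.available: "2Gi"       # example taken from the field description
allowedUnsafeSysctls:
- "kernel.msg*"
- "net.ipv4.route.min_pmtu"
shutdownGracePeriod: "30s"             # total shutdown delay for the node
shutdownGracePeriodCriticalPods: "10s" # tail reserved for critical pods
```

With these values, a node shutdown spends the first 20 seconds terminating normal pods and the final 10 seconds terminating critical pods, matching the worked example in the ShutdownGracePeriodCriticalPods description.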
+ + + +## `SerializedNodeConfigSource` {#kubelet-config-k8s-io-v1beta1-SerializedNodeConfigSource} + + + + + +SerializedNodeConfigSource allows us to serialize v1.NodeConfigSource. +This type is used internally by the Kubelet for tracking checkpointed dynamic configs. +It exists in the kubeletconfig API group because it is classified as a versioned input to the Kubelet. + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubelet.config.k8s.io/v1beta1
kind
string
SerializedNodeConfigSource
source
+core/v1.NodeConfigSource +
+ Source is the source that we are serializing
+ + + +## `HairpinMode` {#kubelet-config-k8s-io-v1beta1-HairpinMode} + +(Alias of `string`) + + + +HairpinMode denotes how the kubelet should configure networking to handle +hairpin packets. + + + + + +## `KubeletAnonymousAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletAnonymousAuthentication} + + + + +**Appears in:** + +- [KubeletAuthentication](#kubelet-config-k8s-io-v1beta1-KubeletAuthentication) + + + + + + + + + + + + + + + + +
FieldDescription
enabled
+bool +
+ enabled allows anonymous requests to the kubelet server. +Requests that are not rejected by another authentication method are treated as anonymous requests. +Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated.
+ + + +## `KubeletAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletAuthentication} + + + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
x509
+KubeletX509Authentication +
+ x509 contains settings related to x509 client certificate authentication
webhook
+KubeletWebhookAuthentication +
+ webhook contains settings related to webhook bearer token authentication
anonymous
+KubeletAnonymousAuthentication +
+ anonymous contains settings related to anonymous authentication
+ + + +## `KubeletAuthorization` {#kubelet-config-k8s-io-v1beta1-KubeletAuthorization} + + + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
mode
+KubeletAuthorizationMode +
+ mode is the authorization mode to apply to requests to the kubelet server. +Valid values are AlwaysAllow and Webhook. +Webhook mode uses the SubjectAccessReview API to determine authorization.
webhook
+KubeletWebhookAuthorization +
+ webhook contains settings related to Webhook authorization.
+ + + +## `KubeletAuthorizationMode` {#kubelet-config-k8s-io-v1beta1-KubeletAuthorizationMode} + +(Alias of `string`) + + +**Appears in:** + +- [KubeletAuthorization](#kubelet-config-k8s-io-v1beta1-KubeletAuthorization) + + + + + + + + +## `KubeletWebhookAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletWebhookAuthentication} + + + + +**Appears in:** + +- [KubeletAuthentication](#kubelet-config-k8s-io-v1beta1-KubeletAuthentication) + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
enabled
+bool +
+ enabled allows bearer token authentication backed by the tokenreviews.authentication.k8s.io API
cacheTTL
+meta/v1.Duration +
+ cacheTTL enables caching of authentication results
+ + + +## `KubeletWebhookAuthorization` {#kubelet-config-k8s-io-v1beta1-KubeletWebhookAuthorization} + + + + +**Appears in:** + +- [KubeletAuthorization](#kubelet-config-k8s-io-v1beta1-KubeletAuthorization) + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
cacheAuthorizedTTL
+meta/v1.Duration +
+ cacheAuthorizedTTL is the duration to cache 'authorized' responses from the webhook authorizer.
cacheUnauthorizedTTL
+meta/v1.Duration +
+ cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from the webhook authorizer.
+ + + +## `KubeletX509Authentication` {#kubelet-config-k8s-io-v1beta1-KubeletX509Authentication} + + + + +**Appears in:** + +- [KubeletAuthentication](#kubelet-config-k8s-io-v1beta1-KubeletAuthentication) + + + + + + + + + + + + + + + + +
FieldDescription
clientCAFile
+string +
+ clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request presenting a client certificate +signed by one of the authorities in the bundle is authenticated with a username corresponding to the CommonName, +and groups corresponding to the Organization in the client certificate.
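The authentication and authorization types above correspond to two nested stanzas of the kubelet configuration file. A hedged sketch follows; the CA path and the TTL values are assumptions for illustration:

```yaml
# Sketch of the kubelet's authentication and authorization stanzas.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  anonymous:
    enabled: false               # reject requests with no credentials
  webhook:
    enabled: true                # validate bearer tokens via the TokenReview API
    cacheTTL: "2m0s"
  x509:
    clientCAFile: "/etc/kubernetes/pki/ca.crt"  # assumed path; adjust for your cluster
authorization:
  mode: Webhook                  # AlwaysAllow or Webhook
  webhook:
    cacheAuthorizedTTL: "5m0s"
    cacheUnauthorizedTTL: "30s"
```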
+ + + +## `ResourceChangeDetectionStrategy` {#kubelet-config-k8s-io-v1beta1-ResourceChangeDetectionStrategy} + +(Alias of `string`) + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + + +ResourceChangeDetectionStrategy denotes a mode in which internal +managers (secret, configmap) are discovering object changes. + + + + + + + +## `LoggingConfiguration` {#LoggingConfiguration} + + + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + + +LoggingConfiguration contains logging options +Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information. + + + + + + + + + + + + + + + + + + +
FieldDescription
format [Required]
+string +
+ The format flag specifies the structure of log messages. +The default value of format is `text`
sanitization [Required]
+bool +
+ [Experimental] When enabled, prevents logging of fields tagged as sensitive (passwords, keys, tokens). +Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.
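For completeness, the logging options above would appear in a kubelet configuration file roughly as follows; `sanitization` is shown only to illustrate the field, since the description above warns against enabling it in production:

```yaml
# Sketch of the logging stanza of a KubeletConfiguration.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
logging:
  format: text        # structure of log messages; `text` is the default
  sanitization: false # experimental; adds runtime overhead when true
```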
diff --git a/content/en/docs/reference/glossary/api-group.md b/content/en/docs/reference/glossary/api-group.md index 6f87e3b0419a2..0eccd9bf6fa81 100644 --- a/content/en/docs/reference/glossary/api-group.md +++ b/content/en/docs/reference/glossary/api-group.md @@ -2,7 +2,7 @@ title: API Group id: api-group date: 2019-09-02 -full_link: /docs/concepts/overview/kubernetes-api/#api-groups +full_link: /docs/concepts/overview/kubernetes-api/#api-groups-and-versioning short_description: > A set of related paths in the Kubernetes API. diff --git a/content/en/docs/reference/glossary/cloud-controller-manager.md b/content/en/docs/reference/glossary/cloud-controller-manager.md index c78bf393cb215..874d0925cfe12 100755 --- a/content/en/docs/reference/glossary/cloud-controller-manager.md +++ b/content/en/docs/reference/glossary/cloud-controller-manager.md @@ -14,7 +14,7 @@ tags: A Kubernetes {{< glossary_tooltip text="control plane" term_id="control-plane" >}} component that embeds cloud-specific control logic. The cloud controller manager lets you link your cluster into your cloud provider's API, and separates out the components that interact -with that cloud platform from components that just interact with your cluster. +with that cloud platform from components that only interact with your cluster. diff --git a/content/en/docs/reference/glossary/cluster-operator.md b/content/en/docs/reference/glossary/cluster-operator.md index c8973438302c1..48bdd4d3dfbdf 100755 --- a/content/en/docs/reference/glossary/cluster-operator.md +++ b/content/en/docs/reference/glossary/cluster-operator.md @@ -17,6 +17,6 @@ tags: Their primary responsibility is keeping a cluster up and running, which may involve periodic maintenance activities or upgrades.
{{< note >}} -Cluster operators are different from the [Operator pattern](https://coreos.com/operators) that extends the Kubernetes API. +Cluster operators are different from the [Operator pattern](https://www.openshift.com/learn/topics/operators) that extends the Kubernetes API. {{< /note >}} diff --git a/content/en/docs/reference/glossary/index.md b/content/en/docs/reference/glossary/index.md index 1fb8799a16b51..29bd54bd21d36 100755 --- a/content/en/docs/reference/glossary/index.md +++ b/content/en/docs/reference/glossary/index.md @@ -2,7 +2,7 @@ approvers: - chenopis - abiogenesis-now -title: Standardized Glossary +title: Glossary layout: glossary noedit: true default_active_tag: fundamental diff --git a/content/en/docs/reference/glossary/wg.md b/content/en/docs/reference/glossary/wg.md index 2a3b8786f63b5..89ea85fca7b75 100755 --- a/content/en/docs/reference/glossary/wg.md +++ b/content/en/docs/reference/glossary/wg.md @@ -12,9 +12,8 @@ tags: --- Facilitates the discussion and/or implementation of a short-lived, narrow, or decoupled project for a committee, {{< glossary_tooltip text="SIG" term_id="sig" >}}, or cross-SIG effort. - + -Working groups are a way of organizing people to accomplish a discrete task, and are relatively easy to create and deprecate when inactive. +Working groups are a way of organizing people to accomplish a discrete task. For more information, see the [kubernetes/community](https://github.com/kubernetes/community) repo and the current list of [SIGs and working groups](https://github.com/kubernetes/community/blob/master/sig-list.md). - diff --git a/content/en/docs/reference/issues-security/_index.md b/content/en/docs/reference/issues-security/_index.md index 530e98bf6196e..50c3f29333462 100644 --- a/content/en/docs/reference/issues-security/_index.md +++ b/content/en/docs/reference/issues-security/_index.md @@ -1,4 +1,4 @@ --- title: Kubernetes Issues and Security -weight: 10 +weight: 40 --- \ No newline at end of file diff --git a/content/en/docs/reference/kubectl/_index.md b/content/en/docs/reference/kubectl/_index.md index 7b6c2d720b12a..765adb6fe8790 100755 --- a/content/en/docs/reference/kubectl/_index.md +++ b/content/en/docs/reference/kubectl/_index.md @@ -1,5 +1,5 @@ --- -title: "kubectl CLI" +title: "kubectl" weight: 60 --- diff --git a/content/en/docs/reference/kubectl/cheatsheet.md b/content/en/docs/reference/kubectl/cheatsheet.md index 2ed62c2e2ac9f..f5a971d3bda69 100644 --- a/content/en/docs/reference/kubectl/cheatsheet.md +++ b/content/en/docs/reference/kubectl/cheatsheet.md @@ -195,7 +195,7 @@ JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.ty && kubectl get nodes -o jsonpath="$JSONPATH" | grep "Ready=True" # Output decoded secrets without external tools -kubectl get secret ${secret_name} -o go-template='{{range $k,$v := .data}}{{$k}}={{$v|base64decode}}{{"\n"}}{{end}}' +kubectl get secret my-secret -o go-template='{{range $k,$v := .data}}{{"### "}}{{$k}}{{"\n"}}{{$v|base64decode}}{{"\n\n"}}{{end}}' # List all Secrets currently in use by a pod kubectl get pods -o json | jq '.items[].spec.containers[].env[]?.valueFrom.secretKeyRef.name' | grep -v null | sort | uniq @@ -320,6 +320,18 @@ kubectl top pod POD_NAME --containers # Show metrics for a given p kubectl top pod POD_NAME --sort-by=cpu # Show metrics for a given pod and sort it by 'cpu' or 'memory' ``` +## Interacting with Deployments and Services +```bash +kubectl logs deploy/my-deployment # dump Pod logs for a Deployment (single-container case) 
+kubectl logs deploy/my-deployment -c my-container # dump Pod logs for a Deployment (multi-container case) + +kubectl port-forward svc/my-service 5000 # listen on local port 5000 and forward to port 5000 on Service backend +kubectl port-forward svc/my-service 5000:my-service-port # listen on local port 5000 and forward to Service target port with name + +kubectl port-forward deploy/my-deployment 5000:6000 # listen on local port 5000 and forward to port 6000 on a Pod created by +kubectl exec deploy/my-deployment -- ls # run command in first Pod and first container in Deployment (single- or multi-container cases) +``` + ## Interacting with Nodes and cluster ```bash @@ -337,7 +349,7 @@ kubectl taint nodes foo dedicated=special-user:NoSchedule ### Resource types -List all supported resource types along with their shortnames, [API group](/docs/concepts/overview/kubernetes-api/#api-groups), whether they are [namespaced](/docs/concepts/overview/working-with-objects/namespaces), and [Kind](/docs/concepts/overview/working-with-objects/kubernetes-objects): +List all supported resource types along with their shortnames, [API group](/docs/concepts/overview/kubernetes-api/#api-groups-and-versioning), whether they are [namespaced](/docs/concepts/overview/working-with-objects/namespaces), and [Kind](/docs/concepts/overview/working-with-objects/kubernetes-objects): ```bash kubectl api-resources @@ -348,7 +360,7 @@ Other operations for exploring API resources: ```bash kubectl api-resources --namespaced=true # All namespaced resources kubectl api-resources --namespaced=false # All non-namespaced resources -kubectl api-resources -o name # All resources with simple output (just the resource name) +kubectl api-resources -o name # All resources with simple output (only the resource name) kubectl api-resources -o wide # All resources with expanded (aka "wide") output kubectl api-resources --verbs=list,get # All resources that support the "list" and "get" request verbs kubectl api-resources --api-group=extensions # All resources in the "extensions" API group @@ -375,6 +387,9 @@ Examples using `-o=custom-columns`: # All images running in a cluster kubectl get pods -A -o=custom-columns='DATA:spec.containers[*].image' +# All images running in namespace: default, grouped by Pod +kubectl get pods --namespace default --output=custom-columns="NAME:.metadata.name,IMAGE:.spec.containers[*].image" + # All images excluding "k8s.gcr.io/coredns:1.6.2" kubectl get pods -A -o=custom-columns='DATA:spec.containers[?(@.image!="k8s.gcr.io/coredns:1.6.2")].image' diff --git a/content/en/docs/reference/kubectl/docker-cli-to-kubectl.md b/content/en/docs/reference/kubectl/docker-cli-to-kubectl.md index 6c214513a051c..ac7b7a49f92cf 100644 --- a/content/en/docs/reference/kubectl/docker-cli-to-kubectl.md +++ b/content/en/docs/reference/kubectl/docker-cli-to-kubectl.md @@ -7,7 +7,7 @@ reviewers: --- -You can use the Kubernetes command line tool kubectl to interact with the API Server. Using kubectl is straightforward if you are familiar with the Docker command line tool. However, there are a few differences between the docker commands and the kubectl commands. The following sections show a docker sub-command and describe the equivalent kubectl command. +You can use the Kubernetes command line tool `kubectl` to interact with the API Server. Using kubectl is straightforward if you are familiar with the Docker command line tool. However, there are a few differences between the Docker commands and the kubectl commands. 
The following sections show a Docker sub-command and describe the equivalent `kubectl` command. diff --git a/content/en/docs/reference/kubectl/overview.md b/content/en/docs/reference/kubectl/overview.md index a9f1550659420..f8ec7e5603762 100644 --- a/content/en/docs/reference/kubectl/overview.md +++ b/content/en/docs/reference/kubectl/overview.md @@ -19,7 +19,7 @@ files by setting the KUBECONFIG environment variable or by setting the This overview covers `kubectl` syntax, describes the command operations, and provides common examples. For details about each command, including all the supported flags and subcommands, see the [kubectl](/docs/reference/generated/kubectl/kubectl-commands/) reference documentation. -For installation instructions see [installing kubectl](/docs/tasks/tools/install-kubectl/). +For installation instructions see [installing kubectl](/docs/tasks/tools/). @@ -69,7 +69,7 @@ for example `create`, `get`, `describe`, `delete`. Flags that you specify from the command line override default values and any corresponding environment variables. {{< /caution >}} -If you need help, just run `kubectl help` from the terminal window. +If you need help, run `kubectl help` from the terminal window. ## Operations diff --git a/content/en/docs/reference/kubernetes-api/authentication-resources/_index.md b/content/en/docs/reference/kubernetes-api/authentication-resources/_index.md index cca65e53022eb..fac624bf4b8e8 100644 --- a/content/en/docs/reference/kubernetes-api/authentication-resources/_index.md +++ b/content/en/docs/reference/kubernetes-api/authentication-resources/_index.md @@ -1,4 +1,17 @@ --- title: "Authentication Resources" weight: 4 +auto_generated: true --- + + + diff --git a/content/en/docs/reference/kubernetes-api/authentication-resources/certificate-signing-request-v1.md b/content/en/docs/reference/kubernetes-api/authentication-resources/certificate-signing-request-v1.md index c6a0d34757f8e..5cf56dd6e6157 100644 --- a/content/en/docs/reference/kubernetes-api/authentication-resources/certificate-signing-request-v1.md +++ b/content/en/docs/reference/kubernetes-api/authentication-resources/certificate-signing-request-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "CertificateSigningRequest objects provide a mechanism to obtain x509 certificates by submitting a certificate signing request, and having it asynchronously approved and issued." title: "CertificateSigningRequest" weight: 4 +auto_generated: true --- + + `apiVersion: certificates.k8s.io/v1` `import "k8s.io/api/certificates/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authentication-resources/service-account-v1.md b/content/en/docs/reference/kubernetes-api/authentication-resources/service-account-v1.md index 8e202ad28d6f6..a83d44bbf9636 100644 --- a/content/en/docs/reference/kubernetes-api/authentication-resources/service-account-v1.md +++ b/content/en/docs/reference/kubernetes-api/authentication-resources/service-account-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets." 
title: "ServiceAccount" weight: 1 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authentication-resources/token-request-v1.md b/content/en/docs/reference/kubernetes-api/authentication-resources/token-request-v1.md index c0ddd62af309f..b9ee5ab8588a3 100644 --- a/content/en/docs/reference/kubernetes-api/authentication-resources/token-request-v1.md +++ b/content/en/docs/reference/kubernetes-api/authentication-resources/token-request-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "TokenRequest requests a token for a given service account." title: "TokenRequest" weight: 2 +auto_generated: true --- + + `apiVersion: authentication.k8s.io/v1` `import "k8s.io/api/authentication/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authentication-resources/token-review-v1.md b/content/en/docs/reference/kubernetes-api/authentication-resources/token-review-v1.md index 06e0ffd5cd19b..df71bf4e1a552 100644 --- a/content/en/docs/reference/kubernetes-api/authentication-resources/token-review-v1.md +++ b/content/en/docs/reference/kubernetes-api/authentication-resources/token-review-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "TokenReview attempts to authenticate a token to a known user." title: "TokenReview" weight: 3 +auto_generated: true --- + + `apiVersion: authentication.k8s.io/v1` `import "k8s.io/api/authentication/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/_index.md b/content/en/docs/reference/kubernetes-api/authorization-resources/_index.md index e5390914a1b15..5b58698bd8601 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/_index.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/_index.md @@ -1,4 +1,17 @@ --- title: "Authorization Resources" weight: 5 +auto_generated: true --- + + + diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-binding-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-binding-v1.md index d7dfea2179920..ad6a0ff732507 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-binding-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-binding-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ClusterRoleBinding references a ClusterRole, but not contain it." title: "ClusterRoleBinding" weight: 6 +auto_generated: true --- + + `apiVersion: rbac.authorization.k8s.io/v1` `import "k8s.io/api/rbac/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-v1.md index 34bb8099ef771..cc58c7804e2d4 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding." 
title: "ClusterRole" weight: 5 +auto_generated: true --- + + `apiVersion: rbac.authorization.k8s.io/v1` `import "k8s.io/api/rbac/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/local-subject-access-review-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/local-subject-access-review-v1.md index 144dfe257b4a4..a163bfa743eb2 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/local-subject-access-review-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/local-subject-access-review-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace." title: "LocalSubjectAccessReview" weight: 1 +auto_generated: true --- + + `apiVersion: authorization.k8s.io/v1` `import "k8s.io/api/authorization/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/role-binding-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/role-binding-v1.md index 36b5ebc4d6a95..bb847f33704ad 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/role-binding-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/role-binding-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "RoleBinding references a role, but does not contain it." title: "RoleBinding" weight: 8 +auto_generated: true --- + + `apiVersion: rbac.authorization.k8s.io/v1` `import "k8s.io/api/rbac/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/role-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/role-v1.md index f87d305247963..6c1f8b1fdecbc 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/role-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/role-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding." title: "Role" weight: 7 +auto_generated: true --- + + `apiVersion: rbac.authorization.k8s.io/v1` `import "k8s.io/api/rbac/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-access-review-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-access-review-v1.md index aacafd39b114d..430a4a953b0c3 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-access-review-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-access-review-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "SelfSubjectAccessReview checks whether or the current user can perform an action." 
title: "SelfSubjectAccessReview" weight: 2 +auto_generated: true --- + + `apiVersion: authorization.k8s.io/v1` `import "k8s.io/api/authorization/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-rules-review-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-rules-review-v1.md index aa3d95332beb2..82ab54ec4fe6d 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-rules-review-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-rules-review-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace." title: "SelfSubjectRulesReview" weight: 3 +auto_generated: true --- + + `apiVersion: authorization.k8s.io/v1` `import "k8s.io/api/authorization/v1"` @@ -51,13 +63,13 @@ SelfSubjectRulesReview enumerates the set of actions the current user can perfor *NonResourceRule holds information that describes a rule for the non-resource* - - **status.nonResourceRules.verbs** ([]string), required + - **status.nonResourceRules.verbs** ([]string), required - Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. - - **status.nonResourceRules.nonResourceURLs** ([]string) + - **status.nonResourceRules.nonResourceURLs** ([]string) - NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. "*" means all. + NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. "*" means all. - **status.resourceRules** ([]ResourceRule), required @@ -66,22 +78,22 @@ SelfSubjectRulesReview enumerates the set of actions the current user can perfor *ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.* - - **status.resourceRules.verbs** ([]string), required + - **status.resourceRules.verbs** ([]string), required - Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. - - **status.resourceRules.apiGroups** ([]string) + - **status.resourceRules.apiGroups** ([]string) - APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. "*" means all. + APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. "*" means all. - - **status.resourceRules.resourceNames** ([]string) + - **status.resourceRules.resourceNames** ([]string) - ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. + ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. 
- - **status.resourceRules.resources** ([]string) + - **status.resourceRules.resources** ([]string) - Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. - "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. + Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. - **status.evaluationError** (string) diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/subject-access-review-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/subject-access-review-v1.md index 5385b125e162d..5c8d23ea4db16 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/subject-access-review-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/subject-access-review-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "SubjectAccessReview checks whether or not a user or group can perform an action." title: "SubjectAccessReview" weight: 4 +auto_generated: true --- + + `apiVersion: authorization.k8s.io/v1` `import "k8s.io/api/authorization/v1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/_index.md b/content/en/docs/reference/kubernetes-api/cluster-resources/_index.md index 40d8cdc68d070..c0fbcc0813149 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/_index.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/_index.md @@ -1,4 +1,17 @@ --- title: "Cluster Resources" weight: 8 +auto_generated: true --- + + + diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/api-service-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/api-service-v1.md index 19ed5e0eb09fd..45f3629c397c6 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/api-service-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/api-service-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "APIService represents a server for a particular GroupVersion." title: "APIService" weight: 4 +auto_generated: true --- + + `apiVersion: apiregistration.k8s.io/v1` `import "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/binding-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/binding-v1.md index a007116367908..4acdf07c98b56 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/binding-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/binding-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Binding ties one object to another; for example, a pod is bound to a node by a scheduler." title: "Binding" weight: 9 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/component-status-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/component-status-v1.md index 3b526675502b9..0542fedfbd79a 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/component-status-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/component-status-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ComponentStatus (and ComponentStatusList) holds the cluster validation info." 
title: "ComponentStatus" weight: 10 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/event-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/event-v1.md index 065cf5a7e7faa..d01f3ee70910c 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/event-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/event-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Event is a report of an event somewhere in the cluster." title: "Event" weight: 3 +auto_generated: true --- + + `apiVersion: events.k8s.io/v1` `import "k8s.io/api/events/v1"` @@ -26,6 +38,10 @@ Event is a report of an event somewhere in the cluster. It generally denotes som - **kind**: Event +- **metadata** (}}">ObjectMeta) + + Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + - **eventTime** (MicroTime), required eventTime is the time when this Event was first observed. It is required. @@ -33,9 +49,6 @@ Event is a report of an event somewhere in the cluster. It generally denotes som *MicroTime is version of Time with microsecond level precision.* -- **metadata** (}}">ObjectMeta), required - - - **action** (string) action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters. diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/flow-schema-v1beta1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/flow-schema-v1beta1.md index 8f94555ee2759..8329c6016b1ca 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/flow-schema-v1beta1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/flow-schema-v1beta1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "FlowSchema defines the schema of a group of flows." title: "FlowSchema v1beta1" weight: 7 +auto_generated: true --- + + `apiVersion: flowcontrol.apiserver.k8s.io/v1beta1` `import "k8s.io/api/flowcontrol/v1beta1"` @@ -92,43 +104,43 @@ FlowSchemaSpec describes how the FlowSchema's specification looks like. *Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.* - - **rules.subjects.kind** (string), required + - **rules.subjects.kind** (string), required - Required + Required - - **rules.subjects.group** (GroupSubject) + - **rules.subjects.group** (GroupSubject) - - *GroupSubject holds detailed information for group-kind subject.* + + *GroupSubject holds detailed information for group-kind subject.* - - **rules.subjects.group.name** (string), required + - **rules.subjects.group.name** (string), required - name is the user group that matches, or "*" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required. + name is the user group that matches, or "*" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required. 
- - **rules.subjects.serviceAccount** (ServiceAccountSubject) + - **rules.subjects.serviceAccount** (ServiceAccountSubject) - - *ServiceAccountSubject holds detailed information for service-account-kind subject.* + + *ServiceAccountSubject holds detailed information for service-account-kind subject.* - - **rules.subjects.serviceAccount.name** (string), required + - **rules.subjects.serviceAccount.name** (string), required - `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. Required. + `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. Required. - - **rules.subjects.serviceAccount.namespace** (string), required + - **rules.subjects.serviceAccount.namespace** (string), required - `namespace` is the namespace of matching ServiceAccount objects. Required. + `namespace` is the namespace of matching ServiceAccount objects. Required. - - **rules.subjects.user** (UserSubject) + - **rules.subjects.user** (UserSubject) - - *UserSubject holds detailed information for user-kind subject.* + + *UserSubject holds detailed information for user-kind subject.* - - **rules.subjects.user.name** (string), required + - **rules.subjects.user.name** (string), required - `name` is the username that matches, or "*" to match all usernames. Required. + `name` is the username that matches, or "*" to match all usernames. Required. - **rules.nonResourceRules** ([]NonResourcePolicyRule) @@ -139,23 +151,23 @@ FlowSchemaSpec describes how the FlowSchema's specification looks like. *NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.* - - **rules.nonResourceRules.nonResourceURLs** ([]string), required + - **rules.nonResourceRules.nonResourceURLs** ([]string), required - *Set: unique values will be kept during a merge* - - `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example: - - "/healthz" is legal - - "/hea*" is illegal - - "/hea" is legal but matches nothing - - "/hea/*" also matches nothing - - "/healthz/*" matches all per-component health checks. - "*" matches all non-resource urls. if it is present, it must be the only entry. Required. + *Set: unique values will be kept during a merge* + + `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example: + - "/healthz" is legal + - "/hea*" is illegal + - "/hea" is legal but matches nothing + - "/hea/*" also matches nothing + - "/healthz/*" matches all per-component health checks. + "*" matches all non-resource urls. if it is present, it must be the only entry. Required. - - **rules.nonResourceRules.verbs** ([]string), required + - **rules.nonResourceRules.verbs** ([]string), required - *Set: unique values will be kept during a merge* - - `verbs` is a list of matching verbs and may not be empty. "*" matches all verbs. If it is present, it must be the only entry. Required. + *Set: unique values will be kept during a merge* + + `verbs` is a list of matching verbs and may not be empty. "*" matches all verbs. If it is present, it must be the only entry. Required. - **rules.resourceRules** ([]ResourcePolicyRule) @@ -166,33 +178,33 @@ FlowSchemaSpec describes how the FlowSchema's specification looks like. 
*ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) least one member of namespaces matches the request.* - - **rules.resourceRules.apiGroups** ([]string), required + - **rules.resourceRules.apiGroups** ([]string), required - *Set: unique values will be kept during a merge* - - `apiGroups` is a list of matching API groups and may not be empty. "*" matches all API groups and, if present, must be the only entry. Required. + *Set: unique values will be kept during a merge* + + `apiGroups` is a list of matching API groups and may not be empty. "*" matches all API groups and, if present, must be the only entry. Required. - - **rules.resourceRules.resources** ([]string), required + - **rules.resourceRules.resources** ([]string), required - *Set: unique values will be kept during a merge* - - `resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ "services", "nodes/status" ]. This list may not be empty. "*" matches all resources and, if present, must be the only entry. Required. + *Set: unique values will be kept during a merge* + + `resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ "services", "nodes/status" ]. This list may not be empty. "*" matches all resources and, if present, must be the only entry. Required. - - **rules.resourceRules.verbs** ([]string), required + - **rules.resourceRules.verbs** ([]string), required - *Set: unique values will be kept during a merge* - - `verbs` is a list of matching verbs and may not be empty. "*" matches all verbs and, if present, must be the only entry. Required. + *Set: unique values will be kept during a merge* + + `verbs` is a list of matching verbs and may not be empty. "*" matches all verbs and, if present, must be the only entry. Required. - - **rules.resourceRules.clusterScope** (boolean) + - **rules.resourceRules.clusterScope** (boolean) - `clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list. + `clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list. - - **rules.resourceRules.namespaces** ([]string) + - **rules.resourceRules.namespaces** ([]string) - *Set: unique values will be kept during a merge* - - `namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains "*". Note that "*" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true. + *Set: unique values will be kept during a merge* + + `namespaces` is a list of target namespaces that restricts matches. 
A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains "*". Note that "*" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true. diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/lease-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/lease-v1.md index 478bc2a8e75aa..8f74401a59c40 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/lease-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/lease-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Lease defines a lease concept." title: "Lease" weight: 5 +auto_generated: true --- + + `apiVersion: coordination.k8s.io/v1` `import "k8s.io/api/coordination/v1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/namespace-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/namespace-v1.md index caa39a5856efc..a05f7f4f26dbb 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/namespace-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/namespace-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Namespace provides a scope for Names." title: "Namespace" weight: 2 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/node-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/node-v1.md index a5e796904f373..23764d90337ba 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/node-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/node-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Node is a worker node in Kubernetes." title: "Node" weight: 1 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` @@ -62,25 +74,25 @@ NodeSpec describes the attributes that a node is created with. *ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.* - - **configSource.configMap.kubeletConfigKey** (string), required + - **configSource.configMap.kubeletConfigKey** (string), required - KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases. + KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases. - - **configSource.configMap.name** (string), required + - **configSource.configMap.name** (string), required - Name is the metadata.name of the referenced ConfigMap. This field is required in all cases. + Name is the metadata.name of the referenced ConfigMap. This field is required in all cases. - - **configSource.configMap.namespace** (string), required + - **configSource.configMap.namespace** (string), required - Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases. + Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases. - - **configSource.configMap.resourceVersion** (string) + - **configSource.configMap.resourceVersion** (string) - ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. 
This field is forbidden in Node.Spec, and required in Node.Status. + ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. - - **configSource.configMap.uid** (string) + - **configSource.configMap.uid** (string) - UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. + UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. - **externalID** (string) @@ -216,32 +228,32 @@ NodeStatus is information about the current status of a node. *NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.* - - **config.active.configMap** (ConfigMapNodeConfigSource) + - **config.active.configMap** (ConfigMapNodeConfigSource) - ConfigMap is a reference to a Node's ConfigMap + ConfigMap is a reference to a Node's ConfigMap - - *ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.* + + *ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.* - - **config.active.configMap.kubeletConfigKey** (string), required + - **config.active.configMap.kubeletConfigKey** (string), required - KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases. + KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases. - - **config.active.configMap.name** (string), required + - **config.active.configMap.name** (string), required - Name is the metadata.name of the referenced ConfigMap. This field is required in all cases. + Name is the metadata.name of the referenced ConfigMap. This field is required in all cases. - - **config.active.configMap.namespace** (string), required + - **config.active.configMap.namespace** (string), required - Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases. + Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases. - - **config.active.configMap.resourceVersion** (string) + - **config.active.configMap.resourceVersion** (string) - ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. + ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. - - **config.active.configMap.uid** (string) + - **config.active.configMap.uid** (string) - UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. + UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. - **config.assigned** (NodeConfigSource) @@ -250,32 +262,32 @@ NodeStatus is information about the current status of a node. *NodeConfigSource specifies a source of node configuration. 
Exactly one subfield (excluding metadata) must be non-nil.* - - **config.assigned.configMap** (ConfigMapNodeConfigSource) + - **config.assigned.configMap** (ConfigMapNodeConfigSource) - ConfigMap is a reference to a Node's ConfigMap + ConfigMap is a reference to a Node's ConfigMap - - *ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.* + + *ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.* - - **config.assigned.configMap.kubeletConfigKey** (string), required + - **config.assigned.configMap.kubeletConfigKey** (string), required - KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases. + KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases. - - **config.assigned.configMap.name** (string), required + - **config.assigned.configMap.name** (string), required - Name is the metadata.name of the referenced ConfigMap. This field is required in all cases. + Name is the metadata.name of the referenced ConfigMap. This field is required in all cases. - - **config.assigned.configMap.namespace** (string), required + - **config.assigned.configMap.namespace** (string), required - Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases. + Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases. - - **config.assigned.configMap.resourceVersion** (string) + - **config.assigned.configMap.resourceVersion** (string) - ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. + ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. - - **config.assigned.configMap.uid** (string) + - **config.assigned.configMap.uid** (string) - UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. + UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. - **config.error** (string) @@ -288,32 +300,32 @@ NodeStatus is information about the current status of a node. *NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.* - - **config.lastKnownGood.configMap** (ConfigMapNodeConfigSource) + - **config.lastKnownGood.configMap** (ConfigMapNodeConfigSource) - ConfigMap is a reference to a Node's ConfigMap + ConfigMap is a reference to a Node's ConfigMap - - *ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.* + + *ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.* - - **config.lastKnownGood.configMap.kubeletConfigKey** (string), required + - **config.lastKnownGood.configMap.kubeletConfigKey** (string), required - KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases. + KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases. 
- - **config.lastKnownGood.configMap.name** (string), required + - **config.lastKnownGood.configMap.name** (string), required - Name is the metadata.name of the referenced ConfigMap. This field is required in all cases. + Name is the metadata.name of the referenced ConfigMap. This field is required in all cases. - - **config.lastKnownGood.configMap.namespace** (string), required + - **config.lastKnownGood.configMap.namespace** (string), required - Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases. + Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases. - - **config.lastKnownGood.configMap.resourceVersion** (string) + - **config.lastKnownGood.configMap.resourceVersion** (string) - ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. + ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. - - **config.lastKnownGood.configMap.uid** (string) + - **config.lastKnownGood.configMap.uid** (string) - UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. + UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. - **daemonEndpoints** (NodeDaemonEndpoints) @@ -329,9 +341,9 @@ NodeStatus is information about the current status of a node. *DaemonEndpoint contains information about a single Daemon endpoint.* - - **daemonEndpoints.kubeletEndpoint.Port** (int32), required + - **daemonEndpoints.kubeletEndpoint.Port** (int32), required - Port number of the given endpoint. + Port number of the given endpoint. - **images** ([]ContainerImage) diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/priority-level-configuration-v1beta1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/priority-level-configuration-v1beta1.md index f5776972a0575..e7bd624556f61 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/priority-level-configuration-v1beta1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/priority-level-configuration-v1beta1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "PriorityLevelConfiguration represents the configuration of a priority level." title: "PriorityLevelConfiguration v1beta1" weight: 8 +auto_generated: true --- + + `apiVersion: flowcontrol.apiserver.k8s.io/v1beta1` `import "k8s.io/api/flowcontrol/v1beta1"` @@ -76,28 +88,28 @@ PriorityLevelConfigurationSpec specifies the configuration of a priority level. *LimitResponse defines how to handle requests that can not be executed right now.* - - **limited.limitResponse.type** (string), required + - **limited.limitResponse.type** (string), required - `type` is "Queue" or "Reject". "Queue" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. "Reject" means that requests that can not be executed upon arrival are rejected. Required. + `type` is "Queue" or "Reject". "Queue" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. "Reject" means that requests that can not be executed upon arrival are rejected. Required. 
- - **limited.limitResponse.queuing** (QueuingConfiguration) + - **limited.limitResponse.queuing** (QueuingConfiguration) - `queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `"Queue"`. + `queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `"Queue"`. - - *QueuingConfiguration holds the configuration parameters for queuing* + + *QueuingConfiguration holds the configuration parameters for queuing* - - **limited.limitResponse.queuing.handSize** (int32) + - **limited.limitResponse.queuing.handSize** (int32) - `handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8. + `handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8. - - **limited.limitResponse.queuing.queueLengthLimit** (int32) + - **limited.limitResponse.queuing.queueLengthLimit** (int32) - `queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50. + `queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50. - - **limited.limitResponse.queuing.queues** (int32) + - **limited.limitResponse.queuing.queues** (int32) - `queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64. + `queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64. 
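The queuing parameters documented in this hunk are easiest to read in context. Below is a minimal sketch of a PriorityLevelConfiguration manifest that sets them explicitly; the object name and the concurrency shares are illustrative and not taken from the reference, and the three queuing values shown are simply the documented defaults:

```yaml
apiVersion: flowcontrol.apiserver.k8s.io/v1beta1
kind: PriorityLevelConfiguration
metadata:
  name: example-priority-level   # illustrative name, not from the reference
spec:
  type: Limited
  limited:
    assuredConcurrencyShares: 10 # illustrative value
    limitResponse:
      type: Queue                # queue excess requests instead of rejecting them
      queuing:
        queues: 64               # documented default; 1 effectively disables shuffle sharding
        handSize: 8              # documented default; must not exceed queues
        queueLengthLimit: 50     # documented default per-queue waiting limit
```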
diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/runtime-class-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/runtime-class-v1.md index 040c894ae3a4e..b505277ccc207 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/runtime-class-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/runtime-class-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "RuntimeClass defines a class of container runtime supported in the cluster." title: "RuntimeClass" weight: 6 +auto_generated: true --- + + `apiVersion: node.k8s.io/v1` `import "k8s.io/api/node/v1"` @@ -67,25 +79,25 @@ RuntimeClass defines a class of container runtime supported in the cluster. The *The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.* - - **scheduling.tolerations.key** (string) + - **scheduling.tolerations.key** (string) - Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - - **scheduling.tolerations.operator** (string) + - **scheduling.tolerations.operator** (string) - Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - - **scheduling.tolerations.value** (string) + - **scheduling.tolerations.value** (string) - Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - - **scheduling.tolerations.effect** (string) + - **scheduling.tolerations.effect** (string) - Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - - **scheduling.tolerations.tolerationSeconds** (int64) + - **scheduling.tolerations.tolerationSeconds** (int64) - TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
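As a rough sketch of how the scheduling fields above combine, the following RuntimeClass manifest tolerates a hypothetical dedicated-node taint. The object name, handler, node label, and taint key are all illustrative; note that `handler` and `scheduling` sit at the top level of the object, not under a `spec`:

```yaml
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: example-runtime          # illustrative name
handler: example-handler         # illustrative CRI handler configured on the node
scheduling:
  nodeSelector:
    node-role/sandboxed: "true"  # illustrative label on nodes that run this runtime
  tolerations:
  - key: sandboxed               # illustrative taint key
    operator: Equal              # Equal requires the value below to match the taint's value
    value: "true"
    effect: NoSchedule           # matches only NoSchedule taints; empty would match all effects
```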
diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/_index.md b/content/en/docs/reference/kubernetes-api/common-definitions/_index.md index 580fa96ff2e5e..00b036aa6e87d 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/_index.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/_index.md @@ -1,4 +1,17 @@ --- title: "Common Definitions" weight: 9 +auto_generated: true --- + + + diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/delete-options.md b/content/en/docs/reference/kubernetes-api/common-definitions/delete-options.md index 09fd2ce3d130a..4131bcb1ddda9 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/delete-options.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/delete-options.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "DeleteOptions may be provided when deleting an API object." title: "DeleteOptions" weight: 1 +auto_generated: true --- + + `import "k8s.io/apimachinery/pkg/apis/meta/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/downward-api-volume-file.md b/content/en/docs/reference/kubernetes-api/common-definitions/downward-api-volume-file.md deleted file mode 100644 index 5aa9f29822286..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/downward-api-volume-file.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "DownwardAPIVolumeFile" -content_type: "api_reference" -description: "DownwardAPIVolumeFile represents information to create the file containing the pod field." -title: "DownwardAPIVolumeFile" -weight: 2 ---- - - - -`import "k8s.io/api/core/v1"` - - -DownwardAPIVolumeFile represents information to create the file containing the pod field - -
- -- **path** (string), required - - Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - -- **fieldRef** (}}">ObjectFieldSelector) - - Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. - -- **mode** (int32) - - Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - -- **resourceFieldRef** (}}">ResourceFieldSelector) - - Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/exec-action.md b/content/en/docs/reference/kubernetes-api/common-definitions/exec-action.md deleted file mode 100644 index 0078859720175..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/exec-action.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "ExecAction" -content_type: "api_reference" -description: "ExecAction describes a \"run in container\" action." -title: "ExecAction" -weight: 3 ---- - - - -`import "k8s.io/api/core/v1"` - - -ExecAction describes a "run in container" action. - -
- -- **command** ([]string) - - Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/http-get-action.md b/content/en/docs/reference/kubernetes-api/common-definitions/http-get-action.md deleted file mode 100644 index 1eabe9ac2d579..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/http-get-action.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "HTTPGetAction" -content_type: "api_reference" -description: "HTTPGetAction describes an action based on HTTP Get requests." -title: "HTTPGetAction" -weight: 4 ---- - - - -`import "k8s.io/api/core/v1"` - - -HTTPGetAction describes an action based on HTTP Get requests. - -
- -- **port** (IntOrString), required - - Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - - - *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* - -- **host** (string) - - Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. - -- **httpHeaders** ([]HTTPHeader) - - Custom headers to set in the request. HTTP allows repeated headers. - - - *HTTPHeader describes a custom header to be used in HTTP probes* - - - **httpHeaders.name** (string), required - - The header field name - - - **httpHeaders.value** (string), required - - The header field value - -- **path** (string) - - Path to access on the HTTP server. - -- **scheme** (string) - - Scheme to use for connecting to the host. Defaults to HTTP. - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/json-schema-props.md b/content/en/docs/reference/kubernetes-api/common-definitions/json-schema-props.md deleted file mode 100644 index 622dba69a2712..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/json-schema-props.md +++ /dev/null @@ -1,226 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - kind: "JSONSchemaProps" -content_type: "api_reference" -description: "JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema." -title: "JSONSchemaProps" -weight: 5 ---- - - - -`import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"` - - -JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). - -
- -- **$ref** (string) - - -- **$schema** (string) - - -- **additionalItems** (JSONSchemaPropsOrBool) - - - - *JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. Defaults to true for the boolean property.* - -- **additionalProperties** (JSONSchemaPropsOrBool) - - - - *JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. Defaults to true for the boolean property.* - -- **allOf** ([]}}">JSONSchemaProps) - - -- **anyOf** ([]}}">JSONSchemaProps) - - -- **default** (JSON) - - default is a default value for undefined object fields. Defaulting is a beta feature under the CustomResourceDefaulting feature gate. Defaulting requires spec.preserveUnknownFields to be false. - - - *JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.* - -- **definitions** (map[string]}}">JSONSchemaProps) - - -- **dependencies** (map[string]JSONSchemaPropsOrStringArray) - - - - *JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array.* - -- **description** (string) - - -- **enum** ([]JSON) - - - - *JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.* - -- **example** (JSON) - - - - *JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.* - -- **exclusiveMaximum** (boolean) - - -- **exclusiveMinimum** (boolean) - - -- **externalDocs** (ExternalDocumentation) - - - - *ExternalDocumentation allows referencing an external resource for extended documentation.* - - - **externalDocs.description** (string) - - - - **externalDocs.url** (string) - - -- **format** (string) - - format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: - - - bsonobjectid: a bson object ID, i.e. a 24 characters hex string - uri: an URI as parsed by Golang net/url.ParseRequestURI - email: an email address as parsed by Golang net/mail.ParseAddress - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - ipv4: an IPv4 IP as parsed by Golang net.ParseIP - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - cidr: a CIDR as parsed by Golang net.ParseCIDR - mac: a MAC address as parsed by Golang net.ParseMAC - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" - isbn10: an ISBN10 number string like "0321751043" - isbn13: an ISBN13 number string like "978-0321751041" - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\d{3})\d{11})$ with any non digit characters mixed in - ssn: a U.S. 
social security number following the regex ^\d{3}[- ]?\d{2}[- ]?\d{4}$ - hexcolor: an hexadecimal color code like "#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - rgbcolor: an RGB color code like rgb like "rgb(255,255,2559" - byte: base64 encoded binary data - password: any kind of string - date: a date string like "2006-01-02" as defined by full-date in RFC3339 - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339. - -- **id** (string) - - -- **items** (JSONSchemaPropsOrArray) - - - - *JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps or an array of JSONSchemaProps. Mainly here for serialization purposes.* - -- **maxItems** (int64) - - -- **maxLength** (int64) - - -- **maxProperties** (int64) - - -- **maximum** (double) - - -- **minItems** (int64) - - -- **minLength** (int64) - - -- **minProperties** (int64) - - -- **minimum** (double) - - -- **multipleOf** (double) - - -- **not** (}}">JSONSchemaProps) - - -- **nullable** (boolean) - - -- **oneOf** ([]}}">JSONSchemaProps) - - -- **pattern** (string) - - -- **patternProperties** (map[string]}}">JSONSchemaProps) - - -- **properties** (map[string]}}">JSONSchemaProps) - - -- **required** ([]string) - - -- **title** (string) - - -- **type** (string) - - -- **uniqueItems** (boolean) - - -- **x-kubernetes-embedded-resource** (boolean) - - x-kubernetes-embedded-resource defines that the value is an embedded Kubernetes runtime.Object, with TypeMeta and ObjectMeta. The type must be object. It is allowed to further restrict the embedded object. kind, apiVersion and metadata are validated automatically. x-kubernetes-preserve-unknown-fields is allowed to be true, but does not have to be if the object is fully specified (up to kind, apiVersion, metadata). - -- **x-kubernetes-int-or-string** (boolean) - - x-kubernetes-int-or-string specifies that this value is either an integer or a string. If this is true, an empty type is allowed and type as child of anyOf is permitted if following one of the following patterns: - - 1) anyOf: - - type: integer - - type: string - 2) allOf: - - anyOf: - - type: integer - - type: string - - ... zero or more - -- **x-kubernetes-list-map-keys** ([]string) - - x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used as the index of the map. - - This tag MUST only be used on lists that have the "x-kubernetes-list-type" extension set to "map". Also, the values specified for this attribute must be a scalar typed field of the child structure (no nesting is supported). - - The properties specified must either be required or have a default value, to ensure those properties are present for all list items. - -- **x-kubernetes-list-type** (string) - - x-kubernetes-list-type annotates an array to further describe its topology. This extension must only be used on lists and may have 3 possible values: - - 1) `atomic`: the list is treated as a single entity, like a scalar. - Atomic lists will be entirely replaced when updated. This extension - may be used on any type of list (struct, scalar, ...). - 2) `set`: - Sets are lists that must not have multiple items with the same value. Each - value must be a scalar, an object with x-kubernetes-map-type `atomic` or an - array with x-kubernetes-list-type `atomic`. 
- 3) `map`: - These lists are like maps in that their elements have a non-index key - used to identify them. Order is preserved upon merge. The map tag - must only be used on a list with elements of type object. - Defaults to atomic for arrays. - -- **x-kubernetes-map-type** (string) - - x-kubernetes-map-type annotates an object to further describe its topology. This extension must only be used when type is object and may have 2 possible values: - - 1) `granular`: - These maps are actual maps (key-value pairs) and each fields are independent - from each other (they can each be manipulated by separate actors). This is - the default behaviour for all maps. - 2) `atomic`: the list is treated as a single entity, like a scalar. - Atomic maps will be entirely replaced when updated. - -- **x-kubernetes-preserve-unknown-fields** (boolean) - - x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden. - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/key-to-path.md b/content/en/docs/reference/kubernetes-api/common-definitions/key-to-path.md deleted file mode 100644 index 64068fe1a2710..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/key-to-path.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "KeyToPath" -content_type: "api_reference" -description: "Maps a string key to a path within a volume." -title: "KeyToPath" -weight: 6 ---- - - - -`import "k8s.io/api/core/v1"` - - -Maps a string key to a path within a volume. - -
- -- **key** (string), required - - The key to project. - -- **path** (string), required - - The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. - -- **mode** (int32) - - Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/label-selector.md b/content/en/docs/reference/kubernetes-api/common-definitions/label-selector.md index a588677077a64..d81c26d17a13e 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/label-selector.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/label-selector.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "A label selector is a label query over a set of resources." title: "LabelSelector" -weight: 7 +weight: 2 +auto_generated: true --- + + `import "k8s.io/apimachinery/pkg/apis/meta/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/list-meta.md b/content/en/docs/reference/kubernetes-api/common-definitions/list-meta.md index 0b703734b0ba6..96f2dafd3017d 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/list-meta.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/list-meta.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "ListMeta describes metadata that synthetic resources must have, including lists and various status objects." title: "ListMeta" -weight: 8 +weight: 3 +auto_generated: true --- + + `import "k8s.io/apimachinery/pkg/apis/meta/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/local-object-reference.md b/content/en/docs/reference/kubernetes-api/common-definitions/local-object-reference.md index e97b84aca9e39..ce56803bacbd9 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/local-object-reference.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/local-object-reference.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace." title: "LocalObjectReference" -weight: 9 +weight: 4 +auto_generated: true --- + + `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/node-affinity.md b/content/en/docs/reference/kubernetes-api/common-definitions/node-affinity.md deleted file mode 100644 index 57b0b627159af..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/node-affinity.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "NodeAffinity" -content_type: "api_reference" -description: "Node affinity is a group of node affinity scheduling rules." -title: "NodeAffinity" -weight: 10 ---- - - - -`import "k8s.io/api/core/v1"` - - -Node affinity is a group of node affinity scheduling rules. - -
- -- **preferredDuringSchedulingIgnoredDuringExecution** ([]PreferredSchedulingTerm) - - The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - - - *An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).* - - - **preferredDuringSchedulingIgnoredDuringExecution.preference** (NodeSelectorTerm), required - - A node selector term, associated with the corresponding weight. - - - *A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.* - - - **preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions** ([]}}">NodeSelectorRequirement) - - A list of node selector requirements by node's labels. - - - **preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields** ([]}}">NodeSelectorRequirement) - - A list of node selector requirements by node's fields. - - - **preferredDuringSchedulingIgnoredDuringExecution.weight** (int32), required - - Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - -- **requiredDuringSchedulingIgnoredDuringExecution** (NodeSelector) - - If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - - - *A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.* - - - **requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms** ([]NodeSelectorTerm), required - - Required. A list of node selector terms. The terms are ORed. - - - *A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.* - - - **requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions** ([]}}">NodeSelectorRequirement) - - A list of node selector requirements by node's labels. - - - **requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields** ([]}}">NodeSelectorRequirement) - - A list of node selector requirements by node's fields. 
- - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/node-selector-requirement.md b/content/en/docs/reference/kubernetes-api/common-definitions/node-selector-requirement.md index 3e8b935ef7f3c..33af2e88e30bb 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/node-selector-requirement.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/node-selector-requirement.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values." title: "NodeSelectorRequirement" -weight: 11 +weight: 5 +auto_generated: true --- + + `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/object-field-selector.md b/content/en/docs/reference/kubernetes-api/common-definitions/object-field-selector.md index c109fa3fdf89e..9d1fc9e9c8ce4 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/object-field-selector.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/object-field-selector.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "ObjectFieldSelector selects an APIVersioned field of an object." title: "ObjectFieldSelector" -weight: 12 +weight: 6 +auto_generated: true --- + + `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/object-meta.md b/content/en/docs/reference/kubernetes-api/common-definitions/object-meta.md index b3bc31e716113..81d66b38c7d2f 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/object-meta.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/object-meta.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." title: "ObjectMeta" -weight: 13 +weight: 7 +auto_generated: true --- + + `import "k8s.io/apimachinery/pkg/apis/meta/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/object-reference.md b/content/en/docs/reference/kubernetes-api/common-definitions/object-reference.md index 71c40a0702d1b..fa999b11f4f4a 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/object-reference.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/object-reference.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "ObjectReference contains enough information to let you inspect or modify the referred object." title: "ObjectReference" -weight: 14 +weight: 8 +auto_generated: true --- + + `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/patch.md b/content/en/docs/reference/kubernetes-api/common-definitions/patch.md index 054508e8078e7..a32a88b309165 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/patch.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/patch.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body." 
title: "Patch" -weight: 15 +weight: 9 +auto_generated: true --- + + `import "k8s.io/apimachinery/pkg/apis/meta/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/pod-affinity.md b/content/en/docs/reference/kubernetes-api/common-definitions/pod-affinity.md deleted file mode 100644 index 06291de393133..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/pod-affinity.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "PodAffinity" -content_type: "api_reference" -description: "Pod affinity is a group of inter pod affinity scheduling rules." -title: "PodAffinity" -weight: 16 ---- - - - -`import "k8s.io/api/core/v1"` - - -Pod affinity is a group of inter pod affinity scheduling rules. - -
- -- **preferredDuringSchedulingIgnoredDuringExecution** ([]WeightedPodAffinityTerm) - - The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - - - *The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)* - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm** (PodAffinityTerm), required - - Required. A pod affinity term, associated with the corresponding weight. - - - *Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running* - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey** (string), required - - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector** (}}">LabelSelector) - - A label query over a set of resources, in this case pods. - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces** ([]string) - - namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - - - **preferredDuringSchedulingIgnoredDuringExecution.weight** (int32), required - - weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - -- **requiredDuringSchedulingIgnoredDuringExecution** ([]PodAffinityTerm) - - If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- - - *Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running* - - - **requiredDuringSchedulingIgnoredDuringExecution.topologyKey** (string), required - - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - - - **requiredDuringSchedulingIgnoredDuringExecution.labelSelector** (}}">LabelSelector) - - A label query over a set of resources, in this case pods. - - - **requiredDuringSchedulingIgnoredDuringExecution.namespaces** ([]string) - - namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/pod-anti-affinity.md b/content/en/docs/reference/kubernetes-api/common-definitions/pod-anti-affinity.md deleted file mode 100644 index baebcc3c0c17d..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/pod-anti-affinity.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "PodAntiAffinity" -content_type: "api_reference" -description: "Pod anti affinity is a group of inter pod anti affinity scheduling rules." -title: "PodAntiAffinity" -weight: 17 ---- - - - -`import "k8s.io/api/core/v1"` - - -Pod anti affinity is a group of inter pod anti affinity scheduling rules. - -
- -- **preferredDuringSchedulingIgnoredDuringExecution** ([]WeightedPodAffinityTerm) - - The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - - - *The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)* - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm** (PodAffinityTerm), required - - Required. A pod affinity term, associated with the corresponding weight. - - - *Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running* - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey** (string), required - - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector** (}}">LabelSelector) - - A label query over a set of resources, in this case pods. - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces** ([]string) - - namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - - - **preferredDuringSchedulingIgnoredDuringExecution.weight** (int32), required - - weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - -- **requiredDuringSchedulingIgnoredDuringExecution** ([]PodAffinityTerm) - - If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- - - *Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running* - - - **requiredDuringSchedulingIgnoredDuringExecution.topologyKey** (string), required - - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - - - **requiredDuringSchedulingIgnoredDuringExecution.labelSelector** (}}">LabelSelector) - - A label query over a set of resources, in this case pods. - - - **requiredDuringSchedulingIgnoredDuringExecution.namespaces** ([]string) - - namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/quantity.md b/content/en/docs/reference/kubernetes-api/common-definitions/quantity.md index c049cd40e4fe4..45f909c5e783c 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/quantity.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/quantity.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "Quantity is a fixed-point representation of a number." title: "Quantity" -weight: 18 +weight: 10 +auto_generated: true --- + + `import "k8s.io/apimachinery/pkg/api/resource"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/resource-field-selector.md b/content/en/docs/reference/kubernetes-api/common-definitions/resource-field-selector.md index 85e7a9a8146e3..76576bdb5d3a3 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/resource-field-selector.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/resource-field-selector.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "ResourceFieldSelector represents container resources (cpu, memory) and their output format." title: "ResourceFieldSelector" -weight: 19 +weight: 11 +auto_generated: true --- + + `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/status.md b/content/en/docs/reference/kubernetes-api/common-definitions/status.md index 634e5068fb7d0..d40a22d6daeb9 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/status.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/status.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "Status is a return value for calls that don't return other objects." title: "Status" -weight: 20 +weight: 12 +auto_generated: true --- + + `import "k8s.io/apimachinery/pkg/apis/meta/v1"` @@ -40,21 +52,21 @@ Status is a return value for calls that don't return other objects. *StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.* - - **details.causes.field** (string) + - **details.causes.field** (string) - The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. 
Fields may appear more than once in an array of causes due to fields having multiple errors. Optional. - - Examples: - "name" - the field "name" on the current resource - "items[0].name" - the field "name" on the first array entry in "items" + The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional. + + Examples: + "name" - the field "name" on the current resource + "items[0].name" - the field "name" on the first array entry in "items" - - **details.causes.message** (string) + - **details.causes.message** (string) - A human-readable description of the cause of the error. This field may be presented as-is to a reader. + A human-readable description of the cause of the error. This field may be presented as-is to a reader. - - **details.causes.reason** (string) + - **details.causes.reason** (string) - A machine-readable description of the cause of the error. If this value is empty there is no information available. + A machine-readable description of the cause of the error. If this value is empty there is no information available. - **details.group** (string) diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/tcp-socket-action.md b/content/en/docs/reference/kubernetes-api/common-definitions/tcp-socket-action.md deleted file mode 100644 index 991518a57a741..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/tcp-socket-action.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "TCPSocketAction" -content_type: "api_reference" -description: "TCPSocketAction describes an action based on opening a socket." -title: "TCPSocketAction" -weight: 21 ---- - - - -`import "k8s.io/api/core/v1"` - - -TCPSocketAction describes an action based on opening a socket - -
- -- **port** (IntOrString), required - - Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - - - *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* - -- **host** (string) - - Optional: Host name to connect to, defaults to the pod IP. - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/typed-local-object-reference.md b/content/en/docs/reference/kubernetes-api/common-definitions/typed-local-object-reference.md index c03c3c8409b74..a6d75abf25d2d 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/typed-local-object-reference.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/typed-local-object-reference.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace." title: "TypedLocalObjectReference" -weight: 22 +weight: 13 +auto_generated: true --- + + `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md b/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md index 12792cbd08a60..deb8164881822 100644 --- a/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md +++ b/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "" title: "Common Parameters" weight: 10 +auto_generated: true --- + + diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/_index.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/_index.md index 4f29e8ec4fe5a..aac30882a90a2 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/_index.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/_index.md @@ -1,4 +1,17 @@ --- title: "Config and Storage Resources" weight: 3 +auto_generated: true --- + + + diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/config-map-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/config-map-v1.md index b543e6c9acd57..be3ef5c8e4193 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/config-map-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/config-map-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ConfigMap holds configuration data for pods to consume." title: "ConfigMap" weight: 1 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` @@ -40,7 +52,7 @@ ConfigMap holds configuration data for pods to consume. - **immutable** (boolean) - Immutable, if set to true, ensures that data stored in the ConfigMap cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. This is a beta field enabled by ImmutableEphemeralVolumes feature gate. + Immutable, if set to true, ensures that data stored in the ConfigMap cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. 
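The `immutable` field whose description is trimmed in the ConfigMap hunk above is set at the top level of the object, alongside `data`. A minimal sketch, with an illustrative name and key:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-config   # illustrative name
data:
  log-level: info        # illustrative key/value
immutable: true          # once true, data/binaryData can no longer be updated, only metadata
```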
diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-driver-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-driver-v1.md index b1c634b6b2115..22fcf194ee0d1 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-driver-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-driver-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster." title: "CSIDriver" weight: 8 +auto_generated: true --- + + `apiVersion: storage.k8s.io/v1` `import "k8s.io/api/storage/v1"` @@ -47,17 +59,23 @@ CSIDriverSpec is the specification of a CSIDriver. - **attachRequired** (boolean) attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called. + + This field is immutable. - **fsGroupPolicy** (string) Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate. + + This field is immutable. - **podInfoOnMount** (boolean) - If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. "csi.storage.k8s.io/pod.name": pod.Name "csi.storage.k8s.io/pod.namespace": pod.Namespace "csi.storage.k8s.io/pod.uid": string(pod.UID) "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume + If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used.
"csi.storage.k8s.io/pod.name": pod.Name "csi.storage.k8s.io/pod.namespace": pod.Namespace "csi.storage.k8s.io/pod.uid": string(pod.UID) "csi.storage.k8s.io/ephemeral": "true" if the volume is an ephemeral inline volume defined by a CSIVolumeSource, otherwise "false" "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver. + + This field is immutable. - **requiresRepublish** (boolean) @@ -65,7 +83,7 @@ CSIDriverSpec is the specification of a CSIDriver. Note: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container. - This is an alpha feature and only available when the CSIServiceAccountToken feature is enabled. + This is a beta feature and only available when the CSIServiceAccountToken feature is enabled. - **storageCapacity** (boolean) @@ -75,7 +93,9 @@ CSIDriverSpec is the specification of a CSIDriver. Alternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published. - This is an alpha field and only available when the CSIStorageCapacity feature is enabled. The default is false. + This field is immutable. + + This is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false. - **tokenRequests** ([]TokenRequest) @@ -91,7 +111,7 @@ CSIDriverSpec is the specification of a CSIDriver. Note: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically. - This is an alpha feature and only available when the CSIServiceAccountToken feature is enabled. + This is a beta feature and only available when the CSIServiceAccountToken feature is enabled. *TokenRequest contains parameters of a service account token.* @@ -109,6 +129,8 @@ CSIDriverSpec is the specification of a CSIDriver. *Set: unique values will be kept during a merge* volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is "Persistent", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta. + + This field is immutable. 
diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-node-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-node-v1.md index aaee041979232..343ab011350c7 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-node-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-node-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "CSINode holds information about all CSI drivers installed on a node." title: "CSINode" weight: 9 +auto_generated: true --- + + `apiVersion: storage.k8s.io/v1` `import "k8s.io/api/storage/v1"` @@ -68,9 +80,9 @@ CSINodeSpec holds information about the specification of all CSI drivers install *VolumeNodeResources is a set of resource limits for scheduling of volumes.* - - **drivers.allocatable.count** (int32) + - **drivers.allocatable.count** (int32) - Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded. + Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded. - **drivers.topologyKeys** ([]string) diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-storage-capacity-v1beta1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-storage-capacity-v1beta1.md new file mode 100644 index 0000000000000..cc915b8b15fc6 --- /dev/null +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-storage-capacity-v1beta1.md @@ -0,0 +1,578 @@ +--- +api_metadata: + apiVersion: "storage.k8s.io/v1beta1" + import: "k8s.io/api/storage/v1beta1" + kind: "CSIStorageCapacity" +content_type: "api_reference" +description: "CSIStorageCapacity stores the result of one CSI GetCapacity call." +title: "CSIStorageCapacity v1beta1" +weight: 10 +auto_generated: true +--- + + + +`apiVersion: storage.k8s.io/v1beta1` + +`import "k8s.io/api/storage/v1beta1"` + + +## CSIStorageCapacity {#CSIStorageCapacity} + +CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes. + +For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123" + +The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero + +The producer of these objects can decide which approach is more suitable. 
+ +They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity. + +
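Expressed as a manifest, a capacity object for the first example above might look like this sketch (the object name and zone value are illustrative):

```yaml
apiVersion: storage.k8s.io/v1beta1
kind: CSIStorageCapacity
metadata:
  name: csisc-standard-us-east1      # illustrative; a generated name also works
  namespace: default                 # CSIStorageCapacity objects are namespaced
storageClassName: standard           # must name an existing StorageClass
capacity: 1234Gi                     # as reported by the driver's GetCapacity call
nodeTopology:                        # which nodes have access to this storage
  matchLabels:
    topology.kubernetes.io/zone: us-east1
```

Note that, unlike most resources, the fields sit at the top level of the object; there is no `spec`.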
+ +- **apiVersion**: storage.k8s.io/v1beta1 + + +- **kind**: CSIStorageCapacity + + +- **metadata** (}}">ObjectMeta) + + Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-\, a generated name, or a reverse-domain name which ends with the unique CSI driver name. + + Objects are namespaced. + + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + +- **storageClassName** (string), required + + The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable. + +- **capacity** (}}">Quantity) + + Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. + + The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity. + +- **maximumVolumeSize** (}}">Quantity) + + MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. + + This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim. + +- **nodeTopology** (}}">LabelSelector) + + NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable. + + + + + +## CSIStorageCapacityList {#CSIStorageCapacityList} + +CSIStorageCapacityList is a collection of CSIStorageCapacity objects. + +
+ +- **apiVersion**: storage.k8s.io/v1beta1 + + +- **kind**: CSIStorageCapacityList + + +- **metadata** (}}">ListMeta) + + Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + +- **items** ([]}}">CSIStorageCapacity), required + + *Map: unique values on key name will be kept during a merge* + + Items is the list of CSIStorageCapacity objects. + + + + + +## Operations {#Operations} + + + +
+ + + + + + +### `get` read the specified CSIStorageCapacity + +#### HTTP Request + +GET /apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities/{name} + +#### Parameters + + +- **name** (*in path*): string, required + + name of the CSIStorageCapacity + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **pretty** (*in query*): string + + }}">pretty + + + +#### Response + + +200 (}}">CSIStorageCapacity): OK + +401: Unauthorized + + +### `list` list or watch objects of kind CSIStorageCapacity + +#### HTTP Request + +GET /apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities + +#### Parameters + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **allowWatchBookmarks** (*in query*): boolean + + }}">allowWatchBookmarks + + +- **continue** (*in query*): string + + }}">continue + + +- **fieldSelector** (*in query*): string + + }}">fieldSelector + + +- **labelSelector** (*in query*): string + + }}">labelSelector + + +- **limit** (*in query*): integer + + }}">limit + + +- **pretty** (*in query*): string + + }}">pretty + + +- **resourceVersion** (*in query*): string + + }}">resourceVersion + + +- **resourceVersionMatch** (*in query*): string + + }}">resourceVersionMatch + + +- **timeoutSeconds** (*in query*): integer + + }}">timeoutSeconds + + +- **watch** (*in query*): boolean + + }}">watch + + + +#### Response + + +200 (}}">CSIStorageCapacityList): OK + +401: Unauthorized + + +### `list` list or watch objects of kind CSIStorageCapacity + +#### HTTP Request + +GET /apis/storage.k8s.io/v1beta1/csistoragecapacities + +#### Parameters + + +- **allowWatchBookmarks** (*in query*): boolean + + }}">allowWatchBookmarks + + +- **continue** (*in query*): string + + }}">continue + + +- **fieldSelector** (*in query*): string + + }}">fieldSelector + + +- **labelSelector** (*in query*): string + + }}">labelSelector + + +- **limit** (*in query*): integer + + }}">limit + + +- **pretty** (*in query*): string + + }}">pretty + + +- **resourceVersion** (*in query*): string + + }}">resourceVersion + + +- **resourceVersionMatch** (*in query*): string + + }}">resourceVersionMatch + + +- **timeoutSeconds** (*in query*): integer + + }}">timeoutSeconds + + +- **watch** (*in query*): boolean + + }}">watch + + + +#### Response + + +200 (}}">CSIStorageCapacityList): OK + +401: Unauthorized + + +### `create` create a CSIStorageCapacity + +#### HTTP Request + +POST /apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities + +#### Parameters + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **body**: }}">CSIStorageCapacity, required + + + + +- **dryRun** (*in query*): string + + }}">dryRun + + +- **fieldManager** (*in query*): string + + }}">fieldManager + + +- **pretty** (*in query*): string + + }}">pretty + + + +#### Response + + +200 (}}">CSIStorageCapacity): OK + +201 (}}">CSIStorageCapacity): Created + +202 (}}">CSIStorageCapacity): Accepted + +401: Unauthorized + + +### `update` replace the specified CSIStorageCapacity + +#### HTTP Request + +PUT /apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities/{name} + +#### Parameters + + +- **name** (*in path*): string, required + + name of the CSIStorageCapacity + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **body**: }}">CSIStorageCapacity, required + + + + +- **dryRun** (*in query*): string + + }}">dryRun + + +- **fieldManager** (*in query*): string + + }}">fieldManager + + +- **pretty** (*in query*): string + 
+ }}">pretty + + + +#### Response + + +200 (}}">CSIStorageCapacity): OK + +201 (}}">CSIStorageCapacity): Created + +401: Unauthorized + + +### `patch` partially update the specified CSIStorageCapacity + +#### HTTP Request + +PATCH /apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities/{name} + +#### Parameters + + +- **name** (*in path*): string, required + + name of the CSIStorageCapacity + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **body**: }}">Patch, required + + + + +- **dryRun** (*in query*): string + + }}">dryRun + + +- **fieldManager** (*in query*): string + + }}">fieldManager + + +- **force** (*in query*): boolean + + }}">force + + +- **pretty** (*in query*): string + + }}">pretty + + + +#### Response + + +200 (}}">CSIStorageCapacity): OK + +401: Unauthorized + + +### `delete` delete a CSIStorageCapacity + +#### HTTP Request + +DELETE /apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities/{name} + +#### Parameters + + +- **name** (*in path*): string, required + + name of the CSIStorageCapacity + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **body**: }}">DeleteOptions + + + + +- **dryRun** (*in query*): string + + }}">dryRun + + +- **gracePeriodSeconds** (*in query*): integer + + }}">gracePeriodSeconds + + +- **pretty** (*in query*): string + + }}">pretty + + +- **propagationPolicy** (*in query*): string + + }}">propagationPolicy + + + +#### Response + + +200 (}}">Status): OK + +202 (}}">Status): Accepted + +401: Unauthorized + + +### `deletecollection` delete collection of CSIStorageCapacity + +#### HTTP Request + +DELETE /apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities + +#### Parameters + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **body**: }}">DeleteOptions + + + + +- **continue** (*in query*): string + + }}">continue + + +- **dryRun** (*in query*): string + + }}">dryRun + + +- **fieldSelector** (*in query*): string + + }}">fieldSelector + + +- **gracePeriodSeconds** (*in query*): integer + + }}">gracePeriodSeconds + + +- **labelSelector** (*in query*): string + + }}">labelSelector + + +- **limit** (*in query*): integer + + }}">limit + + +- **pretty** (*in query*): string + + }}">pretty + + +- **propagationPolicy** (*in query*): string + + }}">propagationPolicy + + +- **resourceVersion** (*in query*): string + + }}">resourceVersion + + +- **resourceVersionMatch** (*in query*): string + + }}">resourceVersionMatch + + +- **timeoutSeconds** (*in query*): integer + + }}">timeoutSeconds + + + +#### Response + + +200 (}}">Status): OK + +401: Unauthorized + diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1.md index 23b4f3214edb9..f73ded9ff56b5 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "PersistentVolumeClaim is a user's request for and claim to a persistent volume." 
title: "PersistentVolumeClaim" weight: 4 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` @@ -65,11 +77,11 @@ PersistentVolumeClaimSpec describes the common attributes of storage devices and - **resources.limits** (map[string]}}">Quantity) - Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - **resources.requests** (map[string]}}">Quantity) - Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - **volumeName** (string) diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md index cbc4a613d5325..c5c68b19d7f27 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "PersistentVolume (PV) is a storage resource provisioned by an administrator." title: "PersistentVolume" weight: 5 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` @@ -78,20 +90,20 @@ PersistentVolumeSpec is the specification of a persistent volume. *A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.* - - **nodeAffinity.required.nodeSelectorTerms** ([]NodeSelectorTerm), required + - **nodeAffinity.required.nodeSelectorTerms** ([]NodeSelectorTerm), required - Required. A list of node selector terms. The terms are ORed. + Required. A list of node selector terms. The terms are ORed. - - *A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.* + + *A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.* - - **nodeAffinity.required.nodeSelectorTerms.matchExpressions** ([]}}">NodeSelectorRequirement) + - **nodeAffinity.required.nodeSelectorTerms.matchExpressions** ([]}}">NodeSelectorRequirement) - A list of node selector requirements by node's labels. + A list of node selector requirements by node's labels. - - **nodeAffinity.required.nodeSelectorTerms.matchFields** ([]}}">NodeSelectorRequirement) + - **nodeAffinity.required.nodeSelectorTerms.matchFields** ([]}}">NodeSelectorRequirement) - A list of node selector requirements by node's fields. + A list of node selector requirements by node's fields. 
- **persistentVolumeReclaimPolicy** (string) @@ -252,13 +264,13 @@ PersistentVolumeSpec is the specification of a persistent volume. *SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace* - - **cephfs.secretRef.name** (string) + - **cephfs.secretRef.name** (string) - Name is unique within a namespace to reference a secret resource. + Name is unique within a namespace to reference a secret resource. - - **cephfs.secretRef.namespace** (string) + - **cephfs.secretRef.namespace** (string) - Namespace defines the space within which the secret name must be unique. + Namespace defines the space within which the secret name must be unique. - **cephfs.user** (string) @@ -290,13 +302,100 @@ PersistentVolumeSpec is the specification of a persistent volume. *SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace* - - **cinder.secretRef.name** (string) + - **cinder.secretRef.name** (string) + + Name is unique within a namespace to reference a secret resource. + + - **cinder.secretRef.namespace** (string) + + Namespace defines the space within which the secret name must be unique. + +- **csi** (CSIPersistentVolumeSource) + + CSI represents storage that is handled by an external CSI driver (Beta feature). + + + *Represents storage that is managed by an external CSI volume driver (Beta feature)* + + - **csi.driver** (string), required + + Driver is the name of the driver to use for this volume. Required. + + - **csi.volumeHandle** (string), required + + VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required. + + - **csi.controllerExpandSecretRef** (SecretReference) + + ControllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed. + + + *SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace* + + - **csi.controllerExpandSecretRef.name** (string) + + Name is unique within a namespace to reference a secret resource. + + - **csi.controllerExpandSecretRef.namespace** (string) + + Namespace defines the space within which the secret name must be unique. + + - **csi.controllerPublishSecretRef** (SecretReference) + + ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed. + + + *SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace* - Name is unique within a namespace to reference a secret resource. + - **csi.controllerPublishSecretRef.name** (string) - - **cinder.secretRef.namespace** (string) + Name is unique within a namespace to reference a secret resource. - Namespace defines the space within which the secret name must be unique. + - **csi.controllerPublishSecretRef.namespace** (string) + + Namespace defines the space within which the secret name must be unique. 
+ + - **csi.fsType** (string) + + Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". + + - **csi.nodePublishSecretRef** (SecretReference) + + NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed. + + + *SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace* + + - **csi.nodePublishSecretRef.name** (string) + + Name is unique within a namespace to reference a secret resource. + + - **csi.nodePublishSecretRef.namespace** (string) + + Namespace defines the space within which the secret name must be unique. + + - **csi.nodeStageSecretRef** (SecretReference) + + NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed. + + + *SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace* + + - **csi.nodeStageSecretRef.name** (string) + + Name is unique within a namespace to reference a secret resource. + + - **csi.nodeStageSecretRef.namespace** (string) + + Namespace defines the space within which the secret name must be unique. + + - **csi.readOnly** (boolean) + + Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write). + + - **csi.volumeAttributes** (map[string]string) + + Attributes of the volume to publish. - **fc** (FCVolumeSource) @@ -355,13 +454,13 @@ PersistentVolumeSpec is the specification of a persistent volume. *SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace* - - **flexVolume.secretRef.name** (string) + - **flexVolume.secretRef.name** (string) - Name is unique within a namespace to reference a secret resource. + Name is unique within a namespace to reference a secret resource. - - **flexVolume.secretRef.namespace** (string) + - **flexVolume.secretRef.namespace** (string) - Namespace defines the space within which the secret name must be unique. + Namespace defines the space within which the secret name must be unique. - **flocker** (FlockerVolumeSource) @@ -480,13 +579,13 @@ PersistentVolumeSpec is the specification of a persistent volume. *SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace* - - **iscsi.secretRef.name** (string) + - **iscsi.secretRef.name** (string) - Name is unique within a namespace to reference a secret resource. + Name is unique within a namespace to reference a secret resource. - - **iscsi.secretRef.namespace** (string) + - **iscsi.secretRef.namespace** (string) - Namespace defines the space within which the secret name must be unique. + Namespace defines the space within which the secret name must be unique. - **nfs** (NFSVolumeSource) @@ -610,13 +709,13 @@ PersistentVolumeSpec is the specification of a persistent volume. *SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace* - - **rbd.secretRef.name** (string) + - **rbd.secretRef.name** (string) - Name is unique within a namespace to reference a secret resource. + Name is unique within a namespace to reference a secret resource. - - **rbd.secretRef.namespace** (string) + - **rbd.secretRef.namespace** (string) - Namespace defines the space within which the secret name must be unique. + Namespace defines the space within which the secret name must be unique. - **rbd.user** (string) @@ -640,13 +739,13 @@ PersistentVolumeSpec is the specification of a persistent volume. *SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace* - - **scaleIO.secretRef.name** (string) + - **scaleIO.secretRef.name** (string) - Name is unique within a namespace to reference a secret resource. + Name is unique within a namespace to reference a secret resource. - - **scaleIO.secretRef.namespace** (string) + - **scaleIO.secretRef.namespace** (string) - Namespace defines the space within which the secret name must be unique. + Namespace defines the space within which the secret name must be unique. - **scaleIO.system** (string), required @@ -730,96 +829,6 @@ PersistentVolumeSpec is the specification of a persistent volume. Storage Policy Based Management (SPBM) profile name. -### Beta level - - -- **csi** (CSIPersistentVolumeSource) - - CSI represents storage that is handled by an external CSI driver (Beta feature). - - - *Represents storage that is managed by an external CSI volume driver (Beta feature)* - - - **csi.driver** (string), required - - Driver is the name of the driver to use for this volume. Required. - - - **csi.volumeHandle** (string), required - - VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required. - - - **csi.controllerExpandSecretRef** (SecretReference) - - ControllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed. - - - *SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace* - - - **csi.controllerExpandSecretRef.name** (string) - - Name is unique within a namespace to reference a secret resource. - - - **csi.controllerExpandSecretRef.namespace** (string) - - Namespace defines the space within which the secret name must be unique. - - - **csi.controllerPublishSecretRef** (SecretReference) - - ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed. - - - *SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace* - - - **csi.controllerPublishSecretRef.name** (string) - - Name is unique within a namespace to reference a secret resource. - - - **csi.controllerPublishSecretRef.namespace** (string) - - Namespace defines the space within which the secret name must be unique. 
- - - **csi.fsType** (string) - - Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". - - - **csi.nodePublishSecretRef** (SecretReference) - - NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed. - - - *SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace* - - - **csi.nodePublishSecretRef.name** (string) - - Name is unique within a namespace to reference a secret resource. - - - **csi.nodePublishSecretRef.namespace** (string) - - Namespace defines the space within which the secret name must be unique. - - - **csi.nodeStageSecretRef** (SecretReference) - - NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed. - - - *SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace* - - - **csi.nodeStageSecretRef.name** (string) - - Name is unique within a namespace to reference a secret resource. - - - **csi.nodeStageSecretRef.namespace** (string) - - Namespace defines the space within which the secret name must be unique. - - - **csi.readOnly** (boolean) - - Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write). - - - **csi.volumeAttributes** (map[string]string) - - Attributes of the volume to publish. - ## PersistentVolumeStatus {#PersistentVolumeStatus} diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1.md index 84d9a08c0f0c9..5e75c90b7904a 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Secret holds secret data of a certain type." title: "Secret" weight: 2 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` @@ -36,11 +48,11 @@ Secret holds secret data of a certain type. The total bytes of the values in the - **immutable** (boolean) - Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. This is a beta field enabled by ImmutableEphemeralVolumes feature gate. + Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. - **stringData** (map[string]string) - stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API. + stringData allows specifying non-binary secret data in string form. 
It is provided as a write-only input field for convenience. All keys and values are merged into the data field on write, overwriting any existing values. The stringData field is never output when reading from the API. - **type** (string) diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/storage-class-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/storage-class-v1.md index 752735f6f6771..c00f36797de3d 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/storage-class-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/storage-class-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned." title: "StorageClass" weight: 6 +auto_generated: true --- + + `apiVersion: storage.k8s.io/v1` `import "k8s.io/api/storage/v1"` @@ -54,13 +66,13 @@ StorageClasses are non-namespaced; the name of the storage class according to et *A topology selector requirement is a selector that matches given label. This is an alpha feature and may change in the future.* - - **allowedTopologies.matchLabelExpressions.key** (string), required + - **allowedTopologies.matchLabelExpressions.key** (string), required - The label key that the selector applies to. + The label key that the selector applies to. - - **allowedTopologies.matchLabelExpressions.values** ([]string), required + - **allowedTopologies.matchLabelExpressions.values** ([]string), required - An array of string values. One value must match the label to be selected. Each entry in Values is ORed. + An array of string values. One value must match the label to be selected. Each entry in Values is ORed. - **mountOptions** ([]string) diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume-attachment-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume-attachment-v1.md index 95102c3988a1e..b9bc3ee83124b 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume-attachment-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume-attachment-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node." title: "VolumeAttachment" weight: 7 +auto_generated: true --- + + `apiVersion: storage.k8s.io/v1` `import "k8s.io/api/storage/v1"` @@ -67,7 +79,7 @@ VolumeAttachmentSpec is the specification of a VolumeAttachment request. - **source.inlineVolumeSpec** (}}">PersistentVolumeSpec) - inlineVolumeSpec contains all the information necessary to attach a persistent volume defined by a pod's inline VolumeSource. This field is populated only for the CSIMigration feature. It contains translated fields from a pod's inline VolumeSource to a PersistentVolumeSpec. This field is alpha-level and is only honored by servers that enabled the CSIMigration feature. + inlineVolumeSpec contains all the information necessary to attach a persistent volume defined by a pod's inline VolumeSource. This field is populated only for the CSIMigration feature. It contains translated fields from a pod's inline VolumeSource to a PersistentVolumeSpec. This field is beta-level and is only honored by servers that enabled the CSIMigration feature. 
- **source.persistentVolumeName** (string) diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume.md index ff45652ace7aa..60badecc43024 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume.md @@ -7,13 +7,27 @@ content_type: "api_reference" description: "Volume represents a named volume in a pod that may be accessed by any container in the pod." title: "Volume" weight: 3 +auto_generated: true --- + + `import "k8s.io/api/core/v1"` +## Volume {#Volume} + Volume represents a named volume in a pod that may be accessed by any container in the pod.
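Before the field groups below, a quick sketch of how named volumes appear in a Pod; this example declares one volume backed by an existing PersistentVolumeClaim and one inline CSI ephemeral volume (pod, claim, and driver names are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: volume-demo                        # illustrative
spec:
  containers:
    - name: app
      image: k8s.gcr.io/pause:3.4.1        # illustrative image
      volumeMounts:
        - name: data                       # must match a volume name below
          mountPath: /data
        - name: scratch
          mountPath: /scratch
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: my-claim                # illustrative, pre-existing PVC
    - name: scratch
      csi:                                 # ephemeral inline volume served by a CSI driver
        driver: inline.storage.kubernetes.io   # illustrative driver name
        volumeAttributes:                  # driver-specific, illustrative
          size: 1Gi
```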
@@ -24,7 +38,7 @@ Volume represents a named volume in a pod that may be accessed by any container -### Exposed Persistent volumes {#Exposed-Persistent-volumes} +### Exposed Persistent volumes - **persistentVolumeClaim** (PersistentVolumeClaimVolumeSource) @@ -42,7 +56,7 @@ Volume represents a named volume in a pod that may be accessed by any container Will force the ReadOnly setting in VolumeMounts. Default false. -### Projections {#Projections} +### Projections - **configMap** (ConfigMapVolumeSource) @@ -66,7 +80,7 @@ Volume represents a named volume in a pod that may be accessed by any container Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - - **configMap.items** ([]}}">KeyToPath) + - **configMap.items** ([]}}">KeyToPath) If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. @@ -91,7 +105,7 @@ Volume represents a named volume in a pod that may be accessed by any container Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - - **secret.items** ([]}}">KeyToPath) + - **secret.items** ([]}}">KeyToPath) If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. @@ -106,7 +120,7 @@ Volume represents a named volume in a pod that may be accessed by any container Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. 
- - **downwardAPI.items** ([]}}">DownwardAPIVolumeFile) + - **downwardAPI.items** ([]}}">DownwardAPIVolumeFile) Items is a list of downward API volume file @@ -128,79 +142,79 @@ Volume represents a named volume in a pod that may be accessed by any container *Projection that may be projected along with other supported volume types* - - **projected.sources.configMap** (ConfigMapProjection) + - **projected.sources.configMap** (ConfigMapProjection) - information about the configMap data to project + information about the configMap data to project - - *Adapts a ConfigMap into a projected volume. - - The contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.* + + *Adapts a ConfigMap into a projected volume. + + The contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.* - - **projected.sources.configMap.name** (string) + - **projected.sources.configMap.name** (string) - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - **projected.sources.configMap.optional** (boolean) + - **projected.sources.configMap.optional** (boolean) - Specify whether the ConfigMap or its keys must be defined + Specify whether the ConfigMap or its keys must be defined - - **projected.sources.configMap.items** ([]}}">KeyToPath) + - **projected.sources.configMap.items** ([]}}">KeyToPath) - If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. - - **projected.sources.downwardAPI** (DownwardAPIProjection) + - **projected.sources.downwardAPI** (DownwardAPIProjection) - information about the downwardAPI data to project + information about the downwardAPI data to project - - *Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.* + + *Represents downward API info for projecting into a projected volume. 
Note that this is identical to a downwardAPI volume source without the default mode.* - - **projected.sources.downwardAPI.items** ([]}}">DownwardAPIVolumeFile) + - **projected.sources.downwardAPI.items** ([]}}">DownwardAPIVolumeFile) - Items is a list of DownwardAPIVolume file + Items is a list of DownwardAPIVolume file - - **projected.sources.secret** (SecretProjection) + - **projected.sources.secret** (SecretProjection) - information about the secret data to project + information about the secret data to project - - *Adapts a secret into a projected volume. - - The contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.* + + *Adapts a secret into a projected volume. + + The contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.* - - **projected.sources.secret.name** (string) + - **projected.sources.secret.name** (string) - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - **projected.sources.secret.optional** (boolean) + - **projected.sources.secret.optional** (boolean) - Specify whether the Secret or its key must be defined + Specify whether the Secret or its key must be defined - - **projected.sources.secret.items** ([]}}">KeyToPath) + - **projected.sources.secret.items** ([]}}">KeyToPath) - If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. - - **projected.sources.serviceAccountToken** (ServiceAccountTokenProjection) + - **projected.sources.serviceAccountToken** (ServiceAccountTokenProjection) - information about the serviceAccountToken data to project + information about the serviceAccountToken data to project - - *ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).* + + *ServiceAccountTokenProjection represents a projected service account token volume. 
This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).* - - **projected.sources.serviceAccountToken.path** (string), required + - **projected.sources.serviceAccountToken.path** (string), required - Path is the path relative to the mount point of the file to project the token into. + Path is the path relative to the mount point of the file to project the token into. - - **projected.sources.serviceAccountToken.audience** (string) + - **projected.sources.serviceAccountToken.audience** (string) - Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. + Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. - - **projected.sources.serviceAccountToken.expirationSeconds** (int64) + - **projected.sources.serviceAccountToken.expirationSeconds** (int64) - ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. + ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. -### Local / Temporary Directory {#Local-Temporary-Directory} +### Local / Temporary Directory - **emptyDir** (EmptyDirVolumeSource) @@ -233,7 +247,7 @@ Volume represents a named volume in a pod that may be accessed by any container Type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath -### Persistent volumes {#Persistent-volumes} +### Persistent volumes - **awsElasticBlockStore** (AWSElasticBlockStoreVolumeSource) @@ -365,6 +379,33 @@ Volume represents a named volume in a pod that may be accessed by any container Optional: points to a secret object containing parameters used to connect to OpenStack. +- **csi** (CSIVolumeSource) + + CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + + + *Represents a source location of a volume to mount, managed by an external CSI driver* + + - **csi.driver** (string), required + + Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. + + - **csi.fsType** (string) + + Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. 
+ + - **csi.nodePublishSecretRef** (}}">LocalObjectReference) + + NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. + + - **csi.readOnly** (boolean) + + Specifies a read-only configuration for the volume. Defaults to false (read/write). + + - **csi.volumeAttributes** (map[string]string) + + VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. + - **fc** (FCVolumeSource) FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. @@ -749,42 +790,12 @@ Volume represents a named volume in a pod that may be accessed by any container Storage Policy Based Management (SPBM) profile name. -### Beta level {#Beta-level} - - -- **csi** (CSIVolumeSource) - - CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). - - - *Represents a source location of a volume to mount, managed by an external CSI driver* - - - **csi.driver** (string), required - - Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. - - - **csi.fsType** (string) - - Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. - - - **csi.nodePublishSecretRef** (}}">LocalObjectReference) - - NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. - - - **csi.readOnly** (boolean) - - Specifies a read-only configuration for the volume. Defaults to false (read/write). - - - **csi.volumeAttributes** (map[string]string) - - VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. - -### Alpha level {#Alpha-level} +### Alpha level - **ephemeral** (EphemeralVolumeSource) - Ephemeral represents a volume that is handled by a cluster storage driver (Alpha feature). The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. + Ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, @@ -798,14 +809,12 @@ Volume represents a named volume in a pod that may be accessed by any container Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. A pod can use both types of ephemeral volumes and persistent volumes at the same time. 
+ + This is a beta feature and only available when the GenericEphemeralVolume feature gate is enabled. *Represents an ephemeral volume that is handled by a normal storage driver.* - - **ephemeral.readOnly** (boolean) - - Specifies a read-only configuration for the volume. Defaults to false (read/write). - - **ephemeral.volumeClaimTemplate** (PersistentVolumeClaimTemplate) Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `\-\` where `\` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). @@ -819,15 +828,15 @@ Volume represents a named volume in a pod that may be accessed by any container *PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.* - - **ephemeral.volumeClaimTemplate.spec** (}}">PersistentVolumeClaimSpec), required + - **ephemeral.volumeClaimTemplate.spec** (}}">PersistentVolumeClaimSpec), required - The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. + The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. - - **ephemeral.volumeClaimTemplate.metadata** (}}">ObjectMeta) + - **ephemeral.volumeClaimTemplate.metadata** (}}">ObjectMeta) - May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation. + May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation. -### Deprecated {#Deprecated} +### Deprecated - **gitRepo** (GitRepoVolumeSource) @@ -853,3 +862,51 @@ Volume represents a named volume in a pod that may be accessed by any container +## DownwardAPIVolumeFile {#DownwardAPIVolumeFile} + +DownwardAPIVolumeFile represents information to create the file containing the pod field + +
+ +- **path** (string), required + + Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' + +- **fieldRef** (}}">ObjectFieldSelector) + + Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + +- **mode** (int32) + + Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + +- **resourceFieldRef** (}}">ResourceFieldSelector) + + Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + + + + + +## KeyToPath {#KeyToPath} + +Maps a string key to a path within a volume. + +
+ +- **key** (string), required + + The key to project. + +- **path** (string), required + + The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + +- **mode** (int32) + + Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + + + + + diff --git a/content/en/docs/reference/kubernetes-api/extend-resources/_index.md b/content/en/docs/reference/kubernetes-api/extend-resources/_index.md index ce6d3d3e39cb8..1886f6e26c816 100644 --- a/content/en/docs/reference/kubernetes-api/extend-resources/_index.md +++ b/content/en/docs/reference/kubernetes-api/extend-resources/_index.md @@ -1,4 +1,17 @@ --- title: "Extend Resources" weight: 7 +auto_generated: true --- + + + diff --git a/content/en/docs/reference/kubernetes-api/extend-resources/custom-resource-definition-v1.md b/content/en/docs/reference/kubernetes-api/extend-resources/custom-resource-definition-v1.md index 3fa3bc9a740f9..ce88ef945de49 100644 --- a/content/en/docs/reference/kubernetes-api/extend-resources/custom-resource-definition-v1.md +++ b/content/en/docs/reference/kubernetes-api/extend-resources/custom-resource-definition-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "CustomResourceDefinition represents a resource that should be exposed on the API server." title: "CustomResourceDefinition" weight: 1 +auto_generated: true --- + + `apiVersion: apiextensions.k8s.io/v1` `import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"` @@ -112,29 +124,29 @@ CustomResourceDefinitionSpec describes how a user wants their resource to appear *CustomResourceColumnDefinition specifies a column for server side printing.* - - **versions.additionalPrinterColumns.jsonPath** (string), required + - **versions.additionalPrinterColumns.jsonPath** (string), required - jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against each custom resource to produce the value for this column. + jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against each custom resource to produce the value for this column. - - **versions.additionalPrinterColumns.name** (string), required + - **versions.additionalPrinterColumns.name** (string), required - name is a human readable name for the column. + name is a human readable name for the column. - - **versions.additionalPrinterColumns.type** (string), required + - **versions.additionalPrinterColumns.type** (string), required - type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. - - **versions.additionalPrinterColumns.description** (string) + - **versions.additionalPrinterColumns.description** (string) - description is a human readable description of this column. + description is a human readable description of this column. 
- - **versions.additionalPrinterColumns.format** (string) + - **versions.additionalPrinterColumns.format** (string) - format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist clients in identifying the column that is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist clients in identifying the column that is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. - - **versions.additionalPrinterColumns.priority** (int32) + - **versions.additionalPrinterColumns.priority** (int32) - priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a priority greater than 0. + priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a priority greater than 0. - **versions.deprecated** (boolean) @@ -151,9 +163,9 @@ CustomResourceDefinitionSpec describes how a user wants their resource to appear *CustomResourceValidation is a list of validation methods for CustomResources.* - - **versions.schema.openAPIV3Schema** (JSONSchemaProps) + - **versions.schema.openAPIV3Schema** (JSONSchemaProps) - openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. + openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. - **versions.subresources** (CustomResourceSubresources) @@ -162,31 +174,31 @@ CustomResourceDefinitionSpec describes how a user wants their resource to appear *CustomResourceSubresources defines the status and scale subresources for CustomResources.* - - **versions.subresources.scale** (CustomResourceSubresourceScale) + - **versions.subresources.scale** (CustomResourceSubresourceScale) - scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. + scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. - - *CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources.* + + *CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources.* - - **versions.subresources.scale.specReplicasPath** (string), required + - **versions.subresources.scale.specReplicasPath** (string), required - specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.spec`. If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. + specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.spec`. If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. 
- - **versions.subresources.scale.statusReplicasPath** (string), required + - **versions.subresources.scale.statusReplicasPath** (string), required - statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status`. If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource will default to 0. + statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status`. If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource will default to 0. - - **versions.subresources.scale.labelSelectorPath** (string) + - **versions.subresources.scale.labelSelectorPath** (string) - labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status` or `.spec`. Must be set to work with HorizontalPodAutoscaler. The field pointed by this JSON path must be a string field (not a complex selector struct) which contains a serialized label selector in string form. More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` subresource will default to the empty string. + labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status` or `.spec`. Must be set to work with HorizontalPodAutoscaler. The field pointed by this JSON path must be a string field (not a complex selector struct) which contains a serialized label selector in string form. More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` subresource will default to the empty string. - - **versions.subresources.status** (CustomResourceSubresourceStatus) + - **versions.subresources.status** (CustomResourceSubresourceStatus) - status indicates the custom resource should serve a `/status` subresource. When enabled: 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. + status indicates the custom resource should serve a `/status` subresource. When enabled: 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. - - *CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. Status is represented by the `.status` JSON path inside of a CustomResource. 
When set, * exposes a /status subresource for the custom resource * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza* + + *CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. Status is represented by the `.status` JSON path inside of a CustomResource. When set, * exposes a /status subresource for the custom resource * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza* - **conversion** (CustomResourceConversion) @@ -207,59 +219,59 @@ CustomResourceDefinitionSpec describes how a user wants their resource to appear *WebhookConversion describes how to call a conversion webhook* - - **conversion.webhook.conversionReviewVersions** ([]string), required + - **conversion.webhook.conversionReviewVersions** ([]string), required - conversionReviewVersions is an ordered list of preferred `ConversionReview` versions the Webhook expects. The API server will use the first version in the list which it supports. If none of the versions specified in this list are supported by API server, conversion will fail for the custom resource. If a persisted Webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail. + conversionReviewVersions is an ordered list of preferred `ConversionReview` versions the Webhook expects. The API server will use the first version in the list which it supports. If none of the versions specified in this list are supported by API server, conversion will fail for the custom resource. If a persisted Webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail. - - **conversion.webhook.clientConfig** (WebhookClientConfig) + - **conversion.webhook.clientConfig** (WebhookClientConfig) - clientConfig is the instructions for how to call the webhook if strategy is `Webhook`. + clientConfig is the instructions for how to call the webhook if strategy is `Webhook`. - - *WebhookClientConfig contains the information to make a TLS connection with the webhook.* + + *WebhookClientConfig contains the information to make a TLS connection with the webhook.* - - **conversion.webhook.clientConfig.caBundle** ([]byte) + - **conversion.webhook.clientConfig.caBundle** ([]byte) - caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used. + caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used. - - **conversion.webhook.clientConfig.service** (ServiceReference) + - **conversion.webhook.clientConfig.service** (ServiceReference) - service is a reference to the service for this webhook. Either service or url must be specified. - - If the webhook is running within the cluster, then you should use `service`. + service is a reference to the service for this webhook. Either service or url must be specified. + + If the webhook is running within the cluster, then you should use `service`. 
- - *ServiceReference holds a reference to Service.legacy.k8s.io* + + *ServiceReference holds a reference to Service.legacy.k8s.io* - - **conversion.webhook.clientConfig.service.name** (string), required + - **conversion.webhook.clientConfig.service.name** (string), required - name is the name of the service. Required + name is the name of the service. Required - - **conversion.webhook.clientConfig.service.namespace** (string), required + - **conversion.webhook.clientConfig.service.namespace** (string), required - namespace is the namespace of the service. Required + namespace is the namespace of the service. Required - - **conversion.webhook.clientConfig.service.path** (string) + - **conversion.webhook.clientConfig.service.path** (string) - path is an optional URL path at which the webhook will be contacted. + path is an optional URL path at which the webhook will be contacted. - - **conversion.webhook.clientConfig.service.port** (int32) + - **conversion.webhook.clientConfig.service.port** (int32) - port is an optional service port at which the webhook will be contacted. `port` should be a valid port number (1-65535, inclusive). Defaults to 443 for backward compatibility. + port is an optional service port at which the webhook will be contacted. `port` should be a valid port number (1-65535, inclusive). Defaults to 443 for backward compatibility. - - **conversion.webhook.clientConfig.url** (string) + - **conversion.webhook.clientConfig.url** (string) - url gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. - - The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. - - Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. - - The scheme must be "https"; the URL must begin with "https://". - - A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. - - Attempting to use a user or basic auth e.g. "user:password@" is not allowed. Fragments ("#...") and query parameters ("?...") are not allowed, either. + url gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. + + The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. + + Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. + + The scheme must be "https"; the URL must begin with "https://". + + A path is optional, and if present may be any string permissible in a URL. 
You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. + + Attempting to use a user or basic auth e.g. "user:password@" is not allowed. Fragments ("#...") and query parameters ("?...") are not allowed, either. - **preserveUnknownFields** (boolean) @@ -269,6 +281,218 @@ CustomResourceDefinitionSpec describes how a user wants their resource to appear +## JSONSchemaProps {#JSONSchemaProps} + +JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). + +
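To make the schema fields below concrete, here is a hedged sketch of a JSONSchemaProps tree as it would appear under one entry of a CustomResourceDefinition's `spec.versions` (the `replicas` and `image` property names are illustrative):

```yaml
# Excerpt of a single version entry in a CustomResourceDefinition
schema:
  openAPIV3Schema:              # the root JSONSchemaProps
    type: object
    properties:
      spec:
        type: object
        properties:
          replicas:
            type: integer
            minimum: 1          # validation keywords map to the fields listed below
          image:
            type: string
        required: ["image"]
```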
+ +- **$ref** (string) + + +- **$schema** (string) + + +- **additionalItems** (JSONSchemaPropsOrBool) + + + + *JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. Defaults to true for the boolean property.* + +- **additionalProperties** (JSONSchemaPropsOrBool) + + + + *JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. Defaults to true for the boolean property.* + +- **allOf** ([]JSONSchemaProps) + + +- **anyOf** ([]JSONSchemaProps) + + +- **default** (JSON) + +  default is a default value for undefined object fields. Defaulting is a beta feature under the CustomResourceDefaulting feature gate. Defaulting requires spec.preserveUnknownFields to be false. + + + *JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.* + +- **definitions** (map[string]JSONSchemaProps) + + +- **dependencies** (map[string]JSONSchemaPropsOrStringArray) + + + + *JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array.* + +- **description** (string) + + +- **enum** ([]JSON) + + + + *JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.* + +- **example** (JSON) + + + + *JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.* + +- **exclusiveMaximum** (boolean) + + +- **exclusiveMinimum** (boolean) + + +- **externalDocs** (ExternalDocumentation) + + + + *ExternalDocumentation allows referencing an external resource for extended documentation.* + + - **externalDocs.description** (string) + + + - **externalDocs.url** (string) + + +- **format** (string) + +  format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: + +  - bsonobjectid: a bson object ID, i.e. a 24-character hex string - uri: a URI as parsed by Golang net/url.ParseRequestURI - email: an email address as parsed by Golang net/mail.ParseAddress - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - ipv4: an IPv4 IP as parsed by Golang net.ParseIP - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - cidr: a CIDR as parsed by Golang net.ParseCIDR - mac: a MAC address as parsed by Golang net.ParseMAC - uuid: a UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid3: a UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid4: a UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - uuid5: a UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" - isbn10: an ISBN10 number string like "0321751043" - isbn13: an ISBN13 number string like "978-0321751041" - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\d{3})\d{11})$ with any non digit characters mixed in - ssn: a U.S. social security number following the regex ^\d{3}[- ]?\d{2}[- ]?\d{4}$ - hexcolor: a hexadecimal color code like "#FFFFFF" following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - rgbcolor: an RGB color code like "rgb(255,255,255)" - byte: base64 encoded binary data - password: any kind of string - date: a date string like "2006-01-02" as defined by full-date in RFC3339 - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339. + +- **id** (string) + + +- **items** (JSONSchemaPropsOrArray) + + + + *JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps or an array of JSONSchemaProps. Mainly here for serialization purposes.* + +- **maxItems** (int64) + + +- **maxLength** (int64) + + +- **maxProperties** (int64) + + +- **maximum** (double) + + +- **minItems** (int64) + + +- **minLength** (int64) + + +- **minProperties** (int64) + + +- **minimum** (double) + + +- **multipleOf** (double) + + +- **not** (JSONSchemaProps) + + +- **nullable** (boolean) + + +- **oneOf** ([]JSONSchemaProps) + + +- **pattern** (string) + + +- **patternProperties** (map[string]JSONSchemaProps) + + +- **properties** (map[string]JSONSchemaProps) + + +- **required** ([]string) + + +- **title** (string) + + +- **type** (string) + + +- **uniqueItems** (boolean) + + +- **x-kubernetes-embedded-resource** (boolean) + +  x-kubernetes-embedded-resource defines that the value is an embedded Kubernetes runtime.Object, with TypeMeta and ObjectMeta. The type must be object. It is allowed to further restrict the embedded object. kind, apiVersion and metadata are validated automatically. x-kubernetes-preserve-unknown-fields is allowed to be true, but does not have to be if the object is fully specified (up to kind, apiVersion, metadata). + +- **x-kubernetes-int-or-string** (boolean) + +  x-kubernetes-int-or-string specifies that this value is either an integer or a string. If this is true, an empty type is allowed and type as child of anyOf is permitted if following one of the following patterns: + +  1) anyOf: + - type: integer + - type: string + 2) allOf: + - anyOf: + - type: integer + - type: string + - ... zero or more + +- **x-kubernetes-list-map-keys** ([]string) + +  x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used as the index of the map. + +  This tag MUST only be used on lists that have the "x-kubernetes-list-type" extension set to "map". Also, the values specified for this attribute must be a scalar typed field of the child structure (no nesting is supported). + +  The properties specified must either be required or have a default value, to ensure those properties are present for all list items. + +- **x-kubernetes-list-type** (string) + +  x-kubernetes-list-type annotates an array to further describe its topology. This extension must only be used on lists and may have 3 possible values: + +  1) `atomic`: the list is treated as a single entity, like a scalar. + Atomic lists will be entirely replaced when updated. This extension + may be used on any type of list (struct, scalar, ...). + 2) `set`: + Sets are lists that must not have multiple items with the same value. Each + value must be a scalar, an object with x-kubernetes-map-type `atomic` or an + array with x-kubernetes-list-type `atomic`. + 3) `map`: + These lists are like maps in that their elements have a non-index key + used to identify them. Order is preserved upon merge. The map tag + must only be used on a list with elements of type object. + Defaults to atomic for arrays. + +- **x-kubernetes-map-type** (string) + +  x-kubernetes-map-type annotates an object to further describe its topology. This extension must only be used when type is object and may have 2 possible values: + +  1) `granular`: + These maps are actual maps (key-value pairs) and each field is independent + of the others (they can each be manipulated by separate actors). This is + the default behaviour for all maps. + 2) `atomic`: the map is treated as a single entity, like a scalar. + Atomic maps will be entirely replaced when updated. + +- **x-kubernetes-preserve-unknown-fields** (boolean) + +  x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden. + + + + +
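To make the list-type extensions above concrete, here is a hedged schema fragment (the `containers` and `name` property names are illustrative) declaring a map-typed list keyed by `name`, so items merge by key instead of being replaced wholesale:

```yaml
# Illustrative JSONSchemaProps fragment using the list-type extensions
type: object
properties:
  containers:
    type: array
    x-kubernetes-list-type: map        # elements are identified by a key...
    x-kubernetes-list-map-keys:        # ...namely the required "name" property
    - name
    items:
      type: object
      required: ["name"]
      properties:
        name:
          type: string
        image:
          type: string
```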
## CustomResourceDefinitionStatus {#CustomResourceDefinitionStatus} + CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition @@ -308,6 +532,8 @@ CustomResourceDefinitionStatus indicates the state of the CustomResourceDefiniti - **conditions** ([]CustomResourceDefinitionCondition) + *Map: unique values on key type will be kept during a merge* + conditions indicate state for particular aspects of a CustomResourceDefinition @@ -350,18 +576,20 @@ CustomResourceDefinitionList is a list of CustomResourceDefinition objects.
-- **apiVersion**: apiextensions.k8s.io/v1 +- **items** ([]CustomResourceDefinition), required + items list individual CustomResourceDefinition objects -- **kind**: CustomResourceDefinitionList +- **apiVersion** (string) + APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources -- **metadata** (ListMeta) +- **kind** (string) + Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds -- **items** ([]CustomResourceDefinition), required +- **metadata** (ListMeta) - items list individual CustomResourceDefinition objects diff --git a/content/en/docs/reference/kubernetes-api/extend-resources/mutating-webhook-configuration-v1.md b/content/en/docs/reference/kubernetes-api/extend-resources/mutating-webhook-configuration-v1.md index ac32c9aaaad84..e335c9fc2e402 100644 --- a/content/en/docs/reference/kubernetes-api/extend-resources/mutating-webhook-configuration-v1.md +++ b/content/en/docs/reference/kubernetes-api/extend-resources/mutating-webhook-configuration-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "MutatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects, and may change, the object." title: "MutatingWebhookConfiguration" weight: 2 +auto_generated: true --- + + `apiVersion: admissionregistration.k8s.io/v1` `import "k8s.io/api/admissionregistration/v1"` @@ -50,48 +62,48 @@ MutatingWebhookConfiguration describes the configuration of and admission webhoo *WebhookClientConfig contains the information to make a TLS connection with the webhook* - - **webhooks.clientConfig.caBundle** ([]byte) + - **webhooks.clientConfig.caBundle** ([]byte) - `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used. + `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used. - - **webhooks.clientConfig.service** (ServiceReference) + - **webhooks.clientConfig.service** (ServiceReference) - `service` is a reference to the service for this webhook. Either `service` or `url` must be specified. - - If the webhook is running within the cluster, then you should use `service`. + `service` is a reference to the service for this webhook. Either `service` or `url` must be specified. + + If the webhook is running within the cluster, then you should use `service`. - - *ServiceReference holds a reference to Service.legacy.k8s.io* + + *ServiceReference holds a reference to Service.legacy.k8s.io* - - **webhooks.clientConfig.service.name** (string), required + - **webhooks.clientConfig.service.name** (string), required - `name` is the name of the service. Required + `name` is the name of the service. Required - - **webhooks.clientConfig.service.namespace** (string), required + - **webhooks.clientConfig.service.namespace** (string), required - `namespace` is the namespace of the service. 
Required - - **webhooks.clientConfig.service.path** (string) + - **webhooks.clientConfig.service.path** (string) - `path` is an optional URL path which will be sent in any request to this service. + `path` is an optional URL path which will be sent in any request to this service. - - **webhooks.clientConfig.service.port** (int32) + - **webhooks.clientConfig.service.port** (int32) - If specified, the port on the service that is hosting the webhook. Defaults to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive). + If specified, the port on the service that is hosting the webhook. Defaults to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive). - - **webhooks.clientConfig.url** (string) + - **webhooks.clientConfig.url** (string) - `url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. - - The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. - - Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. - - The scheme must be "https"; the URL must begin with "https://". - - A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. - - Attempting to use a user or basic auth e.g. "user:password@" is not allowed. Fragments ("#...") and query parameters ("?...") are not allowed, either. + `url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. + + The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. + + Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. + + The scheme must be "https"; the URL must begin with "https://". + + A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. + + Attempting to use a user or basic auth e.g. "user:password@" is not allowed. Fragments ("#...") and query parameters ("?...") are not allowed, either. - **webhooks.name** (string), required @@ -99,7 +111,7 @@ MutatingWebhookConfiguration describes the configuration of and admission webhoo - **webhooks.sideEffects** (string), required - SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). 
Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. + SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. - **webhooks.failurePolicy** (string) @@ -170,31 +182,31 @@ MutatingWebhookConfiguration describes the configuration of and admission webhoo *RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.* - - **webhooks.rules.apiGroups** ([]string) + - **webhooks.rules.apiGroups** ([]string) - APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required. + APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required. - - **webhooks.rules.apiVersions** ([]string) + - **webhooks.rules.apiVersions** ([]string) - APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required. + APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required. - - **webhooks.rules.operations** ([]string) + - **webhooks.rules.operations** ([]string) - Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required. + Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required. - - **webhooks.rules.resources** ([]string) + - **webhooks.rules.resources** ([]string) - Resources is a list of resources this rule applies to. - - For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. - - If wildcard is present, the validation rule will ensure resources do not overlap with each other. - - Depending on the enclosing object, subresources might not be allowed. Required. + Resources is a list of resources this rule applies to. + + For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. + + If wildcard is present, the validation rule will ensure resources do not overlap with each other. + + Depending on the enclosing object, subresources might not be allowed. Required. 
- - **webhooks.rules.scope** (string) + - **webhooks.rules.scope** (string) - scope specifies the scope of this rule. Valid values are "Cluster", "Namespaced", and "*" "Cluster" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. "Namespaced" means that only namespaced resources will match this rule. "*" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is "*". + scope specifies the scope of this rule. Valid values are "Cluster", "Namespaced", and "*" "Cluster" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. "Namespaced" means that only namespaced resources will match this rule. "*" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is "*". - **webhooks.timeoutSeconds** (int32) diff --git a/content/en/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1.md b/content/en/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1.md index a680b1f7aadb2..a985763cb3d4a 100644 --- a/content/en/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1.md +++ b/content/en/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ValidatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object without changing it." title: "ValidatingWebhookConfiguration" weight: 3 +auto_generated: true --- + + `apiVersion: admissionregistration.k8s.io/v1` `import "k8s.io/api/admissionregistration/v1"` @@ -50,48 +62,48 @@ ValidatingWebhookConfiguration describes the configuration of and admission webh *WebhookClientConfig contains the information to make a TLS connection with the webhook* - - **webhooks.clientConfig.caBundle** ([]byte) + - **webhooks.clientConfig.caBundle** ([]byte) - `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used. + `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used. - - **webhooks.clientConfig.service** (ServiceReference) + - **webhooks.clientConfig.service** (ServiceReference) - `service` is a reference to the service for this webhook. Either `service` or `url` must be specified. - - If the webhook is running within the cluster, then you should use `service`. + `service` is a reference to the service for this webhook. Either `service` or `url` must be specified. + + If the webhook is running within the cluster, then you should use `service`. - - *ServiceReference holds a reference to Service.legacy.k8s.io* + + *ServiceReference holds a reference to Service.legacy.k8s.io* - - **webhooks.clientConfig.service.name** (string), required + - **webhooks.clientConfig.service.name** (string), required - `name` is the name of the service. Required + `name` is the name of the service. Required - - **webhooks.clientConfig.service.namespace** (string), required + - **webhooks.clientConfig.service.namespace** (string), required - `namespace` is the namespace of the service. 
Required - - **webhooks.clientConfig.service.path** (string) + - **webhooks.clientConfig.service.path** (string) - `path` is an optional URL path which will be sent in any request to this service. + `path` is an optional URL path which will be sent in any request to this service. - - **webhooks.clientConfig.service.port** (int32) + - **webhooks.clientConfig.service.port** (int32) - If specified, the port on the service that is hosting the webhook. Defaults to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive). + If specified, the port on the service that is hosting the webhook. Defaults to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive). - - **webhooks.clientConfig.url** (string) + - **webhooks.clientConfig.url** (string) - `url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. - - The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. - - Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. - - The scheme must be "https"; the URL must begin with "https://". - - A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. - - Attempting to use a user or basic auth e.g. "user:password@" is not allowed. Fragments ("#...") and query parameters ("?...") are not allowed, either. + `url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. + + The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. + + Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. + + The scheme must be "https"; the URL must begin with "https://". + + A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. + + Attempting to use a user or basic auth e.g. "user:password@" is not allowed. Fragments ("#...") and query parameters ("?...") are not allowed, either. - **webhooks.name** (string), required @@ -99,7 +111,7 @@ ValidatingWebhookConfiguration describes the configuration of and admission webh - **webhooks.sideEffects** (string), required - SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). 
Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. + SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. - **webhooks.failurePolicy** (string) @@ -160,31 +172,31 @@ ValidatingWebhookConfiguration describes the configuration of and admission webh *RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.* - - **webhooks.rules.apiGroups** ([]string) + - **webhooks.rules.apiGroups** ([]string) - APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required. + APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required. - - **webhooks.rules.apiVersions** ([]string) + - **webhooks.rules.apiVersions** ([]string) - APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required. + APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required. - - **webhooks.rules.operations** ([]string) + - **webhooks.rules.operations** ([]string) - Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required. + Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required. - - **webhooks.rules.resources** ([]string) + - **webhooks.rules.resources** ([]string) - Resources is a list of resources this rule applies to. - - For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. - - If wildcard is present, the validation rule will ensure resources do not overlap with each other. - - Depending on the enclosing object, subresources might not be allowed. Required. + Resources is a list of resources this rule applies to. + + For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. + + If wildcard is present, the validation rule will ensure resources do not overlap with each other. + + Depending on the enclosing object, subresources might not be allowed. Required. 
- - **webhooks.rules.scope** (string) + - **webhooks.rules.scope** (string) - scope specifies the scope of this rule. Valid values are "Cluster", "Namespaced", and "*" "Cluster" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. "Namespaced" means that only namespaced resources will match this rule. "*" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is "*". + scope specifies the scope of this rule. Valid values are "Cluster", "Namespaced", and "*" "Cluster" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. "Namespaced" means that only namespaced resources will match this rule. "*" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is "*". - **webhooks.timeoutSeconds** (int32) diff --git a/content/en/docs/reference/kubernetes-api/policies-resources/_index.md b/content/en/docs/reference/kubernetes-api/policies-resources/_index.md deleted file mode 100644 index 251e411647bf3..0000000000000 --- a/content/en/docs/reference/kubernetes-api/policies-resources/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Policies Resources" -weight: 6 ---- diff --git a/content/en/docs/reference/kubernetes-api/policy-resources/_index.md b/content/en/docs/reference/kubernetes-api/policy-resources/_index.md new file mode 100644 index 0000000000000..2b614bb534ab5 --- /dev/null +++ b/content/en/docs/reference/kubernetes-api/policy-resources/_index.md @@ -0,0 +1,17 @@ +--- +title: "Policy Resources" +weight: 6 +auto_generated: true +--- + + + diff --git a/content/en/docs/reference/kubernetes-api/policies-resources/limit-range-v1.md b/content/en/docs/reference/kubernetes-api/policy-resources/limit-range-v1.md similarity index 86% rename from content/en/docs/reference/kubernetes-api/policies-resources/limit-range-v1.md rename to content/en/docs/reference/kubernetes-api/policy-resources/limit-range-v1.md index d73c4d3630f10..5d84379b6e889 100644 --- a/content/en/docs/reference/kubernetes-api/policies-resources/limit-range-v1.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/limit-range-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "LimitRange sets resource usage limits for each kind of resource in a Namespace." title: "LimitRange" weight: 1 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` @@ -30,7 +42,7 @@ LimitRange sets resource usage limits for each kind of resource in a Namespace. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">LimitRangeSpec) +- **spec** (}}">LimitRangeSpec) Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -95,9 +107,9 @@ LimitRangeList is a list of LimitRange items. Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds -- **items** ([]}}">LimitRange), required +- **items** ([]}}">LimitRange), required - Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + Items is a list of LimitRange objects. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -142,7 +154,7 @@ GET /api/v1/namespaces/{namespace}/limitranges/{name} #### Response -200 (}}">LimitRange): OK +200 (}}">LimitRange): OK 401: Unauthorized @@ -215,7 +227,7 @@ GET /api/v1/namespaces/{namespace}/limitranges #### Response -200 (}}">LimitRangeList): OK +200 (}}">LimitRangeList): OK 401: Unauthorized @@ -283,7 +295,7 @@ GET /api/v1/limitranges #### Response -200 (}}">LimitRangeList): OK +200 (}}">LimitRangeList): OK 401: Unauthorized @@ -302,7 +314,7 @@ POST /api/v1/namespaces/{namespace}/limitranges }}">namespace -- **body**: }}">LimitRange, required +- **body**: }}">LimitRange, required @@ -326,11 +338,11 @@ POST /api/v1/namespaces/{namespace}/limitranges #### Response -200 (}}">LimitRange): OK +200 (}}">LimitRange): OK -201 (}}">LimitRange): Created +201 (}}">LimitRange): Created -202 (}}">LimitRange): Accepted +202 (}}">LimitRange): Accepted 401: Unauthorized @@ -354,7 +366,7 @@ PUT /api/v1/namespaces/{namespace}/limitranges/{name} }}">namespace -- **body**: }}">LimitRange, required +- **body**: }}">LimitRange, required @@ -378,9 +390,9 @@ PUT /api/v1/namespaces/{namespace}/limitranges/{name} #### Response -200 (}}">LimitRange): OK +200 (}}">LimitRange): OK -201 (}}">LimitRange): Created +201 (}}">LimitRange): Created 401: Unauthorized @@ -433,7 +445,7 @@ PATCH /api/v1/namespaces/{namespace}/limitranges/{name} #### Response -200 (}}">LimitRange): OK +200 (}}">LimitRange): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/policies-resources/network-policy-v1.md b/content/en/docs/reference/kubernetes-api/policy-resources/network-policy-v1.md similarity index 63% rename from content/en/docs/reference/kubernetes-api/policies-resources/network-policy-v1.md rename to content/en/docs/reference/kubernetes-api/policy-resources/network-policy-v1.md index 798a3dc8911b0..6643b81beeed9 100644 --- a/content/en/docs/reference/kubernetes-api/policies-resources/network-policy-v1.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/network-policy-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "NetworkPolicy describes what network traffic is allowed for a set of Pods." title: "NetworkPolicy" weight: 3 +auto_generated: true --- + + `apiVersion: networking.k8s.io/v1` `import "k8s.io/api/networking/v1"` @@ -30,7 +42,7 @@ NetworkPolicy describes what network traffic is allowed for a set of Pods Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">NetworkPolicySpec) +- **spec** (}}">NetworkPolicySpec) Specification of the desired behavior for this NetworkPolicy. @@ -50,7 +62,7 @@ NetworkPolicySpec provides the specification of a NetworkPolicy - **policyTypes** ([]string) - List of rule types that the NetworkPolicy relates to. Valid options are "Ingress", "Egress", or "Ingress,Egress". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. 
Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8 + List of rule types that the NetworkPolicy relates to. Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8 - **ingress** ([]NetworkPolicyIngressRule) @@ -66,32 +78,32 @@ NetworkPolicySpec provides the specification of a NetworkPolicy *NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of fields are allowed* - - **ingress.from.ipBlock** (IPBlock) + - **ingress.from.ipBlock** (IPBlock) - IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be. + IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be. - - *IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.* + + *IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.* - - **ingress.from.ipBlock.cidr** (string), required + - **ingress.from.ipBlock.cidr** (string), required - CIDR is a string representing the IP Block Valid examples are "192.168.1.1/24" or "2001:db9::/64" + CIDR is a string representing the IP Block Valid examples are "192.168.1.1/24" or "2001:db9::/64" - - **ingress.from.ipBlock.except** ([]string) + - **ingress.from.ipBlock.except** ([]string) - Except is a slice of CIDRs that should not be included within an IP Block Valid examples are "192.168.1.1/24" or "2001:db9::/64" Except values will be rejected if they are outside the CIDR range + Except is a slice of CIDRs that should not be included within an IP Block Valid examples are "192.168.1.1/24" or "2001:db9::/64" Except values will be rejected if they are outside the CIDR range - - **ingress.from.namespaceSelector** (}}">LabelSelector) + - **ingress.from.namespaceSelector** (}}">LabelSelector) - Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces. - - If PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces. 
+ + If PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. - - **ingress.from.podSelector** (}}">LabelSelector) + - **ingress.from.podSelector** (}}">LabelSelector) - This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods. - - If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods. + + If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. - **ingress.ports** ([]NetworkPolicyPort) @@ -100,16 +112,20 @@ NetworkPolicySpec provides the specification of a NetworkPolicy *NetworkPolicyPort describes a port to allow traffic on* - - **ingress.ports.port** (IntOrString) + - **ingress.ports.port** (IntOrString) + + The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched. + + + *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* - The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. + - **ingress.ports.endPort** (int32) - - *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* + If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Alpha state and should be enabled using the Feature Gate "NetworkPolicyEndPort". - - **ingress.ports.protocol** (string) + - **ingress.ports.protocol** (string) - The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP. + The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP. - **egress** ([]NetworkPolicyEgressRule) @@ -125,32 +141,32 @@ NetworkPolicySpec provides the specification of a NetworkPolicy *NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of fields are allowed* - - **egress.to.ipBlock** (IPBlock) + - **egress.to.ipBlock** (IPBlock) - IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be. + IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be. 
- - *IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.* + + *IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.* - - **egress.to.ipBlock.cidr** (string), required + - **egress.to.ipBlock.cidr** (string), required - CIDR is a string representing the IP Block. Valid examples are "192.168.1.1/24" or "2001:db9::/64" + CIDR is a string representing the IP Block. Valid examples are "192.168.1.1/24" or "2001:db9::/64" - - **egress.to.ipBlock.except** ([]string) + - **egress.to.ipBlock.except** ([]string) - Except is a slice of CIDRs that should not be included within an IP Block. Valid examples are "192.168.1.1/24" or "2001:db9::/64". Except values will be rejected if they are outside the CIDR range + Except is a slice of CIDRs that should not be included within an IP Block. Valid examples are "192.168.1.1/24" or "2001:db9::/64". Except values will be rejected if they are outside the CIDR range - - **egress.to.namespaceSelector** (}}">LabelSelector) + - **egress.to.namespaceSelector** (}}">LabelSelector) - Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces. - - If PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces. + + If PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. - - **egress.to.podSelector** (}}">LabelSelector) + - **egress.to.podSelector** (}}">LabelSelector) - This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods. - - If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods. + + If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. - **egress.ports** ([]NetworkPolicyPort) @@ -159,16 +175,20 @@ NetworkPolicySpec provides the specification of a NetworkPolicy *NetworkPolicyPort describes a port to allow traffic on* - - **egress.ports.port** (IntOrString) + - **egress.ports.port** (IntOrString) + + The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
+ + + *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* - The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. + - **egress.ports.endPort** (int32) - - *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* + If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Alpha state and should be enabled using the Feature Gate "NetworkPolicyEndPort". - - **egress.ports.protocol** (string) + - **egress.ports.protocol** (string) - The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP. + The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP. @@ -190,7 +210,7 @@ NetworkPolicyList is a list of NetworkPolicy objects. Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **items** ([]}}">NetworkPolicy), required +- **items** ([]}}">NetworkPolicy), required Items is a list of schema objects. @@ -237,7 +257,7 @@ GET /apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies/{name} #### Response -200 (}}">NetworkPolicy): OK +200 (}}">NetworkPolicy): OK 401: Unauthorized @@ -310,7 +330,7 @@ GET /apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies #### Response -200 (}}">NetworkPolicyList): OK +200 (}}">NetworkPolicyList): OK 401: Unauthorized @@ -378,7 +398,7 @@ GET /apis/networking.k8s.io/v1/networkpolicies #### Response -200 (}}">NetworkPolicyList): OK +200 (}}">NetworkPolicyList): OK 401: Unauthorized @@ -397,7 +417,7 @@ POST /apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies }}">namespace -- **body**: }}">NetworkPolicy, required +- **body**: }}">NetworkPolicy, required @@ -421,11 +441,11 @@ POST /apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies #### Response -200 (}}">NetworkPolicy): OK +200 (}}">NetworkPolicy): OK -201 (}}">NetworkPolicy): Created +201 (}}">NetworkPolicy): Created -202 (}}">NetworkPolicy): Accepted +202 (}}">NetworkPolicy): Accepted 401: Unauthorized @@ -449,7 +469,7 @@ PUT /apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies/{name} }}">namespace -- **body**: }}">NetworkPolicy, required +- **body**: }}">NetworkPolicy, required @@ -473,9 +493,9 @@ PUT /apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies/{name} #### Response -200 (}}">NetworkPolicy): OK +200 (}}">NetworkPolicy): OK -201 (}}">NetworkPolicy): Created +201 (}}">NetworkPolicy): Created 401: Unauthorized @@ -528,7 +548,7 @@ PATCH /apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies/{name} #### Response -200 (}}">NetworkPolicy): OK +200 (}}">NetworkPolicy): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/policies-resources/pod-disruption-budget-v1beta1.md 
b/content/en/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1.md similarity index 68% rename from content/en/docs/reference/kubernetes-api/policies-resources/pod-disruption-budget-v1beta1.md rename to content/en/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1.md index 47b9d83a7e092..3b21024aeb40f 100644 --- a/content/en/docs/reference/kubernetes-api/policies-resources/pod-disruption-budget-v1beta1.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1.md @@ -1,17 +1,29 @@ --- api_metadata: - apiVersion: "policy/v1beta1" - import: "k8s.io/api/policy/v1beta1" + apiVersion: "policy/v1" + import: "k8s.io/api/policy/v1" kind: "PodDisruptionBudget" content_type: "api_reference" description: "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods." -title: "PodDisruptionBudget v1beta1" +title: "PodDisruptionBudget" weight: 4 +auto_generated: true --- -`apiVersion: policy/v1beta1` + -`import "k8s.io/api/policy/v1beta1"` +`apiVersion: policy/v1` + +`import "k8s.io/api/policy/v1"` ## PodDisruptionBudget {#PodDisruptionBudget} @@ -20,7 +32,7 @@ PodDisruptionBudget is an object to define the max disruption that can be caused
-- **apiVersion**: policy/v1beta1 +- **apiVersion**: policy/v1 - **kind**: PodDisruptionBudget @@ -28,12 +40,13 @@ PodDisruptionBudget is an object to define the max disruption that can be caused - **metadata** (}}">ObjectMeta) + Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">PodDisruptionBudgetSpec) +- **spec** (}}">PodDisruptionBudgetSpec) Specification of the desired behavior of the PodDisruptionBudget. -- **status** (}}">PodDisruptionBudgetStatus) +- **status** (}}">PodDisruptionBudgetStatus) Most recently observed status of the PodDisruptionBudget. @@ -63,7 +76,7 @@ PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. - **selector** (}}">LabelSelector) - Label query over pods whose evictions are managed by the disruption budget. + Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace. @@ -91,6 +104,52 @@ PodDisruptionBudgetStatus represents information about the status of a PodDisrup total number of pods counted by this disruption budget +- **conditions** ([]Condition) + + *Patch strategy: merge on key `type`* + + *Map: unique values on key type will be kept during a merge* + + Conditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute + the number of allowed disruptions. Therefore no disruptions are + allowed and the status of the condition will be False. + - InsufficientPods: The number of pods are either at or below the number + required by the PodDisruptionBudget. No disruptions are + allowed and the status of the condition will be False. + - SufficientPods: There are more pods than required by the PodDisruptionBudget. + The condition will be True, and the number of allowed + disruptions are provided by the disruptionsAllowed property. + + + *Condition contains details for one aspect of the current state of this API Resource.* + + - **conditions.lastTransitionTime** (Time), required + + lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + + - **conditions.message** (string), required + + message is a human readable message indicating details about the transition. This may be an empty string. + + - **conditions.reason** (string), required + + reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + + - **conditions.status** (string), required + + status of the condition, one of True, False, Unknown. + + - **conditions.type** (string), required + + type of condition in CamelCase or in foo.example.com/CamelCase. 
+ + - **conditions.observedGeneration** (int64) + + observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + - **disruptedPods** (map[string]Time) DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions. @@ -112,7 +171,7 @@ PodDisruptionBudgetList is a collection of PodDisruptionBudgets.
-- **apiVersion**: policy/v1beta1 +- **apiVersion**: policy/v1 - **kind**: PodDisruptionBudgetList @@ -120,9 +179,11 @@ PodDisruptionBudgetList is a collection of PodDisruptionBudgets. - **metadata** (}}">ListMeta) + Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **items** ([]}}">PodDisruptionBudget), required +- **items** ([]}}">PodDisruptionBudget), required + Items is a list of PodDisruptionBudgets @@ -143,7 +204,7 @@ PodDisruptionBudgetList is a collection of PodDisruptionBudgets. #### HTTP Request -GET /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} +GET /apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name} #### Parameters @@ -167,7 +228,7 @@ GET /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} #### Response -200 (}}">PodDisruptionBudget): OK +200 (}}">PodDisruptionBudget): OK 401: Unauthorized @@ -176,7 +237,7 @@ GET /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} #### HTTP Request -GET /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name}/status +GET /apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}/status #### Parameters @@ -200,7 +261,7 @@ GET /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name}/stat #### Response -200 (}}">PodDisruptionBudget): OK +200 (}}">PodDisruptionBudget): OK 401: Unauthorized @@ -209,7 +270,7 @@ GET /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name}/stat #### HTTP Request -GET /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets +GET /apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets #### Parameters @@ -273,7 +334,7 @@ GET /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets #### Response -200 (}}">PodDisruptionBudgetList): OK +200 (}}">PodDisruptionBudgetList): OK 401: Unauthorized @@ -282,7 +343,7 @@ GET /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets #### HTTP Request -GET /apis/policy/v1beta1/poddisruptionbudgets +GET /apis/policy/v1/poddisruptionbudgets #### Parameters @@ -341,7 +402,7 @@ GET /apis/policy/v1beta1/poddisruptionbudgets #### Response -200 (}}">PodDisruptionBudgetList): OK +200 (}}">PodDisruptionBudgetList): OK 401: Unauthorized @@ -350,7 +411,7 @@ GET /apis/policy/v1beta1/poddisruptionbudgets #### HTTP Request -POST /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets +POST /apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets #### Parameters @@ -360,7 +421,7 @@ POST /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets }}">namespace -- **body**: }}">PodDisruptionBudget, required +- **body**: }}">PodDisruptionBudget, required @@ -384,11 +445,11 @@ POST /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets #### Response -200 (}}">PodDisruptionBudget): OK +200 (}}">PodDisruptionBudget): OK -201 (}}">PodDisruptionBudget): Created +201 (}}">PodDisruptionBudget): Created -202 (}}">PodDisruptionBudget): Accepted +202 (}}">PodDisruptionBudget): Accepted 401: Unauthorized @@ -397,7 +458,7 @@ POST /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets #### HTTP Request -PUT /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} +PUT /apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name} #### Parameters @@ -412,7 +473,7 @@ PUT /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} }}">namespace -- **body**: }}">PodDisruptionBudget, required +- **body**: 
}}">PodDisruptionBudget, required @@ -436,9 +497,9 @@ PUT /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} #### Response -200 (}}">PodDisruptionBudget): OK +200 (}}">PodDisruptionBudget): OK -201 (}}">PodDisruptionBudget): Created +201 (}}">PodDisruptionBudget): Created 401: Unauthorized @@ -447,7 +508,7 @@ PUT /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} #### HTTP Request -PUT /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name}/status +PUT /apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}/status #### Parameters @@ -462,7 +523,7 @@ PUT /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name}/stat }}">namespace -- **body**: }}">PodDisruptionBudget, required +- **body**: }}">PodDisruptionBudget, required @@ -486,9 +547,9 @@ PUT /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name}/stat #### Response -200 (}}">PodDisruptionBudget): OK +200 (}}">PodDisruptionBudget): OK -201 (}}">PodDisruptionBudget): Created +201 (}}">PodDisruptionBudget): Created 401: Unauthorized @@ -497,7 +558,7 @@ PUT /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name}/stat #### HTTP Request -PATCH /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} +PATCH /apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name} #### Parameters @@ -541,7 +602,7 @@ PATCH /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} #### Response -200 (}}">PodDisruptionBudget): OK +200 (}}">PodDisruptionBudget): OK 401: Unauthorized @@ -550,7 +611,7 @@ PATCH /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} #### HTTP Request -PATCH /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name}/status +PATCH /apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}/status #### Parameters @@ -594,7 +655,7 @@ PATCH /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name}/st #### Response -200 (}}">PodDisruptionBudget): OK +200 (}}">PodDisruptionBudget): OK 401: Unauthorized @@ -603,7 +664,7 @@ PATCH /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name}/st #### HTTP Request -DELETE /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} +DELETE /apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name} #### Parameters @@ -658,7 +719,7 @@ DELETE /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} #### HTTP Request -DELETE /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets +DELETE /apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets #### Parameters diff --git a/content/en/docs/reference/kubernetes-api/policies-resources/pod-security-policy-v1beta1.md b/content/en/docs/reference/kubernetes-api/policy-resources/pod-security-policy-v1beta1.md similarity index 83% rename from content/en/docs/reference/kubernetes-api/policies-resources/pod-security-policy-v1beta1.md rename to content/en/docs/reference/kubernetes-api/policy-resources/pod-security-policy-v1beta1.md index 1d2eac8837808..b6050390e60de 100644 --- a/content/en/docs/reference/kubernetes-api/policies-resources/pod-security-policy-v1beta1.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/pod-security-policy-v1beta1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container." 
title: "PodSecurityPolicy v1beta1" weight: 5 +auto_generated: true --- + + `apiVersion: policy/v1beta1` `import "k8s.io/api/policy/v1beta1"` @@ -16,7 +28,7 @@ weight: 5 ## PodSecurityPolicy {#PodSecurityPolicy} -PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. +PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated in 1.21.
@@ -30,7 +42,7 @@ PodSecurityPolicy governs the ability to make requests that affect the Security Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">PodSecurityPolicySpec) +- **spec** (}}">PodSecurityPolicySpec) spec defines the policy enforced. @@ -62,13 +74,13 @@ PodSecurityPolicySpec defines the policy enforced. *IDRange provides a min/max of an allowed range of IDs.* - - **runAsUser.ranges.max** (int64), required + - **runAsUser.ranges.max** (int64), required - max is the end of the range, inclusive. + max is the end of the range, inclusive. - - **runAsUser.ranges.min** (int64), required + - **runAsUser.ranges.min** (int64), required - min is the start of the range, inclusive. + min is the start of the range, inclusive. - **runAsGroup** (RunAsGroupStrategyOptions) @@ -88,13 +100,13 @@ PodSecurityPolicySpec defines the policy enforced. *IDRange provides a min/max of an allowed range of IDs.* - - **runAsGroup.ranges.max** (int64), required + - **runAsGroup.ranges.max** (int64), required - max is the end of the range, inclusive. + max is the end of the range, inclusive. - - **runAsGroup.ranges.min** (int64), required + - **runAsGroup.ranges.min** (int64), required - min is the start of the range, inclusive. + min is the start of the range, inclusive. - **fsGroup** (FSGroupStrategyOptions), required @@ -110,13 +122,13 @@ PodSecurityPolicySpec defines the policy enforced. *IDRange provides a min/max of an allowed range of IDs.* - - **fsGroup.ranges.max** (int64), required + - **fsGroup.ranges.max** (int64), required - max is the end of the range, inclusive. + max is the end of the range, inclusive. - - **fsGroup.ranges.min** (int64), required + - **fsGroup.ranges.min** (int64), required - min is the start of the range, inclusive. + min is the start of the range, inclusive. - **fsGroup.rule** (string) @@ -136,13 +148,13 @@ PodSecurityPolicySpec defines the policy enforced. *IDRange provides a min/max of an allowed range of IDs.* - - **supplementalGroups.ranges.max** (int64), required + - **supplementalGroups.ranges.max** (int64), required - max is the end of the range, inclusive. + max is the end of the range, inclusive. - - **supplementalGroups.ranges.min** (int64), required + - **supplementalGroups.ranges.min** (int64), required - min is the start of the range, inclusive. + min is the start of the range, inclusive. - **supplementalGroups.rule** (string) @@ -166,21 +178,21 @@ PodSecurityPolicySpec defines the policy enforced. *SELinuxOptions are the labels to be applied to the container* - - **seLinux.seLinuxOptions.level** (string) + - **seLinux.seLinuxOptions.level** (string) - Level is SELinux level label that applies to the container. + Level is SELinux level label that applies to the container. - - **seLinux.seLinuxOptions.role** (string) + - **seLinux.seLinuxOptions.role** (string) - Role is a SELinux role label that applies to the container. + Role is a SELinux role label that applies to the container. - - **seLinux.seLinuxOptions.type** (string) + - **seLinux.seLinuxOptions.type** (string) - Type is a SELinux type label that applies to the container. + Type is a SELinux type label that applies to the container. - - **seLinux.seLinuxOptions.user** (string) + - **seLinux.seLinuxOptions.user** (string) - User is a SELinux user label that applies to the container. + User is a SELinux user label that applies to the container. 
- **readOnlyRootFilesystem** (boolean) @@ -331,7 +343,7 @@ PodSecurityPolicyList is a list of PodSecurityPolicy objects. Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **items** ([]}}">PodSecurityPolicy), required +- **items** ([]}}">PodSecurityPolicy), required items is a list of schema objects. @@ -373,7 +385,7 @@ GET /apis/policy/v1beta1/podsecuritypolicies/{name} #### Response -200 (}}">PodSecurityPolicy): OK +200 (}}">PodSecurityPolicy): OK 401: Unauthorized @@ -441,7 +453,7 @@ GET /apis/policy/v1beta1/podsecuritypolicies #### Response -200 (}}">PodSecurityPolicyList): OK +200 (}}">PodSecurityPolicyList): OK 401: Unauthorized @@ -455,7 +467,7 @@ POST /apis/policy/v1beta1/podsecuritypolicies #### Parameters -- **body**: }}">PodSecurityPolicy, required +- **body**: }}">PodSecurityPolicy, required @@ -479,11 +491,11 @@ POST /apis/policy/v1beta1/podsecuritypolicies #### Response -200 (}}">PodSecurityPolicy): OK +200 (}}">PodSecurityPolicy): OK -201 (}}">PodSecurityPolicy): Created +201 (}}">PodSecurityPolicy): Created -202 (}}">PodSecurityPolicy): Accepted +202 (}}">PodSecurityPolicy): Accepted 401: Unauthorized @@ -502,7 +514,7 @@ PUT /apis/policy/v1beta1/podsecuritypolicies/{name} name of the PodSecurityPolicy -- **body**: }}">PodSecurityPolicy, required +- **body**: }}">PodSecurityPolicy, required @@ -526,9 +538,9 @@ PUT /apis/policy/v1beta1/podsecuritypolicies/{name} #### Response -200 (}}">PodSecurityPolicy): OK +200 (}}">PodSecurityPolicy): OK -201 (}}">PodSecurityPolicy): Created +201 (}}">PodSecurityPolicy): Created 401: Unauthorized @@ -576,7 +588,7 @@ PATCH /apis/policy/v1beta1/podsecuritypolicies/{name} #### Response -200 (}}">PodSecurityPolicy): OK +200 (}}">PodSecurityPolicy): OK 401: Unauthorized @@ -624,9 +636,9 @@ DELETE /apis/policy/v1beta1/podsecuritypolicies/{name} #### Response -200 (}}">PodSecurityPolicy): OK +200 (}}">PodSecurityPolicy): OK -202 (}}">PodSecurityPolicy): Accepted +202 (}}">PodSecurityPolicy): Accepted 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/policies-resources/resource-quota-v1.md b/content/en/docs/reference/kubernetes-api/policy-resources/resource-quota-v1.md similarity index 81% rename from content/en/docs/reference/kubernetes-api/policies-resources/resource-quota-v1.md rename to content/en/docs/reference/kubernetes-api/policy-resources/resource-quota-v1.md index 66b0451d2d8cc..2f66235c0f562 100644 --- a/content/en/docs/reference/kubernetes-api/policies-resources/resource-quota-v1.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/resource-quota-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ResourceQuota sets aggregate quota restrictions enforced per namespace." title: "ResourceQuota" weight: 2 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` @@ -30,11 +42,11 @@ ResourceQuota sets aggregate quota restrictions enforced per namespace Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">ResourceQuotaSpec) +- **spec** (}}">ResourceQuotaSpec) Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status -- **status** (}}">ResourceQuotaStatus) +- **status** (}}">ResourceQuotaStatus) Status defines the actual enforced quota and its current usage. 
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -66,17 +78,17 @@ ResourceQuotaSpec defines the desired hard limits to enforce for Quota. *A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values.* - - **scopeSelector.matchExpressions.operator** (string), required + - **scopeSelector.matchExpressions.operator** (string), required - Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. + Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. - - **scopeSelector.matchExpressions.scopeName** (string), required + - **scopeSelector.matchExpressions.scopeName** (string), required - The name of the scope that the selector applies to. + The name of the scope that the selector applies to. - - **scopeSelector.matchExpressions.values** ([]string) + - **scopeSelector.matchExpressions.values** ([]string) - An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - **scopes** ([]string) @@ -120,7 +132,7 @@ ResourceQuotaList is a list of ResourceQuota items. Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds -- **items** ([]}}">ResourceQuota), required +- **items** ([]}}">ResourceQuota), required Items is a list of ResourceQuota objects. 
More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ @@ -167,7 +179,7 @@ GET /api/v1/namespaces/{namespace}/resourcequotas/{name} #### Response -200 (}}">ResourceQuota): OK +200 (}}">ResourceQuota): OK 401: Unauthorized @@ -200,7 +212,7 @@ GET /api/v1/namespaces/{namespace}/resourcequotas/{name}/status #### Response -200 (}}">ResourceQuota): OK +200 (}}">ResourceQuota): OK 401: Unauthorized @@ -273,7 +285,7 @@ GET /api/v1/namespaces/{namespace}/resourcequotas #### Response -200 (}}">ResourceQuotaList): OK +200 (}}">ResourceQuotaList): OK 401: Unauthorized @@ -341,7 +353,7 @@ GET /api/v1/resourcequotas #### Response -200 (}}">ResourceQuotaList): OK +200 (}}">ResourceQuotaList): OK 401: Unauthorized @@ -360,7 +372,7 @@ POST /api/v1/namespaces/{namespace}/resourcequotas }}">namespace -- **body**: }}">ResourceQuota, required +- **body**: }}">ResourceQuota, required @@ -384,11 +396,11 @@ POST /api/v1/namespaces/{namespace}/resourcequotas #### Response -200 (}}">ResourceQuota): OK +200 (}}">ResourceQuota): OK -201 (}}">ResourceQuota): Created +201 (}}">ResourceQuota): Created -202 (}}">ResourceQuota): Accepted +202 (}}">ResourceQuota): Accepted 401: Unauthorized @@ -412,7 +424,7 @@ PUT /api/v1/namespaces/{namespace}/resourcequotas/{name} }}">namespace -- **body**: }}">ResourceQuota, required +- **body**: }}">ResourceQuota, required @@ -436,9 +448,9 @@ PUT /api/v1/namespaces/{namespace}/resourcequotas/{name} #### Response -200 (}}">ResourceQuota): OK +200 (}}">ResourceQuota): OK -201 (}}">ResourceQuota): Created +201 (}}">ResourceQuota): Created 401: Unauthorized @@ -462,7 +474,7 @@ PUT /api/v1/namespaces/{namespace}/resourcequotas/{name}/status }}">namespace -- **body**: }}">ResourceQuota, required +- **body**: }}">ResourceQuota, required @@ -486,9 +498,9 @@ PUT /api/v1/namespaces/{namespace}/resourcequotas/{name}/status #### Response -200 (}}">ResourceQuota): OK +200 (}}">ResourceQuota): OK -201 (}}">ResourceQuota): Created +201 (}}">ResourceQuota): Created 401: Unauthorized @@ -541,7 +553,7 @@ PATCH /api/v1/namespaces/{namespace}/resourcequotas/{name} #### Response -200 (}}">ResourceQuota): OK +200 (}}">ResourceQuota): OK 401: Unauthorized @@ -594,7 +606,7 @@ PATCH /api/v1/namespaces/{namespace}/resourcequotas/{name}/status #### Response -200 (}}">ResourceQuota): OK +200 (}}">ResourceQuota): OK 401: Unauthorized @@ -647,9 +659,9 @@ DELETE /api/v1/namespaces/{namespace}/resourcequotas/{name} #### Response -200 (}}">ResourceQuota): OK +200 (}}">ResourceQuota): OK -202 (}}">ResourceQuota): Accepted +202 (}}">ResourceQuota): Accepted 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/service-resources/_index.md b/content/en/docs/reference/kubernetes-api/service-resources/_index.md new file mode 100644 index 0000000000000..4f2d9118d9602 --- /dev/null +++ b/content/en/docs/reference/kubernetes-api/service-resources/_index.md @@ -0,0 +1,17 @@ +--- +title: "Service Resources" +weight: 2 +auto_generated: true +--- + + + diff --git a/content/en/docs/reference/kubernetes-api/services-resources/endpoint-slice-v1beta1.md b/content/en/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1.md similarity index 73% rename from content/en/docs/reference/kubernetes-api/services-resources/endpoint-slice-v1beta1.md rename to content/en/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1.md index bb8c7213bb53f..a405d5066077b 100644 --- a/content/en/docs/reference/kubernetes-api/services-resources/endpoint-slice-v1beta1.md +++ 
b/content/en/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1.md @@ -1,17 +1,29 @@ --- api_metadata: - apiVersion: "discovery.k8s.io/v1beta1" - import: "k8s.io/api/discovery/v1beta1" + apiVersion: "discovery.k8s.io/v1" + import: "k8s.io/api/discovery/v1" kind: "EndpointSlice" content_type: "api_reference" description: "EndpointSlice represents a subset of the endpoints that implement a service." -title: "EndpointSlice v1beta1" +title: "EndpointSlice" weight: 3 +auto_generated: true --- -`apiVersion: discovery.k8s.io/v1beta1` + -`import "k8s.io/api/discovery/v1beta1"` +`apiVersion: discovery.k8s.io/v1` + +`import "k8s.io/api/discovery/v1"` ## EndpointSlice {#EndpointSlice} @@ -20,7 +32,7 @@ EndpointSlice represents a subset of the endpoints that implement a service. For
-- **apiVersion**: discovery.k8s.io/v1beta1 +- **apiVersion**: discovery.k8s.io/v1 - **kind**: EndpointSlice @@ -56,17 +68,41 @@ EndpointSlice represents a subset of the endpoints that implement a service. For *EndpointConditions represents the current condition of an endpoint.* - - **endpoints.conditions.ready** (boolean) + - **endpoints.conditions.ready** (boolean) + + ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be "true" for terminating endpoints. + + - **endpoints.conditions.serving** (boolean) + + serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate. + + - **endpoints.conditions.terminating** (boolean) + + terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate. + + - **endpoints.deprecatedTopology** (map[string]string) + + deprecatedTopology contains topology information part of the v1beta1 API. This field is deprecated, and will be removed when the v1beta1 API is removed (no sooner than kubernetes v1.24). While this field can hold values, it is not writable through the v1 API, and any attempts to write to it will be silently ignored. Topology information can be found in the zone and nodeName fields instead. + + - **endpoints.hints** (EndpointHints) + + hints contains information associated with how an endpoint should be consumed. + + + *EndpointHints provides hints describing how an endpoint should be consumed.* - ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be "true" for terminating endpoints. + - **endpoints.hints.forZones** ([]ForZone) - - **endpoints.conditions.serving** (boolean) + *Atomic: will be replaced during a merge* + + forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing. - serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate. + + *ForZone provides information about which zones should consume this endpoint.* - - **endpoints.conditions.terminating** (boolean) + - **endpoints.hints.forZones.name** (string), required - terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate. + name represents the name of the zone. 
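The hints machinery is easiest to see in manifest form. Below is a hedged sketch of an EndpointSlice using the v1 conditions and hints fields described here, plus the zone field documented with the remaining endpoint fields just below; every name, label value, and address in it is invented for illustration:

```yaml
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
  name: example-svc-abc12                    # hypothetical; slices are usually generated
  labels:
    kubernetes.io/service-name: example-svc  # conventional link back to the owning Service
addressType: IPv4
endpoints:
- addresses:
  - "10.1.2.3"
  conditions:
    ready: true          # prepared to receive traffic
    serving: true        # like ready, but still set while terminating
    terminating: false
  zone: us-west2-a       # replaces the deprecated v1beta1 topology map
  hints:
    forZones:            # zones that should consume this endpoint
    - name: us-west2-a
ports:
- name: http
  protocol: TCP
  port: 8080
```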
- **endpoints.hostname** (string) @@ -80,16 +116,9 @@ EndpointSlice represents a subset of the endpoints that implement a service. For targetRef is a reference to a Kubernetes object that represents this endpoint. - - **endpoints.topology** (map[string]string) + - **endpoints.zone** (string) - topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node - where the endpoint is located. This should match the corresponding - node label. - * topology.kubernetes.io/zone: the value indicates the zone where the - endpoint is located. This should match the corresponding node label. - * topology.kubernetes.io/region: the value indicates the region where the - endpoint is located. This should match the corresponding node label. - This field is deprecated and will be removed in future api versions. + zone is the name of the Zone this endpoint exists in. - **ports** ([]EndpointPort) @@ -126,7 +155,7 @@ EndpointSliceList represents a list of endpoint slices
-- **apiVersion**: discovery.k8s.io/v1beta1 +- **apiVersion**: discovery.k8s.io/v1 - **kind**: EndpointSliceList @@ -136,7 +165,7 @@ EndpointSliceList represents a list of endpoint slices Standard list metadata. -- **items** ([]}}">EndpointSlice), required +- **items** ([]}}">EndpointSlice), required List of endpoint slices @@ -159,7 +188,7 @@ EndpointSliceList represents a list of endpoint slices #### HTTP Request -GET /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name} +GET /apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices/{name} #### Parameters @@ -183,7 +212,7 @@ GET /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name} #### Response -200 (}}">EndpointSlice): OK +200 (}}">EndpointSlice): OK 401: Unauthorized @@ -192,7 +221,7 @@ GET /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name} #### HTTP Request -GET /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices +GET /apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices #### Parameters @@ -256,7 +285,7 @@ GET /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices #### Response -200 (}}">EndpointSliceList): OK +200 (}}">EndpointSliceList): OK 401: Unauthorized @@ -265,7 +294,7 @@ GET /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices #### HTTP Request -GET /apis/discovery.k8s.io/v1beta1/endpointslices +GET /apis/discovery.k8s.io/v1/endpointslices #### Parameters @@ -324,7 +353,7 @@ GET /apis/discovery.k8s.io/v1beta1/endpointslices #### Response -200 (}}">EndpointSliceList): OK +200 (}}">EndpointSliceList): OK 401: Unauthorized @@ -333,7 +362,7 @@ GET /apis/discovery.k8s.io/v1beta1/endpointslices #### HTTP Request -POST /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices +POST /apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices #### Parameters @@ -343,7 +372,7 @@ POST /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices }}">namespace -- **body**: }}">EndpointSlice, required +- **body**: }}">EndpointSlice, required @@ -367,11 +396,11 @@ POST /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices #### Response -200 (}}">EndpointSlice): OK +200 (}}">EndpointSlice): OK -201 (}}">EndpointSlice): Created +201 (}}">EndpointSlice): Created -202 (}}">EndpointSlice): Accepted +202 (}}">EndpointSlice): Accepted 401: Unauthorized @@ -380,7 +409,7 @@ POST /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices #### HTTP Request -PUT /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name} +PUT /apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices/{name} #### Parameters @@ -395,7 +424,7 @@ PUT /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name} }}">namespace -- **body**: }}">EndpointSlice, required +- **body**: }}">EndpointSlice, required @@ -419,9 +448,9 @@ PUT /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name} #### Response -200 (}}">EndpointSlice): OK +200 (}}">EndpointSlice): OK -201 (}}">EndpointSlice): Created +201 (}}">EndpointSlice): Created 401: Unauthorized @@ -430,7 +459,7 @@ PUT /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name} #### HTTP Request -PATCH /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name} +PATCH /apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices/{name} #### Parameters @@ -474,7 +503,7 @@ PATCH /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name #### Response 
-200 (}}">EndpointSlice): OK +200 (}}">EndpointSlice): OK 401: Unauthorized @@ -483,7 +512,7 @@ PATCH /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name #### HTTP Request -DELETE /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name} +DELETE /apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices/{name} #### Parameters @@ -538,7 +567,7 @@ DELETE /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{nam #### HTTP Request -DELETE /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices +DELETE /apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices #### Parameters diff --git a/content/en/docs/reference/kubernetes-api/services-resources/endpoints-v1.md b/content/en/docs/reference/kubernetes-api/service-resources/endpoints-v1.md similarity index 75% rename from content/en/docs/reference/kubernetes-api/services-resources/endpoints-v1.md rename to content/en/docs/reference/kubernetes-api/service-resources/endpoints-v1.md index b0c73533f8e20..acc7d938f9139 100644 --- a/content/en/docs/reference/kubernetes-api/services-resources/endpoints-v1.md +++ b/content/en/docs/reference/kubernetes-api/service-resources/endpoints-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Endpoints is a collection of endpoints that implement the actual service." title: "Endpoints" weight: 2 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` @@ -62,21 +74,21 @@ Endpoints is a collection of endpoints that implement the actual service. Exampl *EndpointAddress is a tuple that describes single IP address.* - - **subsets.addresses.ip** (string), required + - **subsets.addresses.ip** (string), required - The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready. + The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready. - - **subsets.addresses.hostname** (string) + - **subsets.addresses.hostname** (string) - The Hostname of this endpoint + The Hostname of this endpoint - - **subsets.addresses.nodeName** (string) + - **subsets.addresses.nodeName** (string) - Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. + Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. - - **subsets.addresses.targetRef** (}}">ObjectReference) + - **subsets.addresses.targetRef** (}}">ObjectReference) - Reference to object providing the endpoint. + Reference to object providing the endpoint. - **subsets.notReadyAddresses** ([]EndpointAddress) @@ -85,21 +97,21 @@ Endpoints is a collection of endpoints that implement the actual service. Exampl *EndpointAddress is a tuple that describes single IP address.* - - **subsets.notReadyAddresses.ip** (string), required + - **subsets.notReadyAddresses.ip** (string), required - The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready. + The IP of this endpoint. 
May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready. - - **subsets.notReadyAddresses.hostname** (string) + - **subsets.notReadyAddresses.hostname** (string) - The Hostname of this endpoint + The Hostname of this endpoint - - **subsets.notReadyAddresses.nodeName** (string) + - **subsets.notReadyAddresses.nodeName** (string) - Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. + Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. - - **subsets.notReadyAddresses.targetRef** (}}">ObjectReference) + - **subsets.notReadyAddresses.targetRef** (}}">ObjectReference) - Reference to object providing the endpoint. + Reference to object providing the endpoint. - **subsets.ports** ([]EndpointPort) @@ -108,21 +120,21 @@ Endpoints is a collection of endpoints that implement the actual service. Exampl *EndpointPort is a tuple that describes a single port.* - - **subsets.ports.port** (int32), required + - **subsets.ports.port** (int32), required - The port number of the endpoint. + The port number of the endpoint. - - **subsets.ports.protocol** (string) + - **subsets.ports.protocol** (string) - The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP. + The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP. - - **subsets.ports.name** (string) + - **subsets.ports.name** (string) - The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined. + The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined. - - **subsets.ports.appProtocol** (string) + - **subsets.ports.appProtocol** (string) - The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default. + The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default. @@ -144,7 +156,7 @@ EndpointsList is a list of endpoints. Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds -- **items** ([]}}">Endpoints), required +- **items** ([]}}">Endpoints), required List of endpoints. 
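Because the subsets structure above nests addresses and ports, a small example is clearer than prose. This is a hypothetical Endpoints manifest (the IPs, node, and port values are invented) showing ready addresses, not-ready addresses, and a named port as documented above:

```yaml
apiVersion: v1
kind: Endpoints
metadata:
  name: example-svc      # matches the name of the Service it backs
subsets:
- addresses:             # endpoints considered ready
  - ip: 10.1.2.3
    nodeName: node-1     # optional: node hosting this endpoint
  notReadyAddresses:     # endpoints not yet ready to serve traffic
  - ip: 10.1.2.4
  ports:
  - name: http           # must match the 'name' of the corresponding ServicePort
    port: 8080
    protocol: TCP
```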
@@ -191,7 +203,7 @@ GET /api/v1/namespaces/{namespace}/endpoints/{name} #### Response -200 (}}">Endpoints): OK +200 (}}">Endpoints): OK 401: Unauthorized @@ -264,7 +276,7 @@ GET /api/v1/namespaces/{namespace}/endpoints #### Response -200 (}}">EndpointsList): OK +200 (}}">EndpointsList): OK 401: Unauthorized @@ -332,7 +344,7 @@ GET /api/v1/endpoints #### Response -200 (}}">EndpointsList): OK +200 (}}">EndpointsList): OK 401: Unauthorized @@ -351,7 +363,7 @@ POST /api/v1/namespaces/{namespace}/endpoints }}">namespace -- **body**: }}">Endpoints, required +- **body**: }}">Endpoints, required @@ -375,11 +387,11 @@ POST /api/v1/namespaces/{namespace}/endpoints #### Response -200 (}}">Endpoints): OK +200 (}}">Endpoints): OK -201 (}}">Endpoints): Created +201 (}}">Endpoints): Created -202 (}}">Endpoints): Accepted +202 (}}">Endpoints): Accepted 401: Unauthorized @@ -403,7 +415,7 @@ PUT /api/v1/namespaces/{namespace}/endpoints/{name} }}">namespace -- **body**: }}">Endpoints, required +- **body**: }}">Endpoints, required @@ -427,9 +439,9 @@ PUT /api/v1/namespaces/{namespace}/endpoints/{name} #### Response -200 (}}">Endpoints): OK +200 (}}">Endpoints): OK -201 (}}">Endpoints): Created +201 (}}">Endpoints): Created 401: Unauthorized @@ -482,7 +494,7 @@ PATCH /api/v1/namespaces/{namespace}/endpoints/{name} #### Response -200 (}}">Endpoints): OK +200 (}}">Endpoints): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/services-resources/ingress-class-v1.md b/content/en/docs/reference/kubernetes-api/service-resources/ingress-class-v1.md similarity index 75% rename from content/en/docs/reference/kubernetes-api/services-resources/ingress-class-v1.md rename to content/en/docs/reference/kubernetes-api/service-resources/ingress-class-v1.md index 121c9551ebbd1..c549ac2f83d7c 100644 --- a/content/en/docs/reference/kubernetes-api/services-resources/ingress-class-v1.md +++ b/content/en/docs/reference/kubernetes-api/service-resources/ingress-class-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "IngressClass represents the class of the Ingress, referenced by the Ingress Spec." title: "IngressClass" weight: 5 +auto_generated: true --- + + `apiVersion: networking.k8s.io/v1` `import "k8s.io/api/networking/v1"` @@ -30,7 +42,7 @@ IngressClass represents the class of the Ingress, referenced by the Ingress Spec Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">IngressClassSpec) +- **spec** (}}">IngressClassSpec) Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -48,10 +60,33 @@ IngressClassSpec provides information about the class of an Ingress. Controller refers to the name of the controller that should handle this class. This allows for different "flavors" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. "acme.io/ingress-controller". This field is immutable. -- **parameters** (}}">TypedLocalObjectReference) +- **parameters** (IngressClassParametersReference) Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters. + + *IngressClassParametersReference identifies an API object. 
This can be used to specify a cluster or namespace-scoped resource.* + + - **parameters.kind** (string), required + + Kind is the type of resource being referenced. + + - **parameters.name** (string), required + + Name is the name of resource being referenced. + + - **parameters.apiGroup** (string) + + APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + + - **parameters.namespace** (string) + + Namespace is the namespace of the resource being referenced. This field is required when scope is set to "Namespace" and must be unset when scope is set to "Cluster". + + - **parameters.scope** (string) + + Scope represents if this refers to a cluster or namespace scoped resource. This may be set to "Cluster" (default) or "Namespace". Field can be enabled with IngressClassNamespacedParams feature gate. + @@ -72,7 +107,7 @@ IngressClassList is a collection of IngressClasses. Standard list metadata. -- **items** ([]}}">IngressClass), required +- **items** ([]}}">IngressClass), required Items is the list of IngressClasses. @@ -114,7 +149,7 @@ GET /apis/networking.k8s.io/v1/ingressclasses/{name} #### Response -200 (}}">IngressClass): OK +200 (}}">IngressClass): OK 401: Unauthorized @@ -182,7 +217,7 @@ GET /apis/networking.k8s.io/v1/ingressclasses #### Response -200 (}}">IngressClassList): OK +200 (}}">IngressClassList): OK 401: Unauthorized @@ -196,7 +231,7 @@ POST /apis/networking.k8s.io/v1/ingressclasses #### Parameters -- **body**: }}">IngressClass, required +- **body**: }}">IngressClass, required @@ -220,11 +255,11 @@ POST /apis/networking.k8s.io/v1/ingressclasses #### Response -200 (}}">IngressClass): OK +200 (}}">IngressClass): OK -201 (}}">IngressClass): Created +201 (}}">IngressClass): Created -202 (}}">IngressClass): Accepted +202 (}}">IngressClass): Accepted 401: Unauthorized @@ -243,7 +278,7 @@ PUT /apis/networking.k8s.io/v1/ingressclasses/{name} name of the IngressClass -- **body**: }}">IngressClass, required +- **body**: }}">IngressClass, required @@ -267,9 +302,9 @@ PUT /apis/networking.k8s.io/v1/ingressclasses/{name} #### Response -200 (}}">IngressClass): OK +200 (}}">IngressClass): OK -201 (}}">IngressClass): Created +201 (}}">IngressClass): Created 401: Unauthorized @@ -317,7 +352,7 @@ PATCH /apis/networking.k8s.io/v1/ingressclasses/{name} #### Response -200 (}}">IngressClass): OK +200 (}}">IngressClass): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/services-resources/ingress-v1.md b/content/en/docs/reference/kubernetes-api/service-resources/ingress-v1.md similarity index 71% rename from content/en/docs/reference/kubernetes-api/services-resources/ingress-v1.md rename to content/en/docs/reference/kubernetes-api/service-resources/ingress-v1.md index 003ad959ea8f4..00fe1bb61797a 100644 --- a/content/en/docs/reference/kubernetes-api/services-resources/ingress-v1.md +++ b/content/en/docs/reference/kubernetes-api/service-resources/ingress-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend." title: "Ingress" weight: 4 +auto_generated: true --- + + `apiVersion: networking.k8s.io/v1` `import "k8s.io/api/networking/v1"` @@ -30,11 +42,11 @@ Ingress is a collection of rules that allow inbound connections to reach the end Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">IngressSpec) +- **spec** (}}">IngressSpec) Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status -- **status** (}}">IngressStatus) +- **status** (}}">IngressStatus) Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -48,43 +60,10 @@ IngressSpec describes the Ingress the user wishes to exist.
-- **defaultBackend** (IngressBackend) +- **defaultBackend** (}}">IngressBackend) DefaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller. - - *IngressBackend describes all endpoints for a given service and port.* - - - **defaultBackend.resource** (}}">TypedLocalObjectReference) - - Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with "Service". - - - **defaultBackend.service** (IngressServiceBackend) - - Service references a Service as a Backend. This is a mutually exclusive setting with "Resource". - - - *IngressServiceBackend references a Kubernetes Service as a Backend.* - - - **defaultBackend.service.name** (string), required - - Name is the referenced service. The service must exist in the same namespace as the Ingress object. - - - **defaultBackend.service.port** (ServiceBackendPort) - - Port of the referenced service. A port name or port number is required for a IngressServiceBackend. - - - *ServiceBackendPort is the service port being referenced.* - - - **defaultBackend.service.port.name** (string) - - Name is the name of the port on the Service. This is a mutually exclusive setting with "Number". - - - **defaultBackend.service.port.number** (int32) - - Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with "Name". - - **ingressClassName** (string) IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation. @@ -115,88 +94,95 @@ IngressSpec describes the Ingress the user wishes to exist. *HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.* - - **rules.http.paths** ([]HTTPIngressPath), required + - **rules.http.paths** ([]HTTPIngressPath), required - *Atomic: will be replaced during a merge* - - A collection of paths that map requests to backends. + *Atomic: will be replaced during a merge* + + A collection of paths that map requests to backends. - - *HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.* + + *HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.* - - **rules.http.paths.backend** (IngressBackend), required + - **rules.http.paths.backend** (}}">IngressBackend), required - Backend defines the referenced service endpoint to which the traffic will be forwarded to. 
+ Backend defines the referenced service endpoint to which the traffic will be forwarded. - - *IngressBackend describes all endpoints for a given service and port.* + - **rules.http.paths.path** (string) - - **rules.http.paths.backend.resource** (}}">TypedLocalObjectReference) + Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional "path" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched. - Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with "Service". + - **rules.http.paths.pathType** (string) - - **rules.http.paths.backend.service** (IngressServiceBackend) + PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is + done on a path element by element basis. A path element refers to the + list of labels in the path split by the '/' separator. A request is a + match for path p if every element of p is an element-wise prefix of the + request path. Note that if the last element of the path is a substring + of the last element in request path, it is not a match (e.g. /foo/bar + matches /foo/bar/baz, but does not match /foo/barbaz). + * ImplementationSpecific: Interpretation of the Path matching is up to + the IngressClass. Implementations can treat this as a separate PathType + or treat it identically to Prefix or Exact path types. + Implementations are required to support all path types. - Service references a Service as a Backend. This is a mutually exclusive setting with "Resource". +- **tls** ([]IngressTLS) - - *IngressServiceBackend references a Kubernetes Service as a Backend.* + *Atomic: will be replaced during a merge* + + TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI. - - **rules.http.paths.backend.service.name** (string), required + + *IngressTLS describes the transport layer security associated with an Ingress.* - Name is the referenced service. The service must exist in the same namespace as the Ingress object. + - **tls.hosts** ([]string) - - **rules.http.paths.backend.service.port** (ServiceBackendPort) + *Atomic: will be replaced during a merge* + + Hosts are a list of hosts included in the TLS certificate. The values in this list must match the names used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified. - Port of the referenced service. A port name or port number is required for a IngressServiceBackend. + - **tls.secretName** (string) - - *ServiceBackendPort is the service port being referenced.* + SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the "Host" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.
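Read together, the IngressSpec fields above compose as in the following sketch. It is illustrative only: the class, host, Service, and Secret names are placeholders, not values defined on this page.

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-ingress
spec:
  ingressClassName: example-class   # selects the implementing controller
  defaultBackend:                   # handles requests that match no rule
    service:
      name: fallback-service
      port:
        number: 80
  tls:
  - hosts:
    - www.example.com               # must match a name in the TLS secret
    secretName: example-tls
  rules:
  - host: www.example.com
    http:
      paths:
      - path: /app
        pathType: Prefix            # element-wise prefix match, per pathType above
        backend:
          service:
            name: app-service
            port:
              number: 8080
```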
- - **rules.http.paths.backend.service.port.name** (string) - Name is the name of the port on the Service. This is a mutually exclusive setting with "Number". - - **rules.http.paths.backend.service.port.number** (int32) - Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with "Name". - - **rules.http.paths.path** (string) +## IngressBackend {#IngressBackend} - Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional "path" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched. +IngressBackend describes all endpoints for a given service and port. - - **rules.http.paths.pathType** (string) +
- PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is - done on a path element by element basis. A path element refers is the - list of labels in the path split by the '/' separator. A request is a - match for path p if every p is an element-wise prefix of p of the - request path. Note that if the last element of the path is a substring - of the last element in request path, it is not a match (e.g. /foo/bar - matches /foo/bar/baz, but does not match /foo/barbaz). - * ImplementationSpecific: Interpretation of the Path matching is up to - the IngressClass. Implementations can treat this as a separate PathType - or treat it identically to Prefix or Exact path types. - Implementations are required to support all path types. +- **resource** (}}">TypedLocalObjectReference) -- **tls** ([]IngressTLS) + Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with "Service". - *Atomic: will be replaced during a merge* - - TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI. +- **service** (IngressServiceBackend) - - *IngressTLS describes the transport layer security associated with an Ingress.* + Service references a Service as a Backend. This is a mutually exclusive setting with "Resource". - - **tls.hosts** ([]string) + + *IngressServiceBackend references a Kubernetes Service as a Backend.* - *Atomic: will be replaced during a merge* - - Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified. + - **service.name** (string), required - - **tls.secretName** (string) + Name is the referenced service. The service must exist in the same namespace as the Ingress object. - SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the "Host" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing. + - **service.port** (ServiceBackendPort) + + Port of the referenced service. A port name or port number is required for an IngressServiceBackend. + + + *ServiceBackendPort is the service port being referenced.* + + - **service.port.name** (string) + + Name is the name of the port on the Service. This is a mutually exclusive setting with "Number". + + - **service.port.number** (int32) + + Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with "Name". @@ -222,37 +208,37 @@ IngressStatus describe the current state of the Ingress.
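Because `resource` and `service` are mutually exclusive, the IngressBackend described above takes one of two shapes. A hedged sketch of both follows; `app-service` is a placeholder, and the `StorageBucket` kind and its API group are hypothetical stand-ins for any TypedLocalObjectReference target:

```yaml
# Service-backed backend: route to a named Service port.
backend:
  service:
    name: app-service
    port:
      number: 8080            # or `name:`; exactly one of the two
---
# Resource-backed backend: route to a local, typed object instead.
# (apiGroup and kind here are hypothetical examples.)
backend:
  resource:
    apiGroup: k8s.example.com
    kind: StorageBucket
    name: static-assets
```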
*LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.* - - **loadBalancer.ingress.hostname** (string) + - **loadBalancer.ingress.hostname** (string) - Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers) + Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers) - - **loadBalancer.ingress.ip** (string) + - **loadBalancer.ingress.ip** (string) - IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers) + IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers) - - **loadBalancer.ingress.ports** ([]PortStatus) + - **loadBalancer.ingress.ports** ([]PortStatus) - *Atomic: will be replaced during a merge* - - Ports is a list of records of service ports If used, every port defined in the service should have an entry in it + *Atomic: will be replaced during a merge* + + Ports is a list of records of service ports. If used, every port defined in the service should have an entry in it. - - ** + + ** - - **loadBalancer.ingress.ports.port** (int32), required + - **loadBalancer.ingress.ports.port** (int32), required - Port is the port number of the service port of which status is recorded here + Port is the port number of the service port of which status is recorded here. - - **loadBalancer.ingress.ports.protocol** (string), required + - **loadBalancer.ingress.ports.protocol** (string), required - Protocol is the protocol of the service port of which status is recorded here The supported values are: "TCP", "UDP", "SCTP" + Protocol is the protocol of the service port of which status is recorded here. The supported values are: "TCP", "UDP", "SCTP". - - **loadBalancer.ingress.ports.error** (string) + - **loadBalancer.ingress.ports.error** (string) - Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use - CamelCase names - - cloud provider specific error values must have names that comply with the - format foo.example.com/CamelCase. + Error is to record the problem with the service port. The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use + CamelCase names + - cloud provider specific error values must have names that comply with the + format foo.example.com/CamelCase. @@ -264,19 +250,21 @@ IngressList is a collection of Ingress.
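For orientation before the IngressList fields below, here is a hedged sketch of the status stanza a controller might write once it has programmed the Ingress; both address values are illustrative:

```yaml
status:
  loadBalancer:
    ingress:
    - ip: 203.0.113.10            # IP-based ingress point (e.g. GCE or OpenStack)
      ports:
      - port: 443
        protocol: TCP             # one of "TCP", "UDP", "SCTP"
    - hostname: lb.example.com    # DNS-based ingress point (e.g. AWS)
```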
-- **apiVersion**: networking.k8s.io/v1 +- **items** ([]}}">Ingress), required + Items is the list of Ingress. -- **kind**: IngressList +- **apiVersion** (string) + APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources -- **metadata** (}}">ListMeta) +- **kind** (string) - Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds -- **items** ([]}}">Ingress), required +- **metadata** (}}">ListMeta) - Items is the list of Ingress. + Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata @@ -321,7 +309,7 @@ GET /apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name} #### Response -200 (}}">Ingress): OK +200 (}}">Ingress): OK 401: Unauthorized @@ -354,7 +342,7 @@ GET /apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name}/status #### Response -200 (}}">Ingress): OK +200 (}}">Ingress): OK 401: Unauthorized @@ -427,7 +415,7 @@ GET /apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses #### Response -200 (}}">IngressList): OK +200 (}}">IngressList): OK 401: Unauthorized @@ -495,7 +483,7 @@ GET /apis/networking.k8s.io/v1/ingresses #### Response -200 (}}">IngressList): OK +200 (}}">IngressList): OK 401: Unauthorized @@ -514,7 +502,7 @@ POST /apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses }}">namespace -- **body**: }}">Ingress, required +- **body**: }}">Ingress, required @@ -538,11 +526,11 @@ POST /apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses #### Response -200 (}}">Ingress): OK +200 (}}">Ingress): OK -201 (}}">Ingress): Created +201 (}}">Ingress): Created -202 (}}">Ingress): Accepted +202 (}}">Ingress): Accepted 401: Unauthorized @@ -566,7 +554,7 @@ PUT /apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name} }}">namespace -- **body**: }}">Ingress, required +- **body**: }}">Ingress, required @@ -590,9 +578,9 @@ PUT /apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name} #### Response -200 (}}">Ingress): OK +200 (}}">Ingress): OK -201 (}}">Ingress): Created +201 (}}">Ingress): Created 401: Unauthorized @@ -616,7 +604,7 @@ PUT /apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name}/status }}">namespace -- **body**: }}">Ingress, required +- **body**: }}">Ingress, required @@ -640,9 +628,9 @@ PUT /apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name}/status #### Response -200 (}}">Ingress): OK +200 (}}">Ingress): OK -201 (}}">Ingress): Created +201 (}}">Ingress): Created 401: Unauthorized @@ -695,7 +683,7 @@ PATCH /apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name} #### Response -200 (}}">Ingress): OK +200 (}}">Ingress): OK 401: Unauthorized @@ -748,7 +736,7 @@ PATCH /apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name}/status #### Response -200 (}}">Ingress): OK +200 (}}">Ingress): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/services-resources/service-v1.md 
b/content/en/docs/reference/kubernetes-api/service-resources/service-v1.md similarity index 83% rename from content/en/docs/reference/kubernetes-api/services-resources/service-v1.md rename to content/en/docs/reference/kubernetes-api/service-resources/service-v1.md index bfb2c819bd64b..8d643c688aac9 100644 --- a/content/en/docs/reference/kubernetes-api/services-resources/service-v1.md +++ b/content/en/docs/reference/kubernetes-api/service-resources/service-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy." title: "Service" weight: 1 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` @@ -30,11 +42,11 @@ Service is a named abstraction of software service (for example, mysql) consisti Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">ServiceSpec) +- **spec** (}}">ServiceSpec) Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status -- **status** (}}">ServiceStatus) +- **status** (}}">ServiceStatus) Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -134,14 +146,22 @@ ServiceSpec describes the attributes that a user creates on a service. If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ +- **loadBalancerClass** (string) + + loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + - **externalName** (string) - externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be "ExternalName". + externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved.
Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". - **externalTrafficPolicy** (string) externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. "Local" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. "Cluster" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading. +- **internalTrafficPolicy** (string) + + InternalTrafficPolicy specifies if the cluster internal traffic should be routed to all endpoints or node-local endpoints only. "Cluster" routes internal traffic to a Service to all endpoints. "Local" routes traffic to node-local endpoints only, traffic is dropped if no node-local endpoints are ready. The default value is "Cluster". + - **healthCheckNodePort** (int32) healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type). @@ -164,13 +184,13 @@ ServiceSpec describes the attributes that a user creates on a service. *ClientIPConfig represents the configurations of Client IP based session affinity.* - - **sessionAffinityConfig.clientIP.timeoutSeconds** (int32) + - **sessionAffinityConfig.clientIP.timeoutSeconds** (int32) - timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && \<=86400(for 1 day) if ServiceAffinity == "ClientIP". Default value is 10800(for 3 hours). + timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && \<=86400(for 1 day) if ServiceAffinity == "ClientIP". Default value is 10800(for 3 hours). - **topologyKeys** ([]string) - topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value "*" may be used to mean "any topology". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. + topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. 
Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value "*" may be used to mean "any topology". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. This field is deprecated and will be removed in a future version. - **allocateLoadBalancerNodePorts** (boolean) @@ -238,37 +258,37 @@ ServiceStatus represents the current status of a service. *LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.* - - **loadBalancer.ingress.hostname** (string) + - **loadBalancer.ingress.hostname** (string) - Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers) + Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers) - - **loadBalancer.ingress.ip** (string) + - **loadBalancer.ingress.ip** (string) - IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers) + IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers) - - **loadBalancer.ingress.ports** ([]PortStatus) + - **loadBalancer.ingress.ports** ([]PortStatus) - *Atomic: will be replaced during a merge* - - Ports is a list of records of service ports If used, every port defined in the service should have an entry in it + *Atomic: will be replaced during a merge* + + Ports is a list of records of service ports. If used, every port defined in the service should have an entry in it. - - ** + + ** - - **loadBalancer.ingress.ports.port** (int32), required + - **loadBalancer.ingress.ports.port** (int32), required - Port is the port number of the service port of which status is recorded here + Port is the port number of the service port of which status is recorded here. - - **loadBalancer.ingress.ports.protocol** (string), required + - **loadBalancer.ingress.ports.protocol** (string), required - Protocol is the protocol of the service port of which status is recorded here The supported values are: "TCP", "UDP", "SCTP" + Protocol is the protocol of the service port of which status is recorded here. The supported values are: "TCP", "UDP", "SCTP". - - **loadBalancer.ingress.ports.error** (string) + - **loadBalancer.ingress.ports.error** (string) - Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use - CamelCase names - - cloud provider specific error values must have names that comply with the - format foo.example.com/CamelCase. + Error is to record the problem with the service port. The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use + CamelCase names + - cloud provider specific error values must have names that comply with the + format foo.example.com/CamelCase. @@ -290,7 +310,7 @@ ServiceList holds a list of services. Standard list metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds -- **items** ([]}}">Service), required +- **items** ([]}}">Service), required List of services @@ -337,7 +357,7 @@ GET /api/v1/namespaces/{namespace}/services/{name} #### Response -200 (}}">Service): OK +200 (}}">Service): OK 401: Unauthorized @@ -370,7 +390,7 @@ GET /api/v1/namespaces/{namespace}/services/{name}/status #### Response -200 (}}">Service): OK +200 (}}">Service): OK 401: Unauthorized @@ -443,7 +463,7 @@ GET /api/v1/namespaces/{namespace}/services #### Response -200 (}}">ServiceList): OK +200 (}}">ServiceList): OK 401: Unauthorized @@ -511,7 +531,7 @@ GET /api/v1/services #### Response -200 (}}">ServiceList): OK +200 (}}">ServiceList): OK 401: Unauthorized @@ -530,7 +550,7 @@ POST /api/v1/namespaces/{namespace}/services }}">namespace -- **body**: }}">Service, required +- **body**: }}">Service, required @@ -554,11 +574,11 @@ POST /api/v1/namespaces/{namespace}/services #### Response -200 (}}">Service): OK +200 (}}">Service): OK -201 (}}">Service): Created +201 (}}">Service): Created -202 (}}">Service): Accepted +202 (}}">Service): Accepted 401: Unauthorized @@ -582,7 +602,7 @@ PUT /api/v1/namespaces/{namespace}/services/{name} }}">namespace -- **body**: }}">Service, required +- **body**: }}">Service, required @@ -606,9 +626,9 @@ PUT /api/v1/namespaces/{namespace}/services/{name} #### Response -200 (}}">Service): OK +200 (}}">Service): OK -201 (}}">Service): Created +201 (}}">Service): Created 401: Unauthorized @@ -632,7 +652,7 @@ PUT /api/v1/namespaces/{namespace}/services/{name}/status }}">namespace -- **body**: }}">Service, required +- **body**: }}">Service, required @@ -656,9 +676,9 @@ PUT /api/v1/namespaces/{namespace}/services/{name}/status #### Response -200 (}}">Service): OK +200 (}}">Service): OK -201 (}}">Service): Created +201 (}}">Service): Created 401: Unauthorized @@ -711,7 +731,7 @@ PATCH /api/v1/namespaces/{namespace}/services/{name} #### Response -200 (}}">Service): OK +200 (}}">Service): OK 401: Unauthorized @@ -764,7 +784,7 @@ PATCH /api/v1/namespaces/{namespace}/services/{name}/status #### Response -200 (}}">Service): OK +200 (}}">Service): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/services-resources/_index.md b/content/en/docs/reference/kubernetes-api/services-resources/_index.md deleted file mode 100644 index 1c4c64040dc0b..0000000000000 --- a/content/en/docs/reference/kubernetes-api/services-resources/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Services Resources" -weight: 2 ---- diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/_index.md b/content/en/docs/reference/kubernetes-api/workload-resources/_index.md new file mode 100644 index 0000000000000..7c4e44d99a005 --- /dev/null +++ b/content/en/docs/reference/kubernetes-api/workload-resources/_index.md @@ -0,0 +1,17 @@ +--- +title: "Workload Resources" +weight: 1 +auto_generated: true +--- + + + diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/controller-revision-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/controller-revision-v1.md similarity index 85% rename from content/en/docs/reference/kubernetes-api/workloads-resources/controller-revision-v1.md rename to content/en/docs/reference/kubernetes-api/workload-resources/controller-revision-v1.md index 950e9d729c332..23b324fb6e198 100644 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/controller-revision-v1.md +++ 
b/content/en/docs/reference/kubernetes-api/workload-resources/controller-revision-v1.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "ControllerRevision implements an immutable snapshot of state data." title: "ControllerRevision" -weight: 9 +weight: 8 +auto_generated: true --- + + `apiVersion: apps/v1` `import "k8s.io/api/apps/v1"` @@ -88,7 +100,7 @@ ControllerRevisionList is a resource containing a list of ControllerRevision obj More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **items** ([]}}">ControllerRevision), required +- **items** ([]}}">ControllerRevision), required Items is the list of ControllerRevisions @@ -135,7 +147,7 @@ GET /apis/apps/v1/namespaces/{namespace}/controllerrevisions/{name} #### Response -200 (}}">ControllerRevision): OK +200 (}}">ControllerRevision): OK 401: Unauthorized @@ -208,7 +220,7 @@ GET /apis/apps/v1/namespaces/{namespace}/controllerrevisions #### Response -200 (}}">ControllerRevisionList): OK +200 (}}">ControllerRevisionList): OK 401: Unauthorized @@ -276,7 +288,7 @@ GET /apis/apps/v1/controllerrevisions #### Response -200 (}}">ControllerRevisionList): OK +200 (}}">ControllerRevisionList): OK 401: Unauthorized @@ -295,7 +307,7 @@ POST /apis/apps/v1/namespaces/{namespace}/controllerrevisions }}">namespace -- **body**: }}">ControllerRevision, required +- **body**: }}">ControllerRevision, required @@ -319,11 +331,11 @@ POST /apis/apps/v1/namespaces/{namespace}/controllerrevisions #### Response -200 (}}">ControllerRevision): OK +200 (}}">ControllerRevision): OK -201 (}}">ControllerRevision): Created +201 (}}">ControllerRevision): Created -202 (}}">ControllerRevision): Accepted +202 (}}">ControllerRevision): Accepted 401: Unauthorized @@ -347,7 +359,7 @@ PUT /apis/apps/v1/namespaces/{namespace}/controllerrevisions/{name} }}">namespace -- **body**: }}">ControllerRevision, required +- **body**: }}">ControllerRevision, required @@ -371,9 +383,9 @@ PUT /apis/apps/v1/namespaces/{namespace}/controllerrevisions/{name} #### Response -200 (}}">ControllerRevision): OK +200 (}}">ControllerRevision): OK -201 (}}">ControllerRevision): Created +201 (}}">ControllerRevision): Created 401: Unauthorized @@ -426,7 +438,7 @@ PATCH /apis/apps/v1/namespaces/{namespace}/controllerrevisions/{name} #### Response -200 (}}">ControllerRevision): OK +200 (}}">ControllerRevision): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/cron-job-v1beta1.md b/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v1.md similarity index 79% rename from content/en/docs/reference/kubernetes-api/workloads-resources/cron-job-v1beta1.md rename to content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v1.md index 22a0135bf851c..a518d1f72a76c 100644 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/cron-job-v1beta1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v1.md @@ -1,17 +1,29 @@ --- api_metadata: - apiVersion: "batch/v1beta1" - import: "k8s.io/api/batch/v1beta1" + apiVersion: "batch/v1" + import: "k8s.io/api/batch/v1" kind: "CronJob" content_type: "api_reference" description: "CronJob represents the configuration of a single cron job." 
-title: "CronJob v1beta1" -weight: 12 +title: "CronJob" +weight: 11 +auto_generated: true --- -`apiVersion: batch/v1beta1` + -`import "k8s.io/api/batch/v1beta1"` +`apiVersion: batch/v1` + +`import "k8s.io/api/batch/v1"` ## CronJob {#CronJob} @@ -20,7 +32,7 @@ CronJob represents the configuration of a single cron job.
-- **apiVersion**: batch/v1beta1 +- **apiVersion**: batch/v1 - **kind**: CronJob @@ -30,11 +42,11 @@ CronJob represents the configuration of a single cron job. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">CronJobSpec) +- **spec** (}}">CronJobSpec) Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status -- **status** (}}">CronJobStatus) +- **status** (}}">CronJobStatus) Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -59,7 +71,7 @@ CronJobSpec describes how the job execution will look like and when it will actu Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - - **jobTemplate.spec** (}}">JobSpec) + - **jobTemplate.spec** (}}">JobSpec) Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -81,11 +93,11 @@ CronJobSpec describes how the job execution will look like and when it will actu - **successfulJobsHistoryLimit** (int32) - The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3. + The number of successful finished jobs to retain. Value must be a non-negative integer. Defaults to 3. - **failedJobsHistoryLimit** (int32) - The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1. + The number of failed finished jobs to retain. Value must be a non-negative integer. Defaults to 1. @@ -99,6 +111,8 @@ CronJobStatus represents the current state of a cron job. - **active** ([]}}">ObjectReference) + *Atomic: will be replaced during a merge* + A list of pointers to currently running jobs. - **lastScheduleTime** (Time) @@ -108,6 +122,13 @@ CronJobStatus represents the current state of a cron job. *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* +- **lastSuccessfulTime** (Time) + + Information about the last time the job successfully completed. + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + @@ -118,7 +139,7 @@ CronJobList is a collection of cron jobs.
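Pulling the `batch/v1` CronJobSpec fields above into one place, a minimal manifest might look like this sketch; the name, schedule, image, and command are placeholders:

```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: example-cronjob
spec:
  schedule: "*/5 * * * *"          # standard cron format
  successfulJobsHistoryLimit: 3    # non-negative; defaults to 3
  failedJobsHistoryLimit: 1        # non-negative; defaults to 1
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - name: task
            image: busybox:1.33
            command: ["sh", "-c", "date"]
```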
-- **apiVersion**: batch/v1beta1 +- **apiVersion**: batch/v1 - **kind**: CronJobList @@ -128,7 +149,7 @@ CronJobList is a collection of cron jobs. Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **items** ([]}}">CronJob), required +- **items** ([]}}">CronJob), required items is the list of CronJobs. @@ -151,7 +172,7 @@ CronJobList is a collection of cron jobs. #### HTTP Request -GET /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} +GET /apis/batch/v1/namespaces/{namespace}/cronjobs/{name} #### Parameters @@ -175,7 +196,7 @@ GET /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} #### Response -200 (}}">CronJob): OK +200 (}}">CronJob): OK 401: Unauthorized @@ -184,7 +205,7 @@ GET /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} #### HTTP Request -GET /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status +GET /apis/batch/v1/namespaces/{namespace}/cronjobs/{name}/status #### Parameters @@ -208,7 +229,7 @@ GET /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status #### Response -200 (}}">CronJob): OK +200 (}}">CronJob): OK 401: Unauthorized @@ -217,7 +238,7 @@ GET /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status #### HTTP Request -GET /apis/batch/v1beta1/namespaces/{namespace}/cronjobs +GET /apis/batch/v1/namespaces/{namespace}/cronjobs #### Parameters @@ -281,7 +302,7 @@ GET /apis/batch/v1beta1/namespaces/{namespace}/cronjobs #### Response -200 (}}">CronJobList): OK +200 (}}">CronJobList): OK 401: Unauthorized @@ -290,7 +311,7 @@ GET /apis/batch/v1beta1/namespaces/{namespace}/cronjobs #### HTTP Request -GET /apis/batch/v1beta1/cronjobs +GET /apis/batch/v1/cronjobs #### Parameters @@ -349,7 +370,7 @@ GET /apis/batch/v1beta1/cronjobs #### Response -200 (}}">CronJobList): OK +200 (}}">CronJobList): OK 401: Unauthorized @@ -358,7 +379,7 @@ GET /apis/batch/v1beta1/cronjobs #### HTTP Request -POST /apis/batch/v1beta1/namespaces/{namespace}/cronjobs +POST /apis/batch/v1/namespaces/{namespace}/cronjobs #### Parameters @@ -368,7 +389,7 @@ POST /apis/batch/v1beta1/namespaces/{namespace}/cronjobs }}">namespace -- **body**: }}">CronJob, required +- **body**: }}">CronJob, required @@ -392,11 +413,11 @@ POST /apis/batch/v1beta1/namespaces/{namespace}/cronjobs #### Response -200 (}}">CronJob): OK +200 (}}">CronJob): OK -201 (}}">CronJob): Created +201 (}}">CronJob): Created -202 (}}">CronJob): Accepted +202 (}}">CronJob): Accepted 401: Unauthorized @@ -405,7 +426,7 @@ POST /apis/batch/v1beta1/namespaces/{namespace}/cronjobs #### HTTP Request -PUT /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} +PUT /apis/batch/v1/namespaces/{namespace}/cronjobs/{name} #### Parameters @@ -420,7 +441,7 @@ PUT /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} }}">namespace -- **body**: }}">CronJob, required +- **body**: }}">CronJob, required @@ -444,9 +465,9 @@ PUT /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} #### Response -200 (}}">CronJob): OK +200 (}}">CronJob): OK -201 (}}">CronJob): Created +201 (}}">CronJob): Created 401: Unauthorized @@ -455,7 +476,7 @@ PUT /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} #### HTTP Request -PUT /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status +PUT /apis/batch/v1/namespaces/{namespace}/cronjobs/{name}/status #### Parameters @@ -470,7 +491,7 @@ PUT /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status }}">namespace -- **body**: }}">CronJob, required +- **body**: 
}}">CronJob, required @@ -494,9 +515,9 @@ PUT /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status #### Response -200 (}}">CronJob): OK +200 (}}">CronJob): OK -201 (}}">CronJob): Created +201 (}}">CronJob): Created 401: Unauthorized @@ -505,7 +526,7 @@ PUT /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status #### HTTP Request -PATCH /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} +PATCH /apis/batch/v1/namespaces/{namespace}/cronjobs/{name} #### Parameters @@ -549,7 +570,7 @@ PATCH /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} #### Response -200 (}}">CronJob): OK +200 (}}">CronJob): OK 401: Unauthorized @@ -558,7 +579,7 @@ PATCH /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} #### HTTP Request -PATCH /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status +PATCH /apis/batch/v1/namespaces/{namespace}/cronjobs/{name}/status #### Parameters @@ -602,7 +623,7 @@ PATCH /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status #### Response -200 (}}">CronJob): OK +200 (}}">CronJob): OK 401: Unauthorized @@ -611,7 +632,7 @@ PATCH /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status #### HTTP Request -DELETE /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} +DELETE /apis/batch/v1/namespaces/{namespace}/cronjobs/{name} #### Parameters @@ -666,7 +687,7 @@ DELETE /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} #### HTTP Request -DELETE /apis/batch/v1beta1/namespaces/{namespace}/cronjobs +DELETE /apis/batch/v1/namespaces/{namespace}/cronjobs #### Parameters diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/daemon-set-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/daemon-set-v1.md similarity index 76% rename from content/en/docs/reference/kubernetes-api/workloads-resources/daemon-set-v1.md rename to content/en/docs/reference/kubernetes-api/workload-resources/daemon-set-v1.md index 95ae869be1e5e..2a313f533fa6a 100644 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/daemon-set-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/daemon-set-v1.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "DaemonSet represents the configuration of a daemon set." title: "DaemonSet" -weight: 10 +weight: 9 +auto_generated: true --- + + `apiVersion: apps/v1` `import "k8s.io/api/apps/v1"` @@ -30,11 +42,11 @@ DaemonSet represents the configuration of a daemon set. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">DaemonSetSpec) +- **spec** (}}">DaemonSetSpec) The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status -- **status** (}}">DaemonSetStatus) +- **status** (}}">DaemonSetStatus) The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -52,7 +64,7 @@ DaemonSetSpec is the specification of a daemon set. A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors -- **template** (}}">PodTemplateSpec), required +- **template** (}}">PodTemplateSpec), required An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template @@ -78,12 +90,19 @@ DaemonSetSpec is the specification of a daemon set. *Spec to control the desired behavior of daemon set rolling update.* - - **updateStrategy.rollingUpdate.maxUnavailable** (IntOrString) + - **updateStrategy.rollingUpdate.maxSurge** (IntOrString) + + The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling the DaemonSetUpdateSurge feature gate. + + + *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* + + - **updateStrategy.rollingUpdate.maxUnavailable** (IntOrString) - The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update. + The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding down to a minimum of one.
This cannot be 0 if MaxSurge is 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update. - - *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* + + *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* - **revisionHistoryLimit** (int32) @@ -187,7 +206,7 @@ DaemonSetList is a collection of daemon sets. Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **items** ([]}}">DaemonSet), required +- **items** ([]}}">DaemonSet), required A list of daemon sets. @@ -234,7 +253,7 @@ GET /apis/apps/v1/namespaces/{namespace}/daemonsets/{name} #### Response -200 (}}">DaemonSet): OK +200 (}}">DaemonSet): OK 401: Unauthorized @@ -267,7 +286,7 @@ GET /apis/apps/v1/namespaces/{namespace}/daemonsets/{name}/status #### Response -200 (}}">DaemonSet): OK +200 (}}">DaemonSet): OK 401: Unauthorized @@ -340,7 +359,7 @@ GET /apis/apps/v1/namespaces/{namespace}/daemonsets #### Response -200 (}}">DaemonSetList): OK +200 (}}">DaemonSetList): OK 401: Unauthorized @@ -408,7 +427,7 @@ GET /apis/apps/v1/daemonsets #### Response -200 (}}">DaemonSetList): OK +200 (}}">DaemonSetList): OK 401: Unauthorized @@ -427,7 +446,7 @@ POST /apis/apps/v1/namespaces/{namespace}/daemonsets }}">namespace -- **body**: }}">DaemonSet, required +- **body**: }}">DaemonSet, required @@ -451,11 +470,11 @@ POST /apis/apps/v1/namespaces/{namespace}/daemonsets #### Response -200 (}}">DaemonSet): OK +200 (}}">DaemonSet): OK -201 (}}">DaemonSet): Created +201 (}}">DaemonSet): Created -202 (}}">DaemonSet): Accepted +202 (}}">DaemonSet): Accepted 401: Unauthorized @@ -479,7 +498,7 @@ PUT /apis/apps/v1/namespaces/{namespace}/daemonsets/{name} }}">namespace -- **body**: }}">DaemonSet, required +- **body**: }}">DaemonSet, required @@ -503,9 +522,9 @@ PUT /apis/apps/v1/namespaces/{namespace}/daemonsets/{name} #### Response -200 (}}">DaemonSet): OK +200 (}}">DaemonSet): OK -201 (}}">DaemonSet): Created +201 (}}">DaemonSet): Created 401: Unauthorized @@ -529,7 +548,7 @@ PUT /apis/apps/v1/namespaces/{namespace}/daemonsets/{name}/status }}">namespace -- **body**: }}">DaemonSet, required +- **body**: }}">DaemonSet, required @@ -553,9 +572,9 @@ PUT /apis/apps/v1/namespaces/{namespace}/daemonsets/{name}/status #### Response -200 (}}">DaemonSet): OK +200 (}}">DaemonSet): OK -201 (}}">DaemonSet): Created +201 (}}">DaemonSet): Created 401: Unauthorized @@ -608,7 +627,7 @@ PATCH /apis/apps/v1/namespaces/{namespace}/daemonsets/{name} #### Response -200 (}}">DaemonSet): OK +200 (}}">DaemonSet): OK 401: Unauthorized @@ -661,7 +680,7 @@ PATCH /apis/apps/v1/namespaces/{namespace}/daemonsets/{name}/status #### Response -200 (}}">DaemonSet): OK
+200 (}}">DaemonSet): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/deployment-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/deployment-v1.md similarity index 78% rename from content/en/docs/reference/kubernetes-api/workloads-resources/deployment-v1.md rename to content/en/docs/reference/kubernetes-api/workload-resources/deployment-v1.md index 7e5b39c92922a..f304fd92391e8 100644 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/deployment-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/deployment-v1.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "Deployment enables declarative updates for Pods and ReplicaSets." title: "Deployment" -weight: 7 +weight: 6 +auto_generated: true --- + + `apiVersion: apps/v1` `import "k8s.io/api/apps/v1"` @@ -30,11 +42,11 @@ Deployment enables declarative updates for Pods and ReplicaSets. Standard object metadata. -- **spec** (}}">DeploymentSpec) +- **spec** (}}">DeploymentSpec) Specification of the desired behavior of the Deployment. -- **status** (}}">DeploymentStatus) +- **status** (}}">DeploymentStatus) Most recently observed status of the Deployment. @@ -52,7 +64,7 @@ DeploymentSpec is the specification of the desired behavior of the Deployment. Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels. -- **template** (}}">PodTemplateSpec), required +- **template** (}}">PodTemplateSpec), required Template describes the pods that will be created. @@ -84,19 +96,19 @@ DeploymentSpec is the specification of the desired behavior of the Deployment. *Spec to control the desired behavior of rolling update.* - - **strategy.rollingUpdate.maxSurge** (IntOrString) + - **strategy.rollingUpdate.maxSurge** (IntOrString) - The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods. + The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods. - - *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* + + *IntOrString is a type that can hold an int32 or a string. 
When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* - - **strategy.rollingUpdate.maxUnavailable** (IntOrString) + - **strategy.rollingUpdate.maxUnavailable** (IntOrString) - The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods. + The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods. - - *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* + + *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* - **revisionHistoryLimit** (int32) @@ -207,7 +219,7 @@ DeploymentList is a list of Deployments. Standard list metadata. -- **items** ([]}}">Deployment), required +- **items** ([]}}">Deployment), required Items is the list of Deployments. 
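As a worked example of the rolling-update fields above, here is a sketch of a Deployment using the RollingUpdate strategy; the names and image are placeholders:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: example                 # must match the template labels
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 25%                # percentage rounds up
      maxUnavailable: 25%          # percentage rounds down; not 0 while maxSurge is 0
  template:
    metadata:
      labels:
        app: example
    spec:
      containers:
      - name: web
        image: nginx:1.20
```

With `replicas: 3`, `maxSurge: 25%` rounds up to one extra pod (at most 4 scheduled during the rollout), while `maxUnavailable: 25%` rounds down to zero, so all three replicas must remain available throughout the update.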
@@ -254,7 +266,7 @@ GET /apis/apps/v1/namespaces/{namespace}/deployments/{name} #### Response -200 (}}">Deployment): OK +200 (}}">Deployment): OK 401: Unauthorized @@ -287,7 +299,7 @@ GET /apis/apps/v1/namespaces/{namespace}/deployments/{name}/status #### Response -200 (}}">Deployment): OK +200 (}}">Deployment): OK 401: Unauthorized @@ -360,7 +372,7 @@ GET /apis/apps/v1/namespaces/{namespace}/deployments #### Response -200 (}}">DeploymentList): OK +200 (}}">DeploymentList): OK 401: Unauthorized @@ -428,7 +440,7 @@ GET /apis/apps/v1/deployments #### Response -200 (}}">DeploymentList): OK +200 (}}">DeploymentList): OK 401: Unauthorized @@ -447,7 +459,7 @@ POST /apis/apps/v1/namespaces/{namespace}/deployments }}">namespace -- **body**: }}">Deployment, required +- **body**: }}">Deployment, required @@ -471,11 +483,11 @@ POST /apis/apps/v1/namespaces/{namespace}/deployments #### Response -200 (}}">Deployment): OK +200 (}}">Deployment): OK -201 (}}">Deployment): Created +201 (}}">Deployment): Created -202 (}}">Deployment): Accepted +202 (}}">Deployment): Accepted 401: Unauthorized @@ -499,7 +511,7 @@ PUT /apis/apps/v1/namespaces/{namespace}/deployments/{name} }}">namespace -- **body**: }}">Deployment, required +- **body**: }}">Deployment, required @@ -523,9 +535,9 @@ PUT /apis/apps/v1/namespaces/{namespace}/deployments/{name} #### Response -200 (}}">Deployment): OK +200 (}}">Deployment): OK -201 (}}">Deployment): Created +201 (}}">Deployment): Created 401: Unauthorized @@ -549,7 +561,7 @@ PUT /apis/apps/v1/namespaces/{namespace}/deployments/{name}/status }}">namespace -- **body**: }}">Deployment, required +- **body**: }}">Deployment, required @@ -573,9 +585,9 @@ PUT /apis/apps/v1/namespaces/{namespace}/deployments/{name}/status #### Response -200 (}}">Deployment): OK +200 (}}">Deployment): OK -201 (}}">Deployment): Created +201 (}}">Deployment): Created 401: Unauthorized @@ -628,7 +640,7 @@ PATCH /apis/apps/v1/namespaces/{namespace}/deployments/{name} #### Response -200 (}}">Deployment): OK +200 (}}">Deployment): OK 401: Unauthorized @@ -681,7 +693,7 @@ PATCH /apis/apps/v1/namespaces/{namespace}/deployments/{name}/status #### Response -200 (}}">Deployment): OK +200 (}}">Deployment): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/ephemeral-container.md b/content/en/docs/reference/kubernetes-api/workload-resources/ephemeral-containers-v1.md similarity index 58% rename from content/en/docs/reference/kubernetes-api/workloads-resources/ephemeral-container.md rename to content/en/docs/reference/kubernetes-api/workload-resources/ephemeral-containers-v1.md index 7355af9df88d5..960fc9c8c4a17 100644 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/ephemeral-container.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/ephemeral-containers-v1.md @@ -1,19 +1,58 @@ --- api_metadata: - apiVersion: "" + apiVersion: "v1" import: "k8s.io/api/core/v1" - kind: "EphemeralContainer" + kind: "EphemeralContainers" content_type: "api_reference" -description: "An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging." -title: "EphemeralContainer" -weight: 3 +description: "A list of ephemeral containers used with the Pod ephemeralcontainers subresource." 
+title: "EphemeralContainers" +weight: 2 +auto_generated: true --- + +`apiVersion: v1` `import "k8s.io/api/core/v1"` +## EphemeralContainers {#EphemeralContainers} + +A list of ephemeral containers used with the Pod ephemeralcontainers subresource. + +
+ +- **apiVersion**: v1 + + +- **kind**: EphemeralContainers + + +- **metadata** (}}">ObjectMeta) + + +- **ephemeralContainers** ([]}}">EphemeralContainer), required + + *Patch strategy: merge on key `name`* + + A list of ephemeral containers associated with this pod. New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified. + + + + + +## EphemeralContainer {#EphemeralContainer} + An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.
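A hedged sketch of the EphemeralContainers object described above, shaped the way it might be sent to a Pod's ephemeralcontainers subresource; the pod, container, and image names are hypothetical.

```yaml
apiVersion: v1
kind: EphemeralContainers
metadata:
  name: my-pod             # must match the name of the target Pod
  namespace: default
ephemeralContainers:       # new entries may be appended, not removed or modified
- name: debugger           # merge key for the patch strategy noted above
  image: busybox:1.28      # hypothetical debugging image
  command: ["sh"]
  stdin: true
  tty: true
```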
@@ -28,7 +67,7 @@ An EphemeralContainer is a container that may be added temporarily to an existin -### Image {#Image} +### Image - **image** (string) @@ -39,7 +78,7 @@ An EphemeralContainer is a container that may be added temporarily to an existin Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images -### Entrypoint {#Entrypoint} +### Entrypoint - **command** ([]string) @@ -54,7 +93,7 @@ An EphemeralContainer is a container that may be added temporarily to an existin Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. -### Environment variables {#Environment-variables} +### Environment variables - **env** ([]EnvVar) @@ -81,51 +120,51 @@ An EphemeralContainer is a container that may be added temporarily to an existin *EnvVarSource represents a source for the value of an EnvVar.* - - **env.valueFrom.configMapKeyRef** (ConfigMapKeySelector) + - **env.valueFrom.configMapKeyRef** (ConfigMapKeySelector) - Selects a key of a ConfigMap. + Selects a key of a ConfigMap. - - *Selects a key from a ConfigMap.* + + *Selects a key from a ConfigMap.* - - **env.valueFrom.configMapKeyRef.key** (string), required + - **env.valueFrom.configMapKeyRef.key** (string), required - The key to select. + The key to select. - - **env.valueFrom.configMapKeyRef.name** (string) + - **env.valueFrom.configMapKeyRef.name** (string) - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - **env.valueFrom.configMapKeyRef.optional** (boolean) + - **env.valueFrom.configMapKeyRef.optional** (boolean) - Specify whether the ConfigMap or its key must be defined + Specify whether the ConfigMap or its key must be defined - - **env.valueFrom.fieldRef** (}}">ObjectFieldSelector) + - **env.valueFrom.fieldRef** (}}">ObjectFieldSelector) - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\<KEY\>']`, `metadata.annotations['\<KEY\>']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\<KEY\>']`, `metadata.annotations['\<KEY\>']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - - **env.valueFrom.resourceFieldRef** (}}">ResourceFieldSelector) + - **env.valueFrom.resourceFieldRef** (}}">ResourceFieldSelector) - Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
- - **env.valueFrom.secretKeyRef** (SecretKeySelector) + - **env.valueFrom.secretKeyRef** (SecretKeySelector) - Selects a key of a secret in the pod's namespace + Selects a key of a secret in the pod's namespace - - *SecretKeySelector selects a key of a Secret.* + + *SecretKeySelector selects a key of a Secret.* - - **env.valueFrom.secretKeyRef.key** (string), required + - **env.valueFrom.secretKeyRef.key** (string), required - The key of the secret to select from. Must be a valid secret key. + The key of the secret to select from. Must be a valid secret key. - - **env.valueFrom.secretKeyRef.name** (string) + - **env.valueFrom.secretKeyRef.name** (string) - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - **env.valueFrom.secretKeyRef.optional** (boolean) + - **env.valueFrom.secretKeyRef.optional** (boolean) - Specify whether the Secret or its key must be defined + Specify whether the Secret or its key must be defined - **envFrom** ([]EnvFromSource) @@ -143,13 +182,13 @@ An EphemeralContainer is a container that may be added temporarily to an existin The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.* - - **envFrom.configMapRef.name** (string) + - **envFrom.configMapRef.name** (string) - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - **envFrom.configMapRef.optional** (boolean) + - **envFrom.configMapRef.optional** (boolean) - Specify whether the ConfigMap must be defined + Specify whether the ConfigMap must be defined - **envFrom.prefix** (string) @@ -164,15 +203,15 @@ An EphemeralContainer is a container that may be added temporarily to an existin The contents of the target Secret's Data field will represent the key-value pairs as environment variables.* - - **envFrom.secretRef.name** (string) + - **envFrom.secretRef.name** (string) - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - **envFrom.secretRef.optional** (boolean) + - **envFrom.secretRef.optional** (boolean) - Specify whether the Secret must be defined + Specify whether the Secret must be defined -### Volumes {#Volumes} +### Volumes - **volumeMounts** ([]VolumeMount) @@ -225,7 +264,7 @@ An EphemeralContainer is a container that may be added temporarily to an existin name must match the name of a persistentVolumeClaim in the pod -### Lifecycle {#Lifecycle} +### Lifecycle - **terminationMessagePath** (string) @@ -236,7 +275,7 @@ An EphemeralContainer is a container that may be added temporarily to an existin Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. 
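To tie the env, envFrom, and volumeMounts fields above together, a container fragment might look like the hedged sketch below; the ConfigMap, Secret, and volume names are hypothetical.

```yaml
env:
- name: LOG_LEVEL
  valueFrom:
    configMapKeyRef:
      name: app-config          # hypothetical ConfigMap
      key: log-level
      optional: true            # tolerate a missing key
- name: POD_NAME
  valueFrom:
    fieldRef:
      fieldPath: metadata.name  # pod field selector documented above
- name: DB_PASSWORD
  valueFrom:
    secretKeyRef:
      name: db-credentials      # hypothetical Secret
      key: password
envFrom:
- prefix: APP_                  # prepended to every imported key
  configMapRef:
    name: app-config
volumeMounts:
- name: scratch                 # hypothetical volume declared in the pod spec
  mountPath: /var/scratch
  readOnly: false
terminationMessagePolicy: FallbackToLogsOnError
```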
-### Debugging {#Debugging} +### Debugging - **stdin** (boolean) @@ -251,7 +290,7 @@ An EphemeralContainer is a container that may be added temporarily to an existin Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. -### Not allowed {#Not-allowed} +### Not allowed - **ports** ([]ContainerPort) @@ -290,11 +329,11 @@ An EphemeralContainer is a container that may be added temporarily to an existin - **resources.limits** (map[string]}}">Quantity) - Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - **resources.requests** (map[string]}}">Quantity) - Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - **lifecycle** (Lifecycle) @@ -303,269 +342,280 @@ An EphemeralContainer is a container that may be added temporarily to an existin *Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.* - - **lifecycle.postStart** (Handler) + - **lifecycle.postStart** (}}">Handler) PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - - *Handler defines a specific action that should be taken* + - **lifecycle.preStop** (}}">Handler) - - **lifecycle.postStart.exec** (}}">ExecAction) + PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - One and only one of the following should be specified. Exec specifies the action to take. +- **livenessProbe** (}}">Probe) - - **lifecycle.postStart.httpGet** (}}">HTTPGetAction) + Probes are not allowed for ephemeral containers. - HTTPGet specifies the http request to perform.
+- **readinessProbe** (}}">Probe) - - **lifecycle.postStart.tcpSocket** (}}">TCPSocketAction) + Probes are not allowed for ephemeral containers. - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported +- **securityContext** (SecurityContext) - - **lifecycle.preStop** (Handler) + SecurityContext is not allowed for ephemeral containers. - PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + + *SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.* - - *Handler defines a specific action that should be taken* + - **securityContext.runAsUser** (int64) - - **lifecycle.preStop.exec** (}}">ExecAction) + The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - One and only one of the following should be specified. Exec specifies the action to take. + - **securityContext.runAsNonRoot** (boolean) - - **lifecycle.preStop.httpGet** (}}">HTTPGetAction) + Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - HTTPGet specifies the http request to perform. + - **securityContext.runAsGroup** (int64) + + The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - **lifecycle.preStop.tcpSocket** (}}">TCPSocketAction) + - **securityContext.readOnlyRootFilesystem** (boolean) - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported + Whether this container has a read-only root filesystem. Default is false. -- **livenessProbe** (Probe) + - **securityContext.procMount** (string) - Probes are not allowed for ephemeral containers. + procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. + + - **securityContext.privileged** (boolean) + + Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. 
+ + - **securityContext.allowPrivilegeEscalation** (boolean) + + AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN + + - **securityContext.capabilities** (Capabilities) + + The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. + + + *Adds and removes POSIX capabilities from running containers.* + + - **securityContext.capabilities.add** ([]string) + + Added capabilities + + - **securityContext.capabilities.drop** ([]string) + + Removed capabilities + + - **securityContext.seccompProfile** (SeccompProfile) + + The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. + + + *SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.* + + - **securityContext.seccompProfile.type** (string), required + + type indicates which kind of seccomp profile will be applied. Valid options are: + + Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. + + - **securityContext.seccompProfile.localhostProfile** (string) + + localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". - - *Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.* + - **securityContext.seLinuxOptions** (SELinuxOptions) - - **livenessProbe.exec** (}}">ExecAction) + The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - One and only one of the following should be specified. Exec specifies the action to take. + + *SELinuxOptions are the labels to be applied to the container* - - **livenessProbe.httpGet** (}}">HTTPGetAction) + - **securityContext.seLinuxOptions.level** (string) - HTTPGet specifies the http request to perform. + Level is SELinux level label that applies to the container. - - **livenessProbe.tcpSocket** (}}">TCPSocketAction) + - **securityContext.seLinuxOptions.role** (string) - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported + Role is a SELinux role label that applies to the container. - - **livenessProbe.initialDelaySeconds** (int32) + - **securityContext.seLinuxOptions.type** (string) - Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + Type is a SELinux type label that applies to the container. - - **livenessProbe.periodSeconds** (int32) + - **securityContext.seLinuxOptions.user** (string) - How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + User is a SELinux user label that applies to the container. 
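The securityContext fields above compose as in the following hedged sketch. Note the reference states SecurityContext is rejected for ephemeral containers, so this shape applies to a regular container, and the values are illustrative, not recommendations.

```yaml
securityContext:
  runAsUser: 1000               # UID for the container entrypoint
  runAsGroup: 3000
  runAsNonRoot: true            # kubelet refuses to start a UID-0 image
  readOnlyRootFilesystem: true
  privileged: false
  allowPrivilegeEscalation: false   # sets no_new_privs on the process
  capabilities:
    add: ["NET_BIND_SERVICE"]   # added POSIX capabilities
    drop: ["ALL"]               # removed capabilities
  seccompProfile:
    type: RuntimeDefault        # or Localhost together with localhostProfile
  seLinuxOptions:
    level: "s0:c123,c456"       # hypothetical SELinux level label
```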
- - **livenessProbe.timeoutSeconds** (int32) + - **securityContext.windowsOptions** (WindowsSecurityContextOptions) - Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - **livenessProbe.failureThreshold** (int32) + + *WindowsSecurityContextOptions contain Windows-specific options and credentials.* - Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + - **securityContext.windowsOptions.gmsaCredentialSpec** (string) - - **livenessProbe.successThreshold** (int32) + GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. - Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + - **securityContext.windowsOptions.gmsaCredentialSpecName** (string) -- **readinessProbe** (Probe) + GMSACredentialSpecName is the name of the GMSA credential spec to use. + + - **securityContext.windowsOptions.runAsUserName** (string) + + The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + +- **startupProbe** (}}">Probe) Probes are not allowed for ephemeral containers. - - *Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.* - - **readinessProbe.exec** (}}">ExecAction) - One and only one of the following should be specified. Exec specifies the action to take. +## Operations {#Operations} - - **readinessProbe.httpGet** (}}">HTTPGetAction) - HTTPGet specifies the http request to perform. - - **readinessProbe.tcpSocket** (}}">TCPSocketAction) +
- TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - - **readinessProbe.initialDelaySeconds** (int32) - Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - **readinessProbe.periodSeconds** (int32) - How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - - **readinessProbe.timeoutSeconds** (int32) +### `get` read ephemeralcontainers of the specified Pod - Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +#### HTTP Request - - **readinessProbe.failureThreshold** (int32) +GET /api/v1/namespaces/{namespace}/pods/{name}/ephemeralcontainers - Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +#### Parameters - - **readinessProbe.successThreshold** (int32) - Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. +- **name** (*in path*): string, required -- **securityContext** (SecurityContext) + name of the EphemeralContainers - SecurityContext is not allowed for ephemeral containers. - - *SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.* +- **namespace** (*in path*): string, required - - **securityContext.runAsUser** (int64) + }}">namespace - The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - **securityContext.runAsNonRoot** (boolean) +- **pretty** (*in query*): string - Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + }}">pretty - - **securityContext.runAsGroup** (int64) - The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - **securityContext.readOnlyRootFilesystem** (boolean) +#### Response - Whether this container has a read-only root filesystem. Default is false. - - **securityContext.procMount** (string) +200 (}}">EphemeralContainers): OK - procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. +401: Unauthorized - - **securityContext.privileged** (boolean) - Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. 
+### `update` replace ephemeralcontainers of the specified Pod - - **securityContext.allowPrivilegeEscalation** (boolean) +#### HTTP Request - AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN +PUT /api/v1/namespaces/{namespace}/pods/{name}/ephemeralcontainers - - **securityContext.capabilities** (Capabilities) +#### Parameters - The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. - - *Adds and removes POSIX capabilities from running containers.* +- **name** (*in path*): string, required - - **securityContext.capabilities.add** ([]string) + name of the EphemeralContainers - Added capabilities - - **securityContext.capabilities.drop** ([]string) +- **namespace** (*in path*): string, required - Removed capabilities + }}">namespace - - **securityContext.seccompProfile** (SeccompProfile) - The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. +- **body**: }}">EphemeralContainers, required - - *SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.* + - - **securityContext.seccompProfile.type** (string), required - type indicates which kind of seccomp profile will be applied. Valid options are: - - Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. +- **dryRun** (*in query*): string - - **securityContext.seccompProfile.localhostProfile** (string) + }}">dryRun - localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". - - **securityContext.seLinuxOptions** (SELinuxOptions) +- **fieldManager** (*in query*): string - The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + }}">fieldManager - - *SELinuxOptions are the labels to be applied to the container* - - **securityContext.seLinuxOptions.level** (string) +- **pretty** (*in query*): string - Level is SELinux level label that applies to the container. + }}">pretty - - **securityContext.seLinuxOptions.role** (string) - Role is a SELinux role label that applies to the container. - - **securityContext.seLinuxOptions.type** (string) +#### Response - Type is a SELinux type label that applies to the container. - - **securityContext.seLinuxOptions.user** (string) +200 (}}">EphemeralContainers): OK - User is a SELinux user label that applies to the container. +201 (}}">EphemeralContainers): Created - - **securityContext.windowsOptions** (WindowsSecurityContextOptions) +401: Unauthorized - The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - *WindowsSecurityContextOptions contain Windows-specific options and credentials.* +### `patch` partially update ephemeralcontainers of the specified Pod - - **securityContext.windowsOptions.gmsaCredentialSpec** (string) +#### HTTP Request - GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. +PATCH /api/v1/namespaces/{namespace}/pods/{name}/ephemeralcontainers - - **securityContext.windowsOptions.gmsaCredentialSpecName** (string) +#### Parameters - GMSACredentialSpecName is the name of the GMSA credential spec to use. - - **securityContext.windowsOptions.runAsUserName** (string) +- **name** (*in path*): string, required - The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + name of the EphemeralContainers -- **startupProbe** (Probe) - Probes are not allowed for ephemeral containers. +- **namespace** (*in path*): string, required + + }}">namespace - - *Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.* - - **startupProbe.exec** (}}">ExecAction) +- **body**: }}">Patch, required + + + - One and only one of the following should be specified. Exec specifies the action to take. +- **dryRun** (*in query*): string - - **startupProbe.httpGet** (}}">HTTPGetAction) + }}">dryRun - HTTPGet specifies the http request to perform. - - **startupProbe.tcpSocket** (}}">TCPSocketAction) +- **fieldManager** (*in query*): string - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported + }}">fieldManager - - **startupProbe.initialDelaySeconds** (int32) - Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +- **force** (*in query*): boolean - - **startupProbe.periodSeconds** (int32) + }}">force - How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - - **startupProbe.timeoutSeconds** (int32) +- **pretty** (*in query*): string - Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + }}">pretty - - **startupProbe.failureThreshold** (int32) - Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - - **startupProbe.successThreshold** (int32) +#### Response - Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+200 (}}">EphemeralContainers): OK +401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/horizontal-pod-autoscaler-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v1.md similarity index 82% rename from content/en/docs/reference/kubernetes-api/workloads-resources/horizontal-pod-autoscaler-v1.md rename to content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v1.md index 5bb2f6e1757be..a62d79e4f7507 100644 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/horizontal-pod-autoscaler-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v1.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "configuration of a horizontal pod autoscaler." title: "HorizontalPodAutoscaler" -weight: 14 +weight: 12 +auto_generated: true --- + + `apiVersion: autoscaling/v1` `import "k8s.io/api/autoscaling/v1"` @@ -30,11 +42,11 @@ configuration of a horizontal pod autoscaler. Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">HorizontalPodAutoscalerSpec) +- **spec** (}}">HorizontalPodAutoscalerSpec) behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. -- **status** (}}">HorizontalPodAutoscalerStatus) +- **status** (}}">HorizontalPodAutoscalerStatus) current information about the autoscaler. @@ -132,7 +144,7 @@ list of horizontal pod autoscaler objects. Standard list metadata. -- **items** ([]}}">HorizontalPodAutoscaler), required +- **items** ([]}}">HorizontalPodAutoscaler), required list of horizontal pod autoscaler objects. 
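For orientation, a minimal autoscaling/v1 HorizontalPodAutoscaler might look like the sketch below. It assumes the standard v1 spec fields (scaleTargetRef, minReplicas, maxReplicas, targetCPUUtilizationPercentage), which are documented elsewhere in this reference; the target Deployment name is hypothetical.

```yaml
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: web-hpa
spec:
  scaleTargetRef:              # the resource whose scale subresource is managed
    apiVersion: apps/v1
    kind: Deployment
    name: web                  # hypothetical workload
  minReplicas: 2
  maxReplicas: 10
  targetCPUUtilizationPercentage: 80
```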
@@ -179,7 +191,7 @@ GET /apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name} #### Response -200 (}}">HorizontalPodAutoscaler): OK +200 (}}">HorizontalPodAutoscaler): OK 401: Unauthorized @@ -212,7 +224,7 @@ GET /apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}/ #### Response -200 (}}">HorizontalPodAutoscaler): OK +200 (}}">HorizontalPodAutoscaler): OK 401: Unauthorized @@ -285,7 +297,7 @@ GET /apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers #### Response -200 (}}">HorizontalPodAutoscalerList): OK +200 (}}">HorizontalPodAutoscalerList): OK 401: Unauthorized @@ -353,7 +365,7 @@ GET /apis/autoscaling/v1/horizontalpodautoscalers #### Response -200 (}}">HorizontalPodAutoscalerList): OK +200 (}}">HorizontalPodAutoscalerList): OK 401: Unauthorized @@ -372,7 +384,7 @@ POST /apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers }}">namespace -- **body**: }}">HorizontalPodAutoscaler, required +- **body**: }}">HorizontalPodAutoscaler, required @@ -396,11 +408,11 @@ POST /apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers #### Response -200 (}}">HorizontalPodAutoscaler): OK +200 (}}">HorizontalPodAutoscaler): OK -201 (}}">HorizontalPodAutoscaler): Created +201 (}}">HorizontalPodAutoscaler): Created -202 (}}">HorizontalPodAutoscaler): Accepted +202 (}}">HorizontalPodAutoscaler): Accepted 401: Unauthorized @@ -424,7 +436,7 @@ PUT /apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name} }}">namespace -- **body**: }}">HorizontalPodAutoscaler, required +- **body**: }}">HorizontalPodAutoscaler, required @@ -448,9 +460,9 @@ PUT /apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name} #### Response -200 (}}">HorizontalPodAutoscaler): OK +200 (}}">HorizontalPodAutoscaler): OK -201 (}}">HorizontalPodAutoscaler): Created +201 (}}">HorizontalPodAutoscaler): Created 401: Unauthorized @@ -474,7 +486,7 @@ PUT /apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}/ }}">namespace -- **body**: }}">HorizontalPodAutoscaler, required +- **body**: }}">HorizontalPodAutoscaler, required @@ -498,9 +510,9 @@ PUT /apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}/ #### Response -200 (}}">HorizontalPodAutoscaler): OK +200 (}}">HorizontalPodAutoscaler): OK -201 (}}">HorizontalPodAutoscaler): Created +201 (}}">HorizontalPodAutoscaler): Created 401: Unauthorized @@ -553,7 +565,7 @@ PATCH /apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name #### Response -200 (}}">HorizontalPodAutoscaler): OK +200 (}}">HorizontalPodAutoscaler): OK 401: Unauthorized @@ -606,7 +618,7 @@ PATCH /apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name #### Response -200 (}}">HorizontalPodAutoscaler): OK +200 (}}">HorizontalPodAutoscaler): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/horizontal-pod-autoscaler-v2beta2.md b/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v2beta2.md similarity index 55% rename from content/en/docs/reference/kubernetes-api/workloads-resources/horizontal-pod-autoscaler-v2beta2.md rename to content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v2beta2.md index 56fd55d24b209..9d326e21315fa 100644 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/horizontal-pod-autoscaler-v2beta2.md +++ 
b/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v2beta2.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified." title: "HorizontalPodAutoscaler v2beta2" -weight: 15 +weight: 13 +auto_generated: true --- + + `apiVersion: autoscaling/v2beta2` `import "k8s.io/api/autoscaling/v2beta2"` @@ -30,11 +42,11 @@ HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, wh metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">HorizontalPodAutoscalerSpec) +- **spec** (}}">HorizontalPodAutoscalerSpec) spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. -- **status** (}}">HorizontalPodAutoscalerStatus) +- **status** (}}">HorizontalPodAutoscalerStatus) status is the current information about the autoscaler. @@ -89,32 +101,32 @@ HorizontalPodAutoscalerSpec describes the desired functionality of the Horizonta *HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.* - - **behavior.scaleDown.policies** ([]HPAScalingPolicy) + - **behavior.scaleDown.policies** ([]HPAScalingPolicy) - policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid + policies is a list of potential scaling policies which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid - - *HPAScalingPolicy is a single policy which must hold true for a specified past interval.* + + *HPAScalingPolicy is a single policy which must hold true for a specified past interval.* - - **behavior.scaleDown.policies.type** (string), required + - **behavior.scaleDown.policies.type** (string), required - Type is used to specify the scaling policy. + Type is used to specify the scaling policy. - - **behavior.scaleDown.policies.value** (int32), required + - **behavior.scaleDown.policies.value** (int32), required - Value contains the amount of change which is permitted by the policy. It must be greater than zero + Value contains the amount of change which is permitted by the policy. It must be greater than zero - - **behavior.scaleDown.policies.periodSeconds** (int32), required + - **behavior.scaleDown.policies.periodSeconds** (int32), required - PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). + PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).
- - **behavior.scaleDown.selectPolicy** (string) + - **behavior.scaleDown.selectPolicy** (string) - selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used. + selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used. - - **behavior.scaleDown.stabilizationWindowSeconds** (int32) + - **behavior.scaleDown.stabilizationWindowSeconds** (int32) - StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long). + StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long). - **behavior.scaleUp** (HPAScalingRules) @@ -126,32 +138,32 @@ HorizontalPodAutoscalerSpec describes the desired functionality of the Horizonta *HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.* - - **behavior.scaleUp.policies** ([]HPAScalingPolicy) + - **behavior.scaleUp.policies** ([]HPAScalingPolicy) - policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid + policies is a list of potential scaling policies which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid - - *HPAScalingPolicy is a single policy which must hold true for a specified past interval.* + + *HPAScalingPolicy is a single policy which must hold true for a specified past interval.* - - **behavior.scaleUp.policies.type** (string), required + - **behavior.scaleUp.policies.type** (string), required - Type is used to specify the scaling policy. + Type is used to specify the scaling policy. - - **behavior.scaleUp.policies.value** (int32), required + - **behavior.scaleUp.policies.value** (int32), required - Value contains the amount of change which is permitted by the policy. It must be greater than zero + Value contains the amount of change which is permitted by the policy. It must be greater than zero - - **behavior.scaleUp.policies.periodSeconds** (int32), required + - **behavior.scaleUp.policies.periodSeconds** (int32), required - PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).
- - **behavior.scaleUp.selectPolicy** (string) + - **behavior.scaleUp.selectPolicy** (string) - selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used. + selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used. - - **behavior.scaleUp.stabilizationWindowSeconds** (int32) + - **behavior.scaleUp.stabilizationWindowSeconds** (int32) - StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long). + StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long). - **metrics** ([]MetricSpec) @@ -171,36 +183,36 @@ HorizontalPodAutoscalerSpec describes the desired functionality of the Horizonta *ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. Only one "target" type should be set.* - - **metrics.containerResource.container** (string), required + - **metrics.containerResource.container** (string), required - container is the name of the container in the pods of the scaling target + container is the name of the container in the pods of the scaling target - - **metrics.containerResource.name** (string), required + - **metrics.containerResource.name** (string), required - name is the name of the resource in question. + name is the name of the resource in question. - - **metrics.containerResource.target** (MetricTarget), required + - **metrics.containerResource.target** (MetricTarget), required - target specifies the target value for the given metric + target specifies the target value for the given metric - - *MetricTarget defines the target value, average value, or average utilization of a specific metric* + + *MetricTarget defines the target value, average value, or average utilization of a specific metric* - - **metrics.containerResource.target.type** (string), required + - **metrics.containerResource.target.type** (string), required - type represents whether the metric type is Utilization, Value, or AverageValue + type represents whether the metric type is Utilization, Value, or AverageValue - - **metrics.containerResource.target.averageUtilization** (int32) + - **metrics.containerResource.target.averageUtilization** (int32) - averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. 
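Putting the scaleUp/scaleDown rules above together, a behavior block might look like this hedged sketch. The limits are illustrative; `Max` is assumed to be the serialized form of the MaxPolicySelect default mentioned above, and the policy types are assumed to be the usual Pods/Percent pair.

```yaml
behavior:
  scaleDown:
    stabilizationWindowSeconds: 300  # the scale-down default noted above
    selectPolicy: Max
    policies:
    - type: Percent                  # remove at most 10% of replicas...
      value: 10
      periodSeconds: 60              # ...per 60-second window (max 1800)
  scaleUp:
    stabilizationWindowSeconds: 0    # the scale-up default: no stabilization
    policies:
    - type: Pods                     # add at most 4 pods per window
      value: 4
      periodSeconds: 60
```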
Currently only valid for Resource metric source type + averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - - **metrics.containerResource.target.averageValue** (}}">Quantity) + - **metrics.containerResource.target.averageValue** (}}">Quantity) - averageValue is the target value of the average of the metric across all relevant pods (as a quantity) + averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - - **metrics.containerResource.target.value** (}}">Quantity) + - **metrics.containerResource.target.value** (}}">Quantity) - value is the target value of the metric (as a quantity). + value is the target value of the metric (as a quantity). - **metrics.external** (ExternalMetricSource) @@ -209,43 +221,43 @@ HorizontalPodAutoscalerSpec describes the desired functionality of the Horizonta *ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).* - - **metrics.external.metric** (MetricIdentifier), required + - **metrics.external.metric** (MetricIdentifier), required - metric identifies the target metric by name and selector + metric identifies the target metric by name and selector - - *MetricIdentifier defines the name and optionally selector for a metric* + + *MetricIdentifier defines the name and optionally selector for a metric* - - **metrics.external.metric.name** (string), required + - **metrics.external.metric.name** (string), required - name is the name of the given metric + name is the name of the given metric - - **metrics.external.metric.selector** (}}">LabelSelector) + - **metrics.external.metric.selector** (}}">LabelSelector) - selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. + selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - - **metrics.external.target** (MetricTarget), required + - **metrics.external.target** (MetricTarget), required - target specifies the target value for the given metric + target specifies the target value for the given metric - - *MetricTarget defines the target value, average value, or average utilization of a specific metric* + + *MetricTarget defines the target value, average value, or average utilization of a specific metric* - - **metrics.external.target.type** (string), required + - **metrics.external.target.type** (string), required - type represents whether the metric type is Utilization, Value, or AverageValue + type represents whether the metric type is Utilization, Value, or AverageValue - - **metrics.external.target.averageUtilization** (int32) + - **metrics.external.target.averageUtilization** (int32) - averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. 
Currently only valid for Resource metric source type + averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - - **metrics.external.target.averageValue** (}}">Quantity) + - **metrics.external.target.averageValue** (}}">Quantity) - averageValue is the target value of the average of the metric across all relevant pods (as a quantity) + averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - - **metrics.external.target.value** (}}">Quantity) + - **metrics.external.target.value** (}}">Quantity) - value is the target value of the metric (as a quantity). + value is the target value of the metric (as a quantity). - **metrics.object** (ObjectMetricSource) @@ -254,61 +266,61 @@ HorizontalPodAutoscalerSpec describes the desired functionality of the Horizonta *ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).* - - **metrics.object.describedObject** (CrossVersionObjectReference), required + - **metrics.object.describedObject** (CrossVersionObjectReference), required - - *CrossVersionObjectReference contains enough information to let you identify the referred resource.* + + *CrossVersionObjectReference contains enough information to let you identify the referred resource.* - - **metrics.object.describedObject.kind** (string), required + - **metrics.object.describedObject.kind** (string), required - Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" - - **metrics.object.describedObject.name** (string), required + - **metrics.object.describedObject.name** (string), required - Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - - **metrics.object.describedObject.apiVersion** (string) + - **metrics.object.describedObject.apiVersion** (string) - API version of the referent + API version of the referent - - **metrics.object.metric** (MetricIdentifier), required + - **metrics.object.metric** (MetricIdentifier), required - metric identifies the target metric by name and selector + metric identifies the target metric by name and selector - - *MetricIdentifier defines the name and optionally selector for a metric* + + *MetricIdentifier defines the name and optionally selector for a metric* - - **metrics.object.metric.name** (string), required + - **metrics.object.metric.name** (string), required - name is the name of the given metric + name is the name of the given metric - - **metrics.object.metric.selector** (}}">LabelSelector) + - **metrics.object.metric.selector** (}}">LabelSelector) - selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. + selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. 
When unset, just the metricName will be used to gather metrics. - - **metrics.object.target** (MetricTarget), required + - **metrics.object.target** (MetricTarget), required - target specifies the target value for the given metric + target specifies the target value for the given metric - - *MetricTarget defines the target value, average value, or average utilization of a specific metric* + + *MetricTarget defines the target value, average value, or average utilization of a specific metric* - - **metrics.object.target.type** (string), required + - **metrics.object.target.type** (string), required - type represents whether the metric type is Utilization, Value, or AverageValue + type represents whether the metric type is Utilization, Value, or AverageValue - - **metrics.object.target.averageUtilization** (int32) + - **metrics.object.target.averageUtilization** (int32) - averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type + averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - - **metrics.object.target.averageValue** (}}">Quantity) + - **metrics.object.target.averageValue** (}}">Quantity) - averageValue is the target value of the average of the metric across all relevant pods (as a quantity) + averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - - **metrics.object.target.value** (}}">Quantity) + - **metrics.object.target.value** (}}">Quantity) - value is the target value of the metric (as a quantity). + value is the target value of the metric (as a quantity). - **metrics.pods** (PodsMetricSource) @@ -317,43 +329,43 @@ HorizontalPodAutoscalerSpec describes the desired functionality of the Horizonta *PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.* - - **metrics.pods.metric** (MetricIdentifier), required + - **metrics.pods.metric** (MetricIdentifier), required - metric identifies the target metric by name and selector + metric identifies the target metric by name and selector - - *MetricIdentifier defines the name and optionally selector for a metric* + + *MetricIdentifier defines the name and optionally selector for a metric* - - **metrics.pods.metric.name** (string), required + - **metrics.pods.metric.name** (string), required - name is the name of the given metric + name is the name of the given metric - - **metrics.pods.metric.selector** (}}">LabelSelector) + - **metrics.pods.metric.selector** (}}">LabelSelector) - selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. + selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. 
- - **metrics.pods.target** (MetricTarget), required + - **metrics.pods.target** (MetricTarget), required - target specifies the target value for the given metric + target specifies the target value for the given metric - - *MetricTarget defines the target value, average value, or average utilization of a specific metric* + + *MetricTarget defines the target value, average value, or average utilization of a specific metric* - - **metrics.pods.target.type** (string), required + - **metrics.pods.target.type** (string), required - type represents whether the metric type is Utilization, Value, or AverageValue + type represents whether the metric type is Utilization, Value, or AverageValue - - **metrics.pods.target.averageUtilization** (int32) + - **metrics.pods.target.averageUtilization** (int32) - averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type + averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - - **metrics.pods.target.averageValue** (}}">Quantity) + - **metrics.pods.target.averageValue** (}}">Quantity) - averageValue is the target value of the average of the metric across all relevant pods (as a quantity) + averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - - **metrics.pods.target.value** (}}">Quantity) + - **metrics.pods.target.value** (}}">Quantity) - value is the target value of the metric (as a quantity). + value is the target value of the metric (as a quantity). - **metrics.resource** (ResourceMetricSource) @@ -362,32 +374,32 @@ HorizontalPodAutoscalerSpec describes the desired functionality of the Horizonta *ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. Only one "target" type should be set.* - - **metrics.resource.name** (string), required + - **metrics.resource.name** (string), required - name is the name of the resource in question. + name is the name of the resource in question. 
- - **metrics.resource.target** (MetricTarget), required + - **metrics.resource.target** (MetricTarget), required - target specifies the target value for the given metric + target specifies the target value for the given metric - - *MetricTarget defines the target value, average value, or average utilization of a specific metric* + + *MetricTarget defines the target value, average value, or average utilization of a specific metric* - - **metrics.resource.target.type** (string), required + - **metrics.resource.target.type** (string), required - type represents whether the metric type is Utilization, Value, or AverageValue + type represents whether the metric type is Utilization, Value, or AverageValue - - **metrics.resource.target.averageUtilization** (int32) + - **metrics.resource.target.averageUtilization** (int32) - averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type + averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - - **metrics.resource.target.averageValue** (}}">Quantity) + - **metrics.resource.target.averageValue** (}}">Quantity) - averageValue is the target value of the average of the metric across all relevant pods (as a quantity) + averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - - **metrics.resource.target.value** (}}">Quantity) + - **metrics.resource.target.value** (}}">Quantity) - value is the target value of the metric (as a quantity). + value is the target value of the metric (as a quantity). @@ -455,32 +467,32 @@ HorizontalPodAutoscalerStatus describes the current status of a horizontal pod a *ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.* - - **currentMetrics.containerResource.container** (string), required + - **currentMetrics.containerResource.container** (string), required - Container is the name of the container in the pods of the scaling target + Container is the name of the container in the pods of the scaling target - - **currentMetrics.containerResource.current** (MetricValueStatus), required + - **currentMetrics.containerResource.current** (MetricValueStatus), required - current contains the current value for the given metric + current contains the current value for the given metric - - *MetricValueStatus holds the current value for a metric* + + *MetricValueStatus holds the current value for a metric* - - **currentMetrics.containerResource.current.averageUtilization** (int32) + - **currentMetrics.containerResource.current.averageUtilization** (int32) - currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. 
+ currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. - - **currentMetrics.containerResource.current.averageValue** (}}">Quantity) + - **currentMetrics.containerResource.current.averageValue** (}}">Quantity) - averageValue is the current value of the average of the metric across all relevant pods (as a quantity) + averageValue is the current value of the average of the metric across all relevant pods (as a quantity) - - **currentMetrics.containerResource.current.value** (}}">Quantity) + - **currentMetrics.containerResource.current.value** (}}">Quantity) - value is the current value of the metric (as a quantity). + value is the current value of the metric (as a quantity). - - **currentMetrics.containerResource.name** (string), required + - **currentMetrics.containerResource.name** (string), required - Name is the name of the resource in question. + Name is the name of the resource in question. - **currentMetrics.external** (ExternalMetricStatus) @@ -489,39 +501,39 @@ HorizontalPodAutoscalerStatus describes the current status of a horizontal pod a *ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.* - - **currentMetrics.external.current** (MetricValueStatus), required + - **currentMetrics.external.current** (MetricValueStatus), required - current contains the current value for the given metric + current contains the current value for the given metric - - *MetricValueStatus holds the current value for a metric* + + *MetricValueStatus holds the current value for a metric* - - **currentMetrics.external.current.averageUtilization** (int32) + - **currentMetrics.external.current.averageUtilization** (int32) - currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. + currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. - - **currentMetrics.external.current.averageValue** (}}">Quantity) + - **currentMetrics.external.current.averageValue** (}}">Quantity) - averageValue is the current value of the average of the metric across all relevant pods (as a quantity) + averageValue is the current value of the average of the metric across all relevant pods (as a quantity) - - **currentMetrics.external.current.value** (}}">Quantity) + - **currentMetrics.external.current.value** (}}">Quantity) - value is the current value of the metric (as a quantity). + value is the current value of the metric (as a quantity). 
- - **currentMetrics.external.metric** (MetricIdentifier), required + - **currentMetrics.external.metric** (MetricIdentifier), required - metric identifies the target metric by name and selector + metric identifies the target metric by name and selector - - *MetricIdentifier defines the name and optionally selector for a metric* + + *MetricIdentifier defines the name and optionally selector for a metric* - - **currentMetrics.external.metric.name** (string), required + - **currentMetrics.external.metric.name** (string), required - name is the name of the given metric + name is the name of the given metric - - **currentMetrics.external.metric.selector** (}}">LabelSelector) + - **currentMetrics.external.metric.selector** (}}">LabelSelector) - selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. + selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - **currentMetrics.object** (ObjectMetricStatus) @@ -530,57 +542,57 @@ HorizontalPodAutoscalerStatus describes the current status of a horizontal pod a *ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).* - - **currentMetrics.object.current** (MetricValueStatus), required + - **currentMetrics.object.current** (MetricValueStatus), required - current contains the current value for the given metric + current contains the current value for the given metric - - *MetricValueStatus holds the current value for a metric* + + *MetricValueStatus holds the current value for a metric* - - **currentMetrics.object.current.averageUtilization** (int32) + - **currentMetrics.object.current.averageUtilization** (int32) - currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. + currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. - - **currentMetrics.object.current.averageValue** (}}">Quantity) + - **currentMetrics.object.current.averageValue** (}}">Quantity) - averageValue is the current value of the average of the metric across all relevant pods (as a quantity) + averageValue is the current value of the average of the metric across all relevant pods (as a quantity) - - **currentMetrics.object.current.value** (}}">Quantity) + - **currentMetrics.object.current.value** (}}">Quantity) - value is the current value of the metric (as a quantity). + value is the current value of the metric (as a quantity). 
- - **currentMetrics.object.describedObject** (CrossVersionObjectReference), required + - **currentMetrics.object.describedObject** (CrossVersionObjectReference), required - - *CrossVersionObjectReference contains enough information to let you identify the referred resource.* + + *CrossVersionObjectReference contains enough information to let you identify the referred resource.* - - **currentMetrics.object.describedObject.kind** (string), required + - **currentMetrics.object.describedObject.kind** (string), required - Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" - - **currentMetrics.object.describedObject.name** (string), required + - **currentMetrics.object.describedObject.name** (string), required - Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - - **currentMetrics.object.describedObject.apiVersion** (string) + - **currentMetrics.object.describedObject.apiVersion** (string) - API version of the referent + API version of the referent - - **currentMetrics.object.metric** (MetricIdentifier), required + - **currentMetrics.object.metric** (MetricIdentifier), required - metric identifies the target metric by name and selector + metric identifies the target metric by name and selector - - *MetricIdentifier defines the name and optionally selector for a metric* + + *MetricIdentifier defines the name and optionally selector for a metric* - - **currentMetrics.object.metric.name** (string), required + - **currentMetrics.object.metric.name** (string), required - name is the name of the given metric + name is the name of the given metric - - **currentMetrics.object.metric.selector** (}}">LabelSelector) + - **currentMetrics.object.metric.selector** (}}">LabelSelector) - selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. + selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. 
- **currentMetrics.pods** (PodsMetricStatus) @@ -589,39 +601,39 @@ HorizontalPodAutoscalerStatus describes the current status of a horizontal pod a *PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).* - - **currentMetrics.pods.current** (MetricValueStatus), required + - **currentMetrics.pods.current** (MetricValueStatus), required - current contains the current value for the given metric + current contains the current value for the given metric - - *MetricValueStatus holds the current value for a metric* + + *MetricValueStatus holds the current value for a metric* - - **currentMetrics.pods.current.averageUtilization** (int32) + - **currentMetrics.pods.current.averageUtilization** (int32) - currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. + currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. - - **currentMetrics.pods.current.averageValue** (}}">Quantity) + - **currentMetrics.pods.current.averageValue** (}}">Quantity) - averageValue is the current value of the average of the metric across all relevant pods (as a quantity) + averageValue is the current value of the average of the metric across all relevant pods (as a quantity) - - **currentMetrics.pods.current.value** (}}">Quantity) + - **currentMetrics.pods.current.value** (}}">Quantity) - value is the current value of the metric (as a quantity). + value is the current value of the metric (as a quantity). - - **currentMetrics.pods.metric** (MetricIdentifier), required + - **currentMetrics.pods.metric** (MetricIdentifier), required - metric identifies the target metric by name and selector + metric identifies the target metric by name and selector - - *MetricIdentifier defines the name and optionally selector for a metric* + + *MetricIdentifier defines the name and optionally selector for a metric* - - **currentMetrics.pods.metric.name** (string), required + - **currentMetrics.pods.metric.name** (string), required - name is the name of the given metric + name is the name of the given metric - - **currentMetrics.pods.metric.selector** (}}">LabelSelector) + - **currentMetrics.pods.metric.selector** (}}">LabelSelector) - selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. + selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - **currentMetrics.resource** (ResourceMetricStatus) @@ -630,28 +642,28 @@ HorizontalPodAutoscalerStatus describes the current status of a horizontal pod a *ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.* - - **currentMetrics.resource.current** (MetricValueStatus), required + - **currentMetrics.resource.current** (MetricValueStatus), required - current contains the current value for the given metric + current contains the current value for the given metric - - *MetricValueStatus holds the current value for a metric* + + *MetricValueStatus holds the current value for a metric* - - **currentMetrics.resource.current.averageUtilization** (int32) + - **currentMetrics.resource.current.averageUtilization** (int32) - currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. + currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. - - **currentMetrics.resource.current.averageValue** (}}">Quantity) + - **currentMetrics.resource.current.averageValue** (}}">Quantity) - averageValue is the current value of the average of the metric across all relevant pods (as a quantity) + averageValue is the current value of the average of the metric across all relevant pods (as a quantity) - - **currentMetrics.resource.current.value** (}}">Quantity) + - **currentMetrics.resource.current.value** (}}">Quantity) - value is the current value of the metric (as a quantity). + value is the current value of the metric (as a quantity). - - **currentMetrics.resource.name** (string), required + - **currentMetrics.resource.name** (string), required - Name is the name of the resource in question. + Name is the name of the resource in question. - **lastScaleTime** (Time) @@ -684,7 +696,7 @@ HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. metadata is the standard list metadata. -- **items** ([]}}">HorizontalPodAutoscaler), required +- **items** ([]}}">HorizontalPodAutoscaler), required items is the list of horizontal pod autoscaler objects. 
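As an editorial illustration (not generated output; the workload and metric names are hypothetical), the metric-source and `MetricTarget` fields documented above compose like this in a manifest:

```yaml
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: web-hpa                    # hypothetical name
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: web                      # hypothetical Deployment
  minReplicas: 2
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization          # MetricTarget.type
        averageUtilization: 60     # percent of the pods' requested CPU
  - type: Pods
    pods:
      metric:
        name: transactions-processed-per-second   # custom metric, as in the field description above
      target:
        type: AverageValue
        averageValue: "100"        # Quantity, averaged across relevant pods
```

For each entry, exactly one of `value`, `averageValue`, or `averageUtilization` is set, matching the `type` field of `MetricTarget`.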
@@ -731,7 +743,7 @@ GET /apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers/{n #### Response -200 (}}">HorizontalPodAutoscaler): OK +200 (}}">HorizontalPodAutoscaler): OK 401: Unauthorized @@ -764,7 +776,7 @@ GET /apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers/{n #### Response -200 (}}">HorizontalPodAutoscaler): OK +200 (}}">HorizontalPodAutoscaler): OK 401: Unauthorized @@ -837,7 +849,7 @@ GET /apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers #### Response -200 (}}">HorizontalPodAutoscalerList): OK +200 (}}">HorizontalPodAutoscalerList): OK 401: Unauthorized @@ -905,7 +917,7 @@ GET /apis/autoscaling/v2beta2/horizontalpodautoscalers #### Response -200 (}}">HorizontalPodAutoscalerList): OK +200 (}}">HorizontalPodAutoscalerList): OK 401: Unauthorized @@ -924,7 +936,7 @@ POST /apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers }}">namespace -- **body**: }}">HorizontalPodAutoscaler, required +- **body**: }}">HorizontalPodAutoscaler, required @@ -948,11 +960,11 @@ POST /apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers #### Response -200 (}}">HorizontalPodAutoscaler): OK +200 (}}">HorizontalPodAutoscaler): OK -201 (}}">HorizontalPodAutoscaler): Created +201 (}}">HorizontalPodAutoscaler): Created -202 (}}">HorizontalPodAutoscaler): Accepted +202 (}}">HorizontalPodAutoscaler): Accepted 401: Unauthorized @@ -976,7 +988,7 @@ PUT /apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers/{n }}">namespace -- **body**: }}">HorizontalPodAutoscaler, required +- **body**: }}">HorizontalPodAutoscaler, required @@ -1000,9 +1012,9 @@ PUT /apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers/{n #### Response -200 (}}">HorizontalPodAutoscaler): OK +200 (}}">HorizontalPodAutoscaler): OK -201 (}}">HorizontalPodAutoscaler): Created +201 (}}">HorizontalPodAutoscaler): Created 401: Unauthorized @@ -1026,7 +1038,7 @@ PUT /apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers/{n }}">namespace -- **body**: }}">HorizontalPodAutoscaler, required +- **body**: }}">HorizontalPodAutoscaler, required @@ -1050,9 +1062,9 @@ PUT /apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers/{n #### Response -200 (}}">HorizontalPodAutoscaler): OK +200 (}}">HorizontalPodAutoscaler): OK -201 (}}">HorizontalPodAutoscaler): Created +201 (}}">HorizontalPodAutoscaler): Created 401: Unauthorized @@ -1105,7 +1117,7 @@ PATCH /apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers/ #### Response -200 (}}">HorizontalPodAutoscaler): OK +200 (}}">HorizontalPodAutoscaler): OK 401: Unauthorized @@ -1158,7 +1170,7 @@ PATCH /apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers/ #### Response -200 (}}">HorizontalPodAutoscaler): OK +200 (}}">HorizontalPodAutoscaler): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/job-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/job-v1.md similarity index 77% rename from content/en/docs/reference/kubernetes-api/workloads-resources/job-v1.md rename to content/en/docs/reference/kubernetes-api/workload-resources/job-v1.md index 3deab1c4be626..4848a36d4d67d 100644 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/job-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/job-v1.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "Job represents the configuration of a single 
job." title: "Job" -weight: 11 +weight: 10 +auto_generated: true --- + + `apiVersion: batch/v1` `import "k8s.io/api/batch/v1"` @@ -30,11 +42,11 @@ Job represents the configuration of a single job. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">JobSpec) +- **spec** (}}">JobSpec) Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status -- **status** (}}">JobStatus) +- **status** (}}">JobStatus) Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -53,7 +65,7 @@ JobSpec describes how the job execution will look like. ### Replicas -- **template** (}}">PodTemplateSpec), required +- **template** (}}">PodTemplateSpec), required Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ @@ -68,18 +80,32 @@ JobSpec describes how the job execution will look like. Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ +- **completionMode** (string) + + CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`. + + `NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other. + + `Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. + + This field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job. + - **backoffLimit** (int32) Specifies the number of retries before marking this job failed. Defaults to 6 - **activeDeadlineSeconds** (int64) - Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer + Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again. - **ttlSecondsAfterFinished** (int32) ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. 
If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature. +- **suspend** (boolean) + + Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false. + ### Selector @@ -101,7 +127,7 @@ JobStatus represents the current state of a Job. - **startTime** (Time) - Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. + Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC. *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* @@ -125,11 +151,17 @@ JobStatus represents the current state of a Job. The number of pods which reached phase Succeeded. +- **completedIndexes** (string) + + CompletedIndexes holds the completed indexes when .spec.completionMode = "Indexed" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as "1,3-5,7". + - **conditions** ([]JobCondition) *Patch strategy: merge on key `type`* - The latest available observations of an object's current state. When a job fails, one of the conditions will have type == "Failed". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + *Atomic: will be replaced during a merge* + + The latest available observations of an object's current state. When a Job fails, one of the conditions will have type "Failed" and status true. When a Job is suspended, one of the conditions will have type "Suspended" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type "Complete" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ *JobCondition describes current state of a job.* @@ -184,7 +216,7 @@ JobList is a collection of jobs. Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **items** ([]}}">Job), required +- **items** ([]}}">Job), required items is the list of Jobs. 
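As a hedged sketch of how the new `completionMode` and `suspend` fields fit into a manifest (all names are hypothetical; both fields are alpha here and require the IndexedJob and SuspendJob feature gates respectively):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: indexed-job            # hypothetical name
spec:
  completions: 5               # required when completionMode is Indexed
  parallelism: 2               # must be <= 10^5 in Indexed mode
  completionMode: Indexed      # index exposed via the batch.kubernetes.io/job-completion-index annotation
  suspend: true                # the controller creates no Pods until this becomes false
  backoffLimit: 6
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: worker           # hypothetical container
        image: busybox
        command: ["sh", "-c", "echo one unit of work"]
```

Setting `suspend` back to `false` resets `status.startTime`, which also restarts the `activeDeadlineSeconds` timer described above.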
@@ -231,7 +263,7 @@ GET /apis/batch/v1/namespaces/{namespace}/jobs/{name} #### Response -200 (}}">Job): OK +200 (}}">Job): OK 401: Unauthorized @@ -264,7 +296,7 @@ GET /apis/batch/v1/namespaces/{namespace}/jobs/{name}/status #### Response -200 (}}">Job): OK +200 (}}">Job): OK 401: Unauthorized @@ -337,7 +369,7 @@ GET /apis/batch/v1/namespaces/{namespace}/jobs #### Response -200 (}}">JobList): OK +200 (}}">JobList): OK 401: Unauthorized @@ -405,7 +437,7 @@ GET /apis/batch/v1/jobs #### Response -200 (}}">JobList): OK +200 (}}">JobList): OK 401: Unauthorized @@ -424,7 +456,7 @@ POST /apis/batch/v1/namespaces/{namespace}/jobs }}">namespace -- **body**: }}">Job, required +- **body**: }}">Job, required @@ -448,11 +480,11 @@ POST /apis/batch/v1/namespaces/{namespace}/jobs #### Response -200 (}}">Job): OK +200 (}}">Job): OK -201 (}}">Job): Created +201 (}}">Job): Created -202 (}}">Job): Accepted +202 (}}">Job): Accepted 401: Unauthorized @@ -476,7 +508,7 @@ PUT /apis/batch/v1/namespaces/{namespace}/jobs/{name} }}">namespace -- **body**: }}">Job, required +- **body**: }}">Job, required @@ -500,9 +532,9 @@ PUT /apis/batch/v1/namespaces/{namespace}/jobs/{name} #### Response -200 (}}">Job): OK +200 (}}">Job): OK -201 (}}">Job): Created +201 (}}">Job): Created 401: Unauthorized @@ -526,7 +558,7 @@ PUT /apis/batch/v1/namespaces/{namespace}/jobs/{name}/status }}">namespace -- **body**: }}">Job, required +- **body**: }}">Job, required @@ -550,9 +582,9 @@ PUT /apis/batch/v1/namespaces/{namespace}/jobs/{name}/status #### Response -200 (}}">Job): OK +200 (}}">Job): OK -201 (}}">Job): Created +201 (}}">Job): Created 401: Unauthorized @@ -605,7 +637,7 @@ PATCH /apis/batch/v1/namespaces/{namespace}/jobs/{name} #### Response -200 (}}">Job): OK +200 (}}">Job): OK 401: Unauthorized @@ -658,7 +690,7 @@ PATCH /apis/batch/v1/namespaces/{namespace}/jobs/{name}/status #### Response -200 (}}">Job): OK +200 (}}">Job): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/pod-template-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/pod-template-v1.md similarity index 82% rename from content/en/docs/reference/kubernetes-api/workloads-resources/pod-template-v1.md rename to content/en/docs/reference/kubernetes-api/workload-resources/pod-template-v1.md index fec34df19f5b8..9a0bbecab1468 100644 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/pod-template-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/pod-template-v1.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "PodTemplate describes a template for creating copies of a predefined pod." title: "PodTemplate" -weight: 4 +weight: 3 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` @@ -30,7 +42,7 @@ PodTemplate describes a template for creating copies of a predefined pod. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **template** (}}">PodTemplateSpec) +- **template** (}}">PodTemplateSpec) Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -48,7 +60,7 @@ PodTemplateSpec describes the data a pod should have when created from a templat Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">PodSpec) +- **spec** (}}">PodSpec) Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -72,7 +84,7 @@ PodTemplateList is a list of PodTemplates. Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds -- **items** ([]}}">PodTemplate), required +- **items** ([]}}">PodTemplate), required List of pod templates @@ -119,7 +131,7 @@ GET /api/v1/namespaces/{namespace}/podtemplates/{name} #### Response -200 (}}">PodTemplate): OK +200 (}}">PodTemplate): OK 401: Unauthorized @@ -192,7 +204,7 @@ GET /api/v1/namespaces/{namespace}/podtemplates #### Response -200 (}}">PodTemplateList): OK +200 (}}">PodTemplateList): OK 401: Unauthorized @@ -260,7 +272,7 @@ GET /api/v1/podtemplates #### Response -200 (}}">PodTemplateList): OK +200 (}}">PodTemplateList): OK 401: Unauthorized @@ -279,7 +291,7 @@ POST /api/v1/namespaces/{namespace}/podtemplates }}">namespace -- **body**: }}">PodTemplate, required +- **body**: }}">PodTemplate, required @@ -303,11 +315,11 @@ POST /api/v1/namespaces/{namespace}/podtemplates #### Response -200 (}}">PodTemplate): OK +200 (}}">PodTemplate): OK -201 (}}">PodTemplate): Created +201 (}}">PodTemplate): Created -202 (}}">PodTemplate): Accepted +202 (}}">PodTemplate): Accepted 401: Unauthorized @@ -331,7 +343,7 @@ PUT /api/v1/namespaces/{namespace}/podtemplates/{name} }}">namespace -- **body**: }}">PodTemplate, required +- **body**: }}">PodTemplate, required @@ -355,9 +367,9 @@ PUT /api/v1/namespaces/{namespace}/podtemplates/{name} #### Response -200 (}}">PodTemplate): OK +200 (}}">PodTemplate): OK -201 (}}">PodTemplate): Created +201 (}}">PodTemplate): Created 401: Unauthorized @@ -410,7 +422,7 @@ PATCH /api/v1/namespaces/{namespace}/podtemplates/{name} #### Response -200 (}}">PodTemplate): OK +200 (}}">PodTemplate): OK 401: Unauthorized @@ -463,9 +475,9 @@ DELETE /api/v1/namespaces/{namespace}/podtemplates/{name} #### Response -200 (}}">PodTemplate): OK +200 (}}">PodTemplate): OK -202 (}}">PodTemplate): Accepted +202 (}}">PodTemplate): Accepted 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/pod-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/pod-v1.md new file mode 100644 index 0000000000000..d16d9b0a8505e --- /dev/null +++ b/content/en/docs/reference/kubernetes-api/workload-resources/pod-v1.md @@ -0,0 +1,2643 @@ +--- +api_metadata: + apiVersion: "v1" + import: "k8s.io/api/core/v1" + kind: "Pod" +content_type: "api_reference" +description: "Pod is a collection of containers that can run on a host." +title: "Pod" +weight: 1 +auto_generated: true +--- + + + +`apiVersion: v1` + +`import "k8s.io/api/core/v1"` + + +## Pod {#Pod} + +Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts. + +
+ +- **apiVersion**: v1 + + +- **kind**: Pod + + +- **metadata** (}}">ObjectMeta) + + Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + +- **spec** (}}">PodSpec) + + Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + +- **status** (}}">PodStatus) + + Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + + + + + +## PodSpec {#PodSpec} + +PodSpec is a description of a pod. + +
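As an editorial illustration (hypothetical names, not generated output), a minimal manifest showing how the Pod fields above and the PodSpec fields below fit together:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-pod            # hypothetical name
spec:
  containers:                  # PodSpec.containers, documented under "Containers"
  - name: app                  # hypothetical container
    image: nginx:1.20
  restartPolicy: Always        # documented under "Lifecycle"
```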
+ + + +### Containers + + +- **containers** ([]}}">Container), required + + *Patch strategy: merge on key `name`* + + List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. + +- **initContainers** ([]}}">Container) + + *Patch strategy: merge on key `name`* + + List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + +- **imagePullSecrets** ([]}}">LocalObjectReference) + + *Patch strategy: merge on key `name`* + + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + +- **enableServiceLinks** (boolean) + + EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true. + +### Volumes + + +- **volumes** ([]}}">Volume) + + *Patch strategies: retainKeys, merge on key `name`* + + List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes + +### Scheduling + + +- **nodeSelector** (map[string]string) + + NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + +- **nodeName** (string) + + NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. + +- **affinity** (Affinity) + + If specified, the pod's scheduling constraints + + + *Affinity is a group of affinity scheduling rules.* + + - **affinity.nodeAffinity** (}}">NodeAffinity) + + Describes node affinity scheduling rules for the pod. + + - **affinity.podAffinity** (}}">PodAffinity) + + Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + + - **affinity.podAntiAffinity** (}}">PodAntiAffinity) + + Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + +- **tolerations** ([]Toleration) + + If specified, the pod's tolerations.
+ + + *The pod this Toleration is attached to tolerates any taint that matches the triple \<key,value,effect\> using the matching operator \<operator\>.* + + - **tolerations.key** (string) + + Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + + - **tolerations.operator** (string) + + Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + + - **tolerations.value** (string) + + Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + + - **tolerations.effect** (string) + + Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + + - **tolerations.tolerationSeconds** (int64) + + TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + +- **schedulerName** (string) + + If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. + +- **runtimeClassName** (string) + + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14. + +- **priorityClassName** (string) + + If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. + +- **priority** (int32) + + The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. + +- **topologySpreadConstraints** ([]TopologySpreadConstraint) + + *Patch strategy: merge on key `topologyKey`* + + *Map: unique values on keys `topologyKey, whenUnsatisfiable` will be kept during a merge* + + TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. + + + *TopologySpreadConstraint specifies how to spread matching pods among the given topology.* + + - **topologySpreadConstraints.maxSkew** (int32), required + + MaxSkew describes the degree to which pods may be unevenly distributed.
When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed. + + - **topologySpreadConstraints.topologyKey** (string), required + + TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each \<key, value\> as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. + + - **topologySpreadConstraints.whenUnsatisfiable** (string), required + + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field. + + - **topologySpreadConstraints.labelSelector** (}}">LabelSelector) + + LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + +### Lifecycle + + +- **restartPolicy** (string) + + Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + +- **terminationGracePeriodSeconds** (int64) + + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. + +- **activeDeadlineSeconds** (int64) + + Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. + +- **readinessGates** ([]PodReadinessGate) + + If specified, all readiness gates will be evaluated for pod readiness.
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md + + + *PodReadinessGate contains the reference to a pod condition* + + - **readinessGates.conditionType** (string), required + + ConditionType refers to a condition in the pod's condition list with matching type. + +### Hostname and Name resolution + + +- **hostname** (string) + + Specifies the hostname of the Pod. If not specified, the pod's hostname will be set to a system-defined value. + +- **setHostnameAsFQDN** (boolean) + + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false. + +- **subdomain** (string) + + If specified, the fully qualified Pod hostname will be "\<hostname\>.\<subdomain\>.\<pod namespace\>.svc.\<cluster domain\>". If not specified, the pod will not have a domainname at all. + +- **hostAliases** ([]HostAlias) + + *Patch strategy: merge on key `ip`* + + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods. + + + *HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.* + + - **hostAliases.hostnames** ([]string) + + Hostnames for the above IP address. + + - **hostAliases.ip** (string) + + IP address of the host file entry. + +- **dnsConfig** (PodDNSConfig) + + Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. + + + *PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.* + + - **dnsConfig.nameservers** ([]string) + + A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed. + + - **dnsConfig.options** ([]PodDNSConfigOption) + + A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy. + + + *PodDNSConfigOption defines DNS resolver options of a pod.* + + - **dnsConfig.options.name** (string) + + Required. + + - **dnsConfig.options.value** (string) + + + - **dnsConfig.searches** ([]string) + + A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed. + +- **dnsPolicy** (string) + + Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + +### Hosts namespaces + + +- **hostNetwork** (boolean) + + Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.
+ +- **hostPID** (boolean) + + Use the host's pid namespace. Optional: Default to false. + +- **hostIPC** (boolean) + + Use the host's ipc namespace. Optional: Default to false. + +- **shareProcessNamespace** (boolean) + + Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. + +### Service account + + +- **serviceAccountName** (string) + + ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + +- **automountServiceAccountToken** (boolean) + + AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + +### Security context + + +- **securityContext** (PodSecurityContext) + + SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field. + + + *PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.* + + - **securityContext.runAsUser** (int64) + + The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. + + - **securityContext.runAsNonRoot** (boolean) + + Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + + - **securityContext.runAsGroup** (int64) + + The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. + + - **securityContext.supplementalGroups** ([]int64) + + A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. + + - **securityContext.fsGroup** (int64) + + A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: + + 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + + - **securityContext.fsGroupChangePolicy** (string) + + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). 
It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + + - **securityContext.seccompProfile** (SeccompProfile) + + The seccomp options to use by the containers in this pod. + + + *SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.* + + - **securityContext.seccompProfile.type** (string), required + + type indicates which kind of seccomp profile will be applied. Valid options are: + + Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. + + - **securityContext.seccompProfile.localhostProfile** (string) + + localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + + - **securityContext.seLinuxOptions** (SELinuxOptions) + + The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. + + + *SELinuxOptions are the labels to be applied to the container* + + - **securityContext.seLinuxOptions.level** (string) + + Level is SELinux level label that applies to the container. + + - **securityContext.seLinuxOptions.role** (string) + + Role is a SELinux role label that applies to the container. + + - **securityContext.seLinuxOptions.type** (string) + + Type is a SELinux type label that applies to the container. + + - **securityContext.seLinuxOptions.user** (string) + + User is a SELinux user label that applies to the container. + + - **securityContext.sysctls** ([]Sysctl) + + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. + + + *Sysctl defines a kernel parameter to be set* + + - **securityContext.sysctls.name** (string), required + + Name of a property to set + + - **securityContext.sysctls.value** (string), required + + Value of a property to set + + - **securityContext.windowsOptions** (WindowsSecurityContextOptions) + + The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + + + *WindowsSecurityContextOptions contain Windows-specific options and credentials.* + + - **securityContext.windowsOptions.gmsaCredentialSpec** (string) + + GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + + - **securityContext.windowsOptions.gmsaCredentialSpecName** (string) + + GMSACredentialSpecName is the name of the GMSA credential spec to use. + + - **securityContext.windowsOptions.runAsUserName** (string) + + The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + +### Beta level + + +- **preemptionPolicy** (string) + + PreemptionPolicy is the policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature gate. + +- **overhead** (map[string]Quantity) + + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md. This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature. + +### Alpha level + + +- **ephemeralContainers** ([]EphemeralContainer) + + *Patch strategy: merge on key `name`* + + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. + +### Deprecated + + +- **serviceAccount** (string) + + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead. + + + +## Container {#Container} + +A single application container that you want to run within a pod. + +
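+For illustration, a minimal sketch of a Container entry as it might appear under a Pod's `spec.containers` (the name, image, and port values here are placeholders, not defaults):
+
+```yaml
+spec:
+  containers:
+  - name: app                      # required; a DNS_LABEL, unique within the pod
+    image: nginx:1.20              # container image to run
+    imagePullPolicy: IfNotPresent  # one of Always, Never, IfNotPresent
+    ports:
+    - containerPort: 80            # informational; does not control exposure
+```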
+ +- **name** (string), required + + Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + + + +### Image + + +- **image** (string) + + Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images. This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + +- **imagePullPolicy** (string) + + Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if the :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + +### Entrypoint + + +- **command** ([]string) + + Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + +- **args** ([]string) + + Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + +- **workingDir** (string) + + Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + +### Ports + + +- **ports** ([]ContainerPort) + + *Patch strategy: merge on key `containerPort`* + + *Map: unique values on keys `containerPort, protocol` will be kept during a merge* + + List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. + + + *ContainerPort represents a network port in a single container.* + + - **ports.containerPort** (int32), required + + Number of the port to expose on the pod's IP address. This must be a valid port number, 0 \< x \< 65536. + + - **ports.hostIP** (string) + + What host IP to bind the external port to. + + - **ports.hostPort** (int32) + + Number of the port to expose on the host. If specified, this must be a valid port number, 0 \< x \< 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + + - **ports.name** (string) + + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. This is the name for the port that can be referred to by services.
+ + - **ports.protocol** (string) + + Protocol for the port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + +### Environment variables + + +- **env** ([]EnvVar) + + *Patch strategy: merge on key `name`* + + List of environment variables to set in the container. Cannot be updated. + + + *EnvVar represents an environment variable present in a Container.* + + - **env.name** (string), required + + Name of the environment variable. Must be a C_IDENTIFIER. + + - **env.value** (string) + + Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". + + - **env.valueFrom** (EnvVarSource) + + Source for the environment variable's value. Cannot be used if value is not empty. + + + *EnvVarSource represents a source for the value of an EnvVar.* + + - **env.valueFrom.configMapKeyRef** (ConfigMapKeySelector) + + Selects a key of a ConfigMap. + + + *Selects a key from a ConfigMap.* + + - **env.valueFrom.configMapKeyRef.key** (string), required + + The key to select. + + - **env.valueFrom.configMapKeyRef.name** (string) + + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + + - **env.valueFrom.configMapKeyRef.optional** (boolean) + + Specify whether the ConfigMap or its key must be defined. + + - **env.valueFrom.fieldRef** (ObjectFieldSelector) + + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + + - **env.valueFrom.resourceFieldRef** (ResourceFieldSelector) + + Selects a resource of the container: only resource limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + + - **env.valueFrom.secretKeyRef** (SecretKeySelector) + + Selects a key of a secret in the pod's namespace. + + + *SecretKeySelector selects a key of a Secret.* + + - **env.valueFrom.secretKeyRef.key** (string), required + + The key of the secret to select from. Must be a valid secret key. + + - **env.valueFrom.secretKeyRef.name** (string) + + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + + - **env.valueFrom.secretKeyRef.optional** (boolean) + + Specify whether the Secret or its key must be defined. + +- **envFrom** ([]EnvFromSource) + + List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + + + *EnvFromSource represents the source of a set of ConfigMaps* + + - **envFrom.configMapRef** (ConfigMapEnvSource) + + The ConfigMap to select from. + + + *ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.
+ + The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.* + + - **envFrom.configMapRef.name** (string) + + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + + - **envFrom.configMapRef.optional** (boolean) + + Specify whether the ConfigMap must be defined. + + - **envFrom.prefix** (string) + + An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + + - **envFrom.secretRef** (SecretEnvSource) + + The Secret to select from. + + + *SecretEnvSource selects a Secret to populate the environment variables with. + + The contents of the target Secret's Data field will represent the key-value pairs as environment variables.* + + - **envFrom.secretRef.name** (string) + + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + + - **envFrom.secretRef.optional** (boolean) + + Specify whether the Secret must be defined. + +### Volumes + + +- **volumeMounts** ([]VolumeMount) + + *Patch strategy: merge on key `mountPath`* + + Pod volumes to mount into the container's filesystem. Cannot be updated. + + + *VolumeMount describes a mounting of a Volume within a container.* + + - **volumeMounts.mountPath** (string), required + + Path within the container at which the volume should be mounted. Must not contain ':'. + + - **volumeMounts.name** (string), required + + This must match the Name of a Volume. + + - **volumeMounts.mountPropagation** (string) + + mountPropagation determines how mounts are propagated from the host to the container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + + - **volumeMounts.readOnly** (boolean) + + Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + + - **volumeMounts.subPath** (string) + + Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + + - **volumeMounts.subPathExpr** (string) + + Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + +- **volumeDevices** ([]VolumeDevice) + + *Patch strategy: merge on key `devicePath`* + + volumeDevices is the list of block devices to be used by the container. + + + *volumeDevice describes a mapping of a raw block device within a container.* + + - **volumeDevices.devicePath** (string), required + + devicePath is the path inside of the container that the device will be mapped to. + + - **volumeDevices.name** (string), required + + name must match the name of a persistentVolumeClaim in the pod. + +### Resources + + +- **resources** (ResourceRequirements) + + Compute resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + + + *ResourceRequirements describes the compute resource requirements.* + + - **resources.limits** (map[string]Quantity) + + Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + + - **resources.requests** (map[string]Quantity) + + Requests describes the minimum amount of compute resources required.
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + +### Lifecycle + + +- **lifecycle** (Lifecycle) + + Actions that the management system should take in response to container lifecycle events. Cannot be updated. + + + *Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.* + + - **lifecycle.postStart** (Handler) + + PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + + - **lifecycle.preStop** (Handler) + + PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + +- **terminationMessagePath** (string) + + Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. The message written is intended to be a brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. + +- **terminationMessagePolicy** (string) + + Indicates how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + +- **livenessProbe** (Probe) + + Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + +- **readinessProbe** (Probe) + + Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + +- **startupProbe** (Probe) + + StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully.
If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + +### Security Context + + +- **securityContext** (SecurityContext) + + Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + + + *SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.* + + - **securityContext.runAsUser** (int64) + + The UID to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + + - **securityContext.runAsNonRoot** (boolean) + + Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + + - **securityContext.runAsGroup** (int64) + + The GID to run the entrypoint of the container process. Uses the runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + + - **securityContext.readOnlyRootFilesystem** (boolean) + + Whether this container has a read-only root filesystem. Default is false. + + - **securityContext.procMount** (string) + + procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount, which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. + + - **securityContext.privileged** (boolean) + + Run the container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. + + - **securityContext.allowPrivilegeEscalation** (boolean) + + AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is always true when the container is: 1) run as Privileged, or 2) has CAP_SYS_ADMIN. + + - **securityContext.capabilities** (Capabilities) + + The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. + + + *Adds and removes POSIX capabilities from running containers.* + + - **securityContext.capabilities.add** ([]string) + + Added capabilities. + + - **securityContext.capabilities.drop** ([]string) + + Removed capabilities. + + - **securityContext.seccompProfile** (SeccompProfile) + + The seccomp options used by this container.
If seccomp options are provided at both the pod and container level, the container options override the pod options. + + + *SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.* + + - **securityContext.seccompProfile.type** (string), required + + type indicates which kind of seccomp profile will be applied. Valid options are: + + Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. + + - **securityContext.seccompProfile.localhostProfile** (string) + + localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + + - **securityContext.seLinuxOptions** (SELinuxOptions) + + The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + + + *SELinuxOptions are the labels to be applied to the container* + + - **securityContext.seLinuxOptions.level** (string) + + Level is the SELinux level label that applies to the container. + + - **securityContext.seLinuxOptions.role** (string) + + Role is the SELinux role label that applies to the container. + + - **securityContext.seLinuxOptions.type** (string) + + Type is the SELinux type label that applies to the container. + + - **securityContext.seLinuxOptions.user** (string) + + User is the SELinux user label that applies to the container. + + - **securityContext.windowsOptions** (WindowsSecurityContextOptions) + + The Windows-specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + + + *WindowsSecurityContextOptions contain Windows-specific options and credentials.* + + - **securityContext.windowsOptions.gmsaCredentialSpec** (string) + + GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + + - **securityContext.windowsOptions.gmsaCredentialSpecName** (string) + + GMSACredentialSpecName is the name of the GMSA credential spec to use. + + - **securityContext.windowsOptions.runAsUserName** (string) + + The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + +### Debugging + + +- **stdin** (boolean) + + Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + +- **stdinOnce** (boolean) + + Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true, the stdin stream will remain open across multiple attach sessions.
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false. + +- **tty** (boolean) + + Whether this container should allocate a TTY for itself; this also requires 'stdin' to be true. Default is false. + + + +## Handler {#Handler} + +Handler defines a specific action that should be taken. + +
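+Handlers are referenced by the `lifecycle.postStart` and `lifecycle.preStop` container fields described above. As a sketch, a preStop hook using the `exec` action (the command is illustrative):
+
+```yaml
+spec:
+  containers:
+  - name: app
+    image: nginx:1.20
+    lifecycle:
+      preStop:
+        exec:
+          # commands are exec'd directly, not run in a shell,
+          # so a shell is invoked explicitly here
+          command: ["/bin/sh", "-c", "sleep 5"]
+```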
+ +- **exec** (ExecAction) + + One and only one of the following should be specified. Exec specifies the action to take. + + + *ExecAction describes a "run in container" action.* + + - **exec.command** ([]string) + + Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + +- **httpGet** (HTTPGetAction) + + HTTPGet specifies the HTTP request to perform. + + + *HTTPGetAction describes an action based on HTTP Get requests.* + + - **httpGet.port** (IntOrString), required + + Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + + + *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* + + - **httpGet.host** (string) + + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + + - **httpGet.httpHeaders** ([]HTTPHeader) + + Custom headers to set in the request. HTTP allows repeated headers. + + + *HTTPHeader describes a custom header to be used in HTTP probes* + + - **httpGet.httpHeaders.name** (string), required + + The header field name. + + - **httpGet.httpHeaders.value** (string), required + + The header field value. + + - **httpGet.path** (string) + + Path to access on the HTTP server. + + - **httpGet.scheme** (string) + + Scheme to use for connecting to the host. Defaults to HTTP. + +- **tcpSocket** (TCPSocketAction) + + TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. + + + *TCPSocketAction describes an action based on opening a socket* + + - **tcpSocket.port** (IntOrString), required + + Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + + + *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* + + - **tcpSocket.host** (string) + + Optional: Host name to connect to, defaults to the pod IP. + + + + + +## NodeAffinity {#NodeAffinity} + +Node affinity is a group of node affinity scheduling rules. + +
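+For illustration, a Pod that must run in a given zone and prefers nodes with SSDs; the label keys and values are examples only:
+
+```yaml
+spec:
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:              # terms are ORed
+        - matchExpressions:             # expressions within a term are ANDed
+          - key: topology.kubernetes.io/zone
+            operator: In
+            values: ["us-east-1a"]
+      preferredDuringSchedulingIgnoredDuringExecution:
+      - weight: 50                      # in the range 1-100
+        preference:
+          matchExpressions:
+          - key: disktype
+            operator: In
+            values: ["ssd"]
+```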
+ +- **preferredDuringSchedulingIgnoredDuringExecution** ([]PreferredSchedulingTerm) + + The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + + + *An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).* + + - **preferredDuringSchedulingIgnoredDuringExecution.preference** (NodeSelectorTerm), required + + A node selector term, associated with the corresponding weight. + + + *A null or empty node selector term matches no objects. Their requirements are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.* + + - **preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions** ([]NodeSelectorRequirement) + + A list of node selector requirements by node's labels. + + - **preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields** ([]NodeSelectorRequirement) + + A list of node selector requirements by node's fields. + + - **preferredDuringSchedulingIgnoredDuringExecution.weight** (int32), required + + Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + +- **requiredDuringSchedulingIgnoredDuringExecution** (NodeSelector) + + If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + + + *A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.* + + - **requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms** ([]NodeSelectorTerm), required + + Required. A list of node selector terms. The terms are ORed. + + + *A null or empty node selector term matches no objects. Their requirements are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.* + + - **requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions** ([]NodeSelectorRequirement) + + A list of node selector requirements by node's labels. + + - **requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields** ([]NodeSelectorRequirement) + + A list of node selector requirements by node's fields. + + + + + +## PodAffinity {#PodAffinity} + +Pod affinity is a group of inter-pod affinity scheduling rules. + +
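+As a sketch, a Pod that asks to be co-located (same zone) with pods labeled `app: cache`; the labels and topology key are illustrative:
+
+```yaml
+spec:
+  affinity:
+    podAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+      - labelSelector:
+          matchLabels:
+            app: cache
+        topologyKey: topology.kubernetes.io/zone   # required; may not be empty
+```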
+ +- **preferredDuringSchedulingIgnoredDuringExecution** ([]WeightedPodAffinityTerm) + + The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + + + *The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)* + + - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm** (PodAffinityTerm), required + + Required. A pod affinity term, associated with the corresponding weight. + + + *Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running* + + - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey** (string), required + + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + + - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector** (LabelSelector) + + A label query over a set of resources, in this case pods. + + - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector** (LabelSelector) + + A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when the PodAffinityNamespaceSelector feature is enabled. + + - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces** ([]string) + + namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + + - **preferredDuringSchedulingIgnoredDuringExecution.weight** (int32), required + + weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + +- **requiredDuringSchedulingIgnoredDuringExecution** ([]PodAffinityTerm) + + If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node.
When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + + + *Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running* + + - **requiredDuringSchedulingIgnoredDuringExecution.topologyKey** (string), required + + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + + - **requiredDuringSchedulingIgnoredDuringExecution.labelSelector** (LabelSelector) + + A label query over a set of resources, in this case pods. + + - **requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector** (LabelSelector) + + A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when the PodAffinityNamespaceSelector feature is enabled. + + - **requiredDuringSchedulingIgnoredDuringExecution.namespaces** ([]string) + + namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + + + + + +## PodAntiAffinity {#PodAntiAffinity} + +Pod anti-affinity is a group of inter-pod anti-affinity scheduling rules. + +
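+A common use is spreading replicas: the sketch below prefers not to schedule a Pod onto a node that already runs a pod with the same `app` label (values illustrative):
+
+```yaml
+spec:
+  affinity:
+    podAntiAffinity:
+      preferredDuringSchedulingIgnoredDuringExecution:
+      - weight: 100
+        podAffinityTerm:
+          labelSelector:
+            matchLabels:
+              app: web
+          topologyKey: kubernetes.io/hostname   # "at most one per node" spreading
+```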
+ +- **preferredDuringSchedulingIgnoredDuringExecution** ([]WeightedPodAffinityTerm) + + The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + + + *The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)* + + - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm** (PodAffinityTerm), required + + Required. A pod affinity term, associated with the corresponding weight. + + + *Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running* + + - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey** (string), required + + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + + - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector** (LabelSelector) + + A label query over a set of resources, in this case pods. + + - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector** (LabelSelector) + + A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when the PodAffinityNamespaceSelector feature is enabled. + + - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces** ([]string) + + namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + + - **preferredDuringSchedulingIgnoredDuringExecution.weight** (int32), required + + weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + +- **requiredDuringSchedulingIgnoredDuringExecution** ([]PodAffinityTerm) + + If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node.
When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + + + *Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running* + + - **requiredDuringSchedulingIgnoredDuringExecution.topologyKey** (string), required + + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + + - **requiredDuringSchedulingIgnoredDuringExecution.labelSelector** (LabelSelector) + + A label query over a set of resources, in this case pods. + + - **requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector** (LabelSelector) + + A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when the PodAffinityNamespaceSelector feature is enabled. + + - **requiredDuringSchedulingIgnoredDuringExecution.namespaces** ([]string) + + namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + + + + + +## Probe {#Probe} + +Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. + +
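+For example, a liveness probe using the `httpGet` action together with the timing fields described below (the path, port, and thresholds are illustrative):
+
+```yaml
+spec:
+  containers:
+  - name: app
+    image: nginx:1.20
+    livenessProbe:
+      httpGet:
+        path: /healthz
+        port: 80
+      initialDelaySeconds: 5    # wait before the first probe
+      periodSeconds: 10         # probe every 10s (the default)
+      failureThreshold: 3       # restart after 3 consecutive failures
+```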
+ +- **exec** (ExecAction) + + One and only one of the following should be specified. Exec specifies the action to take. + + + *ExecAction describes a "run in container" action.* + + - **exec.command** ([]string) + + Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + +- **httpGet** (HTTPGetAction) + + HTTPGet specifies the HTTP request to perform. + + + *HTTPGetAction describes an action based on HTTP Get requests.* + + - **httpGet.port** (IntOrString), required + + Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + + + *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* + + - **httpGet.host** (string) + + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + + - **httpGet.httpHeaders** ([]HTTPHeader) + + Custom headers to set in the request. HTTP allows repeated headers. + + + *HTTPHeader describes a custom header to be used in HTTP probes* + + - **httpGet.httpHeaders.name** (string), required + + The header field name. + + - **httpGet.httpHeaders.value** (string), required + + The header field value. + + - **httpGet.path** (string) + + Path to access on the HTTP server. + + - **httpGet.scheme** (string) + + Scheme to use for connecting to the host. Defaults to HTTP. + +- **tcpSocket** (TCPSocketAction) + + TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. + + + *TCPSocketAction describes an action based on opening a socket* + + - **tcpSocket.port** (IntOrString), required + + Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + + + *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* + + - **tcpSocket.host** (string) + + Optional: Host name to connect to, defaults to the pod IP. + +- **initialDelaySeconds** (int32) + + Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + +- **terminationGracePeriodSeconds** (int64) + + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds between when the processes running in the pod are sent a termination signal and when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. The value must be a non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling the ProbeTerminationGracePeriod feature gate.
+ +- **periodSeconds** (int32) + + How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1. + +- **timeoutSeconds** (int32) + + Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + +- **failureThreshold** (int32) + + Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + +- **successThreshold** (int32) + + Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + + + + + +## PodStatus {#PodStatus} + +PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane. + +
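+PodStatus is populated by the system and read back from the API; you do not set it in a manifest. A trimmed, illustrative example of what a running pod's `status` might look like:
+
+```yaml
+status:
+  phase: Running
+  podIP: 10.244.1.7      # routable at least within the cluster
+  hostIP: 192.168.1.10   # IP of the node the pod is assigned to
+  conditions:
+  - type: Ready
+    status: "True"
+  qosClass: Burstable
+```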
+ +- **nominatedNodeName** (string) + + nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled. + +- **hostIP** (string) + + IP address of the host to which the pod is assigned. Empty if not yet scheduled. + +- **startTime** (Time) + + RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod. + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + +- **phase** (string) + + The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values: + + Pending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod. + + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase + +- **message** (string) + + A human readable message indicating details about why the pod is in this condition. + +- **reason** (string) + + A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted' + +- **podIP** (string) + + IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated. + +- **podIPs** ([]PodIP) + + *Patch strategy: merge on key `ip`* + + podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet. + + + *IP address information for entries in the (plural) PodIPs field. Each entry includes: + IP: An IP address allocated to the pod. Routable at least within the cluster.* + + - **podIPs.ip** (string) + + ip is an IP address (IPv4 or IPv6) assigned to the pod + +- **conditions** ([]PodCondition) + + *Patch strategy: merge on key `type`* + + Current service state of pod. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions + + + *PodCondition contains details for the current condition of this pod.* + + - **conditions.status** (string), required + + Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions + + - **conditions.type** (string), required + + Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions + + - **conditions.lastProbeTime** (Time) + + Last time we probed the condition. + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + + - **conditions.lastTransitionTime** (Time) + + Last time the condition transitioned from one status to another. + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + + - **conditions.message** (string) + + Human-readable message indicating details about last transition. + + - **conditions.reason** (string) + + Unique, one-word, CamelCase reason for the condition's last transition. + +- **qosClass** (string) + + The Quality of Service (QOS) classification assigned to the pod based on resource requirements. See the PodQOSClass type for available QOS classes. More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md + +- **initContainerStatuses** ([]ContainerStatus) + + The list has one entry per init container in the manifest. The most recent successful init container will have ready = true; the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + + + *ContainerStatus contains details for the current status of this container.* + + - **initContainerStatuses.name** (string), required + + This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated. + + - **initContainerStatuses.image** (string), required + + The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images + + - **initContainerStatuses.imageID** (string), required + + ImageID of the container's image. + + - **initContainerStatuses.containerID** (string) + + Container's ID in the format 'docker://<container_id>'. + + - **initContainerStatuses.state** (ContainerState) + + Details about the container's current condition. + + + *ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.* + + - **initContainerStatuses.state.running** (ContainerStateRunning) + + Details about a running container + + + *ContainerStateRunning is a running state of a container.* + + - **initContainerStatuses.state.running.startedAt** (Time) + + Time at which the container was last (re-)started + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON.
Wrappers are provided for many of the factory methods that the time package offers.* + + - **initContainerStatuses.state.terminated** (ContainerStateTerminated) + + Details about a terminated container + + + *ContainerStateTerminated is a terminated state of a container.* + + - **initContainerStatuses.state.terminated.containerID** (string) + + Container's ID in the format 'docker://<container_id>' + + - **initContainerStatuses.state.terminated.exitCode** (int32), required + + Exit status from the last termination of the container + + - **initContainerStatuses.state.terminated.startedAt** (Time) + + Time at which previous execution of the container started + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + + - **initContainerStatuses.state.terminated.finishedAt** (Time) + + Time at which the container last terminated + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + + - **initContainerStatuses.state.terminated.message** (string) + + Message regarding the last termination of the container + + - **initContainerStatuses.state.terminated.reason** (string) + + (brief) reason from the last termination of the container + + - **initContainerStatuses.state.terminated.signal** (int32) + + Signal from the last termination of the container + + - **initContainerStatuses.state.waiting** (ContainerStateWaiting) + + Details about a waiting container + + + *ContainerStateWaiting is a waiting state of a container.* + + - **initContainerStatuses.state.waiting.message** (string) + + Message regarding why the container is not yet running. + + - **initContainerStatuses.state.waiting.reason** (string) + + (brief) reason the container is not yet running. + + - **initContainerStatuses.lastState** (ContainerState) + + Details about the container's last termination condition. + + + *ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.* + + - **initContainerStatuses.lastState.running** (ContainerStateRunning) + + Details about a running container + + + *ContainerStateRunning is a running state of a container.* + + - **initContainerStatuses.lastState.running.startedAt** (Time) + + Time at which the container was last (re-)started + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + + - **initContainerStatuses.lastState.terminated** (ContainerStateTerminated) + + Details about a terminated container + + + *ContainerStateTerminated is a terminated state of a container.* + + - **initContainerStatuses.lastState.terminated.containerID** (string) + + Container's ID in the format 'docker://<container_id>' + + - **initContainerStatuses.lastState.terminated.exitCode** (int32), required + + Exit status from the last termination of the container + + - **initContainerStatuses.lastState.terminated.startedAt** (Time) + + Time at which previous execution of the container started + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON.
Wrappers are provided for many of the factory methods that the time package offers.* + + - **initContainerStatuses.lastState.terminated.finishedAt** (Time) + + Time at which the container last terminated + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + + - **initContainerStatuses.lastState.terminated.message** (string) + + Message regarding the last termination of the container + + - **initContainerStatuses.lastState.terminated.reason** (string) + + (brief) reason from the last termination of the container + + - **initContainerStatuses.lastState.terminated.signal** (int32) + + Signal from the last termination of the container + + - **initContainerStatuses.lastState.waiting** (ContainerStateWaiting) + + Details about a waiting container + + + *ContainerStateWaiting is a waiting state of a container.* + + - **initContainerStatuses.lastState.waiting.message** (string) + + Message regarding why the container is not yet running. + + - **initContainerStatuses.lastState.waiting.reason** (string) + + (brief) reason the container is not yet running. + + - **initContainerStatuses.ready** (boolean), required + + Specifies whether the container has passed its readiness probe. + + - **initContainerStatuses.restartCount** (int32), required + + The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC. + + - **initContainerStatuses.started** (boolean) + + Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined. + +- **containerStatuses** ([]ContainerStatus) + + The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + + + *ContainerStatus contains details for the current status of this container.* + + - **containerStatuses.name** (string), required + + This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated. + + - **containerStatuses.image** (string), required + + The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images + + - **containerStatuses.imageID** (string), required + + ImageID of the container's image. + + - **containerStatuses.containerID** (string) + + Container's ID in the format 'docker://<container_id>'. + + - **containerStatuses.state** (ContainerState) + + Details about the container's current condition. + + + *ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.* + + - **containerStatuses.state.running** (ContainerStateRunning) + + Details about a running container + + + *ContainerStateRunning is a running state of a container.* + + - **containerStatuses.state.running.startedAt** (Time) + + Time at which the container was last (re-)started + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON.
Wrappers are provided for many of the factory methods that the time package offers.* + + - **containerStatuses.state.terminated** (ContainerStateTerminated) + + Details about a terminated container + + + *ContainerStateTerminated is a terminated state of a container.* + + - **containerStatuses.state.terminated.containerID** (string) + + Container's ID in the format 'docker://\' + + - **containerStatuses.state.terminated.exitCode** (int32), required + + Exit status from the last termination of the container + + - **containerStatuses.state.terminated.startedAt** (Time) + + Time at which previous execution of the container started + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + + - **containerStatuses.state.terminated.finishedAt** (Time) + + Time at which the container last terminated + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + + - **containerStatuses.state.terminated.message** (string) + + Message regarding the last termination of the container + + - **containerStatuses.state.terminated.reason** (string) + + (brief) reason from the last termination of the container + + - **containerStatuses.state.terminated.signal** (int32) + + Signal from the last termination of the container + + - **containerStatuses.state.waiting** (ContainerStateWaiting) + + Details about a waiting container + + + *ContainerStateWaiting is a waiting state of a container.* + + - **containerStatuses.state.waiting.message** (string) + + Message regarding why the container is not yet running. + + - **containerStatuses.state.waiting.reason** (string) + + (brief) reason the container is not yet running. + + - **containerStatuses.lastState** (ContainerState) + + Details about the container's last termination condition. + + + *ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.* + + - **containerStatuses.lastState.running** (ContainerStateRunning) + + Details about a running container + + + *ContainerStateRunning is a running state of a container.* + + - **containerStatuses.lastState.running.startedAt** (Time) + + Time at which the container was last (re-)started + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + + - **containerStatuses.lastState.terminated** (ContainerStateTerminated) + + Details about a terminated container + + + *ContainerStateTerminated is a terminated state of a container.* + + - **containerStatuses.lastState.terminated.containerID** (string) + + Container's ID in the format 'docker://\' + + - **containerStatuses.lastState.terminated.exitCode** (int32), required + + Exit status from the last termination of the container + + - **containerStatuses.lastState.terminated.startedAt** (Time) + + Time at which previous execution of the container started + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers.* + + - **containerStatuses.lastState.terminated.finishedAt** (Time) + + Time at which the container last terminated + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + + - **containerStatuses.lastState.terminated.message** (string) + + Message regarding the last termination of the container + + - **containerStatuses.lastState.terminated.reason** (string) + + (brief) reason from the last termination of the container + + - **containerStatuses.lastState.terminated.signal** (int32) + + Signal from the last termination of the container + + - **containerStatuses.lastState.waiting** (ContainerStateWaiting) + + Details about a waiting container + + + *ContainerStateWaiting is a waiting state of a container.* + + - **containerStatuses.lastState.waiting.message** (string) + + Message regarding why the container is not yet running. + + - **containerStatuses.lastState.waiting.reason** (string) + + (brief) reason the container is not yet running. + + - **containerStatuses.ready** (boolean), required + + Specifies whether the container has passed its readiness probe. + + - **containerStatuses.restartCount** (int32), required + + The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC. + + - **containerStatuses.started** (boolean) + + Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined. + +- **ephemeralContainerStatuses** ([]ContainerStatus) + + Status for any ephemeral containers that have run in this pod. This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. + + + *ContainerStatus contains details for the current status of this container.* + + - **ephemeralContainerStatuses.name** (string), required + + This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated. + + - **ephemeralContainerStatuses.image** (string), required + + The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images + + - **ephemeralContainerStatuses.imageID** (string), required + + ImageID of the container's image. + + - **ephemeralContainerStatuses.containerID** (string) + + Container's ID in the format 'docker://\'. + + - **ephemeralContainerStatuses.state** (ContainerState) + + Details about the container's current condition. + + + *ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.* + + - **ephemeralContainerStatuses.state.running** (ContainerStateRunning) + + Details about a running container + + + *ContainerStateRunning is a running state of a container.* + + - **ephemeralContainerStatuses.state.running.startedAt** (Time) + + Time at which the container was last (re-)started + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers.* + + - **ephemeralContainerStatuses.state.terminated** (ContainerStateTerminated) + + Details about a terminated container + + + *ContainerStateTerminated is a terminated state of a container.* + + - **ephemeralContainerStatuses.state.terminated.containerID** (string) + + Container's ID in the format 'docker://\' + + - **ephemeralContainerStatuses.state.terminated.exitCode** (int32), required + + Exit status from the last termination of the container + + - **ephemeralContainerStatuses.state.terminated.startedAt** (Time) + + Time at which previous execution of the container started + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + + - **ephemeralContainerStatuses.state.terminated.finishedAt** (Time) + + Time at which the container last terminated + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + + - **ephemeralContainerStatuses.state.terminated.message** (string) + + Message regarding the last termination of the container + + - **ephemeralContainerStatuses.state.terminated.reason** (string) + + (brief) reason from the last termination of the container + + - **ephemeralContainerStatuses.state.terminated.signal** (int32) + + Signal from the last termination of the container + + - **ephemeralContainerStatuses.state.waiting** (ContainerStateWaiting) + + Details about a waiting container + + + *ContainerStateWaiting is a waiting state of a container.* + + - **ephemeralContainerStatuses.state.waiting.message** (string) + + Message regarding why the container is not yet running. + + - **ephemeralContainerStatuses.state.waiting.reason** (string) + + (brief) reason the container is not yet running. + + - **ephemeralContainerStatuses.lastState** (ContainerState) + + Details about the container's last termination condition. + + + *ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.* + + - **ephemeralContainerStatuses.lastState.running** (ContainerStateRunning) + + Details about a running container + + + *ContainerStateRunning is a running state of a container.* + + - **ephemeralContainerStatuses.lastState.running.startedAt** (Time) + + Time at which the container was last (re-)started + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + + - **ephemeralContainerStatuses.lastState.terminated** (ContainerStateTerminated) + + Details about a terminated container + + + *ContainerStateTerminated is a terminated state of a container.* + + - **ephemeralContainerStatuses.lastState.terminated.containerID** (string) + + Container's ID in the format 'docker://\' + + - **ephemeralContainerStatuses.lastState.terminated.exitCode** (int32), required + + Exit status from the last termination of the container + + - **ephemeralContainerStatuses.lastState.terminated.startedAt** (Time) + + Time at which previous execution of the container started + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers.* + + - **ephemeralContainerStatuses.lastState.terminated.finishedAt** (Time) + + Time at which the container last terminated + + + *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* + + - **ephemeralContainerStatuses.lastState.terminated.message** (string) + + Message regarding the last termination of the container + + - **ephemeralContainerStatuses.lastState.terminated.reason** (string) + + (brief) reason from the last termination of the container + + - **ephemeralContainerStatuses.lastState.terminated.signal** (int32) + + Signal from the last termination of the container + + - **ephemeralContainerStatuses.lastState.waiting** (ContainerStateWaiting) + + Details about a waiting container + + + *ContainerStateWaiting is a waiting state of a container.* + + - **ephemeralContainerStatuses.lastState.waiting.message** (string) + + Message regarding why the container is not yet running. + + - **ephemeralContainerStatuses.lastState.waiting.reason** (string) + + (brief) reason the container is not yet running. + + - **ephemeralContainerStatuses.ready** (boolean), required + + Specifies whether the container has passed its readiness probe. + + - **ephemeralContainerStatuses.restartCount** (int32), required + + The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC. + + - **ephemeralContainerStatuses.started** (boolean) + + Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined. + + + + + +## PodList {#PodList} + +PodList is a list of Pods. + +
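The container status fields above (state, lastState, ready, restartCount) are surfaced on each Pod in a list's `items`. As a rough orientation, a minimal client-go sketch, assuming an in-cluster config and the illustrative namespace `default`, that lists pods and inspects each container's current state:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumption: the program runs inside a cluster; use clientcmd for
	// kubeconfig-based setups instead.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// list: GET /api/v1/namespaces/default/pods (namespace is illustrative).
	podList, err := client.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}

	// Each status's state carries at most one of waiting, running, terminated.
	for _, pod := range podList.Items {
		for _, cs := range pod.Status.ContainerStatuses {
			switch {
			case cs.State.Running != nil:
				fmt.Printf("%s/%s running since %s, restarts=%d\n",
					pod.Name, cs.Name, cs.State.Running.StartedAt, cs.RestartCount)
			case cs.State.Terminated != nil:
				fmt.Printf("%s/%s terminated: exit=%d reason=%q\n",
					pod.Name, cs.Name, cs.State.Terminated.ExitCode, cs.State.Terminated.Reason)
			case cs.State.Waiting != nil:
				fmt.Printf("%s/%s waiting: %s\n", pod.Name, cs.Name, cs.State.Waiting.Reason)
			}
		}
	}
}
```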
+ +- **items** ([]Pod), required + + List of pods. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md + +- **apiVersion** (string) + + APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + +- **kind** (string) + + Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + +- **metadata** (ListMeta) + + Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + + + + + +## Operations {#Operations} + + + +
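Each operation below is a plain HTTP call against the path shown; typed clients wrap these calls. A minimal sketch of the `create`, `get`, and `delete` operations, assuming client-go and an illustrative kubeconfig path:

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a local kubeconfig at the placeholder path below.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	pods := client.CoreV1().Pods("default")
	ctx := context.TODO()

	// create: POST /api/v1/namespaces/{namespace}/pods
	created, err := pods.Create(ctx, &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{Name: "demo", Image: "nginx"}},
		},
	}, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}

	// get: GET /api/v1/namespaces/{namespace}/pods/{name}
	got, err := pods.Get(ctx, created.Name, metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("phase:", got.Status.Phase)

	// delete: DELETE /api/v1/namespaces/{namespace}/pods/{name}
	if err := pods.Delete(ctx, created.Name, metav1.DeleteOptions{}); err != nil {
		panic(err)
	}
}
```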
+ + + + + + +### `get` read the specified Pod + +#### HTTP Request + +GET /api/v1/namespaces/{namespace}/pods/{name} + +#### Parameters + + +- **name** (*in path*): string, required + + name of the Pod + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **pretty** (*in query*): string + + }}">pretty + + + +#### Response + + +200 (}}">Pod): OK + +401: Unauthorized + + +### `get` read log of the specified Pod + +#### HTTP Request + +GET /api/v1/namespaces/{namespace}/pods/{name}/log + +#### Parameters + + +- **name** (*in path*): string, required + + name of the Pod + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **container** (*in query*): string + + The container for which to stream logs. Defaults to only container if there is one container in the pod. + + +- **follow** (*in query*): boolean + + Follow the log stream of the pod. Defaults to false. + + +- **insecureSkipTLSVerifyBackend** (*in query*): boolean + + insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet). + + +- **limitBytes** (*in query*): integer + + If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit. + + +- **pretty** (*in query*): string + + }}">pretty + + +- **previous** (*in query*): boolean + + Return previous terminated container logs. Defaults to false. + + +- **sinceSeconds** (*in query*): integer + + A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified. + + +- **tailLines** (*in query*): integer + + If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime + + +- **timestamps** (*in query*): boolean + + If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false. 
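The query parameters above correspond to fields of `PodLogOptions` in client-go. A hedged sketch, in which the pod name `demo` and container name `app` are placeholders, that follows the last ten log lines with timestamps:

```go
package main

import (
	"context"
	"io"
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumption: running in-cluster; swap in clientcmd for local use.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	tail := int64(10)
	opts := &corev1.PodLogOptions{
		Container:  "app", // maps to the `container` query parameter
		Follow:     true,  // maps to `follow`
		TailLines:  &tail, // maps to `tailLines`
		Timestamps: true,  // maps to `timestamps`
	}

	// GET /api/v1/namespaces/default/pods/demo/log
	stream, err := client.CoreV1().Pods("default").GetLogs("demo", opts).Stream(context.TODO())
	if err != nil {
		panic(err)
	}
	defer stream.Close()
	io.Copy(os.Stdout, stream)
}
```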
+ + + +#### Response + + +200 (string): OK + +401: Unauthorized + + +### `get` read status of the specified Pod + +#### HTTP Request + +GET /api/v1/namespaces/{namespace}/pods/{name}/status + +#### Parameters + + +- **name** (*in path*): string, required + + name of the Pod + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **pretty** (*in query*): string + + }}">pretty + + + +#### Response + + +200 (}}">Pod): OK + +401: Unauthorized + + +### `list` list or watch objects of kind Pod + +#### HTTP Request + +GET /api/v1/namespaces/{namespace}/pods + +#### Parameters + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **allowWatchBookmarks** (*in query*): boolean + + }}">allowWatchBookmarks + + +- **continue** (*in query*): string + + }}">continue + + +- **fieldSelector** (*in query*): string + + }}">fieldSelector + + +- **labelSelector** (*in query*): string + + }}">labelSelector + + +- **limit** (*in query*): integer + + }}">limit + + +- **pretty** (*in query*): string + + }}">pretty + + +- **resourceVersion** (*in query*): string + + }}">resourceVersion + + +- **resourceVersionMatch** (*in query*): string + + }}">resourceVersionMatch + + +- **timeoutSeconds** (*in query*): integer + + }}">timeoutSeconds + + +- **watch** (*in query*): boolean + + }}">watch + + + +#### Response + + +200 (}}">PodList): OK + +401: Unauthorized + + +### `list` list or watch objects of kind Pod + +#### HTTP Request + +GET /api/v1/pods + +#### Parameters + + +- **allowWatchBookmarks** (*in query*): boolean + + }}">allowWatchBookmarks + + +- **continue** (*in query*): string + + }}">continue + + +- **fieldSelector** (*in query*): string + + }}">fieldSelector + + +- **labelSelector** (*in query*): string + + }}">labelSelector + + +- **limit** (*in query*): integer + + }}">limit + + +- **pretty** (*in query*): string + + }}">pretty + + +- **resourceVersion** (*in query*): string + + }}">resourceVersion + + +- **resourceVersionMatch** (*in query*): string + + }}">resourceVersionMatch + + +- **timeoutSeconds** (*in query*): integer + + }}">timeoutSeconds + + +- **watch** (*in query*): boolean + + }}">watch + + + +#### Response + + +200 (}}">PodList): OK + +401: Unauthorized + + +### `create` create a Pod + +#### HTTP Request + +POST /api/v1/namespaces/{namespace}/pods + +#### Parameters + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **body**: }}">Pod, required + + + + +- **dryRun** (*in query*): string + + }}">dryRun + + +- **fieldManager** (*in query*): string + + }}">fieldManager + + +- **pretty** (*in query*): string + + }}">pretty + + + +#### Response + + +200 (}}">Pod): OK + +201 (}}">Pod): Created + +202 (}}">Pod): Accepted + +401: Unauthorized + + +### `update` replace the specified Pod + +#### HTTP Request + +PUT /api/v1/namespaces/{namespace}/pods/{name} + +#### Parameters + + +- **name** (*in path*): string, required + + name of the Pod + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **body**: }}">Pod, required + + + + +- **dryRun** (*in query*): string + + }}">dryRun + + +- **fieldManager** (*in query*): string + + }}">fieldManager + + +- **pretty** (*in query*): string + + }}">pretty + + + +#### Response + + +200 (}}">Pod): OK + +201 (}}">Pod): Created + +401: Unauthorized + + +### `update` replace status of the specified Pod + +#### HTTP Request + +PUT /api/v1/namespaces/{namespace}/pods/{name}/status + +#### Parameters + + +- **name** (*in path*): string, required + + name of the Pod + + +- 
**namespace** (*in path*): string, required + + }}">namespace + + +- **body**: }}">Pod, required + + + + +- **dryRun** (*in query*): string + + }}">dryRun + + +- **fieldManager** (*in query*): string + + }}">fieldManager + + +- **pretty** (*in query*): string + + }}">pretty + + + +#### Response + + +200 (}}">Pod): OK + +201 (}}">Pod): Created + +401: Unauthorized + + +### `patch` partially update the specified Pod + +#### HTTP Request + +PATCH /api/v1/namespaces/{namespace}/pods/{name} + +#### Parameters + + +- **name** (*in path*): string, required + + name of the Pod + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **body**: }}">Patch, required + + + + +- **dryRun** (*in query*): string + + }}">dryRun + + +- **fieldManager** (*in query*): string + + }}">fieldManager + + +- **force** (*in query*): boolean + + }}">force + + +- **pretty** (*in query*): string + + }}">pretty + + + +#### Response + + +200 (}}">Pod): OK + +401: Unauthorized + + +### `patch` partially update status of the specified Pod + +#### HTTP Request + +PATCH /api/v1/namespaces/{namespace}/pods/{name}/status + +#### Parameters + + +- **name** (*in path*): string, required + + name of the Pod + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **body**: }}">Patch, required + + + + +- **dryRun** (*in query*): string + + }}">dryRun + + +- **fieldManager** (*in query*): string + + }}">fieldManager + + +- **force** (*in query*): boolean + + }}">force + + +- **pretty** (*in query*): string + + }}">pretty + + + +#### Response + + +200 (}}">Pod): OK + +401: Unauthorized + + +### `delete` delete a Pod + +#### HTTP Request + +DELETE /api/v1/namespaces/{namespace}/pods/{name} + +#### Parameters + + +- **name** (*in path*): string, required + + name of the Pod + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **body**: }}">DeleteOptions + + + + +- **dryRun** (*in query*): string + + }}">dryRun + + +- **gracePeriodSeconds** (*in query*): integer + + }}">gracePeriodSeconds + + +- **pretty** (*in query*): string + + }}">pretty + + +- **propagationPolicy** (*in query*): string + + }}">propagationPolicy + + + +#### Response + + +200 (}}">Pod): OK + +202 (}}">Pod): Accepted + +401: Unauthorized + + +### `deletecollection` delete collection of Pod + +#### HTTP Request + +DELETE /api/v1/namespaces/{namespace}/pods + +#### Parameters + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **body**: }}">DeleteOptions + + + + +- **continue** (*in query*): string + + }}">continue + + +- **dryRun** (*in query*): string + + }}">dryRun + + +- **fieldSelector** (*in query*): string + + }}">fieldSelector + + +- **gracePeriodSeconds** (*in query*): integer + + }}">gracePeriodSeconds + + +- **labelSelector** (*in query*): string + + }}">labelSelector + + +- **limit** (*in query*): integer + + }}">limit + + +- **pretty** (*in query*): string + + }}">pretty + + +- **propagationPolicy** (*in query*): string + + }}">propagationPolicy + + +- **resourceVersion** (*in query*): string + + }}">resourceVersion + + +- **resourceVersionMatch** (*in query*): string + + }}">resourceVersionMatch + + +- **timeoutSeconds** (*in query*): integer + + }}">timeoutSeconds + + + +#### Response + + +200 (}}">Status): OK + +401: Unauthorized + diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/priority-class-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/priority-class-v1.md similarity index 83% rename from 
content/en/docs/reference/kubernetes-api/workloads-resources/priority-class-v1.md rename to content/en/docs/reference/kubernetes-api/workload-resources/priority-class-v1.md index 30b40e6840e67..020c25c05af3d 100644 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/priority-class-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/priority-class-v1.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "PriorityClass defines mapping from a priority class name to the priority integer value." title: "PriorityClass" -weight: 16 +weight: 14 +auto_generated: true --- + + `apiVersion: scheduling.k8s.io/v1` `import "k8s.io/api/scheduling/v1"` @@ -66,7 +78,7 @@ PriorityClassList is a collection of priority classes. Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **items** ([]}}">PriorityClass), required +- **items** ([]}}">PriorityClass), required items is the list of PriorityClasses @@ -108,7 +120,7 @@ GET /apis/scheduling.k8s.io/v1/priorityclasses/{name} #### Response -200 (}}">PriorityClass): OK +200 (}}">PriorityClass): OK 401: Unauthorized @@ -176,7 +188,7 @@ GET /apis/scheduling.k8s.io/v1/priorityclasses #### Response -200 (}}">PriorityClassList): OK +200 (}}">PriorityClassList): OK 401: Unauthorized @@ -190,7 +202,7 @@ POST /apis/scheduling.k8s.io/v1/priorityclasses #### Parameters -- **body**: }}">PriorityClass, required +- **body**: }}">PriorityClass, required @@ -214,11 +226,11 @@ POST /apis/scheduling.k8s.io/v1/priorityclasses #### Response -200 (}}">PriorityClass): OK +200 (}}">PriorityClass): OK -201 (}}">PriorityClass): Created +201 (}}">PriorityClass): Created -202 (}}">PriorityClass): Accepted +202 (}}">PriorityClass): Accepted 401: Unauthorized @@ -237,7 +249,7 @@ PUT /apis/scheduling.k8s.io/v1/priorityclasses/{name} name of the PriorityClass -- **body**: }}">PriorityClass, required +- **body**: }}">PriorityClass, required @@ -261,9 +273,9 @@ PUT /apis/scheduling.k8s.io/v1/priorityclasses/{name} #### Response -200 (}}">PriorityClass): OK +200 (}}">PriorityClass): OK -201 (}}">PriorityClass): Created +201 (}}">PriorityClass): Created 401: Unauthorized @@ -311,7 +323,7 @@ PATCH /apis/scheduling.k8s.io/v1/priorityclasses/{name} #### Response -200 (}}">PriorityClass): OK +200 (}}">PriorityClass): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/replica-set-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/replica-set-v1.md similarity index 86% rename from content/en/docs/reference/kubernetes-api/workloads-resources/replica-set-v1.md rename to content/en/docs/reference/kubernetes-api/workload-resources/replica-set-v1.md index 429a1bd5a2af2..7a344128c8d11 100644 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/replica-set-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/replica-set-v1.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "ReplicaSet ensures that a specified number of pod replicas are running at any given time." title: "ReplicaSet" -weight: 6 +weight: 5 +auto_generated: true --- + + `apiVersion: apps/v1` `import "k8s.io/api/apps/v1"` @@ -30,11 +42,11 @@ ReplicaSet ensures that a specified number of pod replicas are running at any gi If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">ReplicaSetSpec) +- **spec** (}}">ReplicaSetSpec) Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status -- **status** (}}">ReplicaSetStatus) +- **status** (}}">ReplicaSetStatus) Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -52,7 +64,7 @@ ReplicaSetSpec is the specification of a ReplicaSet. Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors -- **template** (}}">PodTemplateSpec) +- **template** (}}">PodTemplateSpec) Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template @@ -146,7 +158,7 @@ ReplicaSetList is a collection of ReplicaSets. Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds -- **items** ([]}}">ReplicaSet), required +- **items** ([]}}">ReplicaSet), required List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -193,7 +205,7 @@ GET /apis/apps/v1/namespaces/{namespace}/replicasets/{name} #### Response -200 (}}">ReplicaSet): OK +200 (}}">ReplicaSet): OK 401: Unauthorized @@ -226,7 +238,7 @@ GET /apis/apps/v1/namespaces/{namespace}/replicasets/{name}/status #### Response -200 (}}">ReplicaSet): OK +200 (}}">ReplicaSet): OK 401: Unauthorized @@ -299,7 +311,7 @@ GET /apis/apps/v1/namespaces/{namespace}/replicasets #### Response -200 (}}">ReplicaSetList): OK +200 (}}">ReplicaSetList): OK 401: Unauthorized @@ -367,7 +379,7 @@ GET /apis/apps/v1/replicasets #### Response -200 (}}">ReplicaSetList): OK +200 (}}">ReplicaSetList): OK 401: Unauthorized @@ -386,7 +398,7 @@ POST /apis/apps/v1/namespaces/{namespace}/replicasets }}">namespace -- **body**: }}">ReplicaSet, required +- **body**: }}">ReplicaSet, required @@ -410,11 +422,11 @@ POST /apis/apps/v1/namespaces/{namespace}/replicasets #### Response -200 (}}">ReplicaSet): OK +200 (}}">ReplicaSet): OK -201 (}}">ReplicaSet): Created +201 (}}">ReplicaSet): Created -202 (}}">ReplicaSet): Accepted +202 (}}">ReplicaSet): Accepted 401: Unauthorized @@ -438,7 +450,7 @@ PUT /apis/apps/v1/namespaces/{namespace}/replicasets/{name} }}">namespace -- **body**: }}">ReplicaSet, required +- **body**: }}">ReplicaSet, required @@ -462,9 +474,9 @@ PUT /apis/apps/v1/namespaces/{namespace}/replicasets/{name} #### Response -200 (}}">ReplicaSet): OK +200 (}}">ReplicaSet): OK -201 (}}">ReplicaSet): Created +201 (}}">ReplicaSet): Created 401: Unauthorized @@ -488,7 +500,7 @@ PUT /apis/apps/v1/namespaces/{namespace}/replicasets/{name}/status }}">namespace -- **body**: }}">ReplicaSet, required +- **body**: }}">ReplicaSet, required @@ -512,9 +524,9 @@ PUT /apis/apps/v1/namespaces/{namespace}/replicasets/{name}/status #### Response -200 (}}">ReplicaSet): OK +200 
(}}">ReplicaSet): OK -201 (}}">ReplicaSet): Created +201 (}}">ReplicaSet): Created 401: Unauthorized @@ -567,7 +579,7 @@ PATCH /apis/apps/v1/namespaces/{namespace}/replicasets/{name} #### Response -200 (}}">ReplicaSet): OK +200 (}}">ReplicaSet): OK 401: Unauthorized @@ -620,7 +632,7 @@ PATCH /apis/apps/v1/namespaces/{namespace}/replicasets/{name}/status #### Response -200 (}}">ReplicaSet): OK +200 (}}">ReplicaSet): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/replication-controller-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/replication-controller-v1.md similarity index 83% rename from content/en/docs/reference/kubernetes-api/workloads-resources/replication-controller-v1.md rename to content/en/docs/reference/kubernetes-api/workload-resources/replication-controller-v1.md index 2970b6670c45e..c14db8ece901f 100644 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/replication-controller-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/replication-controller-v1.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "ReplicationController represents the configuration of a replication controller." title: "ReplicationController" -weight: 5 +weight: 4 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` @@ -30,11 +42,11 @@ ReplicationController represents the configuration of a replication controller. If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -- **spec** (}}">ReplicationControllerSpec) +- **spec** (}}">ReplicationControllerSpec) Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status -- **status** (}}">ReplicationControllerStatus) +- **status** (}}">ReplicationControllerStatus) Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -52,7 +64,7 @@ ReplicationControllerSpec is the specification of a replication controller. Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors -- **template** (}}">PodTemplateSpec) +- **template** (}}">PodTemplateSpec) Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template @@ -146,7 +158,7 @@ ReplicationControllerList is a collection of replication controllers. Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds -- **items** ([]}}">ReplicationController), required +- **items** ([]}}">ReplicationController), required List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -193,7 +205,7 @@ GET /api/v1/namespaces/{namespace}/replicationcontrollers/{name} #### Response -200 (}}">ReplicationController): OK +200 (}}">ReplicationController): OK 401: Unauthorized @@ -226,7 +238,7 @@ GET /api/v1/namespaces/{namespace}/replicationcontrollers/{name}/status #### Response -200 (}}">ReplicationController): OK +200 (}}">ReplicationController): OK 401: Unauthorized @@ -299,7 +311,7 @@ GET /api/v1/namespaces/{namespace}/replicationcontrollers #### Response -200 (}}">ReplicationControllerList): OK +200 (}}">ReplicationControllerList): OK 401: Unauthorized @@ -367,7 +379,7 @@ GET /api/v1/replicationcontrollers #### Response -200 (}}">ReplicationControllerList): OK +200 (}}">ReplicationControllerList): OK 401: Unauthorized @@ -386,7 +398,7 @@ POST /api/v1/namespaces/{namespace}/replicationcontrollers }}">namespace -- **body**: }}">ReplicationController, required +- **body**: }}">ReplicationController, required @@ -410,11 +422,11 @@ POST /api/v1/namespaces/{namespace}/replicationcontrollers #### Response -200 (}}">ReplicationController): OK +200 (}}">ReplicationController): OK -201 (}}">ReplicationController): Created +201 (}}">ReplicationController): Created -202 (}}">ReplicationController): Accepted +202 (}}">ReplicationController): Accepted 401: Unauthorized @@ -438,7 +450,7 @@ PUT /api/v1/namespaces/{namespace}/replicationcontrollers/{name} }}">namespace -- **body**: }}">ReplicationController, required +- **body**: }}">ReplicationController, required @@ -462,9 +474,9 @@ PUT /api/v1/namespaces/{namespace}/replicationcontrollers/{name} #### Response -200 (}}">ReplicationController): OK +200 (}}">ReplicationController): OK -201 (}}">ReplicationController): Created +201 (}}">ReplicationController): Created 401: Unauthorized @@ -488,7 +500,7 @@ PUT /api/v1/namespaces/{namespace}/replicationcontrollers/{name}/status }}">namespace -- **body**: }}">ReplicationController, required +- **body**: }}">ReplicationController, required @@ -512,9 +524,9 @@ PUT /api/v1/namespaces/{namespace}/replicationcontrollers/{name}/status #### Response -200 (}}">ReplicationController): OK +200 (}}">ReplicationController): OK -201 (}}">ReplicationController): Created +201 (}}">ReplicationController): Created 401: Unauthorized @@ -567,7 +579,7 @@ PATCH /api/v1/namespaces/{namespace}/replicationcontrollers/{name} #### Response -200 (}}">ReplicationController): OK +200 (}}">ReplicationController): OK 401: Unauthorized @@ -620,7 +632,7 @@ PATCH /api/v1/namespaces/{namespace}/replicationcontrollers/{name}/status #### Response -200 (}}">ReplicationController): OK +200 (}}">ReplicationController): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/stateful-set-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/stateful-set-v1.md similarity index 86% rename from content/en/docs/reference/kubernetes-api/workloads-resources/stateful-set-v1.md rename to content/en/docs/reference/kubernetes-api/workload-resources/stateful-set-v1.md index 1a55fa4c1db47..ec097d7cced00 100644 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/stateful-set-v1.md +++ 
b/content/en/docs/reference/kubernetes-api/workload-resources/stateful-set-v1.md @@ -6,9 +6,21 @@ api_metadata: content_type: "api_reference" description: "StatefulSet represents a set of pods with consistent identities." title: "StatefulSet" -weight: 8 +weight: 7 +auto_generated: true --- + + `apiVersion: apps/v1` `import "k8s.io/api/apps/v1"` @@ -32,11 +44,11 @@ The StatefulSet guarantees that a given network identity will always map to the - **metadata** (}}">ObjectMeta) -- **spec** (}}">StatefulSetSpec) +- **spec** (}}">StatefulSetSpec) Spec defines the desired identities of pods in this set. -- **status** (}}">StatefulSetStatus) +- **status** (}}">StatefulSetStatus) Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time. @@ -58,7 +70,7 @@ A StatefulSetSpec is the specification of a StatefulSet. selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors -- **template** (}}">PodTemplateSpec), required +- **template** (}}">PodTemplateSpec), required template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. @@ -84,9 +96,9 @@ A StatefulSetSpec is the specification of a StatefulSet. *RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.* - - **updateStrategy.rollingUpdate.partition** (int32) + - **updateStrategy.rollingUpdate.partition** (int32) - Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0. + Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0. - **podManagementPolicy** (string) @@ -193,7 +205,7 @@ StatefulSetList is a collection of StatefulSets. 
- **metadata** (}}">ListMeta) -- **items** ([]}}">StatefulSet), required +- **items** ([]}}">StatefulSet), required @@ -239,7 +251,7 @@ GET /apis/apps/v1/namespaces/{namespace}/statefulsets/{name} #### Response -200 (}}">StatefulSet): OK +200 (}}">StatefulSet): OK 401: Unauthorized @@ -272,7 +284,7 @@ GET /apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/status #### Response -200 (}}">StatefulSet): OK +200 (}}">StatefulSet): OK 401: Unauthorized @@ -345,7 +357,7 @@ GET /apis/apps/v1/namespaces/{namespace}/statefulsets #### Response -200 (}}">StatefulSetList): OK +200 (}}">StatefulSetList): OK 401: Unauthorized @@ -413,7 +425,7 @@ GET /apis/apps/v1/statefulsets #### Response -200 (}}">StatefulSetList): OK +200 (}}">StatefulSetList): OK 401: Unauthorized @@ -432,7 +444,7 @@ POST /apis/apps/v1/namespaces/{namespace}/statefulsets }}">namespace -- **body**: }}">StatefulSet, required +- **body**: }}">StatefulSet, required @@ -456,11 +468,11 @@ POST /apis/apps/v1/namespaces/{namespace}/statefulsets #### Response -200 (}}">StatefulSet): OK +200 (}}">StatefulSet): OK -201 (}}">StatefulSet): Created +201 (}}">StatefulSet): Created -202 (}}">StatefulSet): Accepted +202 (}}">StatefulSet): Accepted 401: Unauthorized @@ -484,7 +496,7 @@ PUT /apis/apps/v1/namespaces/{namespace}/statefulsets/{name} }}">namespace -- **body**: }}">StatefulSet, required +- **body**: }}">StatefulSet, required @@ -508,9 +520,9 @@ PUT /apis/apps/v1/namespaces/{namespace}/statefulsets/{name} #### Response -200 (}}">StatefulSet): OK +200 (}}">StatefulSet): OK -201 (}}">StatefulSet): Created +201 (}}">StatefulSet): Created 401: Unauthorized @@ -534,7 +546,7 @@ PUT /apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/status }}">namespace -- **body**: }}">StatefulSet, required +- **body**: }}">StatefulSet, required @@ -558,9 +570,9 @@ PUT /apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/status #### Response -200 (}}">StatefulSet): OK +200 (}}">StatefulSet): OK -201 (}}">StatefulSet): Created +201 (}}">StatefulSet): Created 401: Unauthorized @@ -613,7 +625,7 @@ PATCH /apis/apps/v1/namespaces/{namespace}/statefulsets/{name} #### Response -200 (}}">StatefulSet): OK +200 (}}">StatefulSet): OK 401: Unauthorized @@ -666,7 +678,7 @@ PATCH /apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/status #### Response -200 (}}">StatefulSet): OK +200 (}}">StatefulSet): OK 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/_index.md b/content/en/docs/reference/kubernetes-api/workloads-resources/_index.md deleted file mode 100644 index 1b5aab3fbaf18..0000000000000 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Workloads Resources" -weight: 1 ---- diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/container.md b/content/en/docs/reference/kubernetes-api/workloads-resources/container.md deleted file mode 100644 index d87cd422c84d2..0000000000000 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/container.md +++ /dev/null @@ -1,774 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "Container" -content_type: "api_reference" -description: "A single application container that you want to run within a pod." -title: "Container" -weight: 2 ---- - - - -`import "k8s.io/api/core/v1"` - - -## Container {#Container} - -A single application container that you want to run within a pod. - -
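Before the field-by-field reference below, a brief sketch of how a few of these fields are populated in Go; all names, the image tag, and the resource values are illustrative:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	c := corev1.Container{
		Name:    "web", // required; must be a DNS_LABEL
		Image:   "nginx:1.19",
		Command: []string{"nginx"}, // overrides the image ENTRYPOINT
		Args:    []string{"-g", "daemon off;"},
		Ports: []corev1.ContainerPort{
			{Name: "http", ContainerPort: 80, Protocol: corev1.ProtocolTCP},
		},
		Env: []corev1.EnvVar{{Name: "MODE", Value: "production"}},
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("100m"),
				corev1.ResourceMemory: resource.MustParse("64Mi"),
			},
		},
	}
	fmt.Printf("%+v\n", c)
}
```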
- -- **name** (string), required - - Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. - - - -### Image - - -- **image** (string) - - Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. - -- **imagePullPolicy** (string) - - Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - -### Entrypoint - - -- **command** ([]string) - - Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - -- **args** ([]string) - - Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - -- **workingDir** (string) - - Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. - -### Ports - - -- **ports** ([]ContainerPort) - - *Patch strategy: merge on key `containerPort`* - - *Map: unique values on keys `containerPort, protocol` will be kept during a merge* - - List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. - - - *ContainerPort represents a network port in a single container.* - - - **ports.containerPort** (int32), required - - Number of port to expose on the pod's IP address. This must be a valid port number, 0 \< x \< 65536. - - - **ports.hostIP** (string) - - What host IP to bind the external port to. - - - **ports.hostPort** (int32) - - Number of port to expose on the host. If specified, this must be a valid port number, 0 \< x \< 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. - - - **ports.name** (string) - - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. 
- - - **ports.protocol** (string) - - Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". - -### Environment variables - - -- **env** ([]EnvVar) - - *Patch strategy: merge on key `name`* - - List of environment variables to set in the container. Cannot be updated. - - - *EnvVar represents an environment variable present in a Container.* - - - **env.name** (string), required - - Name of the environment variable. Must be a C_IDENTIFIER. - - - **env.value** (string) - - Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". - - - **env.valueFrom** (EnvVarSource) - - Source for the environment variable's value. Cannot be used if value is not empty. - - - *EnvVarSource represents a source for the value of an EnvVar.* - - - **env.valueFrom.configMapKeyRef** (ConfigMapKeySelector) - - Selects a key of a ConfigMap. - - - *Selects a key from a ConfigMap.* - - - **env.valueFrom.configMapKeyRef.key** (string), required - - The key to select. - - - **env.valueFrom.configMapKeyRef.name** (string) - - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **env.valueFrom.configMapKeyRef.optional** (boolean) - - Specify whether the ConfigMap or its key must be defined - - - **env.valueFrom.fieldRef** (}}">ObjectFieldSelector) - - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\']`, `metadata.annotations['\']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - - - **env.valueFrom.resourceFieldRef** (}}">ResourceFieldSelector) - - Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - - - **env.valueFrom.secretKeyRef** (SecretKeySelector) - - Selects a key of a secret in the pod's namespace - - - *SecretKeySelector selects a key of a Secret.* - - - **env.valueFrom.secretKeyRef.key** (string), required - - The key of the secret to select from. Must be a valid secret key. - - - **env.valueFrom.secretKeyRef.name** (string) - - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **env.valueFrom.secretKeyRef.optional** (boolean) - - Specify whether the Secret or its key must be defined - -- **envFrom** ([]EnvFromSource) - - List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. - - - *EnvFromSource represents the source of a set of ConfigMaps* - - - **envFrom.configMapRef** (ConfigMapEnvSource) - - The ConfigMap to select from - - - *ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. 
- - The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.* - - - **envFrom.configMapRef.name** (string) - - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **envFrom.configMapRef.optional** (boolean) - - Specify whether the ConfigMap must be defined - - - **envFrom.prefix** (string) - - An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. - - - **envFrom.secretRef** (SecretEnvSource) - - The Secret to select from - - - *SecretEnvSource selects a Secret to populate the environment variables with. - - The contents of the target Secret's Data field will represent the key-value pairs as environment variables.* - - - **envFrom.secretRef.name** (string) - - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **envFrom.secretRef.optional** (boolean) - - Specify whether the Secret must be defined - -### Volumes - - -- **volumeMounts** ([]VolumeMount) - - *Patch strategy: merge on key `mountPath`* - - Pod volumes to mount into the container's filesystem. Cannot be updated. - - - *VolumeMount describes a mounting of a Volume within a container.* - - - **volumeMounts.mountPath** (string), required - - Path within the container at which the volume should be mounted. Must not contain ':'. - - - **volumeMounts.name** (string), required - - This must match the Name of a Volume. - - - **volumeMounts.mountPropagation** (string) - - mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. - - - **volumeMounts.readOnly** (boolean) - - Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. - - - **volumeMounts.subPath** (string) - - Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). - - - **volumeMounts.subPathExpr** (string) - - Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. - -- **volumeDevices** ([]VolumeDevice) - - *Patch strategy: merge on key `devicePath`* - - volumeDevices is the list of block devices to be used by the container. - - - *volumeDevice describes a mapping of a raw block device within a container.* - - - **volumeDevices.devicePath** (string), required - - devicePath is the path inside of the container that the device will be mapped to. - - - **volumeDevices.name** (string), required - - name must match the name of a persistentVolumeClaim in the pod - -### Resources - - -- **resources** (ResourceRequirements) - - Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - - - *ResourceRequirements describes the compute resource requirements.* - - - **resources.limits** (map[string]}}">Quantity) - - Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - - - **resources.requests** (map[string]}}">Quantity) - - Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - -### Lifecycle - - -- **lifecycle** (Lifecycle) - - Actions that the management system should take in response to container lifecycle events. Cannot be updated. - - - *Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.* - - - **lifecycle.postStart** (Handler) - - PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - - - *Handler defines a specific action that should be taken* - - - **lifecycle.postStart.exec** (}}">ExecAction) - - One and only one of the following should be specified. Exec specifies the action to take. - - - **lifecycle.postStart.httpGet** (}}">HTTPGetAction) - - HTTPGet specifies the http request to perform. - - - **lifecycle.postStart.tcpSocket** (}}">TCPSocketAction) - - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - - - **lifecycle.preStop** (Handler) - - PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - - - *Handler defines a specific action that should be taken* - - - **lifecycle.preStop.exec** (}}">ExecAction) - - One and only one of the following should be specified. Exec specifies the action to take. - - - **lifecycle.preStop.httpGet** (}}">HTTPGetAction) - - HTTPGet specifies the http request to perform. - - - **lifecycle.preStop.tcpSocket** (}}">TCPSocketAction) - - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - -- **terminationMessagePath** (string) - - Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. - -- **terminationMessagePolicy** (string) - - Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. 
FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. - -- **livenessProbe** (Probe) - - Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - *Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.* - - - **livenessProbe.exec** (}}">ExecAction) - - One and only one of the following should be specified. Exec specifies the action to take. - - - **livenessProbe.httpGet** (}}">HTTPGetAction) - - HTTPGet specifies the http request to perform. - - - **livenessProbe.tcpSocket** (}}">TCPSocketAction) - - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - - - **livenessProbe.initialDelaySeconds** (int32) - - Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **livenessProbe.periodSeconds** (int32) - - How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - - - **livenessProbe.timeoutSeconds** (int32) - - Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **livenessProbe.failureThreshold** (int32) - - Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - - - **livenessProbe.successThreshold** (int32) - - Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - -- **readinessProbe** (Probe) - - Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - *Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.* - - - **readinessProbe.exec** (}}">ExecAction) - - One and only one of the following should be specified. Exec specifies the action to take. - - - **readinessProbe.httpGet** (}}">HTTPGetAction) - - HTTPGet specifies the http request to perform. - - - **readinessProbe.tcpSocket** (}}">TCPSocketAction) - - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - - - **readinessProbe.initialDelaySeconds** (int32) - - Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **readinessProbe.periodSeconds** (int32) - - How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - - - **readinessProbe.timeoutSeconds** (int32) - - Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **readinessProbe.failureThreshold** (int32) - - Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - - - **readinessProbe.successThreshold** (int32) - - Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - -### Security Context - - -- **securityContext** (SecurityContext) - - Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - - - *SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.* - - - **securityContext.runAsUser** (int64) - - The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - **securityContext.runAsNonRoot** (boolean) - - Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - **securityContext.runAsGroup** (int64) - - The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - **securityContext.readOnlyRootFilesystem** (boolean) - - Whether this container has a read-only root filesystem. Default is false. - - - **securityContext.procMount** (string) - - procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. - - - **securityContext.privileged** (boolean) - - Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. - - - **securityContext.allowPrivilegeEscalation** (boolean) - - AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN - - - **securityContext.capabilities** (Capabilities) - - The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. 
- - - *Adds and removes POSIX capabilities from running containers.* - - - **securityContext.capabilities.add** ([]string) - - Added capabilities - - - **securityContext.capabilities.drop** ([]string) - - Removed capabilities - - - **securityContext.seccompProfile** (SeccompProfile) - - The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. - - - *SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.* - - - **securityContext.seccompProfile.type** (string), required - - type indicates which kind of seccomp profile will be applied. Valid options are: - - Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. - - - **securityContext.seccompProfile.localhostProfile** (string) - - localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". - - - **securityContext.seLinuxOptions** (SELinuxOptions) - - The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - *SELinuxOptions are the labels to be applied to the container* - - - **securityContext.seLinuxOptions.level** (string) - - Level is SELinux level label that applies to the container. - - - **securityContext.seLinuxOptions.role** (string) - - Role is a SELinux role label that applies to the container. - - - **securityContext.seLinuxOptions.type** (string) - - Type is a SELinux type label that applies to the container. - - - **securityContext.seLinuxOptions.user** (string) - - User is a SELinux user label that applies to the container. - - - **securityContext.windowsOptions** (WindowsSecurityContextOptions) - - The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - *WindowsSecurityContextOptions contain Windows-specific options and credentials.* - - - **securityContext.windowsOptions.gmsaCredentialSpec** (string) - - GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. - - - **securityContext.windowsOptions.gmsaCredentialSpecName** (string) - - GMSACredentialSpecName is the name of the GMSA credential spec to use. - - - **securityContext.windowsOptions.runAsUserName** (string) - - The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - -### Debugging - - -- **stdin** (boolean) - - Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. 
Default is false. - -- **stdinOnce** (boolean) - - Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false. - -- **tty** (boolean) - - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. - -### Beta level - - -- **startupProbe** (Probe) - - StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - *Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.* - - - **startupProbe.exec** (ExecAction) - - One and only one of the following should be specified. Exec specifies the action to take. - - - **startupProbe.httpGet** (HTTPGetAction) - - HTTPGet specifies the http request to perform. - - - **startupProbe.tcpSocket** (TCPSocketAction) - - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - - - **startupProbe.initialDelaySeconds** (int32) - - Number of seconds after the container has started before startup probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **startupProbe.periodSeconds** (int32) - - How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1. - - - **startupProbe.timeoutSeconds** (int32) - - Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **startupProbe.failureThreshold** (int32) - - Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - - - **startupProbe.successThreshold** (int32) - - Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - - - -## ContainerStatus {#ContainerStatus} - -ContainerStatus contains details for the current status of this container. - -
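-For orientation, a sketch of how these fields might surface in an observed Pod's `status.containerStatuses`; every name, image, ID, and timestamp below is hypothetical:
-
-```yaml
-containerStatuses:
-- name: app                          # hypothetical container name
-  image: nginx:1.19                  # hypothetical image
-  imageID: docker-pullable://nginx@sha256:...   # digest elided
-  containerID: docker://4c3b...      # ID elided
-  ready: true
-  started: true
-  restartCount: 1
-  state:
-    running:
-      startedAt: "2021-04-21T10:00:00Z"
-  lastState:
-    terminated:
-      exitCode: 137
-      reason: OOMKilled
-      startedAt: "2021-04-21T09:55:00Z"
-      finishedAt: "2021-04-21T09:59:30Z"
-```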
- -- **name** (string), required - - This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated. - -- **image** (string), required - - The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images - -- **imageID** (string), required - - ImageID of the container's image. - -- **containerID** (string) - - Container's ID in the format 'docker://\<container_id\>'. - -- **state** (ContainerState) - - Details about the container's current condition. - - - *ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.* - - - **state.running** (ContainerStateRunning) - - Details about a running container - - - *ContainerStateRunning is a running state of a container.* - - - **state.running.startedAt** (Time) - - Time at which the container was last (re-)started - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - - - **state.terminated** (ContainerStateTerminated) - - Details about a terminated container - - - *ContainerStateTerminated is a terminated state of a container.* - - - **state.terminated.containerID** (string) - - Container's ID in the format 'docker://\<container_id\>' - - - **state.terminated.exitCode** (int32), required - - Exit status from the last termination of the container - - - **state.terminated.startedAt** (Time) - - Time at which previous execution of the container started - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - - - **state.terminated.finishedAt** (Time) - - Time at which the container last terminated - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - - - **state.terminated.message** (string) - - Message regarding the last termination of the container - - - **state.terminated.reason** (string) - - (brief) reason from the last termination of the container - - - **state.terminated.signal** (int32) - - Signal from the last termination of the container - - - **state.waiting** (ContainerStateWaiting) - - Details about a waiting container - - - *ContainerStateWaiting is a waiting state of a container.* - - - **state.waiting.message** (string) - - Message regarding why the container is not yet running. - - - **state.waiting.reason** (string) - - (brief) reason the container is not yet running. - -- **lastState** (ContainerState) - - Details about the container's last termination condition. - - - *ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.* - - - **lastState.running** (ContainerStateRunning) - - Details about a running container - - - *ContainerStateRunning is a running state of a container.* - - - **lastState.running.startedAt** (Time) - - Time at which the container was last (re-)started - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON.
Wrappers are provided for many of the factory methods that the time package offers.* - - - **lastState.terminated** (ContainerStateTerminated) - - Details about a terminated container - - - *ContainerStateTerminated is a terminated state of a container.* - - - **lastState.terminated.containerID** (string) - - Container's ID in the format 'docker://\<container_id\>' - - - **lastState.terminated.exitCode** (int32), required - - Exit status from the last termination of the container - - - **lastState.terminated.startedAt** (Time) - - Time at which previous execution of the container started - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - - - **lastState.terminated.finishedAt** (Time) - - Time at which the container last terminated - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - - - **lastState.terminated.message** (string) - - Message regarding the last termination of the container - - - **lastState.terminated.reason** (string) - - (brief) reason from the last termination of the container - - - **lastState.terminated.signal** (int32) - - Signal from the last termination of the container - - - **lastState.waiting** (ContainerStateWaiting) - - Details about a waiting container - - - *ContainerStateWaiting is a waiting state of a container.* - - - **lastState.waiting.message** (string) - - Message regarding why the container is not yet running. - - - **lastState.waiting.reason** (string) - - (brief) reason the container is not yet running. - -- **ready** (boolean), required - - Specifies whether the container has passed its readiness probe. - -- **restartCount** (int32), required - - The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC. - -- **started** (boolean) - - Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined. - - - - - diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/cron-job-v2alpha1.md b/content/en/docs/reference/kubernetes-api/workloads-resources/cron-job-v2alpha1.md deleted file mode 100644 index c7c2ba7a81441..0000000000000 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/cron-job-v2alpha1.md +++ /dev/null @@ -1,746 +0,0 @@ ---- -api_metadata: - apiVersion: "batch/v2alpha1" - import: "k8s.io/api/batch/v2alpha1" - kind: "CronJob" -content_type: "api_reference" -description: "CronJob represents the configuration of a single cron job." -title: "CronJob v2alpha1" -weight: 13 ---- - -`apiVersion: batch/v2alpha1` - -`import "k8s.io/api/batch/v2alpha1"` - - -## CronJob {#CronJob} - -CronJob represents the configuration of a single cron job. - -
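-As a minimal sketch (the name, image, and schedule are hypothetical; note that the batch/v2alpha1 API version typically has to be explicitly enabled on the API server before it is served):
-
-```yaml
-apiVersion: batch/v2alpha1
-kind: CronJob
-metadata:
-  name: hello                  # hypothetical name
-spec:
-  schedule: "*/5 * * * *"      # every five minutes
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-          - name: hello
-            image: busybox     # hypothetical image
-            command: ["date"]
-          restartPolicy: OnFailure
-```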
- -- **apiVersion**: batch/v2alpha1 - - -- **kind**: CronJob - - -- **metadata** (ObjectMeta) - - Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - -- **spec** (CronJobSpec) - - Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - -- **status** (CronJobStatus) - - Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - - - - - -## CronJobSpec {#CronJobSpec} - -CronJobSpec describes what the job execution will look like and when it will actually run. - -
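-A hedged sketch of the spec fields documented below, with hypothetical values chosen only to illustrate the knobs:
-
-```yaml
-spec:
-  schedule: "0 * * * *"            # hourly
-  concurrencyPolicy: Forbid        # skip a run if the previous one is still going
-  startingDeadlineSeconds: 120     # count a run as missed 2 minutes past schedule
-  successfulJobsHistoryLimit: 3
-  failedJobsHistoryLimit: 1
-  suspend: false
-  jobTemplate:
-    spec: {}                       # JobSpec for the Jobs this CronJob creates (elided)
-```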
- -- **jobTemplate** (JobTemplateSpec), required - - Specifies the job that will be created when executing a CronJob. - - - *JobTemplateSpec describes the data a Job should have when created from a template* - - - **jobTemplate.metadata** (ObjectMeta) - - Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - - - **jobTemplate.spec** (JobSpec) - - Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - -- **schedule** (string), required - - The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - -- **concurrencyPolicy** (string) - - Specifies how to treat concurrent executions of a Job. Valid values are: - "Allow" (default): allows CronJobs to run concurrently; - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - "Replace": cancels currently running job and replaces it with a new one - -- **failedJobsHistoryLimit** (int32) - - The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. - -- **startingDeadlineSeconds** (int64) - - Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed job executions will be counted as failed ones. - -- **successfulJobsHistoryLimit** (int32) - - The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. - -- **suspend** (boolean) - - This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false. - - - - - -## CronJobStatus {#CronJobStatus} - -CronJobStatus represents the current state of a cron job. - -
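-Observed status might look like the following sketch (the Job name and timestamp are hypothetical):
-
-```yaml
-status:
-  active:
-  - apiVersion: batch/v1
-    kind: Job
-    name: hello-1619000000       # hypothetical running Job
-    namespace: default
-  lastScheduleTime: "2021-04-21T10:00:00Z"
-```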
- -- **active** ([]ObjectReference) - - A list of pointers to currently running jobs. - -- **lastScheduleTime** (Time) - - Information about the last time the job was successfully scheduled. - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - - - - - -## CronJobList {#CronJobList} - -CronJobList is a collection of cron jobs. - -
- -- **apiVersion**: batch/v2alpha1 - - -- **kind**: CronJobList - - -- **metadata** (ListMeta) - - Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - -- **items** ([]CronJob), required - - items is the list of CronJobs. - - - - - -## Operations {#Operations} - - - -
- - - - - - -### `get` read the specified CronJob - -#### HTTP Request - -GET /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -401: Unauthorized - - -### `get` read status of the specified CronJob - -#### HTTP Request - -GET /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -401: Unauthorized - - -### `list` list or watch objects of kind CronJob - -#### HTTP Request - -GET /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **allowWatchBookmarks** (*in query*): boolean - - }}">allowWatchBookmarks - - -- **continue** (*in query*): string - - }}">continue - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): string - - }}">pretty - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - -- **watch** (*in query*): boolean - - }}">watch - - - -#### Response - - -200 (}}">CronJobList): OK - -401: Unauthorized - - -### `list` list or watch objects of kind CronJob - -#### HTTP Request - -GET /apis/batch/v2alpha1/cronjobs - -#### Parameters - - -- **allowWatchBookmarks** (*in query*): boolean - - }}">allowWatchBookmarks - - -- **continue** (*in query*): string - - }}">continue - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): string - - }}">pretty - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - -- **watch** (*in query*): boolean - - }}">watch - - - -#### Response - - -200 (}}">CronJobList): OK - -401: Unauthorized - - -### `create` create a CronJob - -#### HTTP Request - -POST /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">CronJob, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -201 (}}">CronJob): Created - -202 (}}">CronJob): Accepted - -401: Unauthorized - - -### `update` replace the specified CronJob - -#### HTTP Request - -PUT /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">CronJob, required - 
- - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -201 (}}">CronJob): Created - -401: Unauthorized - - -### `update` replace status of the specified CronJob - -#### HTTP Request - -PUT /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">CronJob, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -201 (}}">CronJob): Created - -401: Unauthorized - - -### `patch` partially update the specified CronJob - -#### HTTP Request - -PATCH /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">Patch, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **force** (*in query*): boolean - - }}">force - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -401: Unauthorized - - -### `patch` partially update status of the specified CronJob - -#### HTTP Request - -PATCH /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">Patch, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **force** (*in query*): boolean - - }}">force - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -401: Unauthorized - - -### `delete` delete a CronJob - -#### HTTP Request - -DELETE /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">DeleteOptions - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **gracePeriodSeconds** (*in query*): integer - - }}">gracePeriodSeconds - - -- **pretty** (*in query*): string - - }}">pretty - - -- **propagationPolicy** (*in query*): string - - }}">propagationPolicy - - - -#### Response - - -200 (}}">Status): OK - -202 (}}">Status): Accepted - -401: Unauthorized - - -### `deletecollection` delete collection of CronJob - -#### HTTP Request - -DELETE /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">DeleteOptions - - - - -- **continue** (*in query*): string - - }}">continue - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **gracePeriodSeconds** (*in query*): integer - - }}">gracePeriodSeconds - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** 
(*in query*): string - - }}">pretty - - -- **propagationPolicy** (*in query*): string - - }}">propagationPolicy - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - - -#### Response - - -200 (}}">Status): OK - -401: Unauthorized - diff --git a/content/en/docs/reference/kubernetes-api/workloads-resources/pod-v1.md b/content/en/docs/reference/kubernetes-api/workloads-resources/pod-v1.md deleted file mode 100644 index 584c3381725f8..0000000000000 --- a/content/en/docs/reference/kubernetes-api/workloads-resources/pod-v1.md +++ /dev/null @@ -1,1270 +0,0 @@ ---- -api_metadata: - apiVersion: "v1" - import: "k8s.io/api/core/v1" - kind: "Pod" -content_type: "api_reference" -description: "Pod is a collection of containers that can run on a host." -title: "Pod" -weight: 1 ---- - -`apiVersion: v1` - -`import "k8s.io/api/core/v1"` - - -## Pod {#Pod} - -Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts. - -
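-A minimal Pod manifest, as a sketch (the name and image are hypothetical):
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: example-pod        # hypothetical name
-spec:
-  containers:
-  - name: app
-    image: nginx:1.19      # hypothetical image
-    ports:
-    - containerPort: 80
-```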
- -- **apiVersion**: v1 - - -- **kind**: Pod - - -- **metadata** (ObjectMeta) - - Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - -- **spec** (PodSpec) - - Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - -- **status** (PodStatus) - - Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - - - - - -## PodSpec {#PodSpec} - -PodSpec is a description of a pod. - -
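-To tie several of the field groups below together, a hedged sketch of a spec; every name, label, taint key, and value is hypothetical:
-
-```yaml
-spec:
-  containers:
-  - name: app
-    image: nginx:1.19                       # hypothetical image
-    livenessProbe:
-      httpGet:
-        path: /healthz                      # hypothetical endpoint
-        port: 80
-      periodSeconds: 10
-    lifecycle:
-      preStop:
-        exec:
-          command: ["sh", "-c", "sleep 5"]  # give connections time to drain
-    volumeMounts:
-    - name: data
-      mountPath: /data
-  volumes:
-  - name: data
-    emptyDir: {}
-  nodeSelector:
-    disktype: ssd                           # hypothetical node label
-  tolerations:
-  - key: example.com/maintenance            # hypothetical taint key
-    operator: Exists
-    effect: NoSchedule
-  securityContext:
-    runAsNonRoot: true
-    fsGroup: 2000
-  terminationGracePeriodSeconds: 60
-```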
- - - ### Containers - - -- **containers** ([]Container), required - - *Patch strategy: merge on key `name`* - - List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. - -- **initContainers** ([]Container) - - *Patch strategy: merge on key `name`* - - List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - -- **imagePullSecrets** ([]LocalObjectReference) - - *Patch strategy: merge on key `name`* - - ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod - -- **enableServiceLinks** (boolean) - - EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true. - -### Volumes - - -- **volumes** ([]Volume) - - *Patch strategies: retainKeys, merge on key `name`* - - List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes - -### Scheduling - - -- **nodeSelector** (map[string]string) - - NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - -- **nodeName** (string) - - NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. - -- **affinity** (Affinity) - - If specified, the pod's scheduling constraints - - - *Affinity is a group of affinity scheduling rules.* - - - **affinity.nodeAffinity** (NodeAffinity) - - Describes node affinity scheduling rules for the pod. - - - **affinity.podAffinity** (PodAffinity) - - Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - - - **affinity.podAntiAffinity** (PodAntiAffinity) - - Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - -- **tolerations** ([]Toleration) - - If specified, the pod's tolerations.
- - - *The pod this Toleration is attached to tolerates any taint that matches the triple \<key,value,effect\> using the matching operator \<operator\>.* - - - **tolerations.key** (string) - - Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - - - **tolerations.operator** (string) - - Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - - - **tolerations.value** (string) - - Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - - - **tolerations.effect** (string) - - Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - - - **tolerations.tolerationSeconds** (int64) - - TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - -- **schedulerName** (string) - - If specified, the pod will be dispatched by the specified scheduler. If not specified, the pod will be dispatched by the default scheduler. - -- **runtimeClassName** (string) - - RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14. - -- **priorityClassName** (string) - - If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. - -- **priority** (int32) - - The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. - -### Lifecycle - - -- **restartPolicy** (string) - - Restart policy for all containers within the pod. One of Always, OnFailure, Never. Defaults to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy - -- **terminationGracePeriodSeconds** (int64) - - Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal.
Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. - -- **activeDeadlineSeconds** (int64) - - Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. - -- **readinessGates** ([]PodReadinessGate) - - If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md - - - *PodReadinessGate contains the reference to a pod condition* - - - **readinessGates.conditionType** (string), required - - ConditionType refers to a condition in the pod's condition list with matching type. - -### Hostname and Name resolution - - -- **hostname** (string) - - Specifies the hostname of the Pod. If not specified, the pod's hostname will be set to a system-defined value. - -- **setHostnameAsFQDN** (boolean) - - If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Defaults to false. - -- **subdomain** (string) - - If specified, the fully qualified Pod hostname will be "\<hostname\>.\<subdomain\>.\<pod namespace\>.svc.\<cluster domain\>". If not specified, the pod will not have a domainname at all. - -- **hostAliases** ([]HostAlias) - - *Patch strategy: merge on key `ip`* - - HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods. - - - *HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.* - - - **hostAliases.hostnames** ([]string) - - Hostnames for the above IP address. - - - **hostAliases.ip** (string) - - IP address of the host file entry. - -- **dnsConfig** (PodDNSConfig) - - Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. - - - *PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.* - - - **dnsConfig.nameservers** ([]string) - - A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed. - - - **dnsConfig.options** ([]PodDNSConfigOption) - - A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy. - - - *PodDNSConfigOption defines DNS resolver options of a pod.* - - - **dnsConfig.options.name** (string) - - Required. - - - **dnsConfig.options.value** (string) - - - - **dnsConfig.searches** ([]string) - - A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed. - -- **dnsPolicy** (string) - - Set DNS policy for the pod. Defaults to "ClusterFirst".
Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. - -### Hosts namespaces - - -- **hostNetwork** (boolean) - - Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Defaults to false. - -- **hostPID** (boolean) - - Use the host's pid namespace. Optional: Defaults to false. - -- **hostIPC** (boolean) - - Use the host's ipc namespace. Optional: Defaults to false. - -- **shareProcessNamespace** (boolean) - - Share a single process namespace between all of the containers in a pod. When this is set, containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Defaults to false. - -### Service account - - -- **serviceAccountName** (string) - - ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - -- **automountServiceAccountToken** (boolean) - - AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. - -### Security context - - -- **securityContext** (PodSecurityContext) - - SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field. - - - *PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.* - - - **securityContext.runAsUser** (int64) - - The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. - - - **securityContext.runAsNonRoot** (boolean) - - Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - **securityContext.runAsGroup** (int64) - - The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. - - - **securityContext.supplementalGroups** ([]int64) - - A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. - - - **securityContext.fsGroup** (int64) - - A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: - - 1. The owning GID will be the FSGroup 2.
The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- - - If unset, the Kubelet will not modify the ownership and permissions of any volume. - - - **securityContext.fsGroupChangePolicy** (string) - - fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership (and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. - - - **securityContext.seccompProfile** (SeccompProfile) - - The seccomp options to use by the containers in this pod. - - - *SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.* - - - **securityContext.seccompProfile.type** (string), required - - type indicates which kind of seccomp profile will be applied. Valid options are: - - Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. - - - **securityContext.seccompProfile.localhostProfile** (string) - - localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". - - - **securityContext.seLinuxOptions** (SELinuxOptions) - - The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. - - - *SELinuxOptions are the labels to be applied to the container* - - - **securityContext.seLinuxOptions.level** (string) - - Level is SELinux level label that applies to the container. - - - **securityContext.seLinuxOptions.role** (string) - - Role is a SELinux role label that applies to the container. - - - **securityContext.seLinuxOptions.type** (string) - - Type is a SELinux type label that applies to the container. - - - **securityContext.seLinuxOptions.user** (string) - - User is a SELinux user label that applies to the container. - - - **securityContext.sysctls** ([]Sysctl) - - Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. - - - *Sysctl defines a kernel parameter to be set* - - - **securityContext.sysctls.name** (string), required - - Name of a property to set - - - **securityContext.sysctls.value** (string), required - - Value of a property to set - - - **securityContext.windowsOptions** (WindowsSecurityContextOptions) - - The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
- - - *WindowsSecurityContextOptions contain Windows-specific options and credentials.* - - - **securityContext.windowsOptions.gmsaCredentialSpec** (string) - - GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. - - - **securityContext.windowsOptions.gmsaCredentialSpecName** (string) - - GMSACredentialSpecName is the name of the GMSA credential spec to use. - - - **securityContext.windowsOptions.runAsUserName** (string) - - The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - -### Beta level - - -- **overhead** (map[string]Quantity) - - Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature. - -- **topologySpreadConstraints** ([]TopologySpreadConstraint) - - *Patch strategy: merge on key `topologyKey`* - - *Map: unique values on keys `topologyKey, whenUnsatisfiable` will be kept during a merge* - - TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. - - - *TopologySpreadConstraint specifies how to spread matching pods among the given topology.* - - - **topologySpreadConstraints.maxSkew** (int32), required - - MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed. - - - **topologySpreadConstraints.topologyKey** (string), required - - TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each \<key, value\> as a "bucket", and try to put balanced number of pods into each bucket. It's a required field.
- - - **topologySpreadConstraints.whenUnsatisfiable** (string), required - - WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, - but giving higher precedence to topologies that would help reduce the - skew. - A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field. - - - **topologySpreadConstraints.labelSelector** (LabelSelector) - - LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - -### Alpha level - - -- **ephemeralContainers** ([]EphemeralContainer) - - *Patch strategy: merge on key `name`* - - List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. - -- **preemptionPolicy** (string) - - PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate. - -### Deprecated - - -- **serviceAccount** (string) - - DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead. - - - -## PodStatus {#PodStatus} - -PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane. - -
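-As a sketch of what observed status can look like (the addresses and timestamps are hypothetical):
-
-```yaml
-status:
-  phase: Running
-  hostIP: 192.168.1.10        # hypothetical addresses
-  podIP: 10.1.2.3
-  podIPs:
-  - ip: 10.1.2.3
-  qosClass: Burstable
-  startTime: "2021-04-21T10:00:00Z"
-  conditions:
-  - type: Ready
-    status: "True"
-    lastTransitionTime: "2021-04-21T10:00:05Z"
-```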
- -- **nominatedNodeName** (string) - - nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled. - -- **hostIP** (string) - - IP address of the host to which the pod is assigned. Empty if not yet scheduled. - -- **startTime** (Time) - - RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod. - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - -- **phase** (string) - - The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values: - - Pending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod. - - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase - -- **message** (string) - - A human readable message indicating details about why the pod is in this condition. - -- **reason** (string) - - A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted' - -- **podIP** (string) - - IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated. - -- **podIPs** ([]PodIP) - - *Patch strategy: merge on key `ip`* - - podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet. - - - *IP address information for entries in the (plural) PodIPs field. Each entry includes: - IP: An IP address allocated to the pod. Routable at least within the cluster.* - - - **podIPs.ip** (string) - - ip is an IP address (IPv4 or IPv6) assigned to the pod - -- **conditions** ([]PodCondition) - - *Patch strategy: merge on key `type`* - - Current service state of pod. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions - - - *PodCondition contains details for the current condition of this pod.* - - - **conditions.status** (string), required - - Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions - - - **conditions.type** (string), required - - Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions - - - **conditions.lastProbeTime** (Time) - - Last time we probed the condition. - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - - - **conditions.lastTransitionTime** (Time) - - Last time the condition transitioned from one status to another. - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - - - **conditions.message** (string) - - Human-readable message indicating details about last transition. - - - **conditions.reason** (string) - - Unique, one-word, CamelCase reason for the condition's last transition. - -- **qosClass** (string) - - The Quality of Service (QOS) classification assigned to the pod based on resource requirements. See PodQOSClass type for available QOS classes. More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md - -- **initContainerStatuses** ([]ContainerStatus) - - The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status - -- **containerStatuses** ([]ContainerStatus) - - The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status - -- **ephemeralContainerStatuses** ([]ContainerStatus) - - Status for any ephemeral containers that have run in this pod. This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. - - - - - -## PodList {#PodList} - -PodList is a list of Pods. - -
- -- **apiVersion**: v1 - - -- **kind**: PodList - - -- **metadata** (ListMeta) - - Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - -- **items** ([]Pod), required - - List of pods. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md - - - - - -## Operations {#Operations} - - - -
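For orientation before the operation listings below, here is a minimal Pod manifest of the kind these endpoints act on. This is a sketch: the name, namespace, and image are illustrative placeholders, not values taken from this reference.

```yaml
# A minimal Pod (sketch; all names and the image are hypothetical).
apiVersion: v1
kind: Pod
metadata:
  name: example-pod        # hypothetical name
  namespace: default
spec:
  containers:
    - name: app            # hypothetical container name
      image: nginx:1.19    # placeholder image
```

Such a manifest is what you would send as the `body` of the `create` operation below; `get` and `list` return objects of the same shape with `status` populated.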
- - - - - - -### `get` read the specified Pod - -#### HTTP Request - -GET /api/v1/namespaces/{namespace}/pods/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the Pod - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">Pod): OK - -401: Unauthorized - - -### `get` read log of the specified Pod - -#### HTTP Request - -GET /api/v1/namespaces/{namespace}/pods/{name}/log - -#### Parameters - - -- **name** (*in path*): string, required - - name of the Pod - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **container** (*in query*): string - - The container for which to stream logs. Defaults to only container if there is one container in the pod. - - -- **follow** (*in query*): boolean - - Follow the log stream of the pod. Defaults to false. - - -- **insecureSkipTLSVerifyBackend** (*in query*): boolean - - insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet). - - -- **limitBytes** (*in query*): integer - - If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit. - - -- **pretty** (*in query*): string - - }}">pretty - - -- **previous** (*in query*): boolean - - Return previous terminated container logs. Defaults to false. - - -- **sinceSeconds** (*in query*): integer - - A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified. - - -- **tailLines** (*in query*): integer - - If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime - - -- **timestamps** (*in query*): boolean - - If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false. 
- - - -#### Response - - -200 (string): OK - -401: Unauthorized - - -### `get` read status of the specified Pod - -#### HTTP Request - -GET /api/v1/namespaces/{namespace}/pods/{name}/status - -#### Parameters - - -- **name** (*in path*): string, required - - name of the Pod - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">Pod): OK - -401: Unauthorized - - -### `list` list or watch objects of kind Pod - -#### HTTP Request - -GET /api/v1/namespaces/{namespace}/pods - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **allowWatchBookmarks** (*in query*): boolean - - }}">allowWatchBookmarks - - -- **continue** (*in query*): string - - }}">continue - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): string - - }}">pretty - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - -- **watch** (*in query*): boolean - - }}">watch - - - -#### Response - - -200 (}}">PodList): OK - -401: Unauthorized - - -### `list` list or watch objects of kind Pod - -#### HTTP Request - -GET /api/v1/pods - -#### Parameters - - -- **allowWatchBookmarks** (*in query*): boolean - - }}">allowWatchBookmarks - - -- **continue** (*in query*): string - - }}">continue - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): string - - }}">pretty - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - -- **watch** (*in query*): boolean - - }}">watch - - - -#### Response - - -200 (}}">PodList): OK - -401: Unauthorized - - -### `create` create a Pod - -#### HTTP Request - -POST /api/v1/namespaces/{namespace}/pods - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">Pod, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">Pod): OK - -201 (}}">Pod): Created - -202 (}}">Pod): Accepted - -401: Unauthorized - - -### `update` replace the specified Pod - -#### HTTP Request - -PUT /api/v1/namespaces/{namespace}/pods/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the Pod - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">Pod, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">Pod): OK - -201 (}}">Pod): Created - -401: Unauthorized - - -### `update` replace status of the specified Pod - -#### HTTP Request - -PUT /api/v1/namespaces/{namespace}/pods/{name}/status - -#### Parameters - - -- **name** (*in path*): string, required - - name of the Pod - - -- 
**namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">Pod, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">Pod): OK - -201 (}}">Pod): Created - -401: Unauthorized - - -### `patch` partially update the specified Pod - -#### HTTP Request - -PATCH /api/v1/namespaces/{namespace}/pods/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the Pod - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">Patch, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **force** (*in query*): boolean - - }}">force - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">Pod): OK - -401: Unauthorized - - -### `patch` partially update status of the specified Pod - -#### HTTP Request - -PATCH /api/v1/namespaces/{namespace}/pods/{name}/status - -#### Parameters - - -- **name** (*in path*): string, required - - name of the Pod - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">Patch, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **force** (*in query*): boolean - - }}">force - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">Pod): OK - -401: Unauthorized - - -### `delete` delete a Pod - -#### HTTP Request - -DELETE /api/v1/namespaces/{namespace}/pods/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the Pod - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">DeleteOptions - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **gracePeriodSeconds** (*in query*): integer - - }}">gracePeriodSeconds - - -- **pretty** (*in query*): string - - }}">pretty - - -- **propagationPolicy** (*in query*): string - - }}">propagationPolicy - - - -#### Response - - -200 (}}">Pod): OK - -202 (}}">Pod): Accepted - -401: Unauthorized - - -### `deletecollection` delete collection of Pod - -#### HTTP Request - -DELETE /api/v1/namespaces/{namespace}/pods - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">DeleteOptions - - - - -- **continue** (*in query*): string - - }}">continue - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **gracePeriodSeconds** (*in query*): integer - - }}">gracePeriodSeconds - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): string - - }}">pretty - - -- **propagationPolicy** (*in query*): string - - }}">propagationPolicy - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - - -#### Response - - -200 (}}">Status): OK - -401: Unauthorized - diff --git a/content/en/docs/reference/labels-annotations-taints.md b/content/en/docs/reference/labels-annotations-taints.md index 78be058013e4f..2d74362913310 100644 --- a/content/en/docs/reference/labels-annotations-taints.md +++ 
b/content/en/docs/reference/labels-annotations-taints.md @@ -30,6 +30,20 @@ Used on: Node The Kubelet populates this with `runtime.GOOS` as defined by Go. This can be handy if you are mixing operating systems in your cluster (for example: mixing Linux and Windows nodes). +## kubernetes.io/metadata.name + +Example: `kubernetes.io/metadata.name=mynamespace` + +Used on: Namespaces + +When the `NamespaceDefaultLabelName` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled, +the Kubernetes API server sets this label on all namespaces. The label value is set to +the name of the namespace. + +This is useful if you want to target a specific namespace with a label +{{< glossary_tooltip text="selector" term_id="selector" >}}. + ## beta.kubernetes.io/arch (deprecated) This label has been deprecated. Please use `kubernetes.io/arch` instead. @@ -48,6 +62,16 @@ The Kubelet populates this label with the hostname. Note that the hostname can b This label is also used as part of the topology hierarchy. See [topology.kubernetes.io/zone](#topologykubernetesiozone) for more information. + +## controller.kubernetes.io/pod-deletion-cost {#pod-deletion-cost} + +Example: `controller.kubernetes.io/pod-deletion-cost=10` + +Used on: Pod + +This annotation is used to set [Pod Deletion Cost](/docs/concepts/workloads/controllers/replicaset/#pod-deletion-cost) +which allows users to influence ReplicaSet downscaling order. The annotation parses into an `int32` type. + ## beta.kubernetes.io/instance-type (deprecated) {{< note >}} Starting in v1.17, this label is deprecated in favor of [node.kubernetes.io/instance-type](#nodekubernetesioinstance-type). {{< /note >}} @@ -75,6 +99,18 @@ See [topology.kubernetes.io/zone](#topologykubernetesiozone). {{< note >}} Starting in v1.17, this label is deprecated in favor of [topology.kubernetes.io/zone](#topologykubernetesiozone). {{< /note >}} +## statefulset.kubernetes.io/pod-name {#statefulsetkubernetesiopod-name} + +Example: + +`statefulset.kubernetes.io/pod-name=mystatefulset-7` + +When a StatefulSet controller creates a Pod for the StatefulSet, the control plane +sets this label on that Pod. The value of the label is the name of the Pod being created. + +See [Pod Name Label](/docs/concepts/workloads/controllers/statefulset/#pod-name-label) in the +StatefulSet topic for more details. + ## topology.kubernetes.io/region {#topologykubernetesioregion} Example: @@ -114,3 +150,173 @@ The scheduler (through the _VolumeZonePredicate_ predicate) also will ensure tha If `PersistentVolumeLabel` does not support automatic labeling of your PersistentVolumes, you should consider adding the labels manually (or adding support for `PersistentVolumeLabel`). With `PersistentVolumeLabel`, the scheduler prevents Pods from mounting volumes in a different zone. If your infrastructure doesn't have this constraint, you don't need to add the zone labels to the volumes at all. +## node.kubernetes.io/windows-build {#nodekubernetesiowindows-build} + +Example: `node.kubernetes.io/windows-build=10.0.17763` + +Used on: Node + +When the kubelet is running on Microsoft Windows, it automatically labels its node to record the version of Windows Server in use. + +The label's value is in the format "MajorVersion.MinorVersion.BuildNumber". + +## service.kubernetes.io/headless {#servicekubernetesioheadless} + +Example: `service.kubernetes.io/headless=""` + +Used on: Service + +The control plane adds this label to an Endpoints object when the owning Service is headless. 
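One common, concrete use of the `kubernetes.io/metadata.name` label described above is selecting a namespace by name in a NetworkPolicy. The following is a sketch that assumes the `NamespaceDefaultLabelName` feature gate is enabled; the policy and namespace names are illustrative.

```yaml
# Sketch: allow ingress to all Pods in "default" only from Pods running
# in the namespace named "mynamespace", matched via the
# kubernetes.io/metadata.name label that the API server sets.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-from-mynamespace   # hypothetical name
  namespace: default
spec:
  podSelector: {}                # selects every Pod in this namespace
  policyTypes:
    - Ingress
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: mynamespace
```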
+ +## kubernetes.io/service-name {#kubernetesioservice-name} + +Example: `kubernetes.io/service-name="nginx"` + +Used on: Service + +Kubernetes uses this label to differentiate multiple Services. Used currently for `ELB` (Elastic Load Balancer) only. + +## endpointslice.kubernetes.io/managed-by {#endpointslicekubernetesiomanaged-by} + +Example: `endpointslice.kubernetes.io/managed-by="controller"` + +Used on: EndpointSlices + +The label is used to indicate the controller or entity that manages an EndpointSlice. This label aims to enable different EndpointSlice objects to be managed by different controllers or entities within the same cluster. + +## endpointslice.kubernetes.io/skip-mirror {#endpointslicekubernetesioskip-mirror} + +Example: `endpointslice.kubernetes.io/skip-mirror="true"` + +Used on: Endpoints + +The label can be set to `"true"` on an Endpoints resource to indicate that the EndpointSliceMirroring controller should not mirror this resource with EndpointSlices. + +## service.kubernetes.io/service-proxy-name {#servicekubernetesioservice-proxy-name} + +Example: `service.kubernetes.io/service-proxy-name="foo-bar"` + +Used on: Service + +When this label is set on a Service, kube-proxy ignores that Service and leaves it to be handled by a custom proxy. + +## experimental.windows.kubernetes.io/isolation-type + +Example: `experimental.windows.kubernetes.io/isolation-type: "hyperv"` + +Used on: Pod + +The annotation is used to run Windows containers with Hyper-V isolation. To use the Hyper-V isolation feature and create a Hyper-V isolated container, the kubelet should be started with the feature gate `HyperVContainer=true`, and the Pod should include the annotation `experimental.windows.kubernetes.io/isolation-type=hyperv`. + +{{< note >}} +You can only set this annotation on Pods that have a single container. +{{< /note >}} + +## ingressclass.kubernetes.io/is-default-class + +Example: `ingressclass.kubernetes.io/is-default-class: "true"` + +Used on: IngressClass + +When a single IngressClass resource has this annotation set to `"true"`, new Ingress resources without a class specified will be assigned this default class. + +## kubernetes.io/ingress.class (deprecated) + +{{< note >}} Starting in v1.18, this annotation is deprecated in favor of `spec.ingressClassName`. {{< /note >}} + +## storageclass.kubernetes.io/is-default-class + +Example: `storageclass.kubernetes.io/is-default-class=true` + +Used on: StorageClass + +When a single StorageClass resource has this annotation set to `"true"`, new PersistentVolumeClaim resources without a class specified will be assigned this default class. + +## alpha.kubernetes.io/provided-node-ip + +Example: `alpha.kubernetes.io/provided-node-ip: "10.0.0.1"` + +Used on: Node + +The kubelet can set this annotation on a Node to denote its configured IPv4 address. + +When the kubelet is started with the "external" cloud provider, it sets this annotation on the Node to denote an IP address set from the command line flag (`--node-ip`). The cloud-controller-manager verifies with the cloud provider that this IP is valid. + +## batch.kubernetes.io/job-completion-index + +Example: `batch.kubernetes.io/job-completion-index: "3"` + +Used on: Pod + +The Job controller in the kube-controller-manager sets this annotation for Pods +created with Indexed [completion mode](/docs/concepts/workloads/controllers/job/#completion-mode). A sketch of such a Job follows below.
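To make the `batch.kubernetes.io/job-completion-index` annotation above concrete, here is a sketch of an Indexed Job. It assumes the `IndexedJob` feature gate is enabled (this completion mode is alpha in v1.21); the Job name, image, and command are placeholders, and the `JOB_COMPLETION_INDEX` environment variable is how, per the Job documentation, Indexed mode exposes the index to containers.

```yaml
# Sketch: an Indexed Job. The Job controller annotates each of its Pods
# with batch.kubernetes.io/job-completion-index ("0" through "4" here).
apiVersion: batch/v1
kind: Job
metadata:
  name: indexed-demo             # hypothetical name
spec:
  completions: 5
  parallelism: 2
  completionMode: Indexed
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: worker
          image: busybox         # placeholder image
          command: ["sh", "-c", "echo processing index $JOB_COMPLETION_INDEX"]
```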
+ +## kubectl.kubernetes.io/default-container + +Example: `kubectl.kubernetes.io/default-container: "front-end-app"` + +Used on: Pod + +The value of the annotation is the container name that is default for this Pod. For example, `kubectl logs` or `kubectl exec` without the `-c` or `--container` flag will use this default container. + +## endpoints.kubernetes.io/over-capacity + +Example: `endpoints.kubernetes.io/over-capacity:warning` + +Used on: Endpoints + +In Kubernetes clusters v1.21 (or later), the Endpoints controller adds this annotation to an Endpoints resource if it has more than 1000 endpoints. The annotation indicates that the Endpoints resource is over capacity. + +**The taints listed below are always used on Nodes** + +## node.kubernetes.io/not-ready + +Example: `node.kubernetes.io/not-ready:NoExecute` + +The node controller detects whether a node is ready by monitoring its health and adds or removes this taint accordingly. + +## node.kubernetes.io/unreachable + +Example: `node.kubernetes.io/unreachable:NoExecute` + +The node controller adds the taint to a node corresponding to the [NodeCondition](/docs/concepts/architecture/nodes/#condition) `Ready` being `Unknown`. + +## node.kubernetes.io/unschedulable + +Example: `node.kubernetes.io/unschedulable:NoSchedule` + +This taint is added to a node while it is being initialized, to avoid a race condition. + +## node.kubernetes.io/memory-pressure + +Example: `node.kubernetes.io/memory-pressure:NoSchedule` + +The kubelet detects memory pressure based on `memory.available` and `allocatableMemory.available` observed on a Node. The observed values are then compared to the corresponding thresholds that can be set on the kubelet to determine if the Node condition and taint should be added or removed. + +## node.kubernetes.io/disk-pressure + +Example: `node.kubernetes.io/disk-pressure:NoSchedule` + +The kubelet detects disk pressure based on `imagefs.available`, `imagefs.inodesFree`, `nodefs.available` and `nodefs.inodesFree` (Linux only) observed on a Node. The observed values are then compared to the corresponding thresholds that can be set on the kubelet to determine if the Node condition and taint should be added or removed. + +## node.kubernetes.io/network-unavailable + +Example: `node.kubernetes.io/network-unavailable:NoSchedule` + +This is initially set by the kubelet when the cloud provider used indicates a requirement for additional network configuration. Only when the route on the cloud is configured properly will the taint be removed by the cloud provider. + +## node.kubernetes.io/pid-pressure + +Example: `node.kubernetes.io/pid-pressure:NoSchedule` + +The kubelet compares the number of PIDs consumed by Kubernetes on a node against the size of `/proc/sys/kernel/pid_max` to derive the number of available PIDs, exposed as the `pid.available` metric. The metric is then compared to the corresponding threshold that can be set on the kubelet to determine if the node condition and taint should be added or removed. + +## node.cloudprovider.kubernetes.io/uninitialized + +Example: `node.cloudprovider.kubernetes.io/uninitialized:NoSchedule` + +When the kubelet is started with the "external" cloud provider, this taint is set on a node to mark it as unusable, until a controller from the cloud-controller-manager initializes the node and removes the taint. Pods that must run before initialization completes can tolerate these taints, as shown in the sketch below.
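Workloads sometimes need to tolerate the taints described above; for example, networking DaemonSets must start before cloud-provider initialization completes. The following Pod spec fragment is a sketch of such tolerations; the five-minute grace period is an illustrative choice, similar to what the DefaultTolerationSeconds admission controller adds.

```yaml
# Sketch: Pod spec fragment tolerating two of the taints described above.
spec:
  tolerations:
    - key: node.cloudprovider.kubernetes.io/uninitialized
      operator: Exists
      effect: NoSchedule
    - key: node.kubernetes.io/not-ready
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 300     # evicted after 5 minutes if the node stays not-ready
```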
+ +## node.cloudprovider.kubernetes.io/shutdown + +Example: `node.cloudprovider.kubernetes.io/shutdown:NoSchedule` + +If a Node is in a cloud provider specified shutdown state, the Node gets tainted accordingly with `node.cloudprovider.kubernetes.io/shutdown` and the taint effect of `NoSchedule`. diff --git a/content/en/docs/reference/scheduling/config.md b/content/en/docs/reference/scheduling/config.md index 7754d7cb7d36e..02a6e8e505078 100644 --- a/content/en/docs/reference/scheduling/config.md +++ b/content/en/docs/reference/scheduling/config.md @@ -19,8 +19,9 @@ Each stage is exposed in a extension point. Plugins provide scheduling behaviors by implementing one or more of these extension points. You can specify scheduling profiles by running `kube-scheduler --config `, -using the component config APIs -([`v1beta1`](https://pkg.go.dev/k8s.io/kube-scheduler@v0.19.0/config/v1beta1?tab=doc#KubeSchedulerConfiguration)). +using the +[KubeSchedulerConfiguration (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) +struct. A minimal configuration looks as follows: @@ -97,6 +98,7 @@ for that extension point. This can also be used to rearrange plugins order, if desired. ### Scheduling plugins + 1. `UnReserve`: This is an informational extension point that is called if a Pod is rejected after being reserved and put on hold by a `Permit` plugin. @@ -145,7 +147,12 @@ extension points: Extension points: `Score`. - `VolumeBinding`: Checks if the node has or if it can bind the requested {{< glossary_tooltip text="volumes" term_id="volume" >}}. - Extension points: `PreFilter`, `Filter`, `Reserve`, `PreBind`. + Extension points: `PreFilter`, `Filter`, `Reserve`, `PreBind`, `Score`. + {{< note >}} + `Score` extension point is enabled when `VolumeCapacityPriority` feature is + enabled. It prioritizes the smallest PVs that can fit the requested volume + size. + {{< /note >}} - `VolumeRestrictions`: Checks that volumes mounted in the node satisfy restrictions that are specific to the volume provider. Extension points: `Filter`. @@ -181,8 +188,6 @@ that are not enabled by default: - `RequestedToCapacityRatio`: Favor nodes according to a configured function of the allocated resources. Extension points: `Score`. -- `NodeResourceLimits`: Favors nodes that satisfy the Pod resource limits. - Extension points: `PreScore`, `Score`. - `CinderVolume`: Checks that OpenStack Cinder volume limits can be satisfied for the node. Extension points: `Filter`. @@ -247,3 +252,5 @@ only has one pending pods queue. * Read the [kube-scheduler reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) * Learn about [scheduling](/docs/concepts/scheduling-eviction/kube-scheduler/) +* Read the [kube-scheduler configuration (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) reference + diff --git a/content/en/docs/reference/scheduling/policies.md b/content/en/docs/reference/scheduling/policies.md index 946150322da54..fc9a7402661ea 100644 --- a/content/en/docs/reference/scheduling/policies.md +++ b/content/en/docs/reference/scheduling/policies.md @@ -14,9 +14,7 @@ respectively. You can set a scheduling policy by running `kube-scheduler --policy-config-file ` or `kube-scheduler --policy-configmap ` -and using the [Policy type](https://pkg.go.dev/k8s.io/kube-scheduler@v0.18.0/config/v1?tab=doc#Policy). - - +and using the [Policy type](/docs/reference/config-api/kube-scheduler-policy-config.v1/). 
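Both scheduling pages above point at kube-scheduler configuration files. As a concrete anchor, a minimal KubeSchedulerConfiguration (the v1beta1 struct linked above) might look like the following sketch; the kubeconfig path is an illustrative assumption, not a required value.

```yaml
# Sketch: minimal scheduler configuration, passed via
# kube-scheduler --config <filename>.
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: /etc/kubernetes/scheduler.conf   # illustrative path
profiles:
  - schedulerName: default-scheduler
```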
@@ -117,10 +115,10 @@ The following *priorities* implement scoring: - `EvenPodsSpreadPriority`: Implements preferred [pod topology spread constraints](/docs/concepts/workloads/pods/pod-topology-spread-constraints/). - - ## {{% heading "whatsnext" %}} * Learn about [scheduling](/docs/concepts/scheduling-eviction/kube-scheduler/) * Learn about [kube-scheduler Configuration](/docs/reference/scheduling/config/) +* Read the [kube-scheduler configuration reference (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1) +* Read the [kube-scheduler Policy reference (v1)](/docs/reference/config-api/kube-scheduler-policy-config.v1/) diff --git a/content/en/docs/reference/setup-tools/_index.md b/content/en/docs/reference/setup-tools/_index.md index 3988d6485e151..c97758fe6ea81 100644 --- a/content/en/docs/reference/setup-tools/_index.md +++ b/content/en/docs/reference/setup-tools/_index.md @@ -1,4 +1,4 @@ --- -title: Setup tools reference +title: Setup tools weight: 50 --- diff --git a/content/en/docs/reference/setup-tools/kubeadm/_index.md b/content/en/docs/reference/setup-tools/kubeadm/_index.md index d747c05124d2a..ad84378d1d94c 100755 --- a/content/en/docs/reference/setup-tools/kubeadm/_index.md +++ b/content/en/docs/reference/setup-tools/kubeadm/_index.md @@ -26,5 +26,7 @@ To install kubeadm, see the [installation guide](/docs/setup/production-environm * [kubeadm config](/docs/reference/setup-tools/kubeadm/kubeadm-config) if you initialized your cluster using kubeadm v1.7.x or lower, to configure your cluster for `kubeadm upgrade` * [kubeadm token](/docs/reference/setup-tools/kubeadm/kubeadm-token) to manage tokens for `kubeadm join` * [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset) to revert any changes made to this host by `kubeadm init` or `kubeadm join` +* [kubeadm certs](/docs/reference/setup-tools/kubeadm/kubeadm-certs) to manage Kubernetes certificates +* [kubeadm kubeconfig](/docs/reference/setup-tools/kubeadm/kubeadm-kubeconfig) to manage kubeconfig files * [kubeadm version](/docs/reference/setup-tools/kubeadm/kubeadm-version) to print the kubeadm version * [kubeadm alpha](/docs/reference/setup-tools/kubeadm/kubeadm-alpha) to preview a set of features made available for gathering feedback from the community diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md index ed03bf49c45d2..ff545b4042357 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md @@ -1,3 +1,16 @@ + + + +kubeadm: easily bootstrap a secure Kubernetes cluster ### Synopsis @@ -47,14 +60,14 @@ Example usage: -h, --help -help for kubeadm +

help for kubeadm

--rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha.md index 95b034be1bb44..af458320a5c90 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha.md @@ -1,3 +1,16 @@ + + + +Kubeadm experimental sub-commands ### Synopsis @@ -17,7 +30,7 @@ Kubeadm experimental sub-commands -h, --help -help for alpha +

help for alpha

@@ -38,7 +51,7 @@ Kubeadm experimental sub-commands --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig.md index 67f30bc3f8390..b678061bb0ea6 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig.md @@ -1,3 +1,16 @@ + + + +Kubeconfig file utilities ### Synopsis @@ -19,7 +32,7 @@ Alpha Disclaimer: this command is currently alpha. -h, --help -help for kubeconfig +

help for kubeconfig

@@ -40,7 +53,7 @@ Alpha Disclaimer: this command is currently alpha. --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig_user.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig_user.md index bdb246232f988..de07cd0f7d741 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig_user.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig_user.md @@ -1,3 +1,16 @@ + + + +Output a kubeconfig file for an additional user ### Synopsis @@ -30,35 +43,35 @@ kubeadm alpha kubeconfig user [flags] --client-name string -The name of user. It will be used as the CN if client certificates are created +

The name of the user. It will be used as the CN if client certificates are created

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for user +

help for user

---org stringSlice +--org strings -The orgnizations of the client certificate. It will be used as the O if client certificates are created +

The organizations of the client certificate. It will be used as the O if client certificates are created

--token string -The token that should be used as the authentication mechanism for this kubeconfig, instead of client certificates +

The token that should be used as the authentication mechanism for this kubeconfig, instead of client certificates

@@ -79,7 +92,7 @@ kubeadm alpha kubeconfig user [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

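The command above outputs a kubeconfig file. As a rough sketch of the shape of that file (every value below is an illustrative placeholder, and the base64 blobs are elided):

```yaml
# Sketch of a kubeconfig for an additional user; all values are placeholders.
apiVersion: v1
kind: Config
clusters:
  - name: kubernetes
    cluster:
      certificate-authority-data: CA_DATA_BASE64   # placeholder
      server: https://192.168.0.1:6443             # placeholder endpoint
users:
  - name: johndoe                                  # from --client-name
    user:
      client-certificate-data: CERT_DATA_BASE64    # placeholder
      client-key-data: KEY_DATA_BASE64             # placeholder
contexts:
  - name: johndoe@kubernetes
    context:
      cluster: kubernetes
      user: johndoe
current-context: johndoe@kubernetes
```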
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet.md deleted file mode 100644 index 055c8ecac5ed7..0000000000000 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet.md +++ /dev/null @@ -1,48 +0,0 @@ - -### Synopsis - - -This command is not meant to be run on its own. See list of available subcommands. - -### Options - - ---- - - - - - - - - - - -
-h, --help
help for kubelet
- - - -### Options inherited from parent commands - - ---- - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config.md deleted file mode 100644 index 563d9fe2277da..0000000000000 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config.md +++ /dev/null @@ -1,48 +0,0 @@ - -### Synopsis - - -This command is not meant to be run on its own. See list of available subcommands. - -### Options - - ---- - - - - - - - - - - -
-h, --help
help for config
- - - -### Options inherited from parent commands - - ---- - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_enable-dynamic.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_enable-dynamic.md deleted file mode 100644 index 278def1dd336b..0000000000000 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_enable-dynamic.md +++ /dev/null @@ -1,87 +0,0 @@ - -### Synopsis - - -Enable or update dynamic kubelet configuration for a Node, against the kubelet-config-1.X ConfigMap in the cluster, where X is the minor version of the desired kubelet version. - -WARNING: This feature is still experimental, and disabled by default. Enable only if you know what you are doing, as it may have surprising side-effects at this stage. - -Alpha Disclaimer: this command is currently alpha. - -``` -kubeadm alpha kubelet config enable-dynamic [flags] -``` - -### Examples - -``` - # Enable dynamic kubelet configuration for a Node. - kubeadm alpha phase kubelet enable-dynamic-config --node-name node-1 --kubelet-version 1.18.0 - - WARNING: This feature is still experimental, and disabled by default. Enable only if you know what you are doing, as it - may have surprising side-effects at this stage. -``` - -### Options - - ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-h, --help
help for enable-dynamic
--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--kubelet-version string
The desired version for the kubelet
--node-name string
Name of the node that should enable the dynamic kubelet configuration
- - - -### Options inherited from parent commands - - ---- - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting.md deleted file mode 100644 index 77646b064b1ab..0000000000000 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting.md +++ /dev/null @@ -1,48 +0,0 @@ - -### Synopsis - - -This command is not meant to be run on its own. See list of available subcommands. - -### Options - - ---- - - - - - - - - - - -
-h, --help
help for selfhosting
- - - -### Options inherited from parent commands - - ---- - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting_pivot.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting_pivot.md deleted file mode 100644 index 554b8fe4c65fd..0000000000000 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_selfhosting_pivot.md +++ /dev/null @@ -1,99 +0,0 @@ - -### Synopsis - - -Convert static Pod files for control plane components into self-hosted DaemonSets configured via the Kubernetes API. - -See the documentation for self-hosting limitations. - -Alpha Disclaimer: this command is currently alpha. - -``` -kubeadm alpha selfhosting pivot [flags] -``` - -### Examples - -``` - # Convert a static Pod-hosted control plane into a self-hosted one. - - kubeadm alpha phase self-hosting convert-from-staticpods -``` - -### Options - - ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--cert-dir string     Default: "/etc/kubernetes/pki"
The path where certificates are stored
--config string
Path to a kubeadm configuration file.
-f, --force
Pivot the cluster without prompting for confirmation
-h, --help
help for pivot
--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
-s, --store-certs-in-secrets
Enable storing certs in secrets
- - - -### Options inherited from parent commands - - ---- - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs.md index fef772e702650..9458702330d38 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs.md @@ -1,3 +1,16 @@ + + + +Commands related to handling kubernetes certificates ### Synopsis @@ -17,7 +30,7 @@ Commands related to handling kubernetes certificates -h, --help -help for certs +

help for certs

@@ -38,7 +51,7 @@ Commands related to handling kubernetes certificates --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_certificate-key.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_certificate-key.md index 2de0366641d70..3f978e50fd383 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_certificate-key.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_certificate-key.md @@ -1,3 +1,16 @@ + + + +Generate certificate keys ### Synopsis @@ -27,7 +40,7 @@ kubeadm certs certificate-key [flags] -h, --help -help for certificate-key +

help for certificate-key

@@ -48,7 +61,7 @@ kubeadm certs certificate-key [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_check-expiration.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_check-expiration.md index 50a3cb8bf29c0..e321a5a0294dc 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_check-expiration.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_check-expiration.md @@ -1,3 +1,16 @@ + + + +Check certificates expiration for a Kubernetes cluster ### Synopsis @@ -21,28 +34,28 @@ kubeadm certs check-expiration [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for check-expiration +

help for check-expiration

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -63,7 +76,7 @@ kubeadm certs check-expiration [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

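Several of the `kubeadm certs` subcommands documented here accept `--config`, a path to a kubeadm configuration file. A minimal sketch of such a file (the version and directory values are illustrative assumptions, not values you must set):

```yaml
# Sketch: minimal kubeadm configuration file for use with --config.
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.21.0              # illustrative version
certificatesDir: /etc/kubernetes/pki    # matches the --cert-dir default above
```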
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md index 81b248e4f0a6c..52d21a2cff107 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md @@ -1,3 +1,16 @@ + + + +Generate keys and certificate signing requests ### Synopsis @@ -32,28 +45,28 @@ kubeadm certs generate-csr [flags] --cert-dir string -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for generate-csr +

help for generate-csr

--kubeconfig-dir string     Default: "/etc/kubernetes" -The path where to save the kubeconfig file. +

The path where to save the kubeconfig file.

@@ -74,7 +87,7 @@ kubeadm certs generate-csr [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew.md index 8b627a595d28f..e728f9f06015c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew.md @@ -1,3 +1,16 @@ + + + +Renew certificates for a Kubernetes cluster ### Synopsis @@ -21,7 +34,7 @@ kubeadm certs renew [flags] -h, --help -help for renew +

help for renew

@@ -42,7 +55,7 @@ kubeadm certs renew [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md index 536164c45a7d7..2a81cee1d4072 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md @@ -1,3 +1,16 @@ + + + +Renew the certificate embedded in the kubeconfig file for the admin to use and for kubeadm itself ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew admin.conf [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for admin.conf +

help for admin.conf

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew admin.conf [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md index 13c12ed0d0071..b948adb65cde5 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md @@ -1,3 +1,16 @@ + + + +Renew all available certificates ### Synopsis @@ -21,42 +34,42 @@ kubeadm certs renew all [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for all +

help for all

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -77,7 +90,7 @@ kubeadm certs renew all [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md index fac6861a7c9dd..cb8fe0d5f7b88 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md @@ -1,3 +1,16 @@ + + + +Renew the certificate the apiserver uses to access etcd ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew apiserver-etcd-client [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for apiserver-etcd-client +

help for apiserver-etcd-client

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew apiserver-etcd-client [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md index 030fb1425aeee..475e8c9f22e6e 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md @@ -1,3 +1,16 @@ + + + +Renew the certificate for the API server to connect to kubelet ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew apiserver-kubelet-client [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for apiserver-kubelet-client +

help for apiserver-kubelet-client

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew apiserver-kubelet-client [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md index 8ab01efd89c7b..750df89d834d3 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md @@ -1,3 +1,16 @@ + + + +Renew the certificate for serving the Kubernetes API ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew apiserver [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for apiserver +

help for apiserver

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew apiserver [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md index 10b44f7c3e8eb..b052fb3e543a0 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md @@ -1,3 +1,16 @@ + + + +Renew the certificate embedded in the kubeconfig file for the controller manager to use ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew controller-manager.conf [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for controller-manager.conf +

help for controller-manager.conf

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew controller-manager.conf [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md index b9ddadd6f14af..252296e3950c4 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md @@ -1,3 +1,16 @@ + + + +Renew the certificate for liveness probes to healthcheck etcd ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew etcd-healthcheck-client [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for etcd-healthcheck-client +

help for etcd-healthcheck-client

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew etcd-healthcheck-client [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md
index 3b15fa02f0533..f25b86fa15f57 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md
@@ -1,3 +1,16 @@
+Renew the certificate for etcd nodes to communicate with each other

### Synopsis

@@ -27,42 +40,42 @@ kubeadm certs renew etcd-peer [flags]
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save the certificates
--config string
    Path to a kubeadm configuration file.
--csr-dir string
    The path to output the CSRs and private keys to
--csr-only
    Create CSRs instead of generating certificates
-h, --help
    help for etcd-peer
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
@@ -83,7 +96,7 @@ kubeadm certs renew etcd-peer [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md
index 82b9e43e34bc7..059d0d9bbb233 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md
@@ -1,3 +1,16 @@
+Renew the certificate for serving etcd

### Synopsis

@@ -27,42 +40,42 @@ kubeadm certs renew etcd-server [flags]
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save the certificates
--config string
    Path to a kubeadm configuration file.
--csr-dir string
    The path to output the CSRs and private keys to
--csr-only
    Create CSRs instead of generating certificates
-h, --help
    help for etcd-server
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
@@ -83,7 +96,7 @@ kubeadm certs renew etcd-server [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md
index b1f3bc0c840fd..d93fca8d468b0 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md
@@ -1,3 +1,16 @@
+Renew the certificate for the front proxy client

### Synopsis

@@ -27,42 +40,42 @@ kubeadm certs renew front-proxy-client [flags]
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save the certificates
--config string
    Path to a kubeadm configuration file.
--csr-dir string
    The path to output the CSRs and private keys to
--csr-only
    Create CSRs instead of generating certificates
-h, --help
    help for front-proxy-client
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
@@ -83,7 +96,7 @@ kubeadm certs renew front-proxy-client [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md
index f26fbc22b16f1..5d7ade453b5c1 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md
@@ -1,3 +1,16 @@
+Renew the certificate embedded in the kubeconfig file for the scheduler manager to use

### Synopsis

@@ -27,42 +40,42 @@ kubeadm certs renew scheduler.conf [flags]
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save the certificates
--config string
    Path to a kubeadm configuration file.
--csr-dir string
    The path to output the CSRs and private keys to
--csr-only
    Create CSRs instead of generating certificates
-h, --help
    help for scheduler.conf
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
@@ -83,7 +96,7 @@ kubeadm certs renew scheduler.conf [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.
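
The four renewal pages above document the same flag set, one subcommand per certificate. As an illustrative sketch using only the flags documented above (the certificate directory shown is the documented default), a renewal and a CSR-only run look like:

```
# Renew the etcd peer certificate in place
kubeadm certs renew etcd-peer --cert-dir /etc/kubernetes/pki

# Emit a CSR and private key instead of a signed certificate
kubeadm certs renew etcd-server --csr-only --csr-dir /tmp/csrs
```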

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md
index f5a69d79fdacf..5fe7d65b403c9 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md
@@ -1,3 +1,16 @@
+Output shell completion code for the specified shell (bash or zsh)

### Synopsis

@@ -59,7 +72,7 @@ source <(kubeadm completion zsh)
-h, --help
    help for completion
@@ -80,7 +93,7 @@ source <(kubeadm completion zsh)
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.
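
For reference, the completion output is meant to be evaluated by the shell itself, as the zsh line in the hunk context above does; the bash equivalent is:

```
source <(kubeadm completion bash)
```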

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md
index b39cdd7a0d24a..50cb9f63b9e5d 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md
@@ -1,3 +1,16 @@
+Manage configuration for a kubeadm cluster persisted in a ConfigMap in the cluster

### Synopsis

@@ -26,14 +39,14 @@ kubeadm config [flags]
-h, --help
    help for config
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
@@ -54,7 +67,7 @@ kubeadm config [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md
index 436f3c3c7e303..0f85b4fbc2183 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md
@@ -1,3 +1,16 @@
+Interact with container images used by kubeadm

### Synopsis

@@ -21,7 +34,7 @@ kubeadm config images [flags]
-h, --help
    help for images
@@ -42,14 +55,14 @@ kubeadm config images [flags]
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md
index 842fb2fe9280a..4634bd0a27c45 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md
@@ -1,3 +1,16 @@
+Print a list of images kubeadm will use. The configuration file is used in case any images or image repositories are customized

### Synopsis

@@ -21,49 +34,56 @@ kubeadm config images list [flags]
--allow-missing-template-keys    Default: true
    If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.
--config string
    Path to a kubeadm configuration file.
-o, --experimental-output string    Default: "text"
    Output format. One of: text|json|yaml|go-template|go-template-file|template|templatefile|jsonpath|jsonpath-as-json|jsonpath-file.
--feature-gates string
    A set of key=value pairs that describe feature gates for various features. Options are:
-   IPv6DualStack=true|false (ALPHA - default=false)
+   IPv6DualStack=true|false (BETA - default=true)
    PublicKeysECDSA=true|false (ALPHA - default=false)
-h, --help
    help for list
--image-repository string    Default: "k8s.gcr.io"
    Choose a container registry to pull control plane images from
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
+--show-managed-fields
+    If true, keep the managedFields when printing objects in JSON or YAML format.
@@ -84,14 +104,14 @@ kubeadm config images list [flags]
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.
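
A brief sketch of how the list subcommand's output flags combine, using only flags documented above (JSON is one of the documented --experimental-output formats, and the newly added --show-managed-fields only affects JSON and YAML output):

```
kubeadm config images list -o json --show-managed-fields
```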

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md
index d2f5961f85946..840072d167e03 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md
@@ -1,3 +1,16 @@
+Pull images used by kubeadm

### Synopsis

@@ -21,42 +34,42 @@ kubeadm config images pull [flags]
--config string
    Path to a kubeadm configuration file.
--cri-socket string
    Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.
--feature-gates string
    A set of key=value pairs that describe feature gates for various features. Options are:
-   IPv6DualStack=true|false (ALPHA - default=false)
+   IPv6DualStack=true|false (BETA - default=true)
    PublicKeysECDSA=true|false (ALPHA - default=false)
-h, --help
    help for pull
--image-repository string    Default: "k8s.gcr.io"
    Choose a container registry to pull control plane images from
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
@@ -77,14 +90,14 @@ kubeadm config images pull [flags]
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.
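
As a sketch of the --image-repository override documented above (the registry host here is a placeholder, not a real mirror):

```
kubeadm config images pull --image-repository registry.example.com/kubernetes
```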

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md
index d07ffe8677493..5858bdb307cdc 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md
@@ -1,3 +1,16 @@
+Read an older version of the kubeadm configuration API types from a file, and output the similar config object for the newer version

### Synopsis

@@ -34,21 +47,21 @@ kubeadm config migrate [flags]
-h, --help
    help for migrate
--new-config string
    Path to the resulting equivalent kubeadm config file using the new API version. Optional, if not specified output will be sent to STDOUT.
--old-config string
    Path to the kubeadm config file that is using an old API version and should be converted. This flag is mandatory.
@@ -69,14 +82,14 @@ kubeadm config migrate [flags]
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.
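
A minimal migration run might look like this (both file names are placeholders; per the flag descriptions above, --old-config is mandatory and omitting --new-config sends the result to STDOUT):

```
kubeadm config migrate --old-config old-config.yaml --new-config new-config.yaml
```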

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md
index c6e1ea2173ed7..2f20d9d1ce4b1 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md
@@ -1,3 +1,16 @@
+Print configuration

### Synopsis

@@ -23,7 +36,7 @@ kubeadm config print [flags]
-h, --help
    help for print
@@ -44,14 +57,14 @@ kubeadm config print [flags]
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md
index adc76ee41cb7c..f8200dfd52836 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md
@@ -1,3 +1,16 @@
+Print default init configuration, that can be used for 'kubeadm init'

### Synopsis

@@ -5,7 +18,7 @@
 This command prints objects such as the default init configuration that is used for 'kubeadm init'.
-Note that sensitive values like the Bootstrap Token fields are replaced with placeholder values like {"abcdef.0123456789abcdef" "" "nil" <nil> [] []} in order to pass validation but
+Note that sensitive values like the Bootstrap Token fields are replaced with placeholder values like "abcdef.0123456789abcdef" in order to pass validation but
 not perform the real computation for creating a token.

@@ -23,17 +36,17 @@ kubeadm config print init-defaults [flags]
---component-configs stringSlice
+--component-configs strings
    A comma-separated list for component config API objects to print the default values for. Available values: [KubeProxyConfiguration KubeletConfiguration]. If this flag is not set, no component configs will be printed.
-h, --help
    help for init-defaults
@@ -54,14 +67,14 @@ kubeadm config print init-defaults [flags]
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md
index b1c976c663fa6..1c634871eb24a 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md
@@ -1,3 +1,16 @@
+Print default join configuration, that can be used for 'kubeadm join'

### Synopsis

@@ -5,7 +18,7 @@
 This command prints objects such as the default join configuration that is used for 'kubeadm join'.
-Note that sensitive values like the Bootstrap Token fields are replaced with placeholder values like {"abcdef.0123456789abcdef" "" "nil" <nil> [] []} in order to pass validation but
+Note that sensitive values like the Bootstrap Token fields are replaced with placeholder values like "abcdef.0123456789abcdef" in order to pass validation but
 not perform the real computation for creating a token.

@@ -23,17 +36,17 @@ kubeadm config print join-defaults [flags]
---component-configs stringSlice
+--component-configs strings
    A comma-separated list for component config API objects to print the default values for. Available values: [KubeProxyConfiguration KubeletConfiguration]. If this flag is not set, no component configs will be printed.
-h, --help
    help for join-defaults
@@ -54,14 +67,14 @@ kubeadm config print join-defaults [flags]
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.
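
Combining the flags documented above, printing the init defaults together with one of the two available component configs looks like:

```
kubeadm config print init-defaults --component-configs KubeletConfiguration
```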

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_view.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_view.md
deleted file mode 100644
index c3a3137105dfc..0000000000000
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_view.md
+++ /dev/null
@@ -1,63 +0,0 @@
-### Synopsis
-
-Using this command, you can view the ConfigMap in the cluster where the configuration for kubeadm is located.
-
-The configuration is located in the "kube-system" namespace in the "kubeadm-config" ConfigMap.
-
-```
-kubeadm config view [flags]
-```
-
-### Options
-
--h, --help
-    help for view
-
-### Options inherited from parent commands
-
---kubeconfig string    Default: "/etc/kubernetes/admin.conf"
-    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
---rootfs string
-    [EXPERIMENTAL] The path to the 'real' host root filesystem.
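
With `kubeadm config view` removed, the deleted synopsis above still points at where the data lives: the "kubeadm-config" ConfigMap in the "kube-system" namespace. One way to read it directly (a kubectl sketch, not part of this diff):

```
kubectl get configmap kubeadm-config -n kube-system -o yaml
```
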
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md
index 49c7fd112ac86..4294cffe8b340 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md
@@ -1,3 +1,16 @@
+Run this command in order to set up the Kubernetes control plane

### Synopsis

@@ -39,7 +52,7 @@
 mark-control-plane  Mark a node as a control-plane
 bootstrap-token  Generates bootstrap tokens used to join a node to a cluster
 kubelet-finalize  Updates settings relevant to the kubelet after TLS bootstrap
   /experimental-cert-rotation  Enable kubelet client certificate rotation
-addon  Install required addons for passing Conformance tests
+addon  Install required addons for passing conformance tests
   /coredns  Install the CoreDNS addon to a Kubernetes cluster
   /kube-proxy  Install the kube-proxy addon to a Kubernetes cluster

```
kubeadm init [flags]
```

@@ -62,175 +75,175 @@ kubeadm init [flags]
--apiserver-advertise-address string
    The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
--apiserver-bind-port int32    Default: 6443
    Port for the API Server to bind to.
---apiserver-cert-extra-sans stringSlice
+--apiserver-cert-extra-sans strings
    Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names.
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save and store the certificates.
--certificate-key string
    Key used to encrypt the control-plane certificates in the kubeadm-certs Secret.
--config string
    Path to a kubeadm configuration file.
--control-plane-endpoint string
    Specify a stable IP address or DNS name for the control plane.
--cri-socket string
    Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.
--dry-run
    Don't apply any changes; just output what would be done.
--experimental-patches string
    Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.
--feature-gates string
    A set of key=value pairs that describe feature gates for various features. Options are:
-   IPv6DualStack=true|false (ALPHA - default=false)
+   IPv6DualStack=true|false (BETA - default=true)
    PublicKeysECDSA=true|false (ALPHA - default=false)
-h, --help
    help for init
---ignore-preflight-errors stringSlice
+--ignore-preflight-errors strings
    A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.
--image-repository string    Default: "k8s.gcr.io"
    Choose a container registry to pull control plane images from
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
--node-name string
    Specify the node name.
--pod-network-cidr string
    Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.
--service-cidr string    Default: "10.96.0.0/12"
    Use alternative range of IP address for service VIPs.
--service-dns-domain string    Default: "cluster.local"
    Use alternative domain for services, e.g. "myorg.internal".
--skip-certificate-key-print
    Don't print the key used to encrypt the control-plane certificates.
---skip-phases stringSlice
+--skip-phases strings
    List of phases to be skipped
--skip-token-print
    Skip printing of the default bootstrap token generated by 'kubeadm init'.
--token string
-   The token to use for establishing bidirectional trust between nodes and control-plane nodes. The format is [a-z0-9]{6}\.[a-z0-9]{16} - e.g. abcdef.0123456789abcdef
+   The token to use for establishing bidirectional trust between nodes and control-plane nodes. The format is [a-z0-9]{6}.[a-z0-9]{16} - e.g. abcdef.0123456789abcdef
--token-ttl duration    Default: 24h0m0s
    The duration before the token is automatically deleted (e.g. 1s, 2m, 3h). If set to '0', the token will never expire
--upload-certs
    Upload control-plane certificates to the kubeadm-certs Secret.
@@ -251,7 +264,7 @@ kubeadm init [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.
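
Pulling several of the documented flags together, a representative invocation might look like the following; the endpoint and CIDR values are illustrative placeholders, not recommendations:

```
kubeadm init \
  --control-plane-endpoint "k8s-api.example.internal:6443" \
  --pod-network-cidr 10.244.0.0/16 \
  --upload-certs
```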

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md
index 2db3ea5e54aee..48ccd99cd552a 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md
@@ -1,3 +1,16 @@
+Use this command to invoke single phase of the init workflow

### Synopsis

@@ -17,7 +30,7 @@ Use this command to invoke single phase of the init workflow
-h, --help
    help for phase
@@ -38,7 +51,7 @@ Use this command to invoke single phase of the init workflow
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md
index 67b9c3af7598a..64777661d03ae 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md
@@ -1,3 +1,16 @@
+Install required addons for passing conformance tests

### Synopsis

@@ -21,7 +34,7 @@ kubeadm init phase addon [flags]
-h, --help
    help for addon
@@ -42,7 +55,7 @@ kubeadm init phase addon [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md
index 103dd7e7c5e74..48ae42ca48495 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md
@@ -1,3 +1,16 @@
+Install all the addons

### Synopsis

@@ -21,84 +34,84 @@ kubeadm init phase addon all [flags]
--apiserver-advertise-address string
    The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
--apiserver-bind-port int32    Default: 6443
    Port for the API Server to bind to.
--config string
    Path to a kubeadm configuration file.
--control-plane-endpoint string
    Specify a stable IP address or DNS name for the control plane.
--feature-gates string
    A set of key=value pairs that describe feature gates for various features. Options are:
-   IPv6DualStack=true|false (ALPHA - default=false)
+   IPv6DualStack=true|false (BETA - default=true)
    PublicKeysECDSA=true|false (ALPHA - default=false)
-h, --help
    help for all
--image-repository string    Default: "k8s.gcr.io"
    Choose a container registry to pull control plane images from
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
--pod-network-cidr string
    Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.
--service-cidr string    Default: "10.96.0.0/12"
    Use alternative range of IP address for service VIPs.
--service-dns-domain string    Default: "cluster.local"
    Use alternative domain for services, e.g. "myorg.internal".
@@ -119,7 +132,7 @@ kubeadm init phase addon all [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md
index 3eebcb828bf60..68f0d0d0259b0 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md
@@ -1,3 +1,16 @@
+Install the CoreDNS addon to a Kubernetes cluster

### Synopsis

@@ -21,56 +34,56 @@ kubeadm init phase addon coredns [flags]
--config string
    Path to a kubeadm configuration file.
--feature-gates string
    A set of key=value pairs that describe feature gates for various features. Options are:
-   IPv6DualStack=true|false (ALPHA - default=false)
+   IPv6DualStack=true|false (BETA - default=true)
    PublicKeysECDSA=true|false (ALPHA - default=false)
-h, --help
    help for coredns
--image-repository string    Default: "k8s.gcr.io"
    Choose a container registry to pull control plane images from
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
--service-cidr string    Default: "10.96.0.0/12"
    Use alternative range of IP address for service VIPs.
--service-dns-domain string    Default: "cluster.local"
    Use alternative domain for services, e.g. "myorg.internal".
@@ -91,7 +104,7 @@ kubeadm init phase addon coredns [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md
index 78140e94e80ec..4dc9a18339ccc 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md
@@ -1,3 +1,16 @@
+Install the kube-proxy addon to a Kubernetes cluster

### Synopsis

@@ -21,63 +34,63 @@ kubeadm init phase addon kube-proxy [flags]
--apiserver-advertise-address string
    The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
--apiserver-bind-port int32    Default: 6443
    Port for the API Server to bind to.
--config string
    Path to a kubeadm configuration file.
--control-plane-endpoint string
    Specify a stable IP address or DNS name for the control plane.
-h, --help
    help for kube-proxy
--image-repository string    Default: "k8s.gcr.io"
    Choose a container registry to pull control plane images from
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
--pod-network-cidr string
    Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.
@@ -98,7 +111,7 @@ kubeadm init phase addon kube-proxy [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.
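
As a sketch of re-applying a single addon phase against an existing cluster, using only flags documented above (the kubeconfig path is the documented default):

```
kubeadm init phase addon coredns --kubeconfig /etc/kubernetes/admin.conf
```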

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md
index 123ab38fdc843..652399d5cfd7d 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md
@@ -1,3 +1,16 @@
+Generates bootstrap tokens used to join a node to a cluster

### Synopsis

@@ -31,28 +44,28 @@ kubeadm init phase bootstrap-token [flags]
--config string
    Path to a kubeadm configuration file.
-h, --help
    help for bootstrap-token
--kubeconfig string    Default: "/etc/kubernetes/admin.conf"
    The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--skip-token-print
    Skip printing of the default bootstrap token generated by 'kubeadm init'.
@@ -73,7 +86,7 @@ kubeadm init phase bootstrap-token [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.
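
A minimal sketch of invoking this phase on its own, suppressing the token printout with the flag documented above:

```
kubeadm init phase bootstrap-token --kubeconfig /etc/kubernetes/admin.conf --skip-token-print
```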

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md
index 28f5acc3e3427..c779b920e1d0a 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md
@@ -1,3 +1,16 @@
+Certificate generation

### Synopsis

@@ -21,7 +34,7 @@ kubeadm init phase certs [flags]
-h, --help
    help for certs
@@ -42,7 +55,7 @@ kubeadm init phase certs [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md
index 7ac391c0784ff..7485310462329 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md
@@ -1,3 +1,16 @@
+Generate all certificates

### Synopsis

@@ -21,63 +34,63 @@ kubeadm init phase certs all [flags]
--apiserver-advertise-address string
    The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
---apiserver-cert-extra-sans stringSlice
+--apiserver-cert-extra-sans strings
    Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names.
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save and store the certificates.
--config string
    Path to a kubeadm configuration file.
--control-plane-endpoint string
    Specify a stable IP address or DNS name for the control plane.
-h, --help
    help for all
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
--service-cidr string    Default: "10.96.0.0/12"
    Use alternative range of IP address for service VIPs.
--service-dns-domain string    Default: "cluster.local"
    Use alternative domain for services, e.g. "myorg.internal".
@@ -98,7 +111,7 @@ kubeadm init phase certs all [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.
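
Sketching the certs-all phase with extra API server SANs (the SAN values are placeholders; the cert directory is the documented default):

```
kubeadm init phase certs all \
  --cert-dir /etc/kubernetes/pki \
  --apiserver-cert-extra-sans 10.0.0.10,k8s-api.example.internal
```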

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md
index eef07b2afaa27..4c8bed971a32e 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md
@@ -1,3 +1,16 @@
+Generate the certificate the apiserver uses to access etcd

### Synopsis

@@ -25,28 +38,28 @@ kubeadm init phase certs apiserver-etcd-client [flags]
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save and store the certificates.
--config string
    Path to a kubeadm configuration file.
-h, --help
    help for apiserver-etcd-client
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
@@ -67,7 +80,7 @@ kubeadm init phase certs apiserver-etcd-client [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md
index 1d03b13ef10cb..814a9c15ff4ad 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md
@@ -1,3 +1,16 @@
+Generate the certificate for the API server to connect to kubelet

### Synopsis

@@ -25,28 +38,28 @@ kubeadm init phase certs apiserver-kubelet-client [flags]
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save and store the certificates.
--config string
    Path to a kubeadm configuration file.
-h, --help
    help for apiserver-kubelet-client
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
@@ -67,7 +80,7 @@ kubeadm init phase certs apiserver-kubelet-client [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md
index 28f6cc6244b9f..fa2d46ab8eaba 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md
@@ -1,3 +1,16 @@
+Generate the certificate for serving the Kubernetes API

### Synopsis

@@ -27,63 +40,63 @@ kubeadm init phase certs apiserver [flags]
--apiserver-advertise-address string
    The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
---apiserver-cert-extra-sans stringSlice
+--apiserver-cert-extra-sans strings
    Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names.
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save and store the certificates.
--config string
    Path to a kubeadm configuration file.
--control-plane-endpoint string
    Specify a stable IP address or DNS name for the control plane.
-h, --help
    help for apiserver
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
--service-cidr string    Default: "10.96.0.0/12"
    Use alternative range of IP address for service VIPs.
--service-dns-domain string    Default: "cluster.local"
    Use alternative domain for services, e.g. "myorg.internal".
@@ -104,7 +117,7 @@ kubeadm init phase certs apiserver [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.
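
And generating only the API server serving certificate, per the flags above (the address is from the RFC 5737 documentation range, the SAN a placeholder):

```
kubeadm init phase certs apiserver \
  --apiserver-advertise-address 192.0.2.10 \
  --apiserver-cert-extra-sans k8s-api.example.internal
```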

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md
index 81ccc2cbc2e36..d12b74f19f8f1 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md
@@ -1,3 +1,16 @@
+Generate the self-signed Kubernetes CA to provision identities for other Kubernetes components

### Synopsis

@@ -25,28 +38,28 @@ kubeadm init phase certs ca [flags]
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save and store the certificates.
--config string
    Path to a kubeadm configuration file.
-h, --help
    help for ca
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
@@ -67,7 +80,7 @@ kubeadm init phase certs ca [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md
index 17066413ddd10..2cddb77aded75 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md
@@ -1,3 +1,16 @@
+Generate the self-signed CA to provision identities for etcd

### Synopsis

@@ -25,28 +38,28 @@ kubeadm init phase certs etcd-ca [flags]
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save and store the certificates.
--config string
    Path to a kubeadm configuration file.
-h, --help
    help for etcd-ca
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
@@ -67,7 +80,7 @@ kubeadm init phase certs etcd-ca [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md
index 6ee2e7ea112a6..9876d5bce793b 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md
@@ -1,3 +1,16 @@
+Generate the certificate for liveness probes to healthcheck etcd

### Synopsis

@@ -25,28 +38,28 @@ kubeadm init phase certs etcd-healthcheck-client [flags]
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save and store the certificates.
--config string
    Path to a kubeadm configuration file.
-h, --help
    help for etcd-healthcheck-client
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
@@ -67,7 +80,7 @@ kubeadm init phase certs etcd-healthcheck-client [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md
index a127d4095ab47..d86991f8f85d3 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md
@@ -1,3 +1,16 @@
+Generate the certificate for etcd nodes to communicate with each other

### Synopsis

@@ -27,28 +40,28 @@ kubeadm init phase certs etcd-peer [flags]
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save and store the certificates.
--config string
    Path to a kubeadm configuration file.
-h, --help
    help for etcd-peer
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
@@ -69,7 +82,7 @@ kubeadm init phase certs etcd-peer [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md
index 07a58373c151f..213cf22d2fe1a 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md
@@ -1,3 +1,16 @@
+Generate the certificate for serving etcd

### Synopsis

@@ -27,28 +40,28 @@ kubeadm init phase certs etcd-server [flags]
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save and store the certificates.
--config string
    Path to a kubeadm configuration file.
-h, --help
    help for etcd-server
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
@@ -69,7 +82,7 @@ kubeadm init phase certs etcd-server [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md
index 4a05b78d776b6..c2d37be74fc91 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md
@@ -1,3 +1,16 @@
+Generate the self-signed CA to provision identities for front proxy

### Synopsis

@@ -25,28 +38,28 @@ kubeadm init phase certs front-proxy-ca [flags]
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save and store the certificates.
--config string
    Path to a kubeadm configuration file.
-h, --help
    help for front-proxy-ca
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
@@ -67,7 +80,7 @@ kubeadm init phase certs front-proxy-ca [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md
index 8e2d76f4512fd..58a81fa7a286a 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md
@@ -1,3 +1,16 @@
+Generate the certificate for the front proxy client

### Synopsis

@@ -25,28 +38,28 @@ kubeadm init phase certs front-proxy-client [flags]
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save and store the certificates.
--config string
    Path to a kubeadm configuration file.
-h, --help
    help for front-proxy-client
--kubernetes-version string    Default: "stable-1"
    Choose a specific Kubernetes version for the control plane.
@@ -67,7 +80,7 @@ kubeadm init phase certs front-proxy-client [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md
index 8d36df6c52f0d..a3df321d886fc 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md
@@ -1,3 +1,16 @@
+Generate a private key for signing service account tokens along with its public key

### Synopsis

@@ -23,14 +36,14 @@ kubeadm init phase certs sa [flags]
--cert-dir string    Default: "/etc/kubernetes/pki"
    The path where to save and store the certificates.
-h, --help
    help for sa
@@ -51,7 +64,7 @@ kubeadm init phase certs sa [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md
index 2bed8442d3364..86ef35d14d131 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md
@@ -1,3 +1,16 @@
+Generate all static Pod manifest files necessary to establish the control plane

### Synopsis

@@ -21,7 +34,7 @@ kubeadm init phase control-plane [flags]
-h, --help
    help for control-plane
@@ -42,7 +55,7 @@ kubeadm init phase control-plane [flags]
--rootfs string
    [EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md
index e03cdb6274d80..daad2e9a39f4d 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md
@@ -1,3 +1,16 @@
+Generate all static Pod manifest files

### Synopsis

@@ -32,105 +45,105 @@ kubeadm init phase control-plane all [flags]

--apiserver-advertise-address string
The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443
Port for the API Server to bind to.

---apiserver-extra-args mapStringString
+--apiserver-extra-args <comma-separated 'key=value' pairs>
A set of extra flags to pass to the API Server or override default ones in form of <flagname>=<value>

--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.

--config string
Path to a kubeadm configuration file.

--control-plane-endpoint string
Specify a stable IP address or DNS name for the control plane.

---controller-manager-extra-args mapStringString
+--controller-manager-extra-args <comma-separated 'key=value' pairs>
A set of extra flags to pass to the Controller Manager or override default ones in form of <flagname>=<value>

--experimental-patches string
Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

--feature-gates string
-A set of key=value pairs that describe feature gates for various features. Options are:
-IPv6DualStack=true|false (ALPHA - default=false)
-PublicKeysECDSA=true|false (ALPHA - default=false)
+A set of key=value pairs that describe feature gates for various features. Options are:
+IPv6DualStack=true|false (BETA - default=true)
+PublicKeysECDSA=true|false (ALPHA - default=false)

-h, --help
help for all

--image-repository string     Default: "k8s.gcr.io"
Choose a container registry to pull control plane images from

--kubernetes-version string     Default: "stable-1"
Choose a specific Kubernetes version for the control plane.

--pod-network-cidr string
Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.

---scheduler-extra-args mapStringString
+--scheduler-extra-args <comma-separated 'key=value' pairs>
A set of extra flags to pass to the Scheduler or override default ones in form of <flagname>=<value>

--service-cidr string     Default: "10.96.0.0/12"
Use alternative range of IP address for service VIPs.

@@ -151,7 +164,7 @@ kubeadm init phase control-plane all [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
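
The hunk above renames the extra-args flag types from `mapStringString` to `<comma-separated 'key=value' pairs>` and records the `IPv6DualStack` gate moving to beta. As a rough sketch of what the new syntax accepts (the specific flag values are illustrative, not recommendations):

```shell
# Illustrative only: extra flags are passed as comma-separated key=value
# pairs, and feature gates use the same key=value form shown above.
kubeadm init phase control-plane all \
  --apiserver-extra-args "audit-log-maxage=30,audit-log-path=/var/log/kube-audit.log" \
  --feature-gates "PublicKeysECDSA=true"
```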

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md
index 50aef041fd535..f95da1c6d2b1b 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md
@@ -1,3 +1,16 @@
+Generates the kube-apiserver static Pod manifest

### Synopsis

@@ -21,84 +34,84 @@ kubeadm init phase control-plane apiserver [flags]

--apiserver-advertise-address string
The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443
Port for the API Server to bind to.

---apiserver-extra-args mapStringString
+--apiserver-extra-args <comma-separated 'key=value' pairs>
A set of extra flags to pass to the API Server or override default ones in form of <flagname>=<value>

--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.

--config string
Path to a kubeadm configuration file.

--control-plane-endpoint string
Specify a stable IP address or DNS name for the control plane.

--experimental-patches string
Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

--feature-gates string
-A set of key=value pairs that describe feature gates for various features. Options are:
-IPv6DualStack=true|false (ALPHA - default=false)
-PublicKeysECDSA=true|false (ALPHA - default=false)
+A set of key=value pairs that describe feature gates for various features. Options are:
+IPv6DualStack=true|false (BETA - default=true)
+PublicKeysECDSA=true|false (ALPHA - default=false)

-h, --help
help for apiserver

--image-repository string     Default: "k8s.gcr.io"
Choose a container registry to pull control plane images from

--kubernetes-version string     Default: "stable-1"
Choose a specific Kubernetes version for the control plane.

--service-cidr string     Default: "10.96.0.0/12"
Use alternative range of IP address for service VIPs.

@@ -119,7 +132,7 @@ kubeadm init phase control-plane apiserver [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
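
The `--experimental-patches` contract quoted above is easier to see with a concrete layout. A minimal sketch, assuming a scratch directory of your choosing (the path and patch contents are hypothetical):

```shell
# Hypothetical patch directory; the file name follows the
# "target[suffix][+patchtype].extension" convention from the flag help.
mkdir -p /tmp/kubeadm-patches
cat > /tmp/kubeadm-patches/kube-apiserver0+merge.yaml <<'EOF'
# Illustrative merge patch applied to the kube-apiserver static Pod
metadata:
  annotations:
    example.com/patched: "true"
EOF
kubeadm init phase control-plane apiserver --experimental-patches /tmp/kubeadm-patches
```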

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md
index c1f0989cb6606..0931956c54021 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md
@@ -1,3 +1,16 @@
+Generates the kube-controller-manager static Pod manifest

### Synopsis

@@ -21,56 +34,56 @@ kubeadm init phase control-plane controller-manager [flags]

--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.

--config string
Path to a kubeadm configuration file.

---controller-manager-extra-args mapStringString
+--controller-manager-extra-args <comma-separated 'key=value' pairs>
A set of extra flags to pass to the Controller Manager or override default ones in form of <flagname>=<value>

--experimental-patches string
Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help
help for controller-manager

--image-repository string     Default: "k8s.gcr.io"
Choose a container registry to pull control plane images from

--kubernetes-version string     Default: "stable-1"
Choose a specific Kubernetes version for the control plane.

--pod-network-cidr string
Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.

@@ -91,7 +104,7 @@ kubeadm init phase control-plane controller-manager [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md
index ce2f366b13345..5fe483282a26a 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md
@@ -1,3 +1,16 @@
+Generates the kube-scheduler static Pod manifest

### Synopsis

@@ -21,49 +34,49 @@ kubeadm init phase control-plane scheduler [flags]

--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.

--config string
Path to a kubeadm configuration file.

--experimental-patches string
Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help
help for scheduler

--image-repository string     Default: "k8s.gcr.io"
Choose a container registry to pull control plane images from

--kubernetes-version string     Default: "stable-1"
Choose a specific Kubernetes version for the control plane.

---scheduler-extra-args mapStringString
+--scheduler-extra-args <comma-separated 'key=value' pairs>
A set of extra flags to pass to the Scheduler or override default ones in form of <flagname>=<value>

@@ -84,7 +97,7 @@ kubeadm init phase control-plane scheduler [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md
index dc5227a34ca5f..be2aef8c3bb94 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md
@@ -1,3 +1,16 @@
+Generate static Pod manifest file for local etcd

### Synopsis

@@ -21,7 +34,7 @@ kubeadm init phase etcd [flags]

-h, --help
help for etcd

@@ -42,7 +55,7 @@ kubeadm init phase etcd [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md
index 0e4cb7181eb11..1e4e8fa22f164 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md
@@ -1,3 +1,16 @@
+Generate the static Pod manifest file for a local, single-node local etcd instance

### Synopsis

@@ -33,35 +46,35 @@ kubeadm init phase etcd local [flags]

--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.

--config string
Path to a kubeadm configuration file.

--experimental-patches string
Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help
help for local

--image-repository string     Default: "k8s.gcr.io"
Choose a container registry to pull control plane images from

@@ -82,7 +95,7 @@ kubeadm init phase etcd local [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md
index b3a200a22877b..da4fde7ebcf1d 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md
@@ -1,3 +1,16 @@
+Generate all kubeconfig files necessary to establish the control plane and the admin kubeconfig file

### Synopsis

@@ -21,7 +34,7 @@ kubeadm init phase kubeconfig [flags]

-h, --help
help for kubeconfig

@@ -42,7 +55,7 @@ kubeadm init phase kubeconfig [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md
index 85885559f761b..a664e126ffedb 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md
@@ -1,3 +1,16 @@
+Generate a kubeconfig file for the admin to use and for kubeadm itself

### Synopsis

@@ -21,56 +34,56 @@ kubeadm init phase kubeconfig admin [flags]

--apiserver-advertise-address string
The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443
Port for the API Server to bind to.

--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.

--config string
Path to a kubeadm configuration file.

--control-plane-endpoint string
Specify a stable IP address or DNS name for the control plane.

-h, --help
help for admin

--kubeconfig-dir string     Default: "/etc/kubernetes"
The path where to save the kubeconfig file.

--kubernetes-version string     Default: "stable-1"
Choose a specific Kubernetes version for the control plane.

@@ -91,7 +104,7 @@ kubeadm init phase kubeconfig admin [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md
index 9296e84a199cd..f1ebdbcf12afd 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md
@@ -1,3 +1,16 @@
+Generate all kubeconfig files

### Synopsis

@@ -21,63 +34,63 @@ kubeadm init phase kubeconfig all [flags]

--apiserver-advertise-address string
The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443
Port for the API Server to bind to.

--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.

--config string
Path to a kubeadm configuration file.

--control-plane-endpoint string
Specify a stable IP address or DNS name for the control plane.

-h, --help
help for all

--kubeconfig-dir string     Default: "/etc/kubernetes"
The path where to save the kubeconfig file.

--kubernetes-version string     Default: "stable-1"
Choose a specific Kubernetes version for the control plane.

--node-name string
Specify the node name.

@@ -98,7 +111,7 @@ kubeadm init phase kubeconfig all [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
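
Taken together, the flags above mean the full set of kubeconfig files can be generated against a non-default PKI and output directory. A minimal sketch (both paths are placeholders):

```shell
# Illustrative: generate every kubeconfig file, reading certificates from
# an alternate PKI directory and writing output somewhere disposable.
kubeadm init phase kubeconfig all \
  --cert-dir /tmp/pki \
  --kubeconfig-dir /tmp/kubeconfigs
```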

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md
index 295d7e57dc819..c49ab4b6c4be7 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md
@@ -1,3 +1,16 @@
+Generate a kubeconfig file for the controller manager to use

### Synopsis

@@ -21,56 +34,56 @@ kubeadm init phase kubeconfig controller-manager [flags]

--apiserver-advertise-address string
The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443
Port for the API Server to bind to.

--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.

--config string
Path to a kubeadm configuration file.

--control-plane-endpoint string
Specify a stable IP address or DNS name for the control plane.

-h, --help
help for controller-manager

--kubeconfig-dir string     Default: "/etc/kubernetes"
The path where to save the kubeconfig file.

--kubernetes-version string     Default: "stable-1"
Choose a specific Kubernetes version for the control plane.

@@ -91,7 +104,7 @@ kubeadm init phase kubeconfig controller-manager [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md
index 9fd3145290273..fd141ea0fc833 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md
@@ -1,3 +1,16 @@
+Generate a kubeconfig file for the kubelet to use *only* for cluster bootstrapping purposes

### Synopsis

@@ -23,63 +36,63 @@ kubeadm init phase kubeconfig kubelet [flags]

--apiserver-advertise-address string
The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443
Port for the API Server to bind to.

--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.

--config string
Path to a kubeadm configuration file.

--control-plane-endpoint string
Specify a stable IP address or DNS name for the control plane.

-h, --help
help for kubelet

--kubeconfig-dir string     Default: "/etc/kubernetes"
The path where to save the kubeconfig file.

--kubernetes-version string     Default: "stable-1"
Choose a specific Kubernetes version for the control plane.

--node-name string
Specify the node name.

@@ -100,7 +113,7 @@ kubeadm init phase kubeconfig kubelet [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md
index c608732717d23..9618c2d874b45 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md
@@ -1,3 +1,16 @@
+Generate a kubeconfig file for the scheduler to use

### Synopsis

@@ -21,56 +34,56 @@ kubeadm init phase kubeconfig scheduler [flags]

--apiserver-advertise-address string
The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443
Port for the API Server to bind to.

--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.

--config string
Path to a kubeadm configuration file.

--control-plane-endpoint string
Specify a stable IP address or DNS name for the control plane.

-h, --help
help for scheduler

--kubeconfig-dir string     Default: "/etc/kubernetes"
The path where to save the kubeconfig file.

--kubernetes-version string     Default: "stable-1"
Choose a specific Kubernetes version for the control plane.

@@ -91,7 +104,7 @@ kubeadm init phase kubeconfig scheduler [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize.md
index 4e5febf638fdf..d2eb7f01257d4 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize.md
@@ -1,3 +1,16 @@
+Updates settings relevant to the kubelet after TLS bootstrap

### Synopsis

@@ -28,7 +41,7 @@ kubeadm init phase kubelet-finalize [flags]

-h, --help
help for kubelet-finalize

@@ -49,7 +62,7 @@ kubeadm init phase kubelet-finalize [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_all.md
index fce712fc45cf9..70e4c634b027c 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_all.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_all.md
@@ -1,3 +1,16 @@
+Run all kubelet-finalize phases

### Synopsis

@@ -28,21 +41,21 @@ kubeadm init phase kubelet-finalize all [flags]

--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.

--config string
Path to a kubeadm configuration file.

-h, --help
help for all

@@ -63,7 +76,7 @@ kubeadm init phase kubelet-finalize all [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md
index 2ace62929bb8e..6ce904cc2ba90 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md
@@ -1,3 +1,16 @@
+Enable kubelet client certificate rotation

### Synopsis

@@ -21,21 +34,21 @@ kubeadm init phase kubelet-finalize experimental-cert-rotation [flags]

--cert-dir string     Default: "/etc/kubernetes/pki"
The path where to save and store the certificates.

--config string
Path to a kubeadm configuration file.

-h, --help
help for experimental-cert-rotation

@@ -56,7 +69,7 @@ kubeadm init phase kubelet-finalize experimental-cert-rotation [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md
index f9898b58e0efa..11d2407499e86 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md
@@ -1,3 +1,16 @@
+Write kubelet settings and (re)start the kubelet

### Synopsis

@@ -28,28 +41,28 @@ kubeadm init phase kubelet-start [flags]

--config string
Path to a kubeadm configuration file.

--cri-socket string
Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.

-h, --help
help for kubelet-start

--node-name string
Specify the node name.

@@ -70,7 +83,7 @@ kubeadm init phase kubelet-start [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
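
As the `--cri-socket` help above notes, the socket only needs to be spelled out when auto-detection is insufficient. A minimal sketch with placeholder values:

```shell
# Illustrative: write kubelet settings for an explicitly named node with a
# non-standard CRI socket, then (re)start the kubelet.
# Node name and socket path are placeholders.
kubeadm init phase kubelet-start \
  --node-name worker-1 \
  --cri-socket /var/run/crio/crio.sock
```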

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md
index 453783db52c13..6ba7e9047932c 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md
@@ -1,3 +1,16 @@
+Mark a node as a control-plane

### Synopsis

@@ -12,7 +25,7 @@ kubeadm init phase mark-control-plane [flags]
 ```
 # Applies control-plane label and taint to the current node, functionally equivalent to what executed by kubeadm init.
-kubeadm init phase mark-control-plane --config config.yml
+kubeadm init phase mark-control-plane --config config.yaml

 # Applies control-plane label and taint to a specific node
 kubeadm init phase mark-control-plane --node-name myNode

@@ -31,21 +44,21 @@ kubeadm init phase mark-control-plane [flags]

--config string
Path to a kubeadm configuration file.

-h, --help
help for mark-control-plane

--node-name string
Specify the node name.

@@ -66,7 +79,7 @@ kubeadm init phase mark-control-plane [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md
index 06d47e861c323..345621f7030a9 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md
@@ -1,3 +1,16 @@
+Run pre-flight checks

### Synopsis

@@ -12,7 +25,7 @@ kubeadm init phase preflight [flags]
 ```
 # Run pre-flight checks for kubeadm init using a config file.
-kubeadm init phase preflight --config kubeadm-config.yml
+kubeadm init phase preflight --config kubeadm-config.yaml
 ```

### Options

@@ -28,21 +41,21 @@ kubeadm init phase preflight [flags]

--config string
Path to a kubeadm configuration file.

-h, --help
help for preflight

---ignore-preflight-errors stringSlice
+--ignore-preflight-errors strings
A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

@@ -63,7 +76,7 @@ kubeadm init phase preflight [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
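
The renamed `--ignore-preflight-errors` flag takes the check names quoted in its own help text; for instance, to demote the two checks named there to warnings:

```shell
# Run init pre-flight checks, but downgrade the privilege and swap checks
# to warnings (check names taken verbatim from the flag's help above).
kubeadm init phase preflight --ignore-preflight-errors=IsPrivilegedUser,Swap
```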

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md
index 404f62d725778..515060a76c7bb 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md
@@ -1,3 +1,16 @@
+Upload certificates to kubeadm-certs

### Synopsis

@@ -21,42 +34,42 @@ kubeadm init phase upload-certs [flags]

--certificate-key string
Key used to encrypt the control-plane certificates in the kubeadm-certs Secret.

--config string
Path to a kubeadm configuration file.

-h, --help
help for upload-certs

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--skip-certificate-key-print
Don't print the key used to encrypt the control-plane certificates.

--upload-certs
Upload control-plane certificates to the kubeadm-certs Secret.

@@ -77,7 +90,7 @@ kubeadm init phase upload-certs [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
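
The flags above combine into a common two-step flow: upload the encrypted control-plane certificates, then reuse the printed key when joining additional control-plane nodes. A sketch (the key value itself is whatever the command prints):

```shell
# Illustrative: upload control-plane certificates to the kubeadm-certs
# Secret; the encryption key is printed unless
# --skip-certificate-key-print is set.
kubeadm init phase upload-certs --upload-certs
# Later, on a joining control-plane node, that printed key is what
# "kubeadm join --certificate-key <key>" expects (<key> is a placeholder).
```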

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md
index c1b5c960921ae..7d007e7b5640e 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md
@@ -1,3 +1,16 @@
+Upload the kubeadm and kubelet configuration to a ConfigMap

### Synopsis

@@ -21,7 +34,7 @@ kubeadm init phase upload-config [flags]

-h, --help
help for upload-config

@@ -42,7 +55,7 @@ kubeadm init phase upload-config [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md
index 6370094df9b04..3c087368a77e3 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md
@@ -1,3 +1,16 @@
+Upload all configuration to a config map

### Synopsis

@@ -21,21 +34,21 @@ kubeadm init phase upload-config all [flags]

--config string
Path to a kubeadm configuration file.

-h, --help
help for all

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -56,7 +69,7 @@ kubeadm init phase upload-config all [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md
index 030595466be3e..13e561f486287 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md
@@ -1,3 +1,16 @@
+Upload the kubeadm ClusterConfiguration to a ConfigMap

### Synopsis

@@ -30,21 +43,21 @@ kubeadm init phase upload-config kubeadm [flags]

--config string
Path to a kubeadm configuration file.

-h, --help
help for kubeadm

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -65,7 +78,7 @@ kubeadm init phase upload-config kubeadm [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md
index bd334e091c35c..ba27f728cbf80 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md
@@ -1,3 +1,16 @@
+Upload the kubelet component config to a ConfigMap

### Synopsis

@@ -28,21 +41,21 @@ kubeadm init phase upload-config kubelet [flags]

--config string
Path to a kubeadm configuration file.

-h, --help
help for kubelet

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -63,7 +76,7 @@ kubeadm init phase upload-config kubelet [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
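
The three subcommands above share the same shape; for example, re-uploading only the kubeadm ClusterConfiguration after editing a local file might look like this (the config path is a placeholder):

```shell
# Illustrative: push an edited ClusterConfiguration back into the
# kubeadm-config ConfigMap using the default admin kubeconfig.
kubeadm init phase upload-config kubeadm \
  --config /tmp/kubeadm-config.yaml \
  --kubeconfig /etc/kubernetes/admin.conf
```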

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md
index 3a6f7299d6bea..ae528a44df34f 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md
@@ -1,3 +1,16 @@
+Run this on any machine you wish to join an existing cluster

### Synopsis

@@ -78,119 +91,119 @@ kubeadm join [api-server-endpoint] [flags]

--apiserver-advertise-address string
If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443
If the node should host a new control plane instance, the port for the API Server to bind to.

--certificate-key string
Use this key to decrypt the certificate secrets uploaded by init.

--config string
Path to kubeadm config file.

--control-plane
Create a new control plane instance on this node

--cri-socket string
Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.

--discovery-file string
For file-based discovery, a file or URL from which to load cluster information.

--discovery-token string
For token-based discovery, the token used to validate cluster information fetched from the API server.

---discovery-token-ca-cert-hash stringSlice
+--discovery-token-ca-cert-hash strings
For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>").

--discovery-token-unsafe-skip-ca-verification
For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

--experimental-patches string
Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help
help for join

---ignore-preflight-errors stringSlice
+--ignore-preflight-errors strings
A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

--node-name string
Specify the node name.

---skip-phases stringSlice
+--skip-phases strings
List of phases to be skipped

--tls-bootstrap-token string
Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.

--token string
Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.

@@ -211,7 +224,7 @@ kubeadm join [api-server-endpoint] [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
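
Most of the join flags above only matter for control-plane joins; a plain worker join typically needs just the discovery trio. A sketch in which the endpoint, token, and hash are all placeholders:

```shell
# Illustrative worker join using token-based discovery with CA pinning.
# Replace the endpoint, token, and sha256 hash with values from your
# cluster's "kubeadm init" output.
kubeadm join 192.168.0.10:6443 \
  --token abcdef.0123456789abcdef \
  --discovery-token-ca-cert-hash sha256:<hash>
```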

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md
index 873f64aa163f6..b780de18ccdce 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md
@@ -1,3 +1,16 @@
+Use this command to invoke single phase of the join workflow

### Synopsis

@@ -17,7 +30,7 @@ Use this command to invoke single phase of the join workflow

-h, --help
help for phase

@@ -38,7 +51,7 @@ Use this command to invoke single phase of the join workflow

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md
index 20170c783c7ae..07768a16c6efb 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md
@@ -1,3 +1,16 @@
+Join a machine as a control plane instance

### Synopsis

@@ -28,7 +41,7 @@ kubeadm join phase control-plane-join [flags]

-h, --help
help for control-plane-join

@@ -49,7 +62,7 @@ kubeadm join phase control-plane-join [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md
index 9515d0dfe7bbc..ed1753457a57c 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md
@@ -1,3 +1,16 @@
+Join a machine as a control plane instance

### Synopsis

@@ -21,35 +34,35 @@ kubeadm join phase control-plane-join all [flags]

--apiserver-advertise-address string
If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--config string
Path to kubeadm config file.

--control-plane
Create a new control plane instance on this node

-h, --help
help for all

--node-name string
Specify the node name.

@@ -70,7 +83,7 @@ kubeadm join phase control-plane-join all [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md
index 4618107dd2457..9990ce3dc115c 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md
@@ -1,3 +1,16 @@
+Add a new local etcd member

### Synopsis

@@ -21,42 +34,42 @@ kubeadm join phase control-plane-join etcd [flags]

--apiserver-advertise-address string
If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--config string
Path to kubeadm config file.

--control-plane
Create a new control plane instance on this node

--experimental-patches string
Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help
help for etcd

--node-name string
Specify the node name.

@@ -77,7 +90,7 @@ kubeadm join phase control-plane-join etcd [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md
index 37bd9675b8802..9e2d117ed9e27 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md
@@ -1,3 +1,16 @@
+Mark a node as a control-plane

### Synopsis

@@ -21,28 +34,28 @@ kubeadm join phase control-plane-join mark-control-plane [flags]

--config string
Path to kubeadm config file.

--control-plane
Create a new control plane instance on this node

-h, --help
help for mark-control-plane

--node-name string
Specify the node name.

@@ -63,7 +76,7 @@ kubeadm join phase control-plane-join mark-control-plane [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md
index 258210f3032a7..10127f967f8da 100644
--- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md
+++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md
@@ -1,3 +1,16 @@
+Register the new control-plane node into the ClusterStatus maintained in the kubeadm-config ConfigMap

### Synopsis

@@ -21,35 +34,35 @@ kubeadm join phase control-plane-join update-status [flags]

--apiserver-advertise-address string
If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--config string
Path to kubeadm config file.

--control-plane
Create a new control plane instance on this node

-h, --help
help for update-status

--node-name string
Specify the node name.

@@ -70,7 +83,7 @@ kubeadm join phase control-plane-join update-status [flags]

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md index 81a88bdaa5a5a..6952dbca80ca4 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md @@ -1,3 +1,16 @@ + + + +Prepare the machine for serving a control plane ### Synopsis @@ -28,7 +41,7 @@ kubeadm join phase control-plane-prepare [flags] -h, --help -help for control-plane-prepare +

help for control-plane-prepare

@@ -49,7 +62,7 @@ kubeadm join phase control-plane-prepare [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md index 932ef5f27b911..cfc54c9bb4e98 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md @@ -1,3 +1,16 @@ + + + +Prepare the machine for serving a control plane ### Synopsis @@ -21,98 +34,98 @@ kubeadm join phase control-plane-prepare all [api-server-endpoint] [flags] --apiserver-advertise-address string -If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set, the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -If the node should host a new control plane instance, the port for the API Server to bind to. +

If the node should host a new control plane instance, the port for the API Server to bind to.

--certificate-key string -Use this key to decrypt the certificate secrets uploaded by init. +

Use this key to decrypt the certificate secrets uploaded by init.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

--discovery-file string -For file-based discovery, a file or URL from which to load cluster information. +

For file-based discovery, a file or URL from which to load cluster information.

--discovery-token string -For token-based discovery, the token used to validate cluster information fetched from the API server. +

For token-based discovery, the token used to validate cluster information fetched from the API server.

---discovery-token-ca-cert-hash stringSlice +--discovery-token-ca-cert-hash strings -For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>"). +

For token-based discovery, validate that the root CA public key matches this hash (format: "&lt;type&gt;:&lt;value&gt;").
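One way to derive such a sha256 hash from the cluster's CA certificate, assuming the default kubeadm PKI path:

```
# Compute the sha256 hash of the CA public key; the path is the kubeadm default
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex | sed 's/^.* //'
# Pass the result as: --discovery-token-ca-cert-hash sha256:<hex-digest>
```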

--discovery-token-unsafe-skip-ca-verification -For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. +

For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

--experimental-patches string -Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically. +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help -help for all +

help for all

--node-name string -Specify the node name. +

Specify the node name.

--tls-bootstrap-token string -Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. +

Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.

--token string -Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +

Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.

@@ -133,7 +146,7 @@ kubeadm join phase control-plane-prepare all [api-server-endpoint] [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md index c8d59d58eb032..d26c5e1adb6e8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md @@ -1,3 +1,16 @@ + + + +Generate the certificates for the new control plane components ### Synopsis @@ -21,77 +34,77 @@ kubeadm join phase control-plane-prepare certs [api-server-endpoint] [flags] --apiserver-advertise-address string -If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set, the default network interface will be used.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

--discovery-file string -For file-based discovery, a file or URL from which to load cluster information. +

For file-based discovery, a file or URL from which to load cluster information.

--discovery-token string -For token-based discovery, the token used to validate cluster information fetched from the API server. +

For token-based discovery, the token used to validate cluster information fetched from the API server.

---discovery-token-ca-cert-hash stringSlice +--discovery-token-ca-cert-hash strings -For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>"). +

For token-based discovery, validate that the root CA public key matches this hash (format: "&lt;type&gt;:&lt;value&gt;").

--discovery-token-unsafe-skip-ca-verification -For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. +

For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

-h, --help -help for certs +

help for certs

--node-name string -Specify the node name. +

Specify the node name.

--tls-bootstrap-token string -Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. +

Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.

--token string -Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +

Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.

@@ -112,7 +125,7 @@ kubeadm join phase control-plane-prepare certs [api-server-endpoint] [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md index 3e9a120c000ad..820f499c41968 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md @@ -1,3 +1,16 @@ + + + +Generate the manifests for the new control plane components ### Synopsis @@ -21,42 +34,42 @@ kubeadm join phase control-plane-prepare control-plane [flags] --apiserver-advertise-address string -If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set, the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -If the node should host a new control plane instance, the port for the API Server to bind to. +

If the node should host a new control plane instance, the port for the API Server to bind to.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

--experimental-patches string -Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically. +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help -help for control-plane +

help for control-plane

@@ -77,7 +90,7 @@ kubeadm join phase control-plane-prepare control-plane [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md index 26e65cce87db4..e45e23cf7d674 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md @@ -1,3 +1,16 @@ + + + +[EXPERIMENTAL] Download certificates shared among control-plane nodes from the kubeadm-certs Secret ### Synopsis @@ -21,70 +34,70 @@ kubeadm join phase control-plane-prepare download-certs [api-server-endpoint] [f --certificate-key string -Use this key to decrypt the certificate secrets uploaded by init. +

Use this key to decrypt the certificate secrets uploaded by init.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

--discovery-file string -For file-based discovery, a file or URL from which to load cluster information. +

For file-based discovery, a file or URL from which to load cluster information.

--discovery-token string -For token-based discovery, the token used to validate cluster information fetched from the API server. +

For token-based discovery, the token used to validate cluster information fetched from the API server.

---discovery-token-ca-cert-hash stringSlice +--discovery-token-ca-cert-hash strings -For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>"). +

For token-based discovery, validate that the root CA public key matches this hash (format: "&lt;type&gt;:&lt;value&gt;").

--discovery-token-unsafe-skip-ca-verification -For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. +

For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

-h, --help -help for download-certs +

help for download-certs

--tls-bootstrap-token string -Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. +

Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.

--token string -Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +

Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.

@@ -105,7 +118,7 @@ kubeadm join phase control-plane-prepare download-certs [api-server-endpoint] [f --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md index 722ec2263d9e5..995c6290c4de0 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md @@ -1,3 +1,16 @@ + + + +Generate the kubeconfig for the new control plane components ### Synopsis @@ -21,70 +34,70 @@ kubeadm join phase control-plane-prepare kubeconfig [api-server-endpoint] [flags --certificate-key string -Use this key to decrypt the certificate secrets uploaded by init. +

Use this key to decrypt the certificate secrets uploaded by init.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

--discovery-file string -For file-based discovery, a file or URL from which to load cluster information. +

For file-based discovery, a file or URL from which to load cluster information.

--discovery-token string -For token-based discovery, the token used to validate cluster information fetched from the API server. +

For token-based discovery, the token used to validate cluster information fetched from the API server.

---discovery-token-ca-cert-hash stringSlice +--discovery-token-ca-cert-hash strings -For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>"). +

For token-based discovery, validate that the root CA public key matches this hash (format: "&lt;type&gt;:&lt;value&gt;").

--discovery-token-unsafe-skip-ca-verification -For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. +

For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

-h, --help -help for kubeconfig +

help for kubeconfig

--tls-bootstrap-token string -Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. +

Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.

--token string -Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +

Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.

@@ -105,7 +118,7 @@ kubeadm join phase control-plane-prepare kubeconfig [api-server-endpoint] [flags --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md index 719700b9a04ba..9c1cef31c0f1b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md @@ -1,3 +1,16 @@ + + + +Write kubelet settings, certificates and (re)start the kubelet ### Synopsis @@ -21,70 +34,70 @@ kubeadm join phase kubelet-start [api-server-endpoint] [flags] --config string -Path to kubeadm config file. +

Path to kubeadm config file.

--cri-socket string -Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. +

Path to the CRI socket to connect to. If empty, kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have a non-standard CRI socket.

--discovery-file string -For file-based discovery, a file or URL from which to load cluster information. +

For file-based discovery, a file or URL from which to load cluster information.

--discovery-token string -For token-based discovery, the token used to validate cluster information fetched from the API server. +

For token-based discovery, the token used to validate cluster information fetched from the API server.

---discovery-token-ca-cert-hash stringSlice +--discovery-token-ca-cert-hash strings -For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>"). +

For token-based discovery, validate that the root CA public key matches this hash (format: "&lt;type&gt;:&lt;value&gt;").

--discovery-token-unsafe-skip-ca-verification -For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. +

For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

-h, --help -help for kubelet-start +

help for kubelet-start

--node-name string -Specify the node name. +

Specify the node name.

--tls-bootstrap-token string -Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. +

Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.

--token string -Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +

Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.
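A minimal sketch of this phase; the endpoint, token, and hash values below are placeholders:

```
# All values are placeholders; --token covers both discovery and TLS bootstrap here
kubeadm join phase kubelet-start 192.168.0.102:6443 \
  --token abcdef.0123456789abcdef \
  --discovery-token-ca-cert-hash sha256:<hash-of-the-cluster-ca-key>
```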

@@ -105,7 +118,7 @@ kubeadm join phase kubelet-start [api-server-endpoint] [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md index ca975f9d9204b..5d8e10522bf28 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md @@ -1,3 +1,16 @@ + + + +Run join pre-flight checks ### Synopsis @@ -12,7 +25,7 @@ kubeadm join phase preflight [api-server-endpoint] [flags] ``` # Run join pre-flight checks using a config file. - kubeadm join phase preflight --config kubeadm-config.yml + kubeadm join phase preflight --config kubeadm-config.yaml ``` ### Options @@ -28,105 +41,105 @@ kubeadm join phase preflight [api-server-endpoint] [flags] --apiserver-advertise-address string -If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set, the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -If the node should host a new control plane instance, the port for the API Server to bind to. +

If the node should host a new control plane instance, the port for the API Server to bind to.

--certificate-key string -Use this key to decrypt the certificate secrets uploaded by init. +

Use this key to decrypt the certificate secrets uploaded by init.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

--cri-socket string -Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. +

Path to the CRI socket to connect to. If empty, kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have a non-standard CRI socket.

--discovery-file string -For file-based discovery, a file or URL from which to load cluster information. +

For file-based discovery, a file or URL from which to load cluster information.

--discovery-token string -For token-based discovery, the token used to validate cluster information fetched from the API server. +

For token-based discovery, the token used to validate cluster information fetched from the API server.

---discovery-token-ca-cert-hash stringSlice +--discovery-token-ca-cert-hash strings -For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>"). +

For token-based discovery, validate that the root CA public key matches this hash (format: "&lt;type&gt;:&lt;value&gt;").

--discovery-token-unsafe-skip-ca-verification -For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. +

For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

-h, --help -help for preflight +

help for preflight

---ignore-preflight-errors stringSlice +--ignore-preflight-errors strings -A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. +

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.
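For example, a sketch that downgrades the two checks named above to warnings (the endpoint is a placeholder):

```
# Placeholder endpoint; the listed checks are reported as warnings instead of errors
kubeadm join phase preflight 192.168.0.102:6443 --ignore-preflight-errors=IsPrivilegedUser,Swap
```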

--node-name string -Specify the node name. +

Specify the node name.

--tls-bootstrap-token string -Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. +

Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.

--token string -Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +

Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.

@@ -147,7 +160,7 @@ kubeadm join phase preflight [api-server-endpoint] [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig.md new file mode 100644 index 0000000000000..b678061bb0ea6 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig.md @@ -0,0 +1,63 @@ + + + +Kubeconfig file utilities + +### Synopsis + + +Kubeconfig file utilities. + +Alpha Disclaimer: this command is currently alpha. + +### Options + + ++++ + + + + + + + + + + +
-h, --help

help for kubeconfig

+ + + +### Options inherited from parent commands + + ++++ + + + + + + + + + + +
--rootfs string

[EXPERIMENTAL] The path to the 'real' host root filesystem.

+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig_user.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig_user.md new file mode 100644 index 0000000000000..8293ee2f27ec2 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig_user.md @@ -0,0 +1,102 @@ + + + +Output a kubeconfig file for an additional user + +### Synopsis + + +Output a kubeconfig file for an additional user. + +Alpha Disclaimer: this command is currently alpha. + +``` +kubeadm kubeconfig user [flags] +``` + +### Examples + +``` + # Output a kubeconfig file for an additional user named foo using a kubeadm config file bar + kubeadm alpha kubeconfig user --client-name=foo --config=bar +``` + +### Options + + ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
--client-name string

The name of the user. It will be used as the CN if client certificates are created.

--config string

Path to a kubeadm configuration file.

-h, --help

help for user

--org strings

The organizations of the client certificate. It will be used as the O if client certificates are created.

--token string

The token that should be used as the authentication mechanism for this kubeconfig, instead of client certificates

+ + + +### Options inherited from parent commands + + ++++ + + + + + + + + + + +
--rootfs string

[EXPERIMENTAL] The path to the 'real' host root filesystem.

+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md index 4cfa48be372d6..a745cb8c9e31c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md @@ -1,3 +1,16 @@ + + + +Performs a best effort revert of changes made to this host by 'kubeadm init' or 'kubeadm join' ### Synopsis @@ -30,49 +43,49 @@ kubeadm reset [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path to the directory where the certificates are stored. If specified, clean this directory. +

The path to the directory where the certificates are stored. If specified, clean this directory.

--cri-socket string -Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. +

Path to the CRI socket to connect to. If empty, kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have a non-standard CRI socket.

-f, --force -Reset the node without prompting for confirmation. +

Reset the node without prompting for confirmation.

-h, --help -help for reset +

help for reset

---ignore-preflight-errors stringSlice +--ignore-preflight-errors strings -A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. +

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

---skip-phases stringSlice +--skip-phases strings -List of phases to be skipped +

List of phases to be skipped
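As a sketch combining the flags above (cleanup-node is a reset phase per the 'kubeadm reset phase' pages):

```
# Non-interactive reset that skips the cleanup-node phase
kubeadm reset --force --skip-phases=cleanup-node
```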

@@ -93,7 +106,7 @@ kubeadm reset [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase.md index 498621b95d8f6..e526dafa1fb35 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase.md @@ -1,3 +1,16 @@ + + + +Use this command to invoke single phase of the reset workflow ### Synopsis @@ -17,7 +30,7 @@ Use this command to invoke single phase of the reset workflow -h, --help -help for phase +

help for phase

@@ -38,7 +51,7 @@ Use this command to invoke single phase of the reset workflow --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_cleanup-node.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_cleanup-node.md index 84376e67b2d1c..ceabd2045e96a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_cleanup-node.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_cleanup-node.md @@ -1,3 +1,16 @@ + + + +Run cleanup node. ### Synopsis @@ -21,21 +34,21 @@ kubeadm reset phase cleanup-node [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path to the directory where the certificates are stored. If specified, clean this directory. +

The path to the directory where the certificates are stored. If specified, clean this directory.

--cri-socket string -Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. +

Path to the CRI socket to connect to. If empty, kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have a non-standard CRI socket.

-h, --help -help for cleanup-node +

help for cleanup-node

@@ -56,7 +69,7 @@ kubeadm reset phase cleanup-node [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_preflight.md index 8f3537bc7c347..b3d1502184650 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_preflight.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_preflight.md @@ -1,3 +1,16 @@ + + + +Run reset pre-flight checks ### Synopsis @@ -21,21 +34,21 @@ kubeadm reset phase preflight [flags] -f, --force -Reset the node without prompting for confirmation. +

Reset the node without prompting for confirmation.

-h, --help -help for preflight +

help for preflight

---ignore-preflight-errors stringSlice +--ignore-preflight-errors strings -A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. +

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

@@ -56,7 +69,7 @@ kubeadm reset phase preflight [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_remove-etcd-member.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_remove-etcd-member.md index c7350d27ca463..d2c1060ff4ac2 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_remove-etcd-member.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_remove-etcd-member.md @@ -1,3 +1,16 @@ + + + +Remove a local etcd member. ### Synopsis @@ -21,14 +34,14 @@ kubeadm reset phase remove-etcd-member [flags] -h, --help -help for remove-etcd-member +

help for remove-etcd-member

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -49,7 +62,7 @@ kubeadm reset phase remove-etcd-member [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_update-cluster-status.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_update-cluster-status.md index de4700032bf84..b73f736958b1a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_update-cluster-status.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_update-cluster-status.md @@ -1,3 +1,16 @@ + + + +Remove this node from the ClusterStatus object. ### Synopsis @@ -21,7 +34,7 @@ kubeadm reset phase update-cluster-status [flags] -h, --help -help for update-cluster-status +

help for update-cluster-status

@@ -42,7 +55,7 @@ kubeadm reset phase update-cluster-status [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md index 2662497699d8c..5384fc4d6cce2 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md @@ -1,3 +1,16 @@ + + + +Manage bootstrap tokens ### Synopsis @@ -38,21 +51,21 @@ kubeadm token [flags] --dry-run -Whether to enable dry-run mode or not +

Whether to enable dry-run mode or not

-h, --help -help for token +

help for token

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -73,7 +86,7 @@ kubeadm token [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md index b2212bba44dc5..a2a217033c88b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md @@ -1,3 +1,16 @@ + + + +Create bootstrap tokens on the server ### Synopsis @@ -28,56 +41,56 @@ kubeadm token create [token] --certificate-key string -When used together with '--print-join-command', print the full 'kubeadm join' flag needed to join the cluster as a control-plane. To create a new certificate key you must use 'kubeadm init phase upload-certs --upload-certs'. +

When used together with '--print-join-command', print the full 'kubeadm join' command needed to join the cluster as a control plane. To create a new certificate key you must use 'kubeadm init phase upload-certs --upload-certs'.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--description string -A human friendly description of how this token is used. +

A human-friendly description of how this token is used.

---groups stringSlice     Default: [system:bootstrappers:kubeadm:default-node-token] +--groups strings     Default: "system:bootstrappers:kubeadm:default-node-token" -Extra groups that this token will authenticate as when used for authentication. Must match "\\Asystem:bootstrappers:[a-z0-9:-]{0,255}[a-z0-9]\\z" +

Extra groups that this token will authenticate as when used for authentication. Must match "\Asystem:bootstrappers:[a-z0-9:-]{0,255}[a-z0-9]\z"

-h, --help -help for create +

help for create

--print-join-command -Instead of printing only the token, print the full 'kubeadm join' flag needed to join the cluster using the token. +

Instead of printing only the token, print the full 'kubeadm join' command needed to join the cluster using the token.

--ttl duration     Default: 24h0m0s -The duration before the token is automatically deleted (e.g. 1s, 2m, 3h). If set to '0', the token will never expire +

The duration before the token is automatically deleted (e.g. 1s, 2m, 3h). If set to '0', the token will never expire.

---usages stringSlice     Default: [signing,authentication] +--usages strings     Default: "signing,authentication" -Describes the ways in which this token can be used. You can pass --usages multiple times or provide a comma separated list of options. Valid options: [signing,authentication] +

Describes the ways in which this token can be used. You can pass --usages multiple times or provide a comma-separated list of options. Valid options: [signing,authentication]
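A short sketch combining the TTL and join-command flags above:

```
# Create a token valid for 2 hours and print the full 'kubeadm join' command for it
kubeadm token create --ttl 2h --print-join-command
```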

@@ -98,21 +111,21 @@ kubeadm token create [token] --dry-run -Whether to enable dry-run mode or not +

Whether to enable dry-run mode or not

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md index d1ddd8bd2c542..2040bd3f94ac1 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md @@ -1,3 +1,16 @@ + + + +Delete bootstrap tokens on the server ### Synopsis @@ -26,7 +39,7 @@ kubeadm token delete [token-value] ... -h, --help -help for delete +

help for delete

@@ -47,21 +60,21 @@ kubeadm token delete [token-value] ... --dry-run -Whether to enable dry-run mode or not +

Whether to enable dry-run mode or not

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md index 72ca0220ee46d..60de389d6c07f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md @@ -1,3 +1,16 @@ + + + +Generate and print a bootstrap token, but do not create it on the server ### Synopsis @@ -31,7 +44,7 @@ kubeadm token generate [flags] -h, --help -help for generate +

help for generate

@@ -52,21 +65,21 @@ kubeadm token generate [flags] --dry-run -Whether to enable dry-run mode or not +

Whether to enable dry-run mode or not

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md index 20ba81f63fe07..089424492e90d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md @@ -1,3 +1,16 @@ + + + +List bootstrap tokens on the server ### Synopsis @@ -23,21 +36,28 @@ kubeadm token list [flags] --allow-missing-template-keys     Default: true -If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. +

If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.

-o, --experimental-output string     Default: "text" -Output format. One of: text|json|yaml|go-template|go-template-file|template|templatefile|jsonpath|jsonpath-as-json|jsonpath-file. +

Output format. One of: text|json|yaml|go-template|go-template-file|template|templatefile|jsonpath|jsonpath-as-json|jsonpath-file.
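For instance, a sketch of machine-readable output (the flag is marked experimental in this release):

```
# List bootstrap tokens as JSON instead of the default text table
kubeadm token list -o json
```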

-h, --help -help for list +

help for list

+ + + +--show-managed-fields + + +

If true, keep the managedFields when printing objects in JSON or YAML format.

@@ -58,21 +78,21 @@ kubeadm token list [flags] --dry-run -Whether to enable dry-run mode or not +

Whether to enable dry-run mode or not

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md index b3fe44532beba..0c2a46519454b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md @@ -1,3 +1,16 @@ + + + +Upgrade your cluster smoothly to a newer version with this command ### Synopsis @@ -21,7 +34,7 @@ kubeadm upgrade [flags] -h, --help -help for upgrade +

help for upgrade

@@ -42,7 +55,7 @@ kubeadm upgrade [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md index ed6896b0a779e..d34e01da47e54 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md @@ -1,3 +1,16 @@ + + + +Upgrade your Kubernetes cluster to the specified version ### Synopsis @@ -21,98 +34,98 @@ kubeadm upgrade apply [version] --allow-experimental-upgrades -Show unstable versions of Kubernetes as an upgrade alternative and allow upgrading to an alpha/beta/release candidate versions of Kubernetes. +

Show unstable versions of Kubernetes as an upgrade alternative and allow upgrading to alpha/beta/release candidate versions of Kubernetes.

--allow-release-candidate-upgrades -Show release candidate versions of Kubernetes as an upgrade alternative and allow upgrading to a release candidate versions of Kubernetes. +

Show release candidate versions of Kubernetes as an upgrade alternative and allow upgrading to release candidate versions of Kubernetes.

--certificate-renewal     Default: true -Perform the renewal of certificates used by component changed during upgrades. +

Perform the renewal of certificates used by components changed during upgrades.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--dry-run -Do not change any state, just output what actions would be performed. +

Do not change any state, just output what actions would be performed.

--etcd-upgrade     Default: true -Perform the upgrade of etcd. +

Perform the upgrade of etcd.

--experimental-patches string -Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically. +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

--feature-gates string -A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) +

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)
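A hedged sketch of enabling one of these gates during an upgrade (the target version below is a placeholder):

```
# Placeholder version; enables the ECDSA public keys gate for the upgraded control plane
kubeadm upgrade apply v1.21.0 --feature-gates=PublicKeysECDSA=true
```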

-f, --force -Force upgrading although some requirements might not be met. This also implies non-interactive mode. +

Force upgrading although some requirements might not be met. This also implies non-interactive mode.

-h, --help -help for apply +

help for apply

---ignore-preflight-errors stringSlice +--ignore-preflight-errors strings -A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. +

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--print-config -Specifies whether the configuration file that will be used in the upgrade should be printed or not. +

Whether to print the configuration file that will be used in the upgrade.

-y, --yes -Perform the upgrade and do not prompt for confirmation (non-interactive mode). +

Perform the upgrade and do not prompt for confirmation (non-interactive mode).

@@ -133,7 +146,7 @@ kubeadm upgrade apply [version] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md index c15b1180752d5..eb5e3c4cace98 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md @@ -1,3 +1,16 @@ + + + +Show what differences would be applied to existing static pod manifests. See also: kubeadm upgrade apply --dry-run ### Synopsis @@ -21,49 +34,49 @@ kubeadm upgrade diff [version] [flags] --api-server-manifest string     Default: "/etc/kubernetes/manifests/kube-apiserver.yaml" -path to API server manifest +

Path to the API server manifest.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-c, --context-lines int     Default: 3 -How many lines of context in the diff +

How many lines of context in the diff
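For example (the target version is a placeholder):

```
# Show 5 lines of context around each static Pod manifest change
kubeadm upgrade diff v1.21.0 --context-lines 5
```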

--controller-manager-manifest string     Default: "/etc/kubernetes/manifests/kube-controller-manager.yaml" -path to controller manifest +

Path to the controller manager manifest.

-h, --help -help for diff +

help for diff

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--scheduler-manifest string     Default: "/etc/kubernetes/manifests/kube-scheduler.yaml" -path to scheduler manifest +

Path to the scheduler manifest.

@@ -84,7 +97,7 @@ kubeadm upgrade diff [version] [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md index b9198f77d79da..5bd05a9822e99 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md @@ -1,3 +1,16 @@ + + + +Upgrade commands for a node in the cluster ### Synopsis @@ -29,56 +42,56 @@ kubeadm upgrade node [flags] --certificate-renewal     Default: true -Perform the renewal of certificates used by component changed during upgrades. +

Perform the renewal of certificates used by components changed during upgrades.

--dry-run -Do not change any state, just output the actions that would be performed. +

Do not change any state, just output the actions that would be performed.

--etcd-upgrade     Default: true -Perform the upgrade of etcd. +

Perform the upgrade of etcd.

--experimental-patches string -Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically. +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help -help for node +

help for node

---ignore-preflight-errors stringSlice +--ignore-preflight-errors strings -A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. +

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

---skip-phases stringSlice +--skip-phases strings -List of phases to be skipped +

List of phases to be skipped
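A minimal sketch that skips one phase (preflight is a node-workflow phase per the 'kubeadm upgrade node phase' pages):

```
# Upgrade this node but skip the upgrade-node preflight checks
kubeadm upgrade node --skip-phases=preflight
```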

@@ -99,7 +112,7 @@ kubeadm upgrade node [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase.md index 39a2e05ab0aef..6b86c950548ec 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase.md @@ -1,3 +1,16 @@ + + + +Use this command to invoke single phase of the node workflow ### Synopsis @@ -17,7 +30,7 @@ Use this command to invoke single phase of the node workflow -h, --help -help for phase +

help for phase

@@ -38,7 +51,7 @@ Use this command to invoke single phase of the node workflow --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md index 1ca65f50a70d2..835eba68426fc 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md @@ -1,3 +1,16 @@ + + + +Upgrade the control plane instance deployed on this node, if any ### Synopsis @@ -21,42 +34,42 @@ kubeadm upgrade node phase control-plane [flags] --certificate-renewal     Default: true -Perform the renewal of certificates used by component changed during upgrades. +

Perform the renewal of certificates used by components changed during upgrades.

--dry-run -Do not change any state, just output the actions that would be performed. +

Do not change any state, just output the actions that would be performed.

--etcd-upgrade     Default: true -Perform the upgrade of etcd. +

Perform the upgrade of etcd.

--experimental-patches string -Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically. +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help -help for control-plane +

help for control-plane

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -77,7 +90,7 @@ kubeadm upgrade node phase control-plane [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md index a4f5ceeafb7ca..d2b03974c2042 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md @@ -1,3 +1,16 @@ + + + +Upgrade the kubelet configuration for this node ### Synopsis @@ -21,21 +34,21 @@ kubeadm upgrade node phase kubelet-config [flags] --dry-run -Do not change any state, just output the actions that would be performed. +

Do not change any state, just output the actions that would be performed.

-h, --help -help for kubelet-config +

help for kubelet-config

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -56,7 +69,7 @@ kubeadm upgrade node phase kubelet-config [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_preflight.md index 943e05cb6e498..d82a193898a21 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_preflight.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_preflight.md @@ -1,3 +1,16 @@ + + + +Run upgrade node pre-flight checks ### Synopsis @@ -21,14 +34,14 @@ kubeadm upgrade node phase preflight [flags] -h, --help -help for preflight +

help for preflight

---ignore-preflight-errors stringSlice +--ignore-preflight-errors strings -A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. +

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.
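Using the example values from the description above, the invocation looks like this (a sketch; output omitted):

```bash
# Downgrade the IsPrivilegedUser and Swap check failures to warnings.
kubeadm upgrade node phase preflight --ignore-preflight-errors=IsPrivilegedUser,Swap
```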

@@ -49,7 +62,7 @@ kubeadm upgrade node phase preflight [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md index eaa58b588f9b8..7d16866b9af27 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md @@ -1,3 +1,16 @@ + + + +Check which versions are available to upgrade to and validate whether your current cluster is upgradeable. To skip the internet check, pass in the optional [version] parameter ### Synopsis @@ -21,56 +34,56 @@ kubeadm upgrade plan [version] [flags] --allow-experimental-upgrades -Show unstable versions of Kubernetes as an upgrade alternative and allow upgrading to an alpha/beta/release candidate versions of Kubernetes. +

Show unstable versions of Kubernetes as an upgrade alternative and allow upgrading to alpha/beta/release candidate versions of Kubernetes.

--allow-release-candidate-upgrades -Show release candidate versions of Kubernetes as an upgrade alternative and allow upgrading to a release candidate versions of Kubernetes. +

Show release candidate versions of Kubernetes as an upgrade alternative and allow upgrading to release candidate versions of Kubernetes.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--feature-gates string -A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) +

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)
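As a sketch of how these key=value pairs are passed (the gate chosen here is one of the options listed above; enabling it is illustrative, not a recommendation):

```bash
# Plan an upgrade with the PublicKeysECDSA feature gate enabled.
kubeadm upgrade plan --feature-gates=PublicKeysECDSA=true
```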

-h, --help -help for plan +

help for plan

---ignore-preflight-errors stringSlice +--ignore-preflight-errors strings -A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. +

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--print-config -Specifies whether the configuration file that will be used in the upgrade should be printed or not. +

Specifies whether the configuration file that will be used in the upgrade should be printed or not.

@@ -91,7 +104,7 @@ kubeadm upgrade plan [version] [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md index 658075c4eacbe..b86c7259774d3 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md @@ -1,3 +1,16 @@ + + + +Print the version of kubeadm ### Synopsis @@ -21,14 +34,14 @@ kubeadm version [flags] -h, --help -help for version +

help for version

-o, --output string -Output format; available options are 'yaml', 'json' and 'short' +

Output format; available options are 'yaml', 'json' and 'short'
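For example, the documented output options can be exercised as follows (output omitted):

```bash
# Print just the version string, then the full structured version info.
kubeadm version -o short
kubeadm version -o json
```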

@@ -49,7 +62,7 @@ kubeadm version [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md b/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md index 6da8963f17f99..cc7cef2543b4f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md +++ b/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md @@ -28,7 +28,7 @@ The cluster that `kubeadm init` and `kubeadm join` set up should be: - lock-down the kubelet API - locking down access to the API for system components like the kube-proxy and CoreDNS - locking down what a Bootstrap Token can access - - **Easy to use**: The user should not have to run anything more than a couple of commands: + - **User-friendly**: The user should not have to run anything more than a couple of commands: - `kubeadm init` - `export KUBECONFIG=/etc/kubernetes/admin.conf` - `kubectl apply -f ` @@ -251,7 +251,7 @@ Other API server flags that are set unconditionally are: - `--requestheader-client-ca-file` to`front-proxy-ca.crt` - `--proxy-client-cert-file` to `front-proxy-client.crt` - `--proxy-client-key-file` to `front-proxy-client.key` - - Other flags for securing the front proxy ([API Aggregation](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/aggregated-api-servers.md)) communications: + - Other flags for securing the front proxy ([API Aggregation](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)) communications: - `--requestheader-username-headers=X-Remote-User` - `--requestheader-group-headers=X-Remote-Group` - `--requestheader-extra-headers-prefix=X-Remote-Extra-` @@ -305,7 +305,7 @@ into `/var/lib/kubelet/config/init/kubelet` file. The init configuration is used for starting the kubelet on this specific node, providing an alternative for the kubelet drop-in file; such configuration will be replaced by the kubelet base configuration as described in following steps. -See [set Kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file) for additional info. +See [set kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file) for additional information. Please note that: @@ -315,6 +315,9 @@ Please note that: a configuration file `--config some-file.yaml`. The `KubeletConfiguration` object can be separated from other objects such as `InitConfiguration` using the `---` separator. For more details have a look at the `kubeadm config print-default` command. +For more details about the `KubeletConfiguration` struct, take a look at the +[`KubeletConfiguration` reference](/docs/reference/config-api/kubelet-config.v1beta1/). + ### Wait for the control plane to come up kubeadm waits (upto 4m0s) until `localhost:6443/healthz` (kube-apiserver liveness) returns `ok`. However in order to detect @@ -325,7 +328,8 @@ kubeadm relies on the kubelet to pull the control plane images and run them prop After the control plane is up, kubeadm completes the tasks described in following paragraphs. ### (optional) Write base kubelet configuration -{{< feature-state for_k8s_version="v1.9" state="alpha" >}} + +{{< feature-state for_k8s_version="v1.11" state="beta" >}} If kubeadm is invoked with `--feature-gates=DynamicKubeletConfig`: @@ -436,12 +440,14 @@ A ServiceAccount for `kube-proxy` is created in the `kube-system` namespace; the #### DNS -- In Kubernetes version 1.18 kube-dns usage with kubeadm is deprecated and will be removed in a future release - The CoreDNS service is named `kube-dns`. 
This is done to prevent any interruption -in service when the user is switching the cluster DNS from kube-dns to CoreDNS or vice-versa -the `--config` method described [here](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-addon) -- A ServiceAccount for CoreDNS/kube-dns is created in the `kube-system` namespace. -- The `kube-dns` ServiceAccount is bound to the privileges in the `system:kube-dns` ClusterRole + in service when the user is switching the cluster DNS from kube-dns to CoreDNS + the `--config` method described [here](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-addon). +- A ServiceAccount for CoreDNS is created in the `kube-system` namespace. +- The `coredns` ServiceAccount is bound to the privileges in the `system:coredns` ClusterRole + +In Kubernetes version 1.21, support for using `kube-dns` with kubeadm was removed. +You can use CoreDNS with kubeadm even when the related Service is named `kube-dns`. ## kubeadm join phases internal design @@ -499,10 +505,9 @@ when the connection with the cluster is established, kubeadm try to access the ` ## TLS Bootstrap -Once the cluster info are known, the file `bootstrap-kubelet.conf` is written, thus allowing kubelet to do TLS Bootstrapping -(conversely until v.1.7 TLS bootstrapping were managed by kubeadm). +Once the cluster info are known, the file `bootstrap-kubelet.conf` is written, thus allowing kubelet to do TLS Bootstrapping. -The TLS bootstrap mechanism uses the shared token to temporarily authenticate with the Kubernetes Master to submit a certificate +The TLS bootstrap mechanism uses the shared token to temporarily authenticate with the Kubernetes API server to submit a certificate signing request (CSR) for a locally created key pair. The request is then automatically approved and the operation completes saving `ca.crt` file and `kubelet.conf` file to be used @@ -512,17 +517,17 @@ Please note that: - The temporary authentication is validated against the token saved during the `kubeadm init` process (or with additional tokens created with `kubeadm token`) -- The temporary authentication resolve to a user member of `system:bootstrappers:kubeadm:default-node-token` group which was granted +- The temporary authentication resolve to a user member of `system:bootstrappers:kubeadm:default-node-token` group which was granted access to CSR api during the `kubeadm init` process - The automatic CSR approval is managed by the csrapprover controller, according with configuration done the `kubeadm init` process ### (optional) Write init kubelet configuration -{{< feature-state for_k8s_version="v1.9" state="alpha" >}} +{{< feature-state for_k8s_version="v1.11" state="beta" >}} If kubeadm is invoked with `--feature-gates=DynamicKubeletConfig`: -1. Read the kubelet base configuration from the `kubelet-base-config-v1.9` ConfigMap in the `kube-system` namespace using the +1. Read the kubelet base configuration from the `kubelet-base-config-v1.x` ConfigMap in the `kube-system` namespace using the Bootstrap Token credentials, and write it to disk as kubelet init configuration file `/var/lib/kubelet/config/init/kubelet` 2. As soon as kubelet starts with the Node's own credential (`/etc/kubernetes/kubelet.conf`), update current node configuration specifying that the source for the node/kubelet configuration is the above ConfigMap. 
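Following up on the TLS Bootstrap section above, a hedged way to observe the CSR flow on a live cluster (the CSR name is a placeholder and the commands assume a working admin kubeconfig):

```bash
# Sketch: list the CSRs submitted during TLS bootstrapping and confirm
# the csrapprover controller marked them Approved,Issued.
kubectl get csr
kubectl describe csr <csr-name>   # <csr-name> is a placeholder
```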
diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md index eaef0f5140219..dd946c9737e33 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md @@ -9,34 +9,7 @@ weight: 90 from the community. Please try it out and give us feedback! {{< /caution >}} -## kubeadm alpha kubeconfig user {#cmd-phase-kubeconfig} - -The `user` subcommand can be used for the creation of kubeconfig files for additional users. - -{{< tabs name="tab-kubeconfig" >}} -{{< tab name="kubeconfig" include="generated/kubeadm_alpha_kubeconfig.md" />}} -{{< tab name="user" include="generated/kubeadm_alpha_kubeconfig_user.md" />}} -{{< /tabs >}} - -## kubeadm alpha kubelet config {#cmd-phase-kubelet} - -Use the following command to enable the DynamicKubeletConfiguration feature. - -{{< tabs name="tab-kubelet" >}} -{{< tab name="kubelet" include="generated/kubeadm_alpha_kubelet.md" />}} -{{< tab name="enable-dynamic" include="generated/kubeadm_alpha_kubelet_config_enable-dynamic.md" />}} -{{< /tabs >}} - -## kubeadm alpha selfhosting pivot {#cmd-selfhosting} - -The subcommand `pivot` can be used to convert a static Pod-hosted control plane into a self-hosted one. - -[Documentation](/docs/setup/production-environment/tools/kubeadm/self-hosting/) - -{{< tabs name="selfhosting" >}} -{{< tab name="selfhosting" include="generated/kubeadm_alpha_selfhosting.md" />}} -{{< tab name="pivot" include="generated/kubeadm_alpha_selfhosting_pivot.md" />}} -{{< /tabs >}} +Currently there are no experimental commands under `kubeadm alpha`. ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md index 23dff658e9eee..0b373ee42341a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md @@ -20,14 +20,17 @@ For more information navigate to [Using kubeadm init with a configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file) or [Using kubeadm join with a configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-join/#config-file). +You can also configure several kubelet-configuration options with `kubeadm init`. These options will be the same on any node in your cluster. +See [Configuring each kubelet in your cluster using kubeadm](/docs/setup/production-environment/tools/kubeadm/kubelet-integration/) for details. + In Kubernetes v1.13.0 and later to list/pull kube-dns images instead of the CoreDNS image the `--config` method described [here](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-addon) has to be used. -## kubeadm config view {#cmd-config-view} +## kubeadm config print {#cmd-config-view} -{{< include "generated/kubeadm_config_view.md" >}} +{{< include "generated/kubeadm_config_print.md" >}} ## kubeadm config print init-defaults {#cmd-config-print-init-defaults} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md index 21ab7a863d73f..2b6939bac63e8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md @@ -143,25 +143,6 @@ install them selectively. 
{{< tab name="kube-proxy" include="generated/kubeadm_init_phase_addon_kube-proxy.md" />}} {{< /tabs >}} -To use kube-dns instead of CoreDNS you have to pass a configuration file: - -```bash -# for installing a DNS addon only -kubeadm init phase addon coredns --config=someconfig.yaml -``` - -The file has to contain a [`dns`](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#DNS) field in[`ClusterConfiguration`](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#ClusterConfiguration) -and also a type for the addon - `kube-dns` (default value is `CoreDNS`). - -```yaml -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -dns: - type: "kube-dns" -``` - -Please note that kube-dns usage with kubeadm is deprecated as of v1.18 and will be removed in a future release. - For more details on each field in the `v1beta2` configuration you can navigate to our [API reference pages.] (https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md index 3d4b977102e93..2260c2ca225ed 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md @@ -66,12 +66,10 @@ following steps: 1. Installs a DNS server (CoreDNS) and the kube-proxy addon components via the API server. In Kubernetes version 1.11 and later CoreDNS is the default DNS server. - To install kube-dns instead of CoreDNS, the DNS addon has to be configured in the kubeadm `ClusterConfiguration`. - For more information about the configuration see the section `Using kubeadm init with a configuration file` below. Please note that although the DNS server is deployed, it will not be scheduled until CNI is installed. {{< warning >}} - kube-dns usage with kubeadm is deprecated as of v1.18 and will be removed in a future release. + kube-dns usage with kubeadm is deprecated as of v1.18 and is removed in v1.21. {{< /warning >}} ### Using init phases with kubeadm {#init-phases} @@ -125,12 +123,12 @@ If your configuration is not using the latest version it is **recommended** that the [kubeadm config migrate](/docs/reference/setup-tools/kubeadm/kubeadm-config/) command. For more information on the fields and usage of the configuration you can navigate to our API reference -page and pick a version from [the list](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#pkg-subdirectories). +page and pick a version from [the list](https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#section-directories). ### Adding kube-proxy parameters {#kube-proxy} For information about kube-proxy parameters in the kubeadm configuration see: -- [kube-proxy](https://godoc.org/k8s.io/kubernetes/pkg/proxy/apis/config#KubeProxyConfiguration) +- [kube-proxy reference](/docs/reference/config-api/kube-proxy-config.v1alpha1/) For information about enabling IPVS mode with kubeadm see: - [IPVS](https://github.com/kubernetes/kubernetes/blob/master/pkg/proxy/ipvs/README.md) diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md index 0a39f709273f4..53ca4a789bdaa 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md @@ -108,7 +108,7 @@ if the `kubeadm init` command was called with `--upload-certs`. 
control-plane node even if other worker nodes or the network are compromised. - Convenient to execute manually since all of the information required fits - into a single `kubeadm join` command that is easy to copy and paste. + into a single `kubeadm join` command. **Disadvantages:** diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-kubeconfig.md new file mode 100644 index 0000000000000..f912285f7d485 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-kubeconfig.md @@ -0,0 +1,21 @@ +--- +title: kubeadm kubeconfig +content_type: concept +weight: 90 +--- + +`kubeadm kubeconfig` provides utilities for managing kubeconfig files. + +## kubeadm kubeconfig {#cmd-kubeconfig} + +{{< tabs name="tab-kubeconfig" >}} +{{< tab name="overview" include="generated/kubeadm_kubeconfig.md" />}} +{{< /tabs >}} + +## kubeadm kubeconfig user {#cmd-kubeconfig-user} + +This command can be used to output a kubeconfig file for an additional user. + +{{< tabs name="tab-kubeconfig-user" >}} +{{< tab name="user" include="generated/kubeadm_kubeconfig_user.md" />}} +{{< /tabs >}} \ No newline at end of file diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md index 5796e7aec736f..6a2c5f782f24f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md @@ -20,8 +20,6 @@ For older versions of kubeadm, please refer to older documentation sets of the K You can use `kubeadm upgrade diff` to see the changes that would be applied to static pod manifests. -To use kube-dns with upgrades in Kubernetes v1.13.0 and later please follow [this guide](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-addon). - In Kubernetes v1.15.0 and later, `kubeadm upgrade apply` and `kubeadm upgrade node` will also automatically renew the kubeadm managed certificates on this node, including those stored in kubeconfig files. To opt-out, it is possible to pass the flag `--certificate-renewal=false`. For more details about certificate diff --git a/content/en/docs/reference/tools.md b/content/en/docs/reference/tools/_index.md similarity index 70% rename from content/en/docs/reference/tools.md rename to content/en/docs/reference/tools/_index.md index ceeadbb27984c..7194ab83bd533 100644 --- a/content/en/docs/reference/tools.md +++ b/content/en/docs/reference/tools/_index.md @@ -1,8 +1,10 @@ --- +title: Other Tools reviewers: - janetkuo -title: Tools content_type: concept +weight: 80 +no_list: true --- @@ -10,18 +12,11 @@ Kubernetes contains several built-in tools to help you work with the Kubernetes -## Kubectl - -[`kubectl`](/docs/tasks/tools/install-kubectl/) is the command line tool for Kubernetes. It controls the Kubernetes cluster manager. - -## Kubeadm - -[`kubeadm`](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) is the command line tool for easily provisioning a secure Kubernetes cluster on top of physical or cloud servers or virtual machines (currently in alpha). ## Minikube -[`minikube`](https://minikube.sigs.k8s.io/docs/) is a tool that makes it -easy to run a single-node Kubernetes cluster locally on your workstation for +[`minikube`](https://minikube.sigs.k8s.io/docs/) is a tool that +runs a single-node Kubernetes cluster locally on your workstation for development and testing purposes. 
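A minimal sketch of that workflow, assuming `minikube` and `kubectl` are already installed:

```bash
# Start a local single-node cluster and confirm the node reports Ready.
minikube start
kubectl get nodes
```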
## Dashboard @@ -51,4 +46,3 @@ Use Kompose to: * Translate a Docker Compose file into Kubernetes objects * Go from local Docker development to managing your application via Kubernetes * Convert v1 or v2 Docker Compose `yaml` files or [Distributed Application Bundles](https://docs.docker.com/compose/bundles/) - diff --git a/content/en/docs/reference/using-api/_index.md b/content/en/docs/reference/using-api/_index.md index df9e00758ef75..2039e33e286ba 100644 --- a/content/en/docs/reference/using-api/_index.md +++ b/content/en/docs/reference/using-api/_index.md @@ -1,11 +1,12 @@ --- -title: Kubernetes API Overview +title: API Overview reviewers: - erictune - lavalamp - jbeda content_type: concept weight: 10 +no_list: true card: name: reference weight: 50 diff --git a/content/en/docs/reference/using-api/api-concepts.md b/content/en/docs/reference/using-api/api-concepts.md index e8b834c334a08..e517a13d52959 100644 --- a/content/en/docs/reference/using-api/api-concepts.md +++ b/content/en/docs/reference/using-api/api-concepts.md @@ -258,7 +258,7 @@ Accept: application/json;as=Table;g=meta.k8s.io;v=v1beta1, application/json ## Alternate representations of resources -By default Kubernetes returns objects serialized to JSON with content type `application/json`. This is the default serialization format for the API. However, clients may request the more efficient Protobuf representation of these objects for better performance at scale. The Kubernetes API implements standard HTTP content type negotiation: passing an `Accept` header with a `GET` call will request that the server return objects in the provided content type, while sending an object in Protobuf to the server for a `PUT` or `POST` call takes the `Content-Type` header. The server will return a `Content-Type` header if the requested format is supported, or the `406 Not acceptable` error if an invalid content type is provided. +By default, Kubernetes returns objects serialized to JSON with content type `application/json`. This is the default serialization format for the API. However, clients may request the more efficient Protobuf representation of these objects for better performance at scale. The Kubernetes API implements standard HTTP content type negotiation: passing an `Accept` header with a `GET` call will request that the server return objects in the provided content type, while sending an object in Protobuf to the server for a `PUT` or `POST` call takes the `Content-Type` header. The server will return a `Content-Type` header if the requested format is supported, or the `406 Not acceptable` error if an invalid content type is provided. See the API documentation for a list of supported content types for each API. @@ -560,4 +560,4 @@ If you request a a resourceVersion outside the applicable limit then, depending ### Unavailable resource versions -Servers are not required to serve unrecognized resource versions. List and Get requests for unrecognized resource versions may wait briefly for the resource version to become available, should timeout with a `504 (Gateway Timeout)` if the provided resource versions does not become available in a resonable amount of time, and may respond with a `Retry-After` response header indicating how many seconds a client should wait before retrying the request. Currently the kube-apiserver also identifies these responses with a "Too large resource version" message. 
Watch requests for a unrecognized resource version may wait indefinitely (until the request timeout) for the resource version to become available. +Servers are not required to serve unrecognized resource versions. List and Get requests for unrecognized resource versions may wait briefly for the resource version to become available, should timeout with a `504 (Gateway Timeout)` if the provided resource versions does not become available in a reasonable amount of time, and may respond with a `Retry-After` response header indicating how many seconds a client should wait before retrying the request. Currently, the kube-apiserver also identifies these responses with a "Too large resource version" message. Watch requests for an unrecognized resource version may wait indefinitely (until the request timeout) for the resource version to become available. diff --git a/content/en/docs/reference/using-api/client-libraries.md b/content/en/docs/reference/using-api/client-libraries.md index 96589c6a55ff8..9ec9f84c5dcd2 100644 --- a/content/en/docs/reference/using-api/client-libraries.md +++ b/content/en/docs/reference/using-api/client-libraries.md @@ -30,13 +30,12 @@ The following client libraries are officially maintained by | Language | Client Library | Sample Programs | |----------|----------------|-----------------| +| dotnet | [github.com/kubernetes-client/csharp](https://github.com/kubernetes-client/csharp) | [browse](https://github.com/kubernetes-client/csharp/tree/master/examples/simple) | Go | [github.com/kubernetes/client-go/](https://github.com/kubernetes/client-go/) | [browse](https://github.com/kubernetes/client-go/tree/master/examples) -| Python | [github.com/kubernetes-client/python/](https://github.com/kubernetes-client/python/) | [browse](https://github.com/kubernetes-client/python/tree/master/examples) +| Haskell | [github.com/kubernetes-client/haskell](https://github.com/kubernetes-client/haskell) | [browse](https://github.com/kubernetes-client/haskell/tree/master/kubernetes-client/example) | Java | [github.com/kubernetes-client/java](https://github.com/kubernetes-client/java/) | [browse](https://github.com/kubernetes-client/java#installation) -| dotnet | [github.com/kubernetes-client/csharp](https://github.com/kubernetes-client/csharp) | [browse](https://github.com/kubernetes-client/csharp/tree/master/examples/simple) | JavaScript | [github.com/kubernetes-client/javascript](https://github.com/kubernetes-client/javascript) | [browse](https://github.com/kubernetes-client/javascript/tree/master/examples) -| Haskell | [github.com/kubernetes-client/haskell](https://github.com/kubernetes-client/haskell) | [browse](https://github.com/kubernetes-client/haskell/tree/master/kubernetes-client/example) - +| Python | [github.com/kubernetes-client/python/](https://github.com/kubernetes-client/python/) | [browse](https://github.com/kubernetes-client/python/tree/master/examples) ## Community-maintained client libraries @@ -48,6 +47,10 @@ their authors, not the Kubernetes team. 
| Language | Client Library | | -------------------- | ---------------------------------------- | | Clojure | [github.com/yanatan16/clj-kubernetes-api](https://github.com/yanatan16/clj-kubernetes-api) | +| DotNet | [github.com/tonnyeremin/kubernetes_gen](https://github.com/tonnyeremin/kubernetes_gen) | +| DotNet (RestSharp) | [github.com/masroorhasan/Kubernetes.DotNet](https://github.com/masroorhasan/Kubernetes.DotNet) | +| Elixir | [github.com/obmarg/kazan](https://github.com/obmarg/kazan/) | +| Elixir | [github.com/coryodaniel/k8s](https://github.com/coryodaniel/k8s) | | Go | [github.com/ericchiang/k8s](https://github.com/ericchiang/k8s) | | Java (OSGi) | [bitbucket.org/amdatulabs/amdatu-kubernetes](https://bitbucket.org/amdatulabs/amdatu-kubernetes) | | Java (Fabric8, OSGi) | [github.com/fabric8io/kubernetes-client](https://github.com/fabric8io/kubernetes-client) | @@ -67,18 +70,13 @@ their authors, not the Kubernetes team. | Python | [github.com/fiaas/k8s](https://github.com/fiaas/k8s) | | Python | [github.com/mnubo/kubernetes-py](https://github.com/mnubo/kubernetes-py) | | Python | [github.com/tomplus/kubernetes_asyncio](https://github.com/tomplus/kubernetes_asyncio) | +| Python | [github.com/Frankkkkk/pykorm](https://github.com/Frankkkkk/pykorm) | | Ruby | [github.com/abonas/kubeclient](https://github.com/abonas/kubeclient) | | Ruby | [github.com/Ch00k/kuber](https://github.com/Ch00k/kuber) | +| Ruby | [github.com/k8s-ruby/k8s-ruby](https://github.com/k8s-ruby/k8s-ruby) | | Ruby | [github.com/kontena/k8s-client](https://github.com/kontena/k8s-client) | | Rust | [github.com/clux/kube-rs](https://github.com/clux/kube-rs) | | Rust | [github.com/ynqa/kubernetes-rust](https://github.com/ynqa/kubernetes-rust) | -| Scala | [github.com/doriordan/skuber](https://github.com/doriordan/skuber) | +| Scala | [github.com/hagay3/skuber](https://github.com/hagay3/skuber) | | Scala | [github.com/joan38/kubernetes-client](https://github.com/joan38/kubernetes-client) | | Swift | [github.com/swiftkube/client](https://github.com/swiftkube/client) | -| DotNet | [github.com/tonnyeremin/kubernetes_gen](https://github.com/tonnyeremin/kubernetes_gen) | -| DotNet (RestSharp) | [github.com/masroorhasan/Kubernetes.DotNet](https://github.com/masroorhasan/Kubernetes.DotNet) | -| Elixir | [github.com/obmarg/kazan](https://github.com/obmarg/kazan/) | -| Elixir | [github.com/coryodaniel/k8s](https://github.com/coryodaniel/k8s) | - - - diff --git a/content/en/docs/reference/using-api/deprecation-guide.md b/content/en/docs/reference/using-api/deprecation-guide.md new file mode 100755 index 0000000000000..9f518143b32e2 --- /dev/null +++ b/content/en/docs/reference/using-api/deprecation-guide.md @@ -0,0 +1,312 @@ +--- +reviewers: +- liggitt +- lavalamp +- thockin +- smarterclayton +title: "Deprecated API Migration Guide" +weight: 45 +content_type: reference +--- + + + +As the Kubernetes API evolves, APIs are periodically reorganized or upgraded. +When APIs evolve, the old API is deprecated and eventually removed. 
+This page contains information you need to know when migrating from +deprecated API versions to newer and more stable API versions. + + + +## Removed APIs by release + + +### v1.25 + +The **v1.25** release will stop serving the following deprecated API versions: + +#### CronJob {#cronjob-v125} + +The **batch/v1beta1** API version of CronJob will no longer be served in v1.25. + +* Migrate manifests and API clients to use the **batch/v1** API version, available since v1.21. +* All existing persisted objects are accessible via the new API +* No notable changes + +#### EndpointSlice {#endpointslice-v125} + +The **discovery.k8s.io/v1beta1** API version of EndpointSlice will no longer be served in v1.25. + +* Migrate manifests and API clients to use the **discovery.k8s.io/v1** API version, available since v1.21. +* All existing persisted objects are accessible via the new API +* Notable changes in **discovery.k8s.io/v1**: + * use per Endpoint `nodeName` field instead of deprecated `topology["kubernetes.io/hostname"]` field + * use per Endpoint `zone` field instead of deprecated `topology["topology.kubernetes.io/zone"]` field + * `topology` is replaced with the `deprecatedTopology` field which is not writable in v1 + +#### Event {#event-v125} + +The **events.k8s.io/v1beta1** API version of Event will no longer be served in v1.25. + +* Migrate manifests and API clients to use the **events.k8s.io/v1** API version, available since v1.19. +* All existing persisted objects are accessible via the new API +* Notable changes in **events.k8s.io/v1**: + * `type` is limited to `Normal` and `Warning` + * `involvedObject` is renamed to `regarding` + * `action`, `reason`, `reportingComponent`, and `reportingInstance` are required when creating new **events.k8s.io/v1** Events + * use `eventTime` instead of the deprecated `firstTimestamp` field (which is renamed to `deprecatedFirstTimestamp` and not permitted in new **events.k8s.io/v1** Events) + * use `series.lastObservedTime` instead of the deprecated `lastTimestamp` field (which is renamed to `deprecatedLastTimestamp` and not permitted in new **events.k8s.io/v1** Events) + * use `series.count` instead of the deprecated `count` field (which is renamed to `deprecatedCount` and not permitted in new **events.k8s.io/v1** Events) + * use `reportingComponent` instead of the deprecated `source.component` field (which is renamed to `deprecatedSource.component` and not permitted in new **events.k8s.io/v1** Events) + * use `reportingInstance` instead of the deprecated `source.host` field (which is renamed to `deprecatedSource.host` and not permitted in new **events.k8s.io/v1** Events) + +#### PodDisruptionBudget {#poddisruptionbudget-v125} + +The **policy/v1beta1** API version of PodDisruptionBudget will no longer be served in v1.25. + +* Migrate manifests and API clients to use the **policy/v1** API version, available since v1.21. +* All existing persisted objects are accessible via the new API +* Notable changes in **policy/v1**: + * an empty `spec.selector` (`{}`) written to a `policy/v1` PodDisruptionBudget selects all pods in the namespace (in `policy/v1beta1` an empty `spec.selector` selected no pods). An unset `spec.selector` selects no pods in either API version. + +#### PodSecurityPolicy {#psp-v125} + +PodSecurityPolicy in the **policy/v1beta1** API version will no longer be served in v1.25, and the PodSecurityPolicy admission controller will be removed. 
+ +PodSecurityPolicy replacements are still under discussion, but current use can be migrated to +[3rd-party admission webhooks](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) now. + +#### RuntimeClass {#runtimeclass-v125} + +RuntimeClass in the **node.k8s.io/v1beta1** API version will no longer be served in v1.25. + +* Migrate manifests and API clients to use the **node.k8s.io/v1** API version, available since v1.20. +* All existing persisted objects are accessible via the new API +* No notable changes + +### v1.22 + +The **v1.22** release will stop serving the following deprecated API versions: + +#### Webhook resources {#webhook-resources-v122} + +The **admissionregistration.k8s.io/v1beta1** API version of MutatingWebhookConfiguration and ValidatingWebhookConfiguration will no longer be served in v1.22. + +* Migrate manifests and API clients to use the **admissionregistration.k8s.io/v1** API version, available since v1.16. +* All existing persisted objects are accessible via the new APIs +* Notable changes: + * `webhooks[*].failurePolicy` default changed from `Ignore` to `Fail` for v1 + * `webhooks[*].matchPolicy` default changed from `Exact` to `Equivalent` for v1 + * `webhooks[*].timeoutSeconds` default changed from `30s` to `10s` for v1 + * `webhooks[*].sideEffects` default value is removed, and the field made required, and only `None` and `NoneOnDryRun` are permitted for v1 + * `webhooks[*].admissionReviewVersions` default value is removed and the field made required for v1 (supported versions for AdmissionReview are `v1` and `v1beta1`) + * `webhooks[*].name` must be unique in the list for objects created via `admissionregistration.k8s.io/v1` + +#### CustomResourceDefinition {#customresourcedefinition-v122} + +The **apiextensions.k8s.io/v1beta1** API version of CustomResourceDefinition will no longer be served in v1.22. + +* Migrate manifests and API clients to use the **apiextensions.k8s.io/v1** API version, available since v1.16. +* All existing persisted objects are accessible via the new API +* Notable changes: + * `spec.scope` is no longer defaulted to `Namespaced` and must be explicitly specified + * `spec.version` is removed in v1; use `spec.versions` instead + * `spec.validation` is removed in v1; use `spec.versions[*].schema` instead + * `spec.subresources` is removed in v1; use `spec.versions[*].subresources` instead + * `spec.additionalPrinterColumns` is removed in v1; use `spec.versions[*].additionalPrinterColumns` instead + * `spec.conversion.webhookClientConfig` is moved to `spec.conversion.webhook.clientConfig` in v1 + * `spec.conversion.conversionReviewVersions` is moved to `spec.conversion.webhook.conversionReviewVersions` in v1 + * `spec.versions[*].schema.openAPIV3Schema` is now required when creating v1 CustomResourceDefinition objects, and must be a [structural schema](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#specifying-a-structural-schema) + * `spec.preserveUnknownFields: true` is disallowed when creating v1 CustomResourceDefinition objects; it must be specified within schema definitions as `x-kubernetes-preserve-unknown-fields: true` + * In `additionalPrinterColumns` items, the `JSONPath` field was renamed to `jsonPath` in v1 (fixes [#66531](https://github.com/kubernetes/kubernetes/issues/66531)) + +#### APIService {#apiservice-v122} + +The **apiregistration.k8s.io/v1beta1** API version of APIService will no longer be served in v1.22. 
+ +* Migrate manifests and API clients to use the **apiregistration.k8s.io/v1** API version, available since v1.10. +* All existing persisted objects are accessible via the new API +* No notable changes + +#### TokenReview {#tokenreview-v122} + +The **authentication.k8s.io/v1beta1** API version of TokenReview will no longer be served in v1.22. + +* Migrate manifests and API clients to use the **authentication.k8s.io/v1** API version, available since v1.6. +* No notable changes + +#### SubjectAccessReview resources {#subjectaccessreview-resources-v122} + +The **authorization.k8s.io/v1beta1** API version of LocalSubjectAccessReview, SelfSubjectAccessReview, and SubjectAccessReview will no longer be served in v1.22. + +* Migrate manifests and API clients to use the **authorization.k8s.io/v1** API version, available since v1.6. +* Notable changes: + * `spec.group` was renamed to `spec.groups` in v1 (fixes [#32709](https://github.com/kubernetes/kubernetes/issues/32709)) + +#### CertificateSigningRequest {#certificatesigningrequest-v122} + +The **certificates.k8s.io/v1beta1** API version of CertificateSigningRequest will no longer be served in v1.22. + +* Migrate manifests and API clients to use the **certificates.k8s.io/v1** API version, available since v1.19. +* All existing persisted objects are accessible via the new API +* Notable changes in `certificates.k8s.io/v1`: + * For API clients requesting certificates: + * `spec.signerName` is now required (see [known Kubernetes signers](/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers)), and requests for `kubernetes.io/legacy-unknown` are not allowed to be created via the `certificates.k8s.io/v1` API + * `spec.usages` is now required, may not contain duplicate values, and must only contain known usages + * For API clients approving or signing certificates: + * `status.conditions` may not contain duplicate types + * `status.conditions[*].status` is now required + * `status.certificate` must be PEM-encoded, and contain only `CERTIFICATE` blocks + +#### Lease {#lease-v122} + +The **coordination.k8s.io/v1beta1** API version of Lease will no longer be served in v1.22. + +* Migrate manifests and API clients to use the **coordination.k8s.io/v1** API version, available since v1.14. +* All existing persisted objects are accessible via the new API +* No notable changes + +#### Ingress {#ingress-v122} + +The **extensions/v1beta1** and **networking.k8s.io/v1beta1** API versions of Ingress will no longer be served in v1.22. + +* Migrate manifests and API clients to use the **networking.k8s.io/v1** API version, available since v1.19. +* All existing persisted objects are accessible via the new API +* Notable changes: + * `spec.backend` is renamed to `spec.defaultBackend` + * The backend `serviceName` field is renamed to `service.name` + * Numeric backend `servicePort` fields are renamed to `service.port.number` + * String backend `servicePort` fields are renamed to `service.port.name` + * `pathType` is now required for each specified path. Options are `Prefix`, `Exact`, and `ImplementationSpecific`. To match the undefined `v1beta1` behavior, use `ImplementationSpecific`. + +#### IngressClass {#ingressclass-v122} + +The **networking.k8s.io/v1beta1** API version of IngressClass will no longer be served in v1.22. + +* Migrate manifests and API clients to use the **networking.k8s.io/v1** API version, available since v1.19. 
+* All existing persisted objects are accessible via the new API +* No notable changes + +#### RBAC resources {#rbac-resources-v122} + +The **rbac.authorization.k8s.io/v1beta1** API version of ClusterRole, ClusterRoleBinding, Role, and RoleBinding will no longer be served in v1.22. + +* Migrate manifests and API clients to use the **rbac.authorization.k8s.io/v1** API version, available since v1.8. +* All existing persisted objects are accessible via the new APIs +* No notable changes + +#### PriorityClass {#priorityclass-v122} + +The **scheduling.k8s.io/v1beta1** API version of PriorityClass will no longer be served in v1.22. + +* Migrate manifests and API clients to use the **scheduling.k8s.io/v1** API version, available since v1.14. +* All existing persisted objects are accessible via the new API +* No notable changes + +#### Storage resources {#storage-resources-v122} + +The **storage.k8s.io/v1beta1** API version of CSIDriver, CSINode, StorageClass, and VolumeAttachment will no longer be served in v1.22. + +* Migrate manifests and API clients to use the **storage.k8s.io/v1** API version + * CSIDriver is available in **storage.k8s.io/v1** since v1.19. + * CSINode is available in **storage.k8s.io/v1** since v1.17 + * StorageClass is available in **storage.k8s.io/v1** since v1.6 + * VolumeAttachment is available in **storage.k8s.io/v1** v1.13 +* All existing persisted objects are accessible via the new APIs +* No notable changes + +### v1.16 + +The **v1.16** release stopped serving the following deprecated API versions: + +#### NetworkPolicy {#networkpolicy-v116} + +The **extensions/v1beta1** API version of NetworkPolicy is no longer served as of v1.16. + +* Migrate manifests and API clients to use the **networking.k8s.io/v1** API version, available since v1.8. +* All existing persisted objects are accessible via the new API + +#### DaemonSet {#daemonset-v116} + +The **extensions/v1beta1** and **apps/v1beta2** API versions of DaemonSet are no longer served as of v1.16. + +* Migrate manifests and API clients to use the **apps/v1** API version, available since v1.9. +* All existing persisted objects are accessible via the new API +* Notable changes: + * `spec.templateGeneration` is removed + * `spec.selector` is now required and immutable after creation; use the existing template labels as the selector for seamless upgrades + * `spec.updateStrategy.type` now defaults to `RollingUpdate` (the default in `extensions/v1beta1` was `OnDelete`) + +#### Deployment {#deployment-v116} + +The **extensions/v1beta1**, **apps/v1beta1**, and **apps/v1beta2** API versions of Deployment are no longer served as of v1.16. + +* Migrate manifests and API clients to use the **apps/v1** API version, available since v1.9. +* All existing persisted objects are accessible via the new API +* Notable changes: + * `spec.rollbackTo` is removed + * `spec.selector` is now required and immutable after creation; use the existing template labels as the selector for seamless upgrades + * `spec.progressDeadlineSeconds` now defaults to `600` seconds (the default in `extensions/v1beta1` was no deadline) + * `spec.revisionHistoryLimit` now defaults to `10` (the default in `apps/v1beta1` was `2`, the default in `extensions/v1beta1` was to retain all) + * `maxSurge` and `maxUnavailable` now default to `25%` (the default in `extensions/v1beta1` was `1`) + +#### StatefulSet {#statefulset-v116} + +The **apps/v1beta1** and **apps/v1beta2** API versions of StatefulSet are no longer served as of v1.16. 
+ +* Migrate manifests and API clients to use the **apps/v1** API version, available since v1.9. +* All existing persisted objects are accessible via the new API +* Notable changes: + * `spec.selector` is now required and immutable after creation; use the existing template labels as the selector for seamless upgrades + * `spec.updateStrategy.type` now defaults to `RollingUpdate` (the default in `apps/v1beta1` was `OnDelete`) + +#### ReplicaSet {#replicaset-v116} + +The **extensions/v1beta1**, **apps/v1beta1**, and **apps/v1beta2** API versions of ReplicaSet are no longer served as of v1.16. + +* Migrate manifests and API clients to use the **apps/v1** API version, available since v1.9. +* All existing persisted objects are accessible via the new API +* Notable changes: + * `spec.selector` is now required and immutable after creation; use the existing template labels as the selector for seamless upgrades + +#### PodSecurityPolicy {#psp-v116} + +The **extensions/v1beta1** API version of PodSecurityPolicy is no longer served as of v1.16. + +* Migrate manifests and API client to use the **policy/v1beta1** API version, available since v1.10. +* Note that the **policy/v1beta1** API version of PodSecurityPolicy will be removed in v1.25. + +## What to do + +### Test with deprecated APIs disabled + +You can test your clusters by starting an API server with specific API versions disabled +to simulate upcoming removals. Add the following flag to the API server startup arguments: + +`--runtime-config=/=false` + +For example: + +`--runtime-config=admissionregistration.k8s.io/v1beta1=false,apiextensions.k8s.io/v1beta1,...` + +### Locate use of deprecated APIs + +Use [client warnings, metrics, and audit information available in 1.19+](https://kubernetes.io/blog/2020/09/03/warnings/#deprecation-warnings) +to locate use of deprecated APIs. + +### Migrate to non-deprecated APIs + +* Update custom integrations and controllers to call the non-deprecated APIs +* Change YAML files to reference the non-deprecated APIs + + You can use the `kubectl-convert` command (`kubectl convert` prior to v1.20) + to automatically convert an existing object: + + `kubectl-convert -f --output-version /`. + + For example, to convert an older Deployment to `apps/v1`, you can run: + + `kubectl-convert -f ./my-deployment.yaml --output-version apps/v1` + + Note that this may use non-ideal default values. To learn more about a specific + resource, check the Kubernetes [API reference](/docs/reference/kubernetes-api/). diff --git a/content/en/docs/reference/using-api/deprecation-policy.md b/content/en/docs/reference/using-api/deprecation-policy.md index 17840f195ba84..eed4341628cb2 100644 --- a/content/en/docs/reference/using-api/deprecation-policy.md +++ b/content/en/docs/reference/using-api/deprecation-policy.md @@ -301,7 +301,7 @@ Starting in Kubernetes v1.19, making an API request to a deprecated REST API end 1. Returns a `Warning` header (as defined in [RFC7234, Section 5.5](https://tools.ietf.org/html/rfc7234#section-5.5)) in the API response. 2. Adds a `"k8s.io/deprecated":"true"` annotation to the [audit event](/docs/tasks/debug-application-cluster/audit/) recorded for the request. -3. Sets an `apiserver_requested_deprecated_apis` gauge metric to `1` in the `kube-apiserver` +3. Sets an `apiserver_requested_deprecated_apis` gauge metric to `1` in the `kube-apiserver` process. 
The metric has labels for `group`, `version`, `resource`, `subresource` that can be joined to the `apiserver_request_total` metric, and a `removed_release` label that indicates the Kubernetes release in which the API will no longer be served. The following Prometheus query @@ -327,7 +327,7 @@ supported in API v1 must exist and function until API v1 is removed. ### Component config structures -Component configs are versioned and managed just like REST resources. +Component configs are versioned and managed similar to REST resources. ### Future work @@ -428,6 +428,46 @@ transitions a lifecycle stage as follows. Feature gates must function for no les is deprecated it must be documented in both in the release notes and the corresponding CLI help. Both warnings and documentation must indicate whether a feature gate is non-operational.** +## Deprecating a metric + +Each component of the Kubernetes control-plane exposes metrics (usually the +`/metrics` endpoint), which are typically ingested by cluster administrators. +Not all metrics are the same: some metrics are commonly used as SLIs or used +to determine SLOs, these tend to have greater import. Other metrics are more +experimental in nature or are used primarily in the Kubernetes development +process. + +Accordingly, metrics fall under two stability classes (`ALPHA` and `STABLE`); +this impacts removal of a metric during a Kubernetes release. These classes +are determined by the perceived importance of the metric. The rules for +deprecating and removing a metric are as follows: + +**Rule #9a: Metrics, for the corresponding stability class, must function for no less than:** + + * **STABLE: 4 releases or 12 months (whichever is longer)** + * **ALPHA: 0 releases** + +**Rule #9b: Metrics, after their _announced deprecation_, must function for no less than:** + + * **STABLE: 3 releases or 9 months (whichever is longer)** + * **ALPHA: 0 releases** + +Deprecated metrics will have their description text prefixed with a deprecation notice +string '(Deprecated from x.y)' and a warning log will be emitted during metric +registration. Like their stable undeprecated counterparts, deprecated metrics will +be automatically registered to the metrics endpoint and therefore visible. + +On a subsequent release (when the metric's `deprecatedVersion` is equal to +_current_kubernetes_version - 3_)), a deprecated metric will become a _hidden_ metric. +**_Unlike_** their deprecated counterparts, hidden metrics will _no longer_ be +automatically registered to the metrics endpoint (hence hidden). However, they +can be explicitly enabled through a command line flag on the binary +(`--show-hidden-metrics-for-version=`). This provides cluster admins an +escape hatch to properly migrate off of a deprecated metric, if they were not +able to react to the earlier deprecation warnings. Hidden metrics should be +deleted after one release. + + ## Exceptions No policy can cover every possible situation. This policy is a living @@ -438,4 +478,3 @@ leaders to find the best solutions for those specific cases, always bearing in mind that Kubernetes is committed to being a stable system that, as much as possible, never breaks users. Exceptions will always be announced in all relevant release notes. 
- diff --git a/content/en/docs/reference/using-api/server-side-apply.md b/content/en/docs/reference/using-api/server-side-apply.md index c281eb940007a..15026843256ea 100644 --- a/content/en/docs/reference/using-api/server-side-apply.md +++ b/content/en/docs/reference/using-api/server-side-apply.md @@ -16,10 +16,10 @@ min-kubernetes-server-version: 1.16 ## Introduction -Server Side Apply helps users and controllers manage their resources via -declarative configurations. It allows them to create and/or modify their +Server Side Apply helps users and controllers manage their resources through +declarative configurations. Clients can create and modify their [objects](/docs/concepts/overview/working-with-objects/kubernetes-objects/) -declaratively, simply by sending their fully specified intent. +declaratively by sending their fully specified intent. A fully specified intent is a partial object that only includes the fields and values for which the user has an opinion. That intent either creates a new @@ -46,7 +46,7 @@ Server side apply is meant both as a replacement for the original `kubectl apply` and as a simpler mechanism for controllers to enact their changes. If you have Server Side Apply enabled, the control plane tracks managed fields -for all newlly created objects. +for all newly created objects. ## Field Management @@ -209,9 +209,8 @@ would have failed due to conflicting ownership. The merging strategy, implemented with Server Side Apply, provides a generally more stable object lifecycle. Server Side Apply tries to merge fields based on -the fact who manages them instead of overruling just based on values. This way -it is intended to make it easier and more stable for multiple actors updating -the same object by causing less unexpected interference. +the actor who manages them instead of overruling based on values. This way +multiple actors can update the same object without causing unexpected interference. When a user sends a "fully-specified intent" object to the Server Side Apply endpoint, the server merges it with the live object favoring the value in the @@ -225,17 +224,75 @@ merging, see A number of markers were added in Kubernetes 1.16 and 1.17, to allow API developers to describe the merge strategy supported by lists, maps, and structs. These markers can be applied to objects of the respective type, -in Go files or in the OpenAPI schema definition of the +in Go files or in the OpenAPI schema definition of the [CRD](/docs/reference/generated/kubernetes-api/{{< param "version" >}}#jsonschemaprops-v1-apiextensions-k8s-io): | Golang marker | OpenAPI extension | Accepted values | Description | Introduced in | |---|---|---|---|---| -| `//+listType` | `x-kubernetes-list-type` | `atomic`/`set`/`map` | Applicable to lists. `atomic` and `set` apply to lists with scalar elements only. `map` applies to lists of nested types only. If configured as `atomic`, the entire list is replaced during merge; a single manager manages the list as a whole at any one time. If `set` or `map`, different managers can manage entries separately. | 1.16 | -| `//+listMapKey` | `x-kubernetes-list-map-keys` | Slice of map keys that uniquely identify entries for example `["port", "protocol"]` | Only applicable when `+listType=map`. A slice of strings whose values in combination must uniquely identify list entries. While there can be multiple keys, `listMapKey` is singular because keys need to be specified individually in the Go type. 
| 1.16 | +| `//+listType` | `x-kubernetes-list-type` | `atomic`/`set`/`map` | Applicable to lists. `set` applies to lists that include only scalar elements. These elements must be unique. `map` applies to lists of nested types only. The key values (see `listMapKey`) must be unique in the list. `atomic` can apply to any list. If configured as `atomic`, the entire list is replaced during merge. At any point in time, a single manager owns the list. If `set` or `map`, different managers can manage entries separately. | 1.16 | +| `//+listMapKey` | `x-kubernetes-list-map-keys` | List of field names, e.g. `["port", "protocol"]` | Only applicable when `+listType=map`. A list of field names whose values uniquely identify entries in the list. While there can be multiple keys, `listMapKey` is singular because keys need to be specified individually in the Go type. The key fields must be scalars. | 1.16 | | `//+mapType` | `x-kubernetes-map-type` | `atomic`/`granular` | Applicable to maps. `atomic` means that the map can only be entirely replaced by a single manager. `granular` means that the map supports separate managers updating individual fields. | 1.17 | | `//+structType` | `x-kubernetes-map-type` | `atomic`/`granular` | Applicable to structs; otherwise same usage and OpenAPI annotation as `//+mapType`. | 1.17 | -### Custom Resources +If `listType` is missing, the API server interprets a +`patchMergeStrategy=merge` marker as a `listType=map` and the +corresponding `patchMergeKey` marker as a `listMapKey`. + +The `atomic` list type is recursive. + +These markers are specified as comments and don't have to be repeated as +field tags. + +### Compatibility across topology changes + +On rare occasions, a CRD or built-in type author may want to change the +specific topology of a field in their resource without incrementing its +version. Changing the topology of types, by upgrading the cluster or +updating the CRD, has different consequences when updating existing +objects. There are two categories of changes: when a field goes from +`map`/`set`/`granular` to `atomic` and the other way around. + +When the `listType`, `mapType`, or `structType` changes from +`map`/`set`/`granular` to `atomic`, the whole list, map, or struct of +existing objects will end up being owned by actors who owned an element +of these types. This means that any further change to these objects +would cause a conflict. + +When a list, map, or struct changes from `atomic` to +`map`/`set`/`granular`, the API server won't be able to infer the new +ownership of these fields. Because of that, no conflict will be produced +when objects have these fields updated. For that reason, it is not +recommended to change a type from `atomic` to `map`/`set`/`granular`. + +Take, for example, the custom resource: + +```yaml +apiVersion: example.com/v1 +kind: Foo +metadata: + name: foo-sample + managedFields: + - manager: manager-one + operation: Apply + apiVersion: example.com/v1 + fields: + f:spec: + f:data: {} +spec: + data: + key1: val1 + key2: val2 +``` + +Before `spec.data` gets changed from `atomic` to `granular`, +`manager-one` owns the field `spec.data`, and all the fields within it +(`key1` and `key2`). When the CRD gets changed to make `spec.data` +`granular`, `manager-one` continues to own the top-level field +`spec.data` (meaning no other managers can delete the map called `data` +without a conflict), but it no longer owns `key1` and `key2`, so another +manager can then modify or delete those fields without conflict.
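To tie the markers in the table above to a concrete schema, here is a minimal sketch of a CRD field declared as a `map` list via the OpenAPI extensions; the `ports` field and its key names are hypothetical:

```yaml
# Hypothetical CRD schema fragment (field and key names assumed)
ports:
  type: array
  x-kubernetes-list-type: map
  x-kubernetes-list-map-keys:
    - port
    - protocol
  items:
    type: object
    properties:
      port:
        type: integer
      protocol:
        type: string
```

With such a declaration, different managers can own individual `ports` entries, identified by their `port`/`protocol` pair, rather than contending for the list as a whole.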
+ +## Custom Resources By default, Server Side Apply treats custom resources as unstructured data. All keys are treated the same as struct fields, and all lists are considered atomic. @@ -246,7 +303,7 @@ that contains annotations as defined in the previous "Merge Strategy" section, these annotations will be used when merging objects of this type. -### Using Server-Side Apply in a controller +## Using Server-Side Apply in a controller As a developer of a controller, you can use server-side apply as a way to simplify the update logic of your controller. The main differences with a @@ -261,7 +318,7 @@ read-modify-write and/or patch are the following: It is strongly recommended for controllers to always "force" conflicts, since they might not be able to resolve or act on these conflicts. -### Transferring Ownership +## Transferring Ownership In addition to the concurrency controls provided by [conflict resolution](#conflicts), Server Side Apply provides ways to perform coordinated @@ -297,7 +354,7 @@ is not what the user wants to happen, even temporarily. There are two solutions: -- (easy) Leave `replicas` in the configuration; when HPA eventually writes to that +- (basic) Leave `replicas` in the configuration; when HPA eventually writes to that field, the system gives the user a conflict over it. At that point, it is safe to remove from the configuration. @@ -319,7 +376,7 @@ kubectl apply -f https://k8s.io/examples/application/ssa/nginx-deployment-replic ``` If the apply results in a conflict with the HPA controller, then do nothing. The -conflict just indicates the controller has claimed the field earlier in the +conflict indicates the controller has claimed the field earlier in the process than it sometimes does. At this point the user may remove the `replicas` field from their configuration. @@ -330,7 +387,7 @@ Note that whenever the HPA controller sets the `replicas` field to a new value, the temporary field manager will no longer own any fields and will be automatically deleted. No cleanup is required. -## Transferring Ownership Between Users +### Transferring Ownership Between Users Users can transfer ownership of a field between each other by setting the field to the same value in both of their applied configs, causing them to share @@ -436,7 +493,7 @@ Data: [{"op": "replace", "path": "/metadata/managedFields", "value": [{}]}] This will overwrite the managedFields with a list containing a single empty entry that then results in the managedFields being stripped entirely from the -object. Note that just setting the managedFields to an empty list will not +object. Note that setting the managedFields to an empty list will not reset the field. This is on purpose, so managedFields never get stripped by clients not aware of the field. @@ -459,4 +516,3 @@ Server Side Apply is a beta feature, so it is enabled by default. To turn this off, you need to include the `--feature-gates ServerSideApply=false` flag when starting `kube-apiserver`. If you have multiple `kube-apiserver` replicas, all should have the same flag setting. - diff --git a/content/en/docs/setup/best-practices/certificates.md b/content/en/docs/setup/best-practices/certificates.md index a065462baf240..1648cc4e9eb92 100644 --- a/content/en/docs/setup/best-practices/certificates.md +++ b/content/en/docs/setup/best-practices/certificates.md @@ -9,7 +9,7 @@ weight: 40 Kubernetes requires PKI certificates for authentication over TLS.
-If you install Kubernetes with [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/), the certificates that your cluster requires are automatically generated. +If you install Kubernetes with [kubeadm](/docs/reference/setup-tools/kubeadm/), the certificates that your cluster requires are automatically generated. You can also generate your own certificates -- for example, to keep your private keys more secure by not storing them on the API server. This page explains the certificates that your cluster requires. @@ -74,7 +74,7 @@ Required certificates: | kube-apiserver-kubelet-client | kubernetes-ca | system:masters | client | | | front-proxy-client | kubernetes-front-proxy-ca | | client | | -[1]: any other IP or DNS name you contact your cluster on (as used by [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) +[1]: any other IP or DNS name you contact your cluster on (as used by [kubeadm](/docs/reference/setup-tools/kubeadm/) the load balancer stable IP and/or DNS name, `kubernetes`, `kubernetes.default`, `kubernetes.default.svc`, `kubernetes.default.svc.cluster`, `kubernetes.default.svc.cluster.local`) @@ -100,7 +100,7 @@ For kubeadm users only: ### Certificate paths -Certificates should be placed in a recommended path (as used by [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/)). +Certificates should be placed in a recommended path (as used by [kubeadm](/docs/reference/setup-tools/kubeadm/)). Paths should be specified using the given argument regardless of location. | Default CN | recommended key path | recommended cert path | command | key argument | cert argument | diff --git a/content/en/docs/setup/best-practices/cluster-large.md b/content/en/docs/setup/best-practices/cluster-large.md index ccb31fe108498..a75499a811f11 100644 --- a/content/en/docs/setup/best-practices/cluster-large.md +++ b/content/en/docs/setup/best-practices/cluster-large.md @@ -69,10 +69,9 @@ When creating a cluster, you can (using custom tooling): ## Addon resources Kubernetes [resource limits](/docs/concepts/configuration/manage-resources-containers/) -help to minimise the impact of memory leaks and other ways that pods and containers can -impact on other components. These resource limits can and should apply to -{{< glossary_tooltip text="addon" term_id="addons" >}} just as they apply to application -workloads. +help to minimize the impact of memory leaks and other ways that pods and containers can +impact other components. These resource limits apply to +{{< glossary_tooltip text="addon" term_id="addons" >}} resources just as they apply to application workloads. For example, you can set CPU and memory limits for a logging component: diff --git a/content/en/docs/setup/best-practices/multiple-zones.md b/content/en/docs/setup/best-practices/multiple-zones.md index 107ee2d0f7342..8f51a3bd06099 100644 --- a/content/en/docs/setup/best-practices/multiple-zones.md +++ b/content/en/docs/setup/best-practices/multiple-zones.md @@ -59,7 +59,7 @@ When nodes start up, the kubelet on each node automatically adds {{< glossary_tooltip text="labels" term_id="label" >}} to the Node object that represents that specific kubelet in the Kubernetes API. These labels can include -[zone information](/docs/reference/kubernetes-api/labels-annotations-taints/#topologykubernetesiozone). +[zone information](/docs/reference/labels-annotations-taints/#topologykubernetesiozone).
If your cluster spans multiple zones or regions, you can use node labels in conjunction with diff --git a/content/en/docs/setup/production-environment/container-runtimes.md b/content/en/docs/setup/production-environment/container-runtimes.md index 59725188d806c..ebbd11f0810f7 100644 --- a/content/en/docs/setup/production-environment/container-runtimes.md +++ b/content/en/docs/setup/production-environment/container-runtimes.md @@ -48,7 +48,7 @@ Changing the settings such that your container runtime and kubelet use `systemd` stabilized the system. To configure this for Docker, set `native.cgroupdriver=systemd`. {{< caution >}} -Changing the cgroup driver of a Node that has joined a cluster is strongly *not* recommended. +Changing the cgroup driver of a Node that has joined a cluster is a sensitive operation. If the kubelet has created Pods using the semantics of one cgroup driver, changing the container runtime to another cgroup driver can cause errors when trying to re-create the Pod sandbox for such existing Pods. Restarting the kubelet may not solve such errors. @@ -57,13 +57,18 @@ If you have automation that makes it feasible, replace the node with another usi configuration, or reinstall it using automation. {{< /caution >}} +### Migrating to the `systemd` driver in kubeadm managed clusters + +Follow this [Migration guide](/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/) +if you wish to migrate to the `systemd` cgroup driver in existing kubeadm managed clusters. + ## Container runtimes {{% thirdparty-content %}} ### containerd -This section contains the necessary steps to use `containerd` as CRI runtime. +This section contains the necessary steps to use containerd as CRI runtime. Use the following commands to install Containerd on your system: @@ -92,161 +97,62 @@ sudo sysctl --system Install containerd: {{< tabs name="tab-cri-containerd-installation" >}} -{{% tab name="Ubuntu 16.04" %}} +{{% tab name="Linux" %}} -```shell -# (Install containerd) -## Set up the repository -### Install packages to allow apt to use a repository over HTTPS -sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common -``` +1. Install the `containerd.io` package from the official Docker repositories. Instructions for setting up the Docker repository for your respective Linux distribution and installing the `containerd.io` package can be found at [Install Docker Engine](https://docs.docker.com/engine/install/#server). -```shell -## Add Docker's official GPG key -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key --keyring /etc/apt/trusted.gpg.d/docker.gpg add - -``` +2. Configure containerd: -```shell -## Add Docker apt repository. -sudo add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ - $(lsb_release -cs) \ - stable" -``` + ```shell + sudo mkdir -p /etc/containerd + containerd config default | sudo tee /etc/containerd/config.toml + ``` -```shell -## Install containerd -sudo apt-get update && sudo apt-get install -y containerd.io -``` +3. 
Restart containerd: -```shell -# Configure containerd -sudo mkdir -p /etc/containerd -containerd config default | sudo tee /etc/containerd/config.toml -``` + ```shell + sudo systemctl restart containerd + ``` -```shell -# Restart containerd -sudo systemctl restart containerd -``` {{% /tab %}} -{{% tab name="Ubuntu 18.04/20.04" %}} - -```shell -# (Install containerd) -sudo apt-get update && sudo apt-get install -y containerd -``` - -```shell -# Configure containerd -sudo mkdir -p /etc/containerd -containerd config default | sudo tee /etc/containerd/config.toml -``` - -```shell -# Restart containerd -sudo systemctl restart containerd -``` -{{% /tab %}} -{{% tab name="Debian 9+" %}} - -```shell -# (Install containerd) -## Set up the repository -### Install packages to allow apt to use a repository over HTTPS -sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common -``` - -```shell -## Add Docker's official GPG key -curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key --keyring /etc/apt/trusted.gpg.d/docker.gpg add - -``` +{{% tab name="Windows (PowerShell)" %}} -```shell -## Add Docker apt repository. -sudo add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/debian \ - $(lsb_release -cs) \ - stable" -``` +Start a Powershell session, set `$Version` to the desired version (ex: `$Version=1.4.3`), and then run the following commands: -```shell -## Install containerd -sudo apt-get update && sudo apt-get install -y containerd.io -``` +1. Download containerd: -```shell -# Set default containerd configuration -sudo mkdir -p /etc/containerd -containerd config default | sudo tee /etc/containerd/config.toml -``` + ```powershell + curl.exe -L https://github.com/containerd/containerd/releases/download/v$Version/containerd-$Version-windows-amd64.tar.gz -o containerd-windows-amd64.tar.gz + tar.exe xvf .\containerd-windows-amd64.tar.gz + ``` -```shell -# Restart containerd -sudo systemctl restart containerd -``` -{{% /tab %}} -{{% tab name="CentOS/RHEL 7.4+" %}} +2. Extract and configure: -```shell -# (Install containerd) -## Set up the repository -### Install required packages -sudo yum install -y yum-utils device-mapper-persistent-data lvm2 -``` + ```powershell + Copy-Item -Path ".\bin\" -Destination "$Env:ProgramFiles\containerd" -Recurse -Force + cd $Env:ProgramFiles\containerd\ + .\containerd.exe config default | Out-File config.toml -Encoding ascii -```shell -## Add docker repository -sudo yum-config-manager \ - --add-repo \ - https://download.docker.com/linux/centos/docker-ce.repo -``` + # Review the configuration. Depending on setup you may want to adjust: + # - the sandbox_image (Kubernetes pause image) + # - cni bin_dir and conf_dir locations + Get-Content config.toml -```shell -## Install containerd -sudo yum update -y && sudo yum install -y containerd.io -``` + # (Optional - but highly recommended) Exclude containerd from Windows Defender Scans + Add-MpPreference -ExclusionProcess "$Env:ProgramFiles\containerd\containerd.exe" + ``` -```shell -## Configure containerd -sudo mkdir -p /etc/containerd -containerd config default | sudo tee /etc/containerd/config.toml -``` +3. 
Start containerd: -```shell -# Restart containerd -sudo systemctl restart containerd -``` -{{% /tab %}} -{{% tab name="Windows (PowerShell)" %}} -```powershell -# (Install containerd) -# download containerd -cmd /c curl -OL https://github.com/containerd/containerd/releases/download/v1.4.1/containerd-1.4.1-windows-amd64.tar.gz -cmd /c tar xvf .\containerd-1.4.1-windows-amd64.tar.gz -``` + ```powershell + .\containerd.exe --register-service + Start-Service containerd + ``` -```powershell -# extract and configure -Copy-Item -Path ".\bin\" -Destination "$Env:ProgramFiles\containerd" -Recurse -Force -cd $Env:ProgramFiles\containerd\ -.\containerd.exe config default | Out-File config.toml -Encoding ascii - -# review the configuration. depending on setup you may want to adjust: -# - the sandbox_image (kubernetes pause image) -# - cni bin_dir and conf_dir locations -Get-Content config.toml -``` - -```powershell -# start containerd -.\containerd.exe --register-service -Start-Service containerd -``` {{% /tab %}} {{< /tabs >}} -#### systemd {#containerd-systemd} +#### Using the `systemd` cgroup driver {#containerd-systemd} To use the `systemd` cgroup driver in `/etc/containerd/config.toml` with `runc`, set @@ -257,6 +163,12 @@ To use the `systemd` cgroup driver in `/etc/containerd/config.toml` with `runc`, SystemdCgroup = true ``` +If you apply this change, make sure to restart containerd again: + +```shell +sudo systemctl restart containerd +``` + When using kubeadm, manually configure the [cgroup driver for kubelet](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-control-plane-node). @@ -420,7 +332,7 @@ Start CRI-O: ```shell sudo systemctl daemon-reload -sudo systemctl start crio +sudo systemctl enable crio --now ``` Refer to the [CRI-O installation guide](https://github.com/cri-o/cri-o/blob/master/install.md) @@ -446,138 +358,38 @@ in sync. ### Docker -On each of your nodes, install Docker CE. +1. On each of your nodes, install Docker for your Linux distribution as per [Install Docker Engine](https://docs.docker.com/engine/install/#server). You can find the latest validated version of Docker in this [dependencies](https://git.k8s.io/kubernetes/build/dependencies.yaml) file. -The Kubernetes release notes list which versions of Docker are compatible -with that version of Kubernetes. +2. Configure the Docker daemon, in particular to use systemd for the management of the container's cgroups. -Use the following commands to install Docker on your system: - -{{< tabs name="tab-cri-docker-installation" >}} -{{% tab name="Ubuntu 16.04+" %}} - -```shell -# (Install Docker CE) -## Set up the repository: -### Install packages to allow apt to use a repository over HTTPS -sudo apt-get update && sudo apt-get install -y \ - apt-transport-https ca-certificates curl software-properties-common gnupg2 -``` + ```shell + sudo mkdir /etc/docker + cat <<EOF | sudo tee /etc/docker/daemon.json + { + "exec-opts": ["native.cgroupdriver=systemd"], + "log-driver": "json-file", + "log-opts": { + "max-size": "100m" + }, + "storage-driver": "overlay2" + } + EOF + ``` + + {{< note >}} + `overlay2` is the preferred storage driver for systems running Linux kernel version 4.0 or higher, or RHEL or CentOS using version 3.10.0-514 and above. + {{< /note >}} -```shell -# Add the Docker apt repository: -sudo add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ - $(lsb_release -cs) \ - stable" -``` +3.
Restart Docker and enable on boot: -```shell -# Install Docker CE -sudo apt-get update && sudo apt-get install -y \ - containerd.io=1.2.13-2 \ - docker-ce=5:19.03.11~3-0~ubuntu-$(lsb_release -cs) \ - docker-ce-cli=5:19.03.11~3-0~ubuntu-$(lsb_release -cs) -``` + ```shell + sudo systemctl enable docker + sudo systemctl daemon-reload + sudo systemctl restart docker + ``` -```shell -## Create /etc/docker -sudo mkdir /etc/docker -``` - -```shell -# Set up the Docker daemon -cat <}} - -If you want the `docker` service to start on boot, run the following command: - -```shell -sudo systemctl enable docker -``` - -Refer to the [official Docker installation guides](https://docs.docker.com/engine/installation/) -for more information. +{{< note >}} +For more information refer to + - [Configure the Docker daemon](https://docs.docker.com/config/daemon/) + - [Control Docker with systemd](https://docs.docker.com/config/daemon/systemd/) +{{< /note >}} diff --git a/content/en/docs/setup/production-environment/tools/kops.md b/content/en/docs/setup/production-environment/tools/kops.md index 8394c28fafec5..cf5333a92d078 100644 --- a/content/en/docs/setup/production-environment/tools/kops.md +++ b/content/en/docs/setup/production-environment/tools/kops.md @@ -23,7 +23,7 @@ kops is an automated provisioning system: ## {{% heading "prerequisites" %}} -* You must have [kubectl](/docs/tasks/tools/install-kubectl/) installed. +* You must have [kubectl](/docs/tasks/tools/) installed. * You must [install](https://github.com/kubernetes/kops#installing) `kops` on a 64-bit (AMD64 and Intel 64) device architecture. @@ -39,7 +39,7 @@ kops is an automated provisioning system: #### Installation -Download kops from the [releases page](https://github.com/kubernetes/kops/releases) (it is also easy to build from source): +Download kops from the [releases page](https://github.com/kubernetes/kops/releases) (it is also convenient to build from source): {{< tabs name="kops_installation" >}} {{% tab name="macOS" %}} @@ -56,10 +56,10 @@ To download a specific version, replace the following portion of the command wit $(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4) ``` -For example, to download kops version v1.15.0 type: +For example, to download kops version v1.20.0 type: ```shell -curl -LO https://github.com/kubernetes/kops/releases/download/1.15.0/kops-darwin-amd64 +curl -LO https://github.com/kubernetes/kops/releases/download/v1.20.0/kops-darwin-amd64 ``` Make the kops binary executable. @@ -94,10 +94,10 @@ To download a specific version of kops, replace the following portion of the com $(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4) ``` -For example, to download kops version v1.15.0 type: +For example, to download kops version v1.20.0 type: ```shell -curl -LO https://github.com/kubernetes/kops/releases/download/1.15.0/kops-linux-amd64 +curl -LO https://github.com/kubernetes/kops/releases/download/v1.20.0/kops-linux-amd64 ``` Make the kops binary executable @@ -147,7 +147,7 @@ You must then set up your NS records in the parent domain, so that records in th you would create NS records in `example.com` for `dev`. If it is a root domain name you would configure the NS records at your domain registrar (e.g. `example.com` would need to be configured where you bought `example.com`). -This step is easy to mess up (it is the #1 cause of problems!) 
You can double-check that +Verify your route53 domain setup (it is the #1 cause of problems!). You can double-check that your cluster is configured correctly if you have the dig tool by running: `dig NS dev.example.com` diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md b/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md index 1bcdad0092915..1dd44e9b0b942 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md @@ -78,7 +78,7 @@ kind: ClusterConfiguration kubernetesVersion: v1.16.0 scheduler: extraArgs: - address: 0.0.0.0 + bind-address: 0.0.0.0 config: /home/johndoe/schedconfig.yaml kubeconfig: /home/johndoe/kubeconfig.yaml ``` diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index 9c6acf5560741..0a394ad022416 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -8,7 +8,7 @@ weight: 30 -Creating a minimum viable Kubernetes cluster that conforms to best practices. In fact, you can use `kubeadm` to set up a cluster that will pass the [Kubernetes Conformance tests](https://kubernetes.io/blog/2017/10/software-conformance-certification). +Using `kubeadm`, you can create a minimum viable Kubernetes cluster that conforms to best practices. In fact, you can use `kubeadm` to set up a cluster that will pass the [Kubernetes Conformance tests](https://kubernetes.io/blog/2017/10/software-conformance-certification). `kubeadm` also supports other cluster lifecycle functions, such as [bootstrap tokens](/docs/reference/access-authn-authz/bootstrap-tokens/) and cluster upgrades. @@ -137,7 +137,7 @@ is not supported by kubeadm. ### More information -For more information about `kubeadm init` arguments, see the [kubeadm reference guide](/docs/reference/setup-tools/kubeadm/kubeadm/). +For more information about `kubeadm init` arguments, see the [kubeadm reference guide](/docs/reference/setup-tools/kubeadm/). To configure `kubeadm init` with a configuration file see [Using kubeadm init with a configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file). @@ -187,6 +187,13 @@ Alternatively, if you are the `root` user, you can run: export KUBECONFIG=/etc/kubernetes/admin.conf ``` +{{< warning >}} +Kubeadm signs the certificate in the `admin.conf` to have `Subject: O = system:masters, CN = kubernetes-admin`. +`system:masters` is a break-glass, super user group that bypasses the authorization layer (e.g. RBAC). +Do not share the `admin.conf` file with anyone and instead grant users custom permissions by generating +them a kubeconfig file using the `kubeadm kubeconfig user` command. +{{< /warning >}} + Make a record of the `kubeadm join` command that `kubeadm init` outputs. You need this command to [join nodes to your cluster](#join-nodes). @@ -229,7 +236,7 @@ Cluster DNS (CoreDNS) will not start up before a network is installed.** {{< /caution >}} {{< note >}} -Currently Calico is the only CNI plugin that the kubeadm project performs e2e tests against. +Kubeadm should be CNI agnostic and the validation of CNI providers is out of the scope of our current e2e testing. 
If you find an issue related to a CNI plugin you should log a ticket in its respective issue tracker instead of the kubeadm or kubernetes issue trackers. {{< /note >}} @@ -434,7 +441,7 @@ Now remove the node: kubectl delete node -If you wish to start over simply run `kubeadm init` or `kubeadm join` with the +If you wish to start over, run `kubeadm init` or `kubeadm join` with the appropriate arguments. ### Clean up the control plane diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/dual-stack-support.md b/content/en/docs/setup/production-environment/tools/kubeadm/dual-stack-support.md new file mode 100644 index 0000000000000..767787179c98d --- /dev/null +++ b/content/en/docs/setup/production-environment/tools/kubeadm/dual-stack-support.md @@ -0,0 +1,152 @@ +--- +title: Dual-stack support with kubeadm +content_type: task +weight: 110 +min-kubernetes-server-version: 1.21 +--- + + + +{{< feature-state for_k8s_version="v1.21" state="beta" >}} + +Your Kubernetes cluster can run in [dual-stack](/docs/concepts/services-networking/dual-stack/) networking mode, which means that cluster networking lets you use either address family. In a dual-stack cluster, the control plane can assign both an IPv4 address and an IPv6 address to a single {{< glossary_tooltip text="Pod" term_id="pod" >}} or a {{< glossary_tooltip text="Service" term_id="service" >}}. + + + +## {{% heading "prerequisites" %}} + +You need to have installed the {{< glossary_tooltip text="kubeadm" term_id="kubeadm" >}} tool, following the steps from [Installing kubeadm](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/). + +For each server that you want to use as a {{< glossary_tooltip text="node" term_id="node" >}}, make sure it allows IPv6 forwarding. On Linux, you can set this by running `sysctl -w net.ipv6.conf.all.forwarding=1` as the root user on each server. + +You need to have an IPv4 and an IPv6 address range to use. Cluster operators typically +use private address ranges for IPv4. For IPv6, a cluster operator typically chooses a global +unicast address block from within `2000::/3`, using a range that is assigned to the operator. +You don't have to route the cluster's IP address ranges to the public internet. + +The size of the IP address allocations should be suitable for the number of Pods and +Services that you are planning to run. + +{{< note >}} +If you are upgrading an existing cluster then, by default, the `kubeadm upgrade` command +changes the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +`IPv6DualStack` to `true` if that is not already enabled. +However, `kubeadm` does not support making modifications to the pod IP address range +(“cluster CIDR”) nor to the cluster's Service address range (“Service CIDR”). +{{< /note >}} + +### Create a dual-stack cluster + +To create a dual-stack cluster with `kubeadm init` you can pass command line arguments +similar to the following example: + +```shell +# These address ranges are examples +kubeadm init --pod-network-cidr=10.244.0.0/16,2001:db8:42:0::/56 --service-cidr=10.96.0.0/16,2001:db8:42:1::/112 +``` + +To make things clearer, here is an example kubeadm [configuration file](https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) `kubeadm-config.yaml` for the primary dual-stack control plane node.
+ +```yaml +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +featureGates: + IPv6DualStack: true +networking: + podSubnet: 10.244.0.0/16,2001:db8:42:0::/56 + serviceSubnet: 10.96.0.0/16,2001:db8:42:1::/112 +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: "10.100.0.1" + bindPort: 6443 +nodeRegistration: + kubeletExtraArgs: + node-ip: 10.100.0.2,fd00:1:2:3::2 +``` + +`advertiseAddress` in InitConfiguration specifies the IP address that the API Server will advertise it is listening on. The value of `advertiseAddress` equals the `--apiserver-advertise-address` flag of `kubeadm init`. + +Run kubeadm to initialize the dual-stack control plane node: + +```shell +kubeadm init --config=kubeadm-config.yaml +``` + +Currently, the kube-controller-manager flags `--node-cidr-mask-size-ipv4|--node-cidr-mask-size-ipv6` are left at their default values. See [enable IPv4/IPv6 dual stack](/docs/concepts/services-networking/dual-stack#enable-ipv4ipv6-dual-stack). + +{{< note >}} +The `--apiserver-advertise-address` flag does not support dual-stack. +{{< /note >}} + +### Join a node to a dual-stack cluster + +Before joining a node, make sure that the node has an IPv6-routable network interface and allows IPv6 forwarding. + +Here is an example kubeadm [configuration file](https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) `kubeadm-config.yaml` for joining a worker node to the cluster. + +```yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: JoinConfiguration +discovery: + bootstrapToken: + apiServerEndpoint: 10.100.0.1:6443 +nodeRegistration: + kubeletExtraArgs: + node-ip: 10.100.0.3,fd00:1:2:3::3 +``` + +Also, here is an example kubeadm [configuration file](https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) `kubeadm-config.yaml` for joining another control plane node to the cluster. +```yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: JoinConfiguration +controlPlane: + localAPIEndpoint: + advertiseAddress: "10.100.0.2" + bindPort: 6443 +discovery: + bootstrapToken: + apiServerEndpoint: 10.100.0.1:6443 +nodeRegistration: + kubeletExtraArgs: + node-ip: 10.100.0.4,fd00:1:2:3::4 + +``` + +`advertiseAddress` in JoinConfiguration.controlPlane specifies the IP address that the API Server will advertise it is listening on. The value of `advertiseAddress` equals the `--apiserver-advertise-address` flag of `kubeadm join`. + +```shell +kubeadm join --config=kubeadm-config.yaml ... +``` + +### Create a single-stack cluster + +{{< note >}} +Enabling the dual-stack feature doesn't mean that you need to use dual-stack addressing. +You can deploy a single-stack cluster that has the dual-stack networking feature enabled. +{{< /note >}} + +In 1.21 the `IPv6DualStack` feature is Beta and the feature gate defaults to `true`. To disable the feature you must configure the feature gate to `false`. Note that once the feature is GA, the feature gate will be removed. + +```shell +kubeadm init --feature-gates IPv6DualStack=false +``` + +To make things clearer, here is an example kubeadm [configuration file](https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) `kubeadm-config.yaml` for the single-stack control plane node.
+ +```yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +featureGates: + IPv6DualStack: false +networking: + podSubnet: 10.244.0.0/16 + serviceSubnet: 10.96.0.0/16 +``` + +## {{% heading "whatsnext" %}} + +* [Validate IPv4/IPv6 dual-stack](/docs/tasks/network/validate-dual-stack) networking +* Read about [Dual-stack](/docs/concepts/services-networking/dual-stack/) cluster networking diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index 7c44f6e6896c6..c997156827726 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -18,14 +18,7 @@ For information how to create a cluster with kubeadm once you have performed thi ## {{% heading "prerequisites" %}} -* One or more machines running one of: - - Ubuntu 16.04+ - - Debian 9+ - - CentOS 7+ - - Red Hat Enterprise Linux (RHEL) 7+ - - Fedora 25+ - - HypriotOS v1.0.1+ - - Flatcar Container Linux (tested with 2512.3.0) +* A compatible Linux host. The Kubernetes project provides generic instructions for Linux distributions based on Debian and Red Hat, and those distributions without a package manager. * 2 GB or more of RAM per machine (any less will leave little room for your apps). * 2 CPUs or more. * Full network connectivity between all machines in the cluster (public or private network is fine). @@ -78,7 +71,7 @@ For more details please see the [Network Plugin Requirements](/docs/concepts/ext | Protocol | Direction | Port Range | Purpose | Used By | |----------|-----------|------------|-------------------------|---------------------------| -| TCP | Inbound | 6443* | Kubernetes API server | All | +| TCP | Inbound | 6443\* | Kubernetes API server | All | | TCP | Inbound | 2379-2380 | etcd server client API | kube-apiserver, etcd | | TCP | Inbound | 10250 | kubelet API | Self, Control plane | | TCP | Inbound | 10251 | kube-scheduler | Self | @@ -122,7 +115,7 @@ The following table lists container runtimes and their associated socket paths: {{< table caption = "Container runtimes and their socket paths" >}} | Runtime | Path to Unix domain socket | |------------|-----------------------------------| -| Docker | `/var/run/docker.sock` | +| Docker | `/var/run/dockershim.sock` | | containerd | `/run/containerd/containerd.sock` | | CRI-O | `/var/run/crio/crio.sock` | {{< /table >}} @@ -167,7 +160,7 @@ kubelet and the control plane is supported, but the kubelet version may never ex server version. For example, the kubelet running 1.7.0 should be fully compatible with a 1.8.0 API server, but not vice versa. -For information about installing `kubectl`, see [Install and set up kubectl](/docs/tasks/tools/install-kubectl/). +For information about installing `kubectl`, see [Install and set up kubectl](/docs/tasks/tools/). {{< warning >}} These instructions exclude all Kubernetes packages from any system upgrades. 
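As a sketch of how the socket paths in the table above are used, you can point kubeadm at a specific runtime socket explicitly; the containerd path below is the one listed in that table, and the rest of the command line is assumed:

```shell
# Sketch: tell kubeadm which CRI socket to use (containerd path from the table above)
sudo kubeadm init --cri-socket /run/containerd/containerd.sock
```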
@@ -181,19 +174,37 @@ For more information on version skews, see: * Kubeadm-specific [version skew policy](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#version-skew-policy) {{< tabs name="k8s_install" >}} -{{% tab name="Ubuntu, Debian or HypriotOS" %}} -```bash -sudo apt-get update && sudo apt-get install -y apt-transport-https curl -curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - -cat <}} -The DOWNLOAD_DIR variable must be set to a writable directory. -If you are running Flatcar Container Linux, set DOWNLOAD_DIR=/opt/bin. +The `DOWNLOAD_DIR` variable must be set to a writable directory. +If you are running Flatcar Container Linux, set `DOWNLOAD_DIR=/opt/bin`. {{< /note >}} ```bash @@ -284,40 +295,17 @@ See the [Kubeadm Troubleshooting guide](/docs/setup/production-environment/tools The kubelet is now restarting every few seconds, as it waits in a crashloop for kubeadm to tell it what to do. -## Configure cgroup driver used by kubelet on control-plane node - -When using Docker, kubeadm will automatically detect the cgroup driver for the kubelet -and set it in the `/var/lib/kubelet/config.yaml` file during runtime. +## Configuring a cgroup driver -If you are using a different CRI, you must pass your `cgroupDriver` value to `kubeadm init`, like so: +Both the container runtime and the kubelet have a property called +["cgroup driver"](/docs/setup/production-environment/container-runtimes/), which is important +for the management of cgroups on Linux machines. -```yaml -apiVersion: kubelet.config.k8s.io/v1beta1 -kind: KubeletConfiguration -cgroupDriver: -``` - -For further details, please read [Using kubeadm init with a configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file). - -Please mind, that you **only** have to do that if the cgroup driver of your CRI -is not `cgroupfs`, because that is the default value in the kubelet already. - -{{< note >}} -Since `--cgroup-driver` flag has been deprecated by the kubelet, if you have that in `/var/lib/kubelet/kubeadm-flags.env` -or `/etc/default/kubelet`(`/etc/sysconfig/kubelet` for RPMs), please remove it and use the KubeletConfiguration instead -(stored in `/var/lib/kubelet/config.yaml` by default). -{{< /note >}} - -Restarting the kubelet is required: - -```bash -sudo systemctl daemon-reload -sudo systemctl restart kubelet -``` - -The automatic detection of cgroup driver for other container runtimes -like CRI-O and containerd is work in progress. +{{< warning >}} +Matching the container runtime and kubelet cgroup drivers is required; otherwise the kubelet process will fail. +See [Configuring a cgroup driver](/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/) for more details. +{{< /warning >}} ## Troubleshooting @@ -325,5 +313,5 @@ If you are running into difficulties with kubeadm, please consult our [troublesh ## {{% heading "whatsnext" %}} - * [Using kubeadm to Create a Cluster](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) + diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md b/content/en/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md index f68b026ccb341..dda9aba77a553 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md @@ -23,10 +23,8 @@ manager instead, but you need to configure it manually.
Some kubelet configuration details need to be the same across all kubelets involved in the cluster, while other configuration aspects need to be set on a per-kubelet basis to accommodate the different characteristics of a given machine (such as OS, storage, and networking). You can manage the configuration -of your kubelets manually, but kubeadm now provides a `KubeletConfiguration` API type for [managing your -kubelet configurations centrally](#configure-kubelets-using-kubeadm). - - +of your kubelets manually, but kubeadm now provides a `KubeletConfiguration` API type for +[managing your kubelet configurations centrally](#configure-kubelets-using-kubeadm). @@ -52,8 +50,9 @@ Virtual IPs for services are now allocated from this subnet. You also need to se by the kubelet, using the `--cluster-dns` flag. This setting needs to be the same for every kubelet on every manager and Node in the cluster. The kubelet provides a versioned, structured API object that can configure most parameters in the kubelet and push out this configuration to each running -kubelet in the cluster. This object is called **the kubelet's ComponentConfig**. -The ComponentConfig allows the user to specify flags such as the cluster DNS IP addresses expressed as +kubelet in the cluster. This object is called +[`KubeletConfiguration`](/docs/reference/config-api/kubelet-config.v1beta1/). +The `KubeletConfiguration` allows the user to specify flags such as the cluster DNS IP addresses expressed as a list of values to a camelCased key, illustrated by the following example: ```yaml @@ -63,7 +62,7 @@ clusterDNS: - 10.96.0.10 ``` -For more details on the ComponentConfig have a look at [this section](#configure-kubelets-using-kubeadm). +For more details on the `KubeletConfiguration` have a look at [this section](#configure-kubelets-using-kubeadm). ### Providing instance-specific configuration details @@ -99,8 +98,8 @@ API object is passed with a configuration file like so `kubeadm ... --config som By calling `kubeadm config print init-defaults --component-configs KubeletConfiguration` you can see all the default values for this structure. -Also have a look at the [API reference for the -kubelet ComponentConfig](https://godoc.org/k8s.io/kubernetes/pkg/kubelet/apis/config#KubeletConfiguration) +Also have a look at the +[reference for the KubeletConfiguration](/docs/reference/config-api/kubelet-config.v1beta1/) for more information on the individual fields. ### Workflow when using `kubeadm init` @@ -160,9 +159,13 @@ has finished performing the TLS Bootstrap. `kubeadm` ships with configuration for how systemd should run the kubelet. Note that the kubeadm CLI command never touches this drop-in file. -This configuration file installed by the `kubeadm` [DEB](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/deb/kubeadm/10-kubeadm.conf) or [RPM package](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/rpm/kubeadm/10-kubeadm.conf) is written to +This configuration file installed by the `kubeadm` +[DEB](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/deb/kubeadm/10-kubeadm.conf) or +[RPM package](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/rpm/kubeadm/10-kubeadm.conf) is written to `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` and is used by systemd. 
-It augments the basic [`kubelet.service` for RPM](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/rpm/kubelet/kubelet.service) or [`kubelet.service` for DEB](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service): +It augments the basic +[`kubelet.service` for RPM](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/rpm/kubelet/kubelet.service) or +[`kubelet.service` for DEB](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service): ```none [Service] diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/self-hosting.md b/content/en/docs/setup/production-environment/tools/kubeadm/self-hosting.md deleted file mode 100644 index d860a88bddc37..0000000000000 --- a/content/en/docs/setup/production-environment/tools/kubeadm/self-hosting.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -reviewers: -- sig-cluster-lifecycle -title: Configuring your kubernetes cluster to self-host the control plane -content_type: concept -weight: 100 ---- - - - -### Self-hosting the Kubernetes control plane {#self-hosting} - -kubeadm allows you to experimentally create a _self-hosted_ Kubernetes control -plane. This means that key components such as the API server, controller -manager, and scheduler run as [DaemonSet pods](/docs/concepts/workloads/controllers/daemonset/) -configured via the Kubernetes API instead of [static pods](/docs/tasks/configure-pod-container/static-pod/) -configured in the kubelet via static files. - -To create a self-hosted cluster see the -[kubeadm alpha selfhosting pivot](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-selfhosting) command. - - - -#### Caveats - -{{< caution >}} -This feature pivots your cluster into an unsupported state, rendering kubeadm unable -to manage you cluster any longer. This includes `kubeadm upgrade`. -{{< /caution >}} - -1. Self-hosting in 1.8 and later has some important limitations. In particular, a - self-hosted cluster _cannot recover from a reboot of the control-plane node_ - without manual intervention. - -1. By default, self-hosted control plane Pods rely on credentials loaded from - [`hostPath`](/docs/concepts/storage/volumes/#hostpath) - volumes. Except for initial creation, these credentials are not managed by - kubeadm. - -1. The self-hosted portion of the control plane does not include etcd, - which still runs as a static Pod. - -#### Process - -The self-hosting bootstrap process is documented in the [kubeadm design -document](https://github.com/kubernetes/kubeadm/blob/master/docs/design/design_v1.9.md#optional-self-hosting). - -In summary, `kubeadm alpha selfhosting` works as follows: - - 1. Waits for this bootstrap static control plane to be running and - healthy. This is identical to the `kubeadm init` process without self-hosting. - - 1. Uses the static control plane Pod manifests to construct a set of - DaemonSet manifests that will run the self-hosted control plane. - It also modifies these manifests where necessary, for example adding new volumes - for secrets. - - 1. Creates DaemonSets in the `kube-system` namespace and waits for the - resulting Pods to be running. - - 1. Once self-hosted Pods are operational, their associated static Pods are deleted - and kubeadm moves on to install the next component. This triggers kubelet to - stop those static Pods. - - 1. 
When the original static control plane stops, the new self-hosted control - plane is able to bind to listening ports and become active. - - diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md index ded4250787e6a..5de8afd20bca5 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md @@ -99,10 +99,11 @@ This may be caused by a number of problems. The most common are: There are two common ways to fix the cgroup driver problem: - 1. Install Docker again following instructions - [here](/docs/setup/production-environment/container-runtimes/#docker). + 1. Install Docker again following instructions + [here](/docs/setup/production-environment/container-runtimes/#docker). - 1. Change the kubelet config to match the Docker cgroup driver manually, you can refer to [Configure cgroup driver used by kubelet on control-plane node](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-control-plane-node) + 1. Change the kubelet config to match the Docker cgroup driver manually, you can refer to + [Configure cgroup driver used by kubelet on control-plane node](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-control-plane-node) - control plane Docker containers are crashlooping or hanging. You can check this by running `docker ps` and investigating each container by running `docker logs`. @@ -110,8 +111,11 @@ This may be caused by a number of problems. The most common are: The following could happen if Docker halts and does not remove any Kubernetes-managed containers: -```bash +```shell sudo kubeadm reset +``` + +```console [preflight] Running pre-flight checks [reset] Stopping the kubelet service [reset] Unmounting mounted directories in "/var/lib/kubelet" @@ -121,14 +125,14 @@ sudo kubeadm reset A possible solution is to restart the Docker service and then re-run `kubeadm reset`: -```bash +```shell sudo systemctl restart docker.service sudo kubeadm reset ``` Inspecting the logs for docker may also be useful: -```sh +```shell journalctl -u docker ``` @@ -138,10 +142,10 @@ Right after `kubeadm init` there should not be any pods in these states. - If there are pods in one of these states _right after_ `kubeadm init`, please open an issue in the kubeadm repo. `coredns` (or `kube-dns`) should be in the `Pending` state - until you have deployed the network solution. + until you have deployed the network add-on. - If you see Pods in the `RunContainerError`, `CrashLoopBackOff` or `Error` state - after deploying the network solution and nothing happens to `coredns` (or `kube-dns`), - it's very likely that the Pod Network solution that you installed is somehow broken. + after deploying the network add-on and nothing happens to `coredns` (or `kube-dns`), + it's very likely that the Pod Network add-on that you installed is somehow broken. You might have to grant it more RBAC privileges or use a newer version. Please file an issue in the Pod Network providers' issue tracker and get the issue triaged there. - If you install a version of Docker older than 1.12.1, remove the `MountFlags=slave` option @@ -149,17 +153,17 @@ Right after `kubeadm init` there should not be any pods in these states. 
MountFlags can interfere with volumes mounted by Kubernetes, and put the Pods in `CrashLoopBackOff` state. The error happens when Kubernetes does not find `/var/run/secrets/kubernetes.io/serviceaccount` files. -## `coredns` (or `kube-dns`) is stuck in the `Pending` state +## `coredns` is stuck in the `Pending` state This is **expected** and part of the design. kubeadm is network provider-agnostic, so the admin -should [install the pod network solution](/docs/concepts/cluster-administration/addons/) +should [install the pod network add-on](/docs/concepts/cluster-administration/addons/) of choice. You have to install a Pod Network before CoreDNS may be deployed fully. Hence the `Pending` state before the network is set up. ## `HostPort` services do not work The `HostPort` and `HostIP` functionality is available depending on your Pod Network -provider. Please contact the author of the Pod Network solution to find out whether +provider. Please contact the author of the Pod Network add-on to find out whether `HostPort` and `HostIP` functionality are available. Calico, Canal, and Flannel CNI providers are verified to support HostPort. @@ -363,7 +367,7 @@ kubectl taint nodes NODE_NAME node-role.kubernetes.io/master:NoSchedule- ## `/usr` is mounted read-only on nodes {#usr-mounted-read-only} -On Linux distributions such as Fedora CoreOS, the directory `/usr` is mounted as a read-only filesystem. +On Linux distributions such as Fedora CoreOS or Flatcar Container Linux, the directory `/usr` is mounted as a read-only filesystem. For [flex-volume support](https://github.com/kubernetes/community/blob/ab55d85/contributors/devel/sig-storage/flexvolume.md), Kubernetes components like the kubelet and kube-controller-manager use the default path of `/usr/libexec/kubernetes/kubelet-plugins/volume/exec/`, yet the flex-volume directory _must be writeable_ @@ -404,7 +408,7 @@ be advised that this is modifying a design principle of the Linux distribution. ## `kubeadm upgrade plan` prints out `context deadline exceeded` error message -This error message is shown when upgrading a Kubernetes cluster with `kubeadm` in the case of running an external etcd. This is not a critical bug and happens because older versions of kubeadm perform a version check on the external etcd cluster. You can proceed with `kubeadm upgrade apply ...`. +This error message is shown when upgrading a Kubernetes cluster with `kubeadm` when running an external etcd cluster. This is not a critical bug and happens because older versions of kubeadm perform a version check on the external etcd cluster. You can proceed with `kubeadm upgrade apply ...`. This issue is fixed as of version 1.19. @@ -415,3 +419,21 @@ If `/var/lib/kubelet` is being mounted, performing a `kubeadm reset` will effect To work around the issue, re-mount the `/var/lib/kubelet` directory after performing the `kubeadm reset` operation. This is a regression introduced in kubeadm 1.15. The issue is fixed in 1.20. + +## Cannot use the metrics-server securely in a kubeadm cluster + +In a kubeadm cluster, the [metrics-server](https://github.com/kubernetes-sigs/metrics-server) +can be used insecurely by passing the `--kubelet-insecure-tls` flag to it. This is not recommended for production clusters. + +If you want to use TLS between the metrics-server and the kubelet, there is a problem, +since kubeadm deploys a self-signed serving certificate for the kubelet.
This can cause the following errors +on the side of the metrics-server: +``` +x509: certificate signed by unknown authority +x509: certificate is valid for IP-foo not IP-bar +``` + +See [Enabling signed kubelet serving certificates](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#kubelet-serving-certs) +to understand how to configure the kubelets in a kubeadm cluster to have properly signed serving certificates. + +Also see [How to run the metrics-server securely](https://github.com/kubernetes-sigs/metrics-server/blob/master/FAQ.md#how-to-run-metrics-server-securely). diff --git a/content/en/docs/setup/production-environment/tools/kubespray.md b/content/en/docs/setup/production-environment/tools/kubespray.md index ac635101a0e83..08893370cf004 100644 --- a/content/en/docs/setup/production-environment/tools/kubespray.md +++ b/content/en/docs/setup/production-environment/tools/kubespray.md @@ -23,7 +23,7 @@ Kubespray is a composition of [Ansible](https://docs.ansible.com/) playbooks, [i * continuous integration tests To choose a tool which best fits your use case, read [this comparison](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/comparisons.md) to -[kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) and [kops](/docs/setup/production-environment/tools/kops/). +[kubeadm](/docs/reference/setup-tools/kubeadm/) and [kops](/docs/setup/production-environment/tools/kops/). @@ -50,7 +50,7 @@ Kubespray provides the following utilities to help provision your environment: ### (2/5) Compose an inventory file -After you provision your servers, create an [inventory file for Ansible](https://docs.ansible.com/ansible/intro_inventory.html). You can do this manually or via a dynamic inventory script. For more information, see "[Building your own inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#building-your-own-inventory)". +After you provision your servers, create an [inventory file for Ansible](https://docs.ansible.com/ansible/latest/network/getting_started/first_inventory.html). You can do this manually or via a dynamic inventory script. For more information, see "[Building your own inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#building-your-own-inventory)". ### (3/5) Plan your cluster deployment @@ -68,7 +68,7 @@ Kubespray provides the ability to customize many aspects of the deployment: * {{< glossary_tooltip term_id="cri-o" >}} * Certificate generation methods -Kubespray customizations can be made to a [variable file](https://docs.ansible.com/ansible/playbooks_variables.html). If you are just getting started with Kubespray, consider using the Kubespray defaults to deploy your cluster and explore Kubernetes. +Kubespray customizations can be made to a [variable file](https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html). If you are getting started with Kubespray, consider using the Kubespray defaults to deploy your cluster and explore Kubernetes. 
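For orientation before the deploy step that follows, a Kubespray deployment ultimately comes down to running its `cluster.yml` playbook against your inventory; a minimal sketch, with the inventory path assumed:

```shell
# Sketch: run the Kubespray playbook against an assumed inventory location
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```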
### (4/5) Deploy a Cluster diff --git a/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md b/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md index 25eeb18050adc..a2055ce4257ed 100644 --- a/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md +++ b/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md @@ -1,7 +1,9 @@ --- reviewers: -- michmike -- patricklang +- jayunit100 +- jsturtevant +- marosset +- perithompson title: Intro to Windows support in Kubernetes content_type: concept weight: 65 @@ -15,7 +17,7 @@ Windows applications constitute a large portion of the services and applications ## Windows containers in Kubernetes -To enable the orchestration of Windows containers in Kubernetes, simply include Windows nodes in your existing Linux cluster. Scheduling Windows containers in {{< glossary_tooltip text="Pods" term_id="pod" >}} on Kubernetes is as simple and easy as scheduling Linux-based containers. +To enable the orchestration of Windows containers in Kubernetes, include Windows nodes in your existing Linux cluster. Scheduling Windows containers in {{< glossary_tooltip text="Pods" term_id="pod" >}} on Kubernetes is similar to scheduling Linux-based containers. In order to run Windows containers, your Kubernetes cluster must include multiple operating systems, with control plane nodes running Linux and workers running either Windows or Linux depending on your workload needs. Windows Server 2019 is the only Windows operating system supported, enabling [Kubernetes Node](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node) on Windows (including kubelet, [container runtime](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/containerd), and kube-proxy). For a detailed explanation of Windows distribution channels see the [Microsoft documentation](https://docs.microsoft.com/en-us/windows-server/get-started-19/servicing-channels-19). @@ -36,7 +38,7 @@ In this document, when we talk about Windows containers we mean Windows containe Refer to the following table for Windows operating system support in Kubernetes. A single heterogeneous Kubernetes cluster can have both Windows and Linux worker nodes. Windows containers have to be scheduled on Windows nodes and Linux containers on Linux nodes. | Kubernetes version | Windows Server LTSC releases | Windows Server SAC releases | -| --- | --- | --- | --- | +| --- | --- | --- | | *Kubernetes v1.17* | Windows Server 2019 | Windows Server ver 1809 | | *Kubernetes v1.18* | Windows Server 2019 | Windows Server ver 1809, Windows Server ver 1903, Windows Server ver 1909 | | *Kubernetes v1.19* | Windows Server 2019 | Windows Server ver 1909, Windows Server ver 2004 | @@ -213,15 +215,16 @@ On Windows, you can use the following settings to configure Services and load ba {{< table caption="Windows Service Settings" >}} | Feature | Description | Supported Kubernetes version | Supported Windows OS build | How to enable | | ------- | ----------- | ----------------------------- | -------------------------- | ------------- | -| Session affinity | Ensures that connections from a particular client are passed to the same Pod each time. 
| v1.19+ | [Windows Server vNext Insider Preview Build 19551](https://blogs.windows.com/windowsexperience/2020/01/28/announcing-windows-server-vnext-insider-preview-build-19551/) (or higher) | Set `service.spec.sessionAffinity` to "ClientIP" | -| Direct Server Return | Load balancing mode where the IP address fixups and the LBNAT occurs at the container vSwitch port directly; service traffic arrives with the source IP set as the originating pod IP. Promises lower latency and scalability. | v1.15+ | Windows Server, version 2004 | Set the following flags in kube-proxy: `--feature-gates="WinDSR=true" --enable-dsr=true` | -| Preserve-Destination | Skips DNAT of service traffic, thereby preserving the virtual IP of the target service in packets reaching the backend Pod. This setting will also ensure that the client IP of incoming packets get preserved. | v1.15+ | Windows Server, version 1903 (or higher) | Set `"preserve-destination": "true"` in service annotations and enable DSR flags in kube-proxy. | -| IPv4/IPv6 dual-stack networking | Native IPv4-to-IPv4 in parallel with IPv6-to-IPv6 communications to, from, and within a cluster | v1.19+ | Windows Server vNext Insider Preview Build 19603 (or higher) | See [IPv4/IPv6 dual-stack](#ipv4ipv6-dual-stack) | +| Session affinity | Ensures that connections from a particular client are passed to the same Pod each time. | v1.20+ | [Windows Server vNext Insider Preview Build 19551](https://blogs.windows.com/windowsexperience/2020/01/28/announcing-windows-server-vnext-insider-preview-build-19551/) (or higher) | Set `service.spec.sessionAffinity` to "ClientIP" | +| Direct Server Return (DSR) | Load balancing mode where the IP address fixups and the LBNAT occur at the container vSwitch port directly; service traffic arrives with the source IP set as the originating pod IP. | v1.20+ | Windows Server 2019 | Set the following flags in kube-proxy: `--feature-gates="WinDSR=true" --enable-dsr=true` | +| Preserve-Destination | Skips DNAT of service traffic, thereby preserving the virtual IP of the target service in packets reaching the backend Pod. Also disables node-node forwarding. | v1.20+ | Windows Server, version 1903 (or higher) | Set `"preserve-destination": "true"` in service annotations and enable DSR in kube-proxy. | +| IPv4/IPv6 dual-stack networking | Native IPv4-to-IPv4 in parallel with IPv6-to-IPv6 communications to, from, and within a cluster | v1.19+ | Windows Server, version 2004 (or higher) | See [IPv4/IPv6 dual-stack](#ipv4ipv6-dual-stack) | +| Client IP preservation | Ensures that the source IP of incoming ingress traffic is preserved. Also disables node-node forwarding. | v1.20+ | Windows Server, version 2019 (or higher) | Set `service.spec.externalTrafficPolicy` to "Local" and enable DSR in kube-proxy | {{< /table >}} #### IPv4/IPv6 dual-stack -You can enable IPv4/IPv6 dual-stack networking for `l2bridge` networks using the `IPv6DualStack` [feature gate](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/). See [enable IPv4/IPv6 dual stack](/docs/concepts/services-networking/dual-stack#enable-ipv4ipv6-dual-stack) for more details. +You can enable IPv4/IPv6 dual-stack networking for `l2bridge` networks using the `IPv6DualStack` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/). See [enable IPv4/IPv6 dual stack](/docs/concepts/services-networking/dual-stack#enable-ipv4ipv6-dual-stack) for more details.
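As a hedged sketch (not a complete configuration), the feature gate is passed to the node components on the Windows node; the CIDR values below are illustrative placeholders, and a working setup also needs matching control plane and CNI configuration:

```shell
# Illustrative only: enable the IPv6DualStack feature gate on kube-proxy
# for a Windows node. The cluster CIDRs are example placeholders.
kube-proxy.exe --feature-gates="IPv6DualStack=true" --cluster-cidr="10.244.0.0/16,fd00:244::/64"
```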
{{< note >}} On Windows, using IPv6 with Kubernetes requires Windows Server, version 2004 (kernel version 10.0.19041.610) or later. @@ -233,23 +236,33 @@ Overlay (VXLAN) networks on Windows do not support dual-stack networking today. ### Limitations -#### Control Plane - Windows is only supported as a worker node in the Kubernetes architecture and component matrix. This means that a Kubernetes cluster must always include Linux master nodes, zero or more Linux worker nodes, and zero or more Windows worker nodes. -#### Compute {compute-limitations} -##### Resource management and process isolation +#### Resource Handling Linux cgroups are used as a pod boundary for resource controls in Linux. Containers are created within that boundary for network, process and file system isolation. The cgroups APIs can be used to gather cpu/io/memory stats. In contrast, Windows uses a Job object per container with a system namespace filter to contain all processes in a container and provide logical isolation from the host. There is no way to run a Windows container without the namespace filtering in place. This means that system privileges cannot be asserted in the context of the host, and thus privileged containers are not available on Windows. Containers cannot assume an identity from the host because the Security Account Manager (SAM) is separate. -##### Operating System Restrictions +#### Resource Reservations -Windows has strict compatibility rules, where the host OS version must match the container base image OS version. Only Windows containers with a container operating system of Windows Server 2019 are supported. Hyper-V isolation of containers, enabling some backward compatibility of Windows container image versions, is planned for a future release. +##### Memory Reservations +Windows does not have an out-of-memory process killer as Linux does. Windows always treats all user-mode memory allocations as virtual, and pagefiles are mandatory. The net effect is that Windows won't reach out of memory conditions the same way Linux does, and processes page to disk instead of being subject to out of memory (OOM) termination. If memory is over-provisioned and all physical memory is exhausted, then paging can slow down performance. + +Keeping memory usage within reasonable bounds is possible using the kubelet parameters `--kube-reserved` and/or `--system-reserved` to account for memory usage on the node (outside of containers). This reduces [NodeAllocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable). + +{{< note >}} +As you deploy workloads, use resource limits (must set only limits or limits must equal requests) on containers. This also subtracts from NodeAllocatable and prevents the scheduler from adding more pods once a node is full. +{{< /note >}} + +A best practice to avoid over-provisioning is to configure the kubelet with a system reserved memory of at least 2GB to account for Windows, Docker, and Kubernetes processes. + +##### CPU Reservations +To account for Windows, Docker, and other Kubernetes host processes, it is recommended to reserve a percentage of CPU so they are able to respond to events. This value needs to be scaled based on the number of CPU cores available on the Windows node. To determine this percentage, identify the maximum pod density for each of your nodes and monitor the CPU usage of the system services, choosing a value that meets your workload needs.
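For example, a minimal sketch combining the memory and CPU reservations described in this section (the values are illustrative starting points, not recommendations; size them from your own node capacity and pod density):

```shell
# Illustrative only: reserve CPU and memory for Windows, the container
# runtime, and Kubernetes host processes. Both flags reduce NodeAllocatable.
kubelet --kube-reserved="cpu=500m,memory=1.5Gi" --system-reserved="cpu=500m,memory=2Gi"
```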
-##### Feature Restrictions +Keeping CPU usage within reasonable bounds is possible using the kubelet parameters `--kube-reserved` and/or `--system-reserved` to account for CPU usage on the node (outside of containers). This reduces [NodeAllocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable). -* TerminationGracePeriod: requires CRI-containerD +#### Feature Restrictions +* TerminationGracePeriod: not implemented * Single file mapping: to be implemented with CRI-ContainerD * Termination message: to be implemented with CRI-ContainerD * Privileged Containers: not currently supported in Windows containers @@ -257,15 +270,8 @@ Windows has strict compatibility rules, where the host OS version must match the * The existing node problem detector is Linux-only and requires privileged containers. In general, we don't expect this to be used on Windows because privileged containers are not supported * Not all features of shared namespaces are supported (see API section for more details) -##### Memory Reservations and Handling - -Windows does not have an out-of-memory process killer as Linux does. Windows always treats all user-mode memory allocations as virtual, and pagefiles are mandatory. The net effect is that Windows won't reach out of memory conditions the same way Linux does, and processes page to disk instead of being subject to out of memory (OOM) termination. If memory is over-provisioned and all physical memory is exhausted, then paging can slow down performance. - -Keeping memory usage within reasonable bounds is possible with a two-step process. First, use the kubelet parameters `--kubelet-reserve` and/or `--system-reserve` to account for memory usage on the node (outside of containers). This reduces [NodeAllocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable)). As you deploy workloads, use resource limits (must set only limits or limits must equal requests) on containers. This also subtracts from NodeAllocatable and prevents the scheduler from adding more pods once a node is full. - -A best practice to avoid over-provisioning is to configure the kubelet with a system reserved memory of at least 2GB to account for Windows, Docker, and Kubernetes processes. - -The behavior of the flags behave differently as described below: +#### Difference in behavior of flags when compared to Linux +The behavior of the following kubelet flags is different on Windows nodes as described below: * `--kube-reserved`, `--system-reserved`, and `--eviction-hard` flags update Node Allocatable * Eviction by using `--enforce-node-allocatable` is not implemented @@ -297,7 +303,7 @@ As a result, the following storage functionality is not supported on Windows nod * NFS based storage/volume support * Expanding the mounted volume (resizefs) -#### Networking {networking-limitations} +#### Networking {#networking-limitations} Windows Container Networking differs in some important ways from Linux networking. The [Microsoft documentation for Windows Container Networking](https://docs.microsoft.com/en-us/virtualization/windowscontainers/container-networking/architecture) contains additional details and background.
@@ -308,9 +314,10 @@ The following networking functionality is not supported on Windows nodes * Host networking mode is not available for Windows pods * Local NodePort access from the node itself fails (works for other nodes or external clients) * Accessing service VIPs from nodes will be available with a future release of Windows Server -* Overlay networking support in kube-proxy is an alpha release. In addition, it requires [KB4482887](https://support.microsoft.com/en-us/help/4482887/windows-10-update-kb4482887) to be installed on Windows Server 2019 -* Local Traffic Policy and DSR mode -* Windows containers connected to l2bridge, l2tunnel, or overlay networks do not support communicating over the IPv6 stack. There is outstanding Windows platform work required to enable these network drivers to consume IPv6 addresses and subsequent Kubernetes work in kubelet, kube-proxy, and CNI plugins. +* A single service can only support up to 64 backend pods / unique destination IPs +* Overlay networking support in kube-proxy is a beta feature. In addition, it requires [KB4482887](https://support.microsoft.com/en-us/help/4482887/windows-10-update-kb4482887) to be installed on Windows Server 2019 +* Local Traffic Policy in non-DSR mode +* Windows containers connected to overlay networks do not support communicating over the IPv6 stack. There is outstanding Windows platform work required to enable this network driver to consume IPv6 addresses and subsequent Kubernetes work in kubelet, kube-proxy, and CNI plugins. * Outbound communication using the ICMP protocol via the win-overlay, win-bridge, and Azure-CNI plugin. Specifically, the Windows data plane ([VFP](https://www.microsoft.com/en-us/research/project/azure-virtual-filtering-platform/)) doesn't support ICMP packet transpositions. This means: * ICMP packets directed to destinations within the same network (e.g. pod to pod communication via ping) work as expected and without any limitations * TCP/UDP packets work as expected and without any limitations @@ -332,7 +339,7 @@ These features were added in Kubernetes v1.15: ##### DNS {#dns-limitations} * ClusterFirstWithHostNet is not supported for DNS. Windows treats all names with a '.' as an FQDN and skips PQDN resolution -* On Linux, you have a DNS suffix list, which is used when trying to resolve PQDNs. On Windows, we only have 1 DNS suffix, which is the DNS suffix associated with that pod's namespace (mydns.svc.cluster.local for example). Windows can resolve FQDNs and services or names resolvable with just that suffix. For example, a pod spawned in the default namespace, will have the DNS suffix **default.svc.cluster.local**. On a Windows pod, you can resolve both **kubernetes.default.svc.cluster.local** and **kubernetes**, but not the in-betweens, like **kubernetes.default** or **kubernetes.default.svc**. +* On Linux, you have a DNS suffix list, which is used when trying to resolve PQDNs. On Windows, there is only one DNS suffix, which is the DNS suffix associated with that pod's namespace (mydns.svc.cluster.local for example). Windows can resolve FQDNs and services or names resolvable with only that suffix. For example, a pod spawned in the default namespace will have the DNS suffix **default.svc.cluster.local**. On a Windows pod, you can resolve both **kubernetes.default.svc.cluster.local** and **kubernetes**, but not the in-betweens, like **kubernetes.default** or **kubernetes.default.svc**. * On Windows, there are multiple DNS resolvers that can be used.
As these come with slightly different behaviors, using the `Resolve-DNSName` utility for name query resolutions is recommended. ##### IPv6 @@ -362,9 +369,9 @@ There are no differences in how most of the Kubernetes APIs work for Windows. Th At a high level, these OS concepts are different: -* Identity - Linux uses userID (UID) and groupID (GID) which are represented as integer types. User and group names are not canonical - they are just an alias in `/etc/groups` or `/etc/passwd` back to UID+GID. Windows uses a larger binary security identifier (SID) which is stored in the Windows Security Access Manager (SAM) database. This database is not shared between the host and containers, or between containers. +* Identity - Linux uses userID (UID) and groupID (GID) which are represented as integer types. User and group names are not canonical - they are an alias in `/etc/group` or `/etc/passwd` back to UID+GID. Windows uses a larger binary security identifier (SID) which is stored in the Windows Security Account Manager (SAM) database. This database is not shared between the host and containers, or between containers. * File permissions - Windows uses an access control list based on SIDs, rather than a bitmask of permissions and UID+GID -* File paths - convention on Windows is to use `\` instead of `/`. The Go IO libraries typically accept both and just make it work, but when you're setting a path or command line that's interpreted inside a container, `\` may be needed. +* File paths - convention on Windows is to use `\` instead of `/`. The Go IO libraries accept both types of file path separators. However, when you're setting a path or command line that's interpreted inside a container, `\` may be needed. * Signals - Windows interactive apps handle termination differently, and can implement one or more of these: * A UI thread handles well-defined messages including WM_CLOSE * Console apps handle ctrl-c or ctrl-break using a Control Handler @@ -412,6 +419,10 @@ None of the PodSecurityContext fields work on Windows. They're listed here for r * V1.PodSecurityContext.SupplementalGroups - provides GID, not available on Windows * V1.PodSecurityContext.Sysctls - these are part of the Linux sysctl interface. There's no equivalent on Windows. +#### Operating System Version Restrictions + +Windows has strict compatibility rules, where the host OS version must match the container base image OS version. Only Windows containers with a container operating system of Windows Server 2019 are supported. Hyper-V isolation of containers, enabling some backward compatibility of Windows container image versions, is planned for a future release. + ## Getting Help and Troubleshooting {#troubleshooting} Your main source of help for troubleshooting your Kubernetes cluster should start with this [section](/docs/tasks/debug-application-cluster/troubleshooting/). Some additional, Windows-specific troubleshooting help is included in this section. Logs are an important element of troubleshooting issues in Kubernetes. Make sure to include them any time you seek troubleshooting assistance from other contributors. Follow the instructions in the SIG-Windows [contributing guide on gathering logs](https://github.com/kubernetes/community/blob/master/sig-windows/CONTRIBUTING.md#gathering-logs). @@ -547,7 +558,7 @@ Your main source of help for troubleshooting your Kubernetes cluster should star 1.
After launching `start.ps1`, flanneld is stuck in "Waiting for the Network to be created" - There are numerous reports of this [issue which are being investigated](https://github.com/coreos/flannel/issues/1066); most likely it is a timing issue for when the management IP of the flannel network is set. A workaround is to simply relaunch start.ps1 or relaunch it manually as follows: + There are numerous reports of this [issue](https://github.com/coreos/flannel/issues/1066); most likely it is a timing issue for when the management IP of the flannel network is set. A workaround is to relaunch start.ps1 or relaunch it manually as follows: ```powershell PS C:> [Environment]::SetEnvironmentVariable("NODE_NAME", "") diff --git a/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md b/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md index 6c9c05cc90cb6..ce7aee8a89cf7 100644 --- a/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md +++ b/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md @@ -1,7 +1,9 @@ --- reviewers: -- michmike -- patricklang +- jayunit100 +- jsturtevant +- marosset +- perithompson title: Guide for scheduling Windows containers in Kubernetes content_type: concept weight: 75 @@ -23,7 +25,7 @@ Windows applications constitute a large portion of the services and applications ## Before you begin * Create a Kubernetes cluster that includes a [master and a worker node running Windows Server](/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes) -* It is important to note that creating and deploying services and workloads on Kubernetes behaves in much the same way for Linux and Windows containers. [Kubectl commands](/docs/reference/kubectl/overview/) to interface with the cluster are identical. The example in the section below is provided simply to jumpstart your experience with Windows containers. +* It is important to note that creating and deploying services and workloads on Kubernetes behaves in much the same way for Linux and Windows containers. [Kubectl commands](/docs/reference/kubectl/overview/) to interface with the cluster are identical. The example in the section below is provided to jumpstart your experience with Windows containers. 
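As a quick, hedged illustration of that point, the familiar commands work unchanged; for example, the standard `kubernetes.io/os` node label distinguishes operating systems in a mixed cluster:

```shell
# List the Windows worker nodes in a mixed cluster; kubernetes.io/os is
# the standard operating system label set on every node.
kubectl get nodes -l kubernetes.io/os=windows -o wide
```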
## Getting Started: Deploying a Windows container diff --git a/content/en/docs/setup/release/notes.md b/content/en/docs/setup/release/notes.md index adbdb7c48e238..2741de7e50c06 100644 --- a/content/en/docs/setup/release/notes.md +++ b/content/en/docs/setup/release/notes.md @@ -1,5 +1,5 @@ --- -title: v1.20 Release Notes +title: v1.21 Release Notes weight: 10 card: name: release-notes @@ -13,953 +13,671 @@ card: -# v1.20.0 +# v1.21.0 [Documentation](https://docs.k8s.io) -## Downloads for v1.20.0 +## Downloads for v1.21.0 + +### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes.tar.gz) | `ebfe49552bbda02807034488967b3b62bf9e3e507d56245e298c4c19090387136572c1fca789e772a5e8a19535531d01dcedb61980e42ca7b0461d3864df2c14` -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-src.tar.gz) | `bcbd67ed0bb77840828c08c6118ad0c9bf2bcda16763afaafd8731fd6ce735be654feef61e554bcc34c77c65b02a25dae565adc5e1dc49a2daaa0d115bf1efe6` +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes.tar.gz) | `19bb76a3fa5ce4b9f043b2a3a77c32365ab1fcb902d8dd6678427fb8be8f49f64a5a03dc46aaef9c7dadee05501cf83412eda46f0edacbb8fc1ed0bf5fb79142` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-src.tar.gz) | `f942e6d6c10007a6e9ce21e94df597015ae646a7bc3e515caf1a3b79f1354efb9aff59c40f2553a8e3d43fe4a01742241f5af18b69666244906ed11a22e3bc49` ### Client Binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-darwin-amd64.tar.gz) | `3609f6483f4244676162232b3294d7a2dc40ae5bdd86a842a05aa768f5223b8f50e1d6420fd8afb2d0ce19de06e1d38e5e5b10154ba0cb71a74233e6dc94d5a0` -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-386.tar.gz) | `e06c08016a08137d39804383fdc33a40bb2567aa77d88a5c3fd5b9d93f5b581c635b2c4faaa718ed3bb2d120cb14fe91649ed4469ba72c3a3dda1e343db545ed` -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-amd64.tar.gz) | `081472833601aa4fa78e79239f67833aa4efcb4efe714426cd01d4ddf6f36fbf304ef7e1f5373bff0fdff44a845f7560165c093c108bd359b5ab4189f36b1f2f` -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-arm.tar.gz) | `037f84a2f29fe62d266cab38ac5600d058cce12cbc4851bcf062fafba796c1fbe23a0c2939cd15784854ca7cd92383e5b96a11474fc71fb614b47dbf98a477d9` -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-arm64.tar.gz) | `275727e1796791ca3cbe52aaa713a2660404eab6209466fdc1cfa8559c9b361fe55c64c6bcecbdeba536b6d56213ddf726e58adc60f959b6f77e4017834c5622` -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-ppc64le.tar.gz) | `7a9965293029e9fcdb2b7387467f022d2026953b8461e6c84182abf35c28b7822d2389a6d8e4d8e532d2ea5d5d67c6fee5fb6c351363cb44c599dc8800649b04` -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-s390x.tar.gz) | `85fc449ce1980f5f030cc32e8c8e2198c1cc91a448e04b15d27debc3ca56aa85d283f44b4f4e5fed26ac96904cc12808fa3e9af3d8bf823fc928befb9950d6f5` -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-windows-386.tar.gz) | `4c0a27dba1077aaee943e0eb7a787239dd697e1d968e78d1933c1e60b02d5d233d58541d5beec59807a4ffe3351d5152359e11da120bf64cacb3ee29fbc242e6` -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-windows-amd64.tar.gz) | 
`29336faf7c596539b8329afbbdceeddc843162501de4afee44a40616278fa1f284d8fc48c241fc7d52c65dab70f76280cc33cec419c8c5dbc2625d9175534af8` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-darwin-amd64.tar.gz) | `be9d1440e418e5253fb8a3d8aba705ca8160746a9bd17325ad626a986b6da9f733af864155a651a32b7bca94b533b8d596005ddbe5248bdeea85db47a1b957ed` +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-darwin-arm64.tar.gz) | `eed0ddc81d104bb2d41ace13f737c490423d5df4ebddc7376e45c18ed66af35933c9376b912c1c3da105945b04056f6ca0870c156bee8a307cf4189ca5eb1dd1` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-386.tar.gz) | `8a2f30c4434199762f2a96141dab4241c1cce2711bea9ea39cc63c2c5e7d31719ed7f076efac1931604e3a94578d3bbf0cfa454965708c96f3cfb91789868746` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-amd64.tar.gz) | `cd3cfa645fa31de3716f1f63506e31b73d2aa8d37bb558bb3b3e8c151f35b3d74d44e03cbd05be67e380f9a5d015aba460222afdac6677815cd99a85c2325cf0` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-arm.tar.gz) | `936042aa11cea0f6dfd2c30fc5dbe655420b34799bede036b1299a92d6831f589ca10290b73b9c9741560b603ae31e450ad024e273f2b4df5354bfac272691d8` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-arm64.tar.gz) | `42beb75364d7bf4bf526804b8a35bd0ab3e124b712e9d1f45c1b914e6be0166619b30695feb24b3eecef134991dacb9ab3597e788bd9e45cf35addddf20dd7f6` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-ppc64le.tar.gz) | `4baba2ed7046b28370eccc22e2378ae79e3ce58220d6f4f1b6791e8233bec8379e30200bb20b971456b83f2b791ea166fdfcf1ea56908bc1eea03590c0eda468` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-s390x.tar.gz) | `37fa0c4d703aef09ce68c10ef3e7362b0313c8f251ce38eea579cd18fae4023d3d2b70e0f31577cabe6958ab9cfc30e98d25a7c64e69048b423057c3cf728339` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-windows-386.tar.gz) | `6900db36c1e3340edfd6dfd8d720575a904c932d39a8a7fa36401595e971a0235bd42111dbcc1cbb77e7374e47f1380a68c637997c18f96a0d9cdc9f3714c4c9` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-windows-amd64.tar.gz) | `90de67f6f79fc63bcfdf35066e3d84501cc85433265ffad36fd1a7a428a31b446249f0644a1e97495ea8b2a08e6944df6ef30363003750339edaa2aceffe937c` ### Server Binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-amd64.tar.gz) | `fb56486a55dbf7dbacb53b1aaa690bae18d33d244c72a1e2dc95fb0fcce45108c44ba79f8fa04f12383801c46813dc33d2d0eb2203035cdce1078871595e446e` -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-arm.tar.gz) | `735ed9993071fe35b292bf06930ee3c0f889e3c7edb983195b1c8e4d7113047c12c0f8281fe71879fc2fcd871e1ee587f03b695a03c8512c873abad444997a19` -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-arm64.tar.gz) | `ffab155531d5a9b82487ee1abf4f6ef49626ea58b2de340656a762e46cf3e0f470bdbe7821210901fe1114224957c44c1d9cc1e32efb5ee24e51fe63990785b2` -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-ppc64le.tar.gz) | `9d5730d35c4ddfb4c5483173629fe55df35d1e535d96f02459468220ac2c97dc01b995f577432a6e4d1548b6edbfdc90828dc9c1f7cf7464481af6ae10aaf118` 
-[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-s390x.tar.gz) | `6e4c165306940e8b99dd6e590f8542e31aed23d2c7a6808af0357fa425cec1a57016dd66169cf2a95f8eb8ef70e1f29e2d500533aae889e2e3d9290d04ab8721` +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-amd64.tar.gz) | `3941dcc2309ac19ec185603a79f5a086d8a198f98c04efa23f15a177e5e1f34946ea9392ba9f5d24d0d727839438f067fef1001fc6e88b27b8b01e35bbd962ca` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-arm.tar.gz) | `6507abf6c2ec2b336901dc23269f6c577ec0049b8bad3c9dd6ad63f21aa10f09bfbbfa6e064c2466d250411d3e10f8672791a9e10942e38de7bfbaf7a8bcc9da` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-arm64.tar.gz) | `5abe76f867ca6865344e957bf166b81766c049ec4eb183a8a5580c22a7f8474db1edf90fd901a5833e56128b6825811653a1d27f72fd34ce5b1287a8c10da05c` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-ppc64le.tar.gz) | `62507b182ca25396a285d91241536860e58f54fac937e97cbdf91948c83bb41be97d33277400489bf50e85164d560205540b76e94e5d519892312bdc63df1067` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-s390x.tar.gz) | `04f2a1f7d1388e4a7d7d9f597f872a3da36f26839cfed16aad6df07021c03f4dca1df06b19cfda56df09d1c2d9a13ebd0af40ca1b9b6aecfaf427ab7712d88f3` ### Node Binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-amd64.tar.gz) | `3e6c90561dd1c27fa1dff6953c503251c36001f7e0f8eff3ec918c74ae2d9aa25917d8ac87d5b4224b8229f620b1830442e6dce3b2a497043f8497eee3705696` -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-arm.tar.gz) | `26db385d9ae9a97a1051a638e7e3de22c4bbff389d5a419fe40d5893f9e4fa85c8b60a2bd1d370fd381b60c3ca33c5d72d4767c90898caa9dbd4df6bd116a247` -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-arm64.tar.gz) | `5b8b63f617e248432b7eb913285a8ef8ba028255216332c05db949666c3f9e9cb9f4c393bbd68d00369bda77abf9bfa2da254a5c9fe0d79ffdad855a77a9d8ed` -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-ppc64le.tar.gz) | `60da7715996b4865e390640525d6e98593ba3cd45c6caeea763aa5355a7f989926da54f58cc5f657f614c8134f97cd3894b899f8b467d100dca48bc22dd4ff63` -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-s390x.tar.gz) | `9407dc55412bd04633f84fcefe3a1074f3eaa772a7cb9302242b8768d6189b75d37677a959f91130e8ad9dc590f9ba8408ba6700a0ceff6827315226dd5ee1e6` -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-windows-amd64.tar.gz) | `9d4261af343cc330e6359582f80dbd6efb57d41f882747a94bbf47b4f93292d43dd19a86214d4944d268941622dfbc96847585e6fec15fddc4dbd93d17015fa8` +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-amd64.tar.gz) | `c1831c708109c31b3878e5a9327ea4b9e546504d0b6b00f3d43db78b5dd7d5114d32ac24a9a505f9cadbe61521f0419933348d2cd309ed8cfe3987d9ca8a7e2c` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-arm.tar.gz) | `b68dd5bcfc7f9ce2781952df40c8c3a64c29701beff6ac22f042d6f31d4de220e9200b7e8272ddf608114327770acdaf3cb9a34a0a5206e784bda717ea080e0f` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-arm64.tar.gz) | 
`7fa84fc500c28774ed25ca34b6f7b208a2bea29d6e8379f84b9f57bd024aa8fe574418cee7ee26edd55310716d43d65ae7b9cbe11e40c995fe2eac7f66bdb423` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-ppc64le.tar.gz) | `a4278b3f8e458e9581e01f0c5ba8443303c987988ee136075a8f2f25515d70ca549fbd2e4d10eefca816c75c381d62d71494bd70c47034ab47f8315bbef4ae37` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-s390x.tar.gz) | `8de2bc6f22f232ff534b45012986eac23893581ccb6c45bd637e40dbe808ce31d5a92375c00dc578bdbadec342b6e5b70c1b9f3d3a7bb26ccfde97d71f9bf84a` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-windows-amd64.tar.gz) | `b82e94663d330cff7a117f99a7544f27d0bc92b36b5a283b3c23725d5b33e6f15e0ebf784627638f22f2d58c58c0c2b618ddfd226a64ae779693a0861475d355` -## Changelog since v1.19.0 +## Changelog since v1.20.0 ## What's New (Major Themes) -### Dockershim deprecation - -Docker as an underlying runtime is being deprecated. Docker-produced images will continue to work in your cluster with all runtimes, as they always have. -The Kubernetes community [has written a blog post about this in detail](https://blog.k8s.io/2020/12/02/dont-panic-kubernetes-and-docker/) with [a dedicated FAQ page for it](https://blog.k8s.io/2020/12/02/dockershim-faq/). - -### External credential provider for client-go - -The client-go credential plugins can now be passed in the current cluster information via the `KUBERNETES_EXEC_INFO` environment variable. Learn more about this on [client-go credential plugins documentation](https://docs.k8s.io/reference/access-authn-authz/authentication/#client-go-credential-plugins/). - -### CronJob controller v2 is available through feature gate - -An alternative implementation of `CronJob` controller is now available as an alpha feature in this release, which has experimental performance improvement by using informers instead of polling. While this will be the default behavior in the future, you can [try them in this release through a feature gate](https://docs.k8s.io/concepts/workloads/controllers/cron-jobs/). - -### PID Limits graduates to General Availability - -PID Limits features are now generally available on both `SupportNodePidsLimit` (node-to-pod PID isolation) and `SupportPodPidsLimit` (ability to limit PIDs per pod), after being enabled-by-default in beta stage for a year. - -### API Priority and Fairness graduates to Beta - -Initially introduced in 1.18, Kubernetes 1.20 now enables API Priority and Fairness (APF) by default. This allows `kube-apiserver` to [categorize incoming requests by priority levels](https://docs.k8s.io/concepts/cluster-administration/flow-control/). - -### IPv4/IPv6 run - -IPv4/IPv6 dual-stack has been reimplemented for 1.20 to support dual-stack Services, based on user and community feedback. If your cluster has dual-stack enabled, you can create Services which can use IPv4, IPv6, or both, and you can change this setting for existing Services. Details are available in updated [IPv4/IPv6 dual-stack docs](https://docs.k8s.io/concepts/services-networking/dual-stack/), which cover the nuanced array of options. - -We expect this implementation to progress from alpha to beta and GA in coming releases, so we’re eager to have you comment about your dual-stack experiences in [#k8s-dual-stack](https://kubernetes.slack.com/messages/k8s-dual-stack) or in [enhancements #563](https://features.k8s.io/563). 
- -### go1.15.5 - -go1.15.5 has been integrated to Kubernetes project as of this release, [including other infrastructure related updates on this effort](https://github.com/kubernetes/kubernetes/pull/95776). - -### CSI Volume Snapshot graduates to General Availability - -CSI Volume Snapshot moves to GA in the 1.20 release. This feature provides a standard way to trigger volume snapshot operations in Kubernetes and allows Kubernetes users to incorporate snapshot operations in a portable manner on any Kubernetes environment regardless of supporting underlying storage providers. -Additionally, these Kubernetes snapshot primitives act as basic building blocks that unlock the ability to develop advanced, enterprise grade, storage administration features for Kubernetes: including application or cluster level backup solutions. -Note that snapshot support will require Kubernetes distributors to bundle the Snapshot controller, Snapshot CRDs, and validation webhook. In addition, a CSI driver supporting the snapshot functionality must also be deployed on the cluster. - -### Non-recursive Volume Ownership (FSGroup) graduates to Beta - -By default, the `fsgroup` setting, if specified, recursively updates permissions for every file in a volume on every mount. This can make mount, and pod startup, very slow if the volume has many files. -This setting enables a pod to specify a `PodFSGroupChangePolicy` that indicates that volume ownership and permissions will be changed only when permission and ownership of the root directory does not match with expected permissions on the volume. - -### CSIDriver policy for FSGroup graduates to Beta - -The FSGroup's CSIDriver Policy is now beta in 1.20. This allows CSIDrivers to explicitly indicate if they want Kubernetes to manage permissions and ownership for their volumes via `fsgroup`. - -### Security Improvements for CSI Drivers (Alpha) +### Deprecation of PodSecurityPolicy -In 1.20, we introduce a new alpha feature `CSIServiceAccountToken`. This feature allows CSI drivers to impersonate the pods that they mount the volumes for. This improves the security posture in the mounting process where the volumes are ACL’ed on the pods’ service account without handing out unnecessary permissions to the CSI drivers’ service account. This feature is especially important for secret-handling CSI drivers, such as the secrets-store-csi-driver. Since these tokens can be rotated and short-lived, this feature also provides a knob for CSI drivers to receive `NodePublishVolume` RPC calls periodically with the new token. This knob is also useful when volumes are short-lived, e.g. certificates. +PSP as an admission controller resource is being deprecated. Deployed PodSecurityPolicies will keep working until version 1.25, the target release for their removal from the codebase. A new feature, with a working title of "PSP replacement policy", is being developed in [KEP-2579](https://features.k8s.io/2579). To learn more, read [PodSecurityPolicy Deprecation: Past, Present, and Future](https://blog.k8s.io/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/). -### Introducing Graceful Node Shutdown (Alpha) +### Kubernetes API Reference Documentation -The `GracefulNodeShutdown` feature is now in Alpha. This allows kubelet to be aware of node system shutdowns, enabling graceful termination of pods during a system shutdown. This feature can be [enabled through feature gate](https://docs.k8s.io/concepts/architecture/nodes/#graceful-node-shutdown).
+The API reference is now generated with [`gen-resourcesdocs`](https://github.com/kubernetes-sigs/reference-docs/tree/c96658d89fb21037b7d00d27e6dbbe6b32375837/gen-resourcesdocs) and it is moving to [Kubernetes API](https://docs.k8s.io/reference/kubernetes-api/). -### Runtime log sanitation +### Kustomize Updates in Kubectl -Logs can now be configured to use runtime protection from leaking sensitive data. [Details for this experimental feature is available in documentation](https://docs.k8s.io/concepts/cluster-administration/system-logs/#log-sanitization). +The [Kustomize](https://github.com/kubernetes-sigs/kustomize) version in kubectl jumped from v2.0.3 to [v4.0.5](https://github.com/kubernetes/kubernetes/pull/98946). Kustomize is now treated as a library and future updates will be less sporadic. -### Pod resource metrics +### Default Container Labels -On-demand metrics calculation is now available through `/metrics/resources`. [When enabled]( -https://docs.k8s.io/concepts/cluster-administration/system-metrics#kube-scheduler-metrics), the endpoint will report the requested resources and the desired limits of all running pods. +Pods with multiple containers can use the `kubectl.kubernetes.io/default-container` label to have a container preselected for kubectl commands. Read more in [KEP-2227](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cli/2227-kubectl-default-container/README.md). -### Introducing `RootCAConfigMap` +### Immutable Secrets and ConfigMaps -`RootCAConfigMap` graduates to Beta, seperating from `BoundServiceAccountTokenVolume`. The `kube-root-ca.crt` ConfigMap is now available to every namespace, by default. It contains the Certificate Authority bundle for verify kube-apiserver connections. +Immutable Secrets and ConfigMaps graduate to GA. This feature allows users to specify that the contents of a particular Secret or ConfigMap are immutable for its object lifetime. For such instances, the kubelet will not watch or poll for changes, thereby reducing apiserver load. -### `kubectl debug` graduates to Beta +### Structured Logging in Kubelet -`kubectl alpha debug` graduates from alpha to beta in 1.20, becoming `kubectl debug`. -`kubectl debug` provides support for common debugging workflows directly from kubectl. Troubleshooting scenarios supported in this release of `kubectl` include: -Troubleshoot workloads that crash on startup by creating a copy of the pod that uses a different container image or command. -Troubleshoot distroless containers by adding a new container with debugging tools, either in a new copy of the pod or using an ephemeral container. (Ephemeral containers are an alpha feature that are not enabled by default.) -Troubleshoot on a node by creating a container running in the host namespaces and with access to the host’s filesystem. -Note that as a new builtin command, `kubectl debug` takes priority over any `kubectl` plugin named “debug”. You will need to rename the affected plugin. -Invocations using `kubectl alpha debug` are now deprecated and will be removed in a subsequent release. Update your scripts to use `kubectl debug` instead of `kubectl alpha debug`! -For more information about kubectl debug, see Debugging Running Pods on the Kubernetes website, kubectl help debug, or reach out to SIG CLI by visiting #sig-cli or commenting on [enhancement #1441](https://features.k8s.io/1441). +Kubelet has adopted structured logging, thanks to community effort in accomplishing this within the release timeline.
Structured logging in the project remains an ongoing effort -- for folks interested in participating, [keep an eye / chime in to the mailing list discussion](https://groups.google.com/g/kubernetes-dev/c/y4WIw-ntUR8). -### Removing deprecated flags in kubeadm +### Storage Capacity Tracking -`kubeadm` applies a number of deprecations and removals of deprecated features in this release. More details are available in the Urgent Upgrade Notes and Kind / Deprecation sections. +Traditionally, the Kubernetes scheduler was based on the assumptions that additional persistent storage is available everywhere in the cluster and has infinite capacity. Topology constraints addressed the first point, but up to now pod scheduling was still done without considering that the remaining storage capacity may not be enough to start a new pod. [Storage capacity tracking](https://docs.k8s.io/concepts/storage/storage-capacity/) addresses that by adding an API for a CSI driver to report storage capacity and uses that information in the Kubernetes scheduler when choosing a node for a pod. This feature serves as a stepping stone for supporting dynamic provisioning for local volumes and other volume types that are more capacity constrained. -### Pod Hostname as FQDN graduates to Beta +### Generic Ephemeral Volumes -Previously introduced in 1.19 behind a feature gate, `SetHostnameAsFQDN` is now enabled by default. More details on this behavior is available in [documentation for DNS for Services and Pods](https://docs.k8s.io/concepts/services-networking/dns-pod-service/#pod-sethostnameasfqdn-field) +The [Generic ephemeral volumes](https://docs.k8s.io/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes) feature allows any existing storage driver that supports dynamic provisioning to be used as an ephemeral volume with the volume’s lifecycle bound to the Pod. It can be used to provide scratch storage that is different from the root disk, for example persistent memory, or a separate local disk on that node. All StorageClass parameters for volume provisioning are supported. All features supported with PersistentVolumeClaims are supported, such as storage capacity tracking, snapshots and restore, and volume resizing. -### `TokenRequest` / `TokenRequestProjection` graduates to General Availability +### CSI Service Account Token -Service account tokens bound to pod is now a stable feature. The feature gates will be removed in 1.21 release. For more information, refer to notes below on the changelogs. +The CSI Service Account Token feature moves to Beta in 1.21. This feature improves the security posture and allows CSI drivers to receive pods' [bound service account tokens](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md). This feature also provides a knob to re-publish volumes so that short-lived volumes can be refreshed. -### RuntimeClass feature graduates to General Availability. +### CSI Health Monitoring -The `node.k8s.io` API groups are promoted from `v1beta1` to `v1`. `v1beta1` is now deprecated and will be removed in a future release, please start using `v1`. ([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing] - -### Cloud Controller Manager now exclusively shipped by Cloud Provider - -Kubernetes will no longer ship an instance of the Cloud Controller Manager binary.
Each Cloud Provider is expected to ship their own instance of this binary. Details for a Cloud Provider to create an instance of such a binary can be found under [here](https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/cloud-provider/sample). Anyone with questions on building a Cloud Controller Manager should reach out to SIG Cloud Provider. Questions about the Cloud Controller Manager on a Managed Kubernetes solution should go to the relevant Cloud Provider. Questions about the Cloud Controller Manager on a non managed solution can be brought up with SIG Cloud Provider. +The CSI health monitoring feature is being released as a second Alpha in Kubernetes 1.21. This feature enables CSI Drivers to share abnormal volume conditions from the underlying storage systems with Kubernetes so that they can be reported as events on PVCs or Pods. This feature serves as a stepping stone towards programmatic detection and resolution of individual volume health issues by Kubernetes. ## Known Issues -### Summary API in kubelet doesn't have accelerator metrics -Currently, cadvisor_stats_provider provides AcceleratorStats but cri_stats_provider does not. As a result, when using cri_stats_provider, kubelet's Summary API does not have accelerator metrics. [There is an open work in progress to fix this](https://github.com/kubernetes/kubernetes/pull/96873). +### `TopologyAwareHints` feature falls back to default behavior + +The feature gate currently falls back to the default behavior in most cases. Enabling the feature gate will add hints to `EndpointSlices`, but functional differences are only observed in the non-dual-stack kube-proxy implementation. [The fix will be available in coming releases](https://github.com/kubernetes/kubernetes/pull/100804). ## Urgent Upgrade Notes ### (No, really, you MUST read this before you upgrade) -- A bug was fixed in kubelet where exec probe timeouts were not respected. This may result in unexpected behavior since the default timeout (if not specified) is `1s` which may be too small for some exec probes. Ensure that pods relying on this behavior are updated to correctly handle probe timeouts. See [configure probe](https://docs.k8s.io/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes) section of the documentation for more details. - - This change in behavior may be unexpected for some clusters and can be disabled by turning off the `ExecProbeTimeout` feature gate. This gate will be locked and removed in future releases so that exec probe timeouts are always respected. ([#94115](https://github.com/kubernetes/kubernetes/pull/94115), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Testing] -- RuntimeClass feature graduates to General Availability. Promote `node.k8s.io` API groups from `v1beta1` to `v1`. `v1beta1` is now deprecated and will be removed in a future release, please start using `v1`. ([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing] -- API priority and fairness graduated to beta. 1.19 servers with APF turned on should not be run in a multi-server cluster with 1.20+ servers.
([#96527](https://github.com/kubernetes/kubernetes/pull/96527), [@adtac](https://github.com/adtac)) [SIG API Machinery and Testing] -- For CSI drivers, kubelet no longer creates the target_path for NodePublishVolume in accordance with the CSI spec. Kubelet also no longer checks if staging and target paths are mounts or corrupted. CSI drivers need to be idempotent and do any necessary mount verification. ([#88759](https://github.com/kubernetes/kubernetes/pull/88759), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] -- Kubeadm: http://git.k8s.io/enhancements/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md ([#95382](https://github.com/kubernetes/kubernetes/pull/95382), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] - - The label applied to control-plane nodes "node-role.kubernetes.io/master" is now deprecated and will be removed in a future release after a GA deprecation period. - - Introduce a new label "node-role.kubernetes.io/control-plane" that will be applied in parallel to "node-role.kubernetes.io/master" until the removal of the "node-role.kubernetes.io/master" label. - - Make "kubeadm upgrade apply" add the "node-role.kubernetes.io/control-plane" label on existing nodes that only have the "node-role.kubernetes.io/master" label during upgrade. - - Please adapt your tooling built on top of kubeadm to use the "node-role.kubernetes.io/control-plane" label. - - The taint applied to control-plane nodes "node-role.kubernetes.io/master:NoSchedule" is now deprecated and will be removed in a future release after a GA deprecation period. - - Apply toleration for a new, future taint "node-role.kubernetes.io/control-plane:NoSchedule" to the kubeadm CoreDNS / kube-dns managed manifests. Note that this taint is not yet applied to kubeadm control-plane nodes. - - Please adapt your workloads to tolerate the same future taint preemptively. - -- Kubeadm: improve the validation of serviceSubnet and podSubnet. - ServiceSubnet has to be limited in size, due to implementation details, and the mask can not allocate more than 20 bits. - PodSubnet validates against the corresponding cluster "--node-cidr-mask-size" of the kube-controller-manager, it fail if the values are not compatible. - kubeadm no longer sets the node-mask automatically on IPv6 deployments, you must check that your IPv6 service subnet mask is compatible with the default node mask /64 or set it accordenly. - Previously, for IPv6, if the podSubnet had a mask lower than /112, kubeadm calculated a node-mask to be multiple of eight and splitting the available bits to maximise the number used for nodes. ([#95723](https://github.com/kubernetes/kubernetes/pull/95723), [@aojea](https://github.com/aojea)) [SIG Cluster Lifecycle] -- The deprecated flag --experimental-kustomize is now removed from kubeadm commands. Use --experimental-patches instead, which was introduced in 1.19. Migration information available in --help description for --experimental-patches. ([#94871](https://github.com/kubernetes/kubernetes/pull/94871), [@neolit123](https://github.com/neolit123)) -- Windows hyper-v container featuregate is deprecated in 1.20 and will be removed in 1.21 ([#95505](https://github.com/kubernetes/kubernetes/pull/95505), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- The kube-apiserver ability to serve on an insecure port, deprecated since v1.10, has been removed. 
The insecure address flags `--address` and `--insecure-bind-address` have no effect in kube-apiserver and will be removed in v1.24. The insecure port flags `--port` and `--insecure-port` may only be set to 0 and will be removed in v1.24. ([#95856](https://github.com/kubernetes/kubernetes/pull/95856), [@knight42](https://github.com/knight42), [SIG API Machinery, Node, Testing]) -- Add dual-stack Services (alpha). This is a BREAKING CHANGE to an alpha API. - It changes the dual-stack API wrt Service from a single ipFamily field to 3 - fields: ipFamilyPolicy (SingleStack, PreferDualStack, RequireDualStack), - ipFamilies (a list of families assigned), and clusterIPs (inclusive of - clusterIP). Most users do not need to set anything at all, defaulting will - handle it for them. Services are single-stack unless the user asks for - dual-stack. This is all gated by the "IPv6DualStack" feature gate. ([#91824](https://github.com/kubernetes/kubernetes/pull/91824), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- `TokenRequest` and `TokenRequestProjection` are now GA features. The following flags are required by the API server: - - `--service-account-issuer`, should be set to a URL identifying the API server that will be stable over the cluster lifetime. - - `--service-account-key-file`, set to one or more files containing one or more public keys used to verify tokens. - - `--service-account-signing-key-file`, set to a file containing a private key to use to sign service account tokens. Can be the same file given to `kube-controller-manager` with `--service-account-private-key-file`. ([#95896](https://github.com/kubernetes/kubernetes/pull/95896), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle] -- kubeadm: make the command "kubeadm alpha kubeconfig user" accept a "--config" flag and remove the following flags: - - apiserver-advertise-address / apiserver-bind-port: use either localAPIEndpoint from InitConfiguration or controlPlaneEndpoint from ClusterConfiguration. - - cluster-name: use clusterName from ClusterConfiguration - - cert-dir: use certificatesDir from ClusterConfiguration ([#94879](https://github.com/kubernetes/kubernetes/pull/94879), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Resolves non-deterministic behavior of the garbage collection controller when ownerReferences with incorrect data are encountered. Events with a reason of `OwnerRefInvalidNamespace` are recorded when namespace mismatches between child and owner objects are detected. The [kubectl-check-ownerreferences](https://github.com/kubernetes-sigs/kubectl-check-ownerreferences) tool can be run prior to upgrading to locate existing objects with invalid ownerReferences. - - A namespaced object with an ownerReference referencing a uid of a namespaced kind which does not exist in the same namespace is now consistently treated as though that owner does not exist, and the child object is deleted. - - A cluster-scoped object with an ownerReference referencing a uid of a namespaced kind is now consistently treated as though that owner is not resolvable, and the child object is ignored by the garbage collector. 
-- `TokenRequest` and `TokenRequestProjection` are now GA features. The following flags are required by the API server:
-  - `--service-account-issuer`, should be set to a URL identifying the API server that will be stable over the cluster lifetime.
-  - `--service-account-key-file`, set to one or more files containing one or more public keys used to verify tokens.
-  - `--service-account-signing-key-file`, set to a file containing a private key to use to sign service account tokens. Can be the same file given to `kube-controller-manager` with `--service-account-private-key-file`. ([#95896](https://github.com/kubernetes/kubernetes/pull/95896), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle]
-- kubeadm: make the command "kubeadm alpha kubeconfig user" accept a "--config" flag and remove the following flags:
-  - apiserver-advertise-address / apiserver-bind-port: use either localAPIEndpoint from InitConfiguration or controlPlaneEndpoint from ClusterConfiguration.
-  - cluster-name: use clusterName from ClusterConfiguration.
-  - cert-dir: use certificatesDir from ClusterConfiguration. ([#94879](https://github.com/kubernetes/kubernetes/pull/94879), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle]
-- Resolves non-deterministic behavior of the garbage collection controller when ownerReferences with incorrect data are encountered. Events with a reason of `OwnerRefInvalidNamespace` are recorded when namespace mismatches between child and owner objects are detected. The [kubectl-check-ownerreferences](https://github.com/kubernetes-sigs/kubectl-check-ownerreferences) tool can be run prior to upgrading to locate existing objects with invalid ownerReferences.
-  - A namespaced object with an ownerReference referencing a uid of a namespaced kind which does not exist in the same namespace is now consistently treated as though that owner does not exist, and the child object is deleted.
-  - A cluster-scoped object with an ownerReference referencing a uid of a namespaced kind is now consistently treated as though that owner is not resolvable, and the child object is ignored by the garbage collector. ([#92743](https://github.com/kubernetes/kubernetes/pull/92743), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Testing]
-
+- Kube-proxy's IPVS proxy mode no longer sets the net.ipv4.conf.all.route_localnet sysctl parameter. Nodes upgrading will have net.ipv4.conf.all.route_localnet set to 1, but new nodes will inherit the system default (usually 0). If you relied on any behavior requiring net.ipv4.conf.all.route_localnet, you must ensure it is enabled, as kube-proxy will no longer set it automatically. This change helps to further mitigate CVE-2020-8558. ([#92938](https://github.com/kubernetes/kubernetes/pull/92938), [@lbernail](https://github.com/lbernail)) [SIG Network and Release]
+- Kubeadm: during "init" an empty cgroupDriver value in the KubeletConfiguration is now always set to "systemd" unless the user is explicit about it. This requires existing machine setups to configure the container runtime to use the "systemd" driver. Documentation on this topic can be found here: https://kubernetes.io/docs/setup/production-environment/container-runtimes/. When upgrading existing clusters / nodes using "kubeadm upgrade" the old cgroupDriver value is preserved, but in 1.22 this change will also apply to "upgrade". For more information on migrating to the "systemd" driver or remaining on the "cgroupfs" driver see: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/. ([#99471](https://github.com/kubernetes/kubernetes/pull/99471), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
+- PVs newly provisioned by the EBS plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels; they will use the "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead. ([#99130](https://github.com/kubernetes/kubernetes/pull/99130), [@ayberk](https://github.com/ayberk)) [SIG Cloud Provider, Storage and Testing]
+- PVs newly provisioned by the OpenStack Cinder plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels; they will use the "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead. ([#99719](https://github.com/kubernetes/kubernetes/pull/99719), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider and Storage]
+- PVs newly provisioned by gce-pd will no longer have the beta FailureDomain label; the gce-pd volume plugin will use the GA topology labels instead. ([#98700](https://github.com/kubernetes/kubernetes/pull/98700), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider, Storage and Testing]
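For the three provisioner entries above, a hedged sketch of how the GA labels can appear on a dynamically provisioned PV; the PV name, zone/region values, and volume ID are hypothetical, and the exact shape is provisioner-dependent:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pvc-0a1b2c3d                               # hypothetical
  labels:
    topology.kubernetes.io/zone: us-east-1a        # replaces failure-domain.beta.kubernetes.io/zone
    topology.kubernetes.io/region: us-east-1       # replaces failure-domain.beta.kubernetes.io/region
spec:
  capacity:
    storage: 10Gi
  accessModes: [ReadWriteOnce]
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: topology.kubernetes.io/zone
          operator: In
          values: [us-east-1a]
  awsElasticBlockStore:                            # in-tree EBS volume source, as one example
    volumeID: vol-0123456789abcdef0                # hypothetical
    fsType: ext4
```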
+- OpenStack Cinder CSI migration is on by default; the Cinder CSI driver must be installed on clusters on OpenStack for Cinder volumes to work. ([#98538](https://github.com/kubernetes/kubernetes/pull/98538), [@dims](https://github.com/dims)) [SIG Storage]
+- Remove the alpha `CSIMigrationXXComplete` flag and add an alpha `InTreePluginXXUnregister` flag. Deprecate the `CSIMigrationvSphereComplete` flag; it will be removed in v1.22. ([#98243](https://github.com/kubernetes/kubernetes/pull/98243), [@Jiawei0227](https://github.com/Jiawei0227))
+- Remove the storage metric `storage_operation_errors_total`, since we already have `storage_operation_status_count`. Also add a new `status` field to `storage_operation_duration_seconds`, so that latency can be observed for every storage operation status. ([#98332](https://github.com/kubernetes/kubernetes/pull/98332), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage]
+- The metric `storage_operation_errors_total` is not removed, but is marked deprecated, and the metric `storage_operation_status_count` is marked deprecated. In both cases the `storage_operation_duration_seconds` metric can be used to recover equivalent counts (using `status=fail-unknown` in the case of `storage_operations_errors_total`). ([#99045](https://github.com/kubernetes/kubernetes/pull/99045), [@mattcary](https://github.com/mattcary))
+- The `ServiceNodeExclusion`, `NodeDisruptionExclusion` and `LegacyNodeRoleBehavior` features have been promoted to GA. `ServiceNodeExclusion` and `NodeDisruptionExclusion` are now unconditionally enabled, while `LegacyNodeRoleBehavior` is unconditionally disabled. To prevent control plane nodes from being added to load balancers automatically, upgrading users need to add the "node.kubernetes.io/exclude-from-external-load-balancers" label to control plane nodes. ([#97543](https://github.com/kubernetes/kubernetes/pull/97543), [@pacoxu](https://github.com/pacoxu))
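To keep control plane nodes out of load balancer target pools after upgrading, the label from the entry above can be applied to the Node object (typically via `kubectl label node`); the node name below is hypothetical:

```yaml
apiVersion: v1
kind: Node
metadata:
  name: control-plane-1                                            # hypothetical
  labels:
    node.kubernetes.io/exclude-from-external-load-balancers: ""    # value is ignored; presence excludes the node
```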
## Changes by Kind

### Deprecation

-- Docker support in the kubelet is now deprecated and will be removed in a future release. The kubelet uses a module called "dockershim" which implements CRI support for Docker, and it has seen maintenance issues in the Kubernetes community. We encourage you to evaluate moving to a container runtime that is a full-fledged implementation of CRI (v1alpha1 or v1 compliant) as they become available. ([#94624](https://github.com/kubernetes/kubernetes/pull/94624), [@dims](https://github.com/dims)) [SIG Node]
-- Kubeadm: deprecate self-hosting support. The experimental command "kubeadm alpha self-hosting" is now deprecated and will be removed in a future release. ([#95125](https://github.com/kubernetes/kubernetes/pull/95125), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubeadm: graduate the "kubeadm alpha certs" command to a parent command "kubeadm certs". The command "kubeadm alpha certs" is deprecated and will be removed in a future release. Please migrate. ([#94938](https://github.com/kubernetes/kubernetes/pull/94938), [@yagonobre](https://github.com/yagonobre)) [SIG Cluster Lifecycle]
-- Kubeadm: remove the deprecated "kubeadm alpha kubelet config enable-dynamic" command. To continue using the feature please defer to the guide for "Dynamic Kubelet Configuration" at k8s.io. This change also removes the parent command "kubeadm alpha kubelet" as there are no more sub-commands under it for the time being. ([#94668](https://github.com/kubernetes/kubernetes/pull/94668), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubeadm: remove the deprecated --kubelet-config flag for the command "kubeadm upgrade node" ([#94869](https://github.com/kubernetes/kubernetes/pull/94869), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubectl: deprecate --delete-local-data ([#95076](https://github.com/kubernetes/kubernetes/pull/95076), [@dougsland](https://github.com/dougsland)) [SIG CLI, Cloud Provider and Scalability]
-- Kubelet's deprecated endpoint `metrics/resource/v1alpha1` has been removed; please adopt `metrics/resource`. ([#94272](https://github.com/kubernetes/kubernetes/pull/94272), [@RainbowMango](https://github.com/RainbowMango)) [SIG Instrumentation and Node]
-- Removes the deprecated scheduler metrics DeprecatedSchedulingDuration, DeprecatedSchedulingAlgorithmPredicateEvaluationSecondsDuration, DeprecatedSchedulingAlgorithmPriorityEvaluationSecondsDuration ([#94884](https://github.com/kubernetes/kubernetes/pull/94884), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling]
-- The scheduler alpha metrics binding_duration_seconds and scheduling_algorithm_preemption_evaluation_seconds are deprecated. Both of these metrics are now covered as part of framework_extension_point_duration_seconds, the former as a PostFilter plugin and the latter as a Bind plugin. The plan is to remove both in 1.21. ([#95001](https://github.com/kubernetes/kubernetes/pull/95001), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling]
-- Support 'controlplane' as a valid EgressSelection type in the EgressSelectorConfiguration API. 'Master' is deprecated and will be removed in v1.22. ([#95235](https://github.com/kubernetes/kubernetes/pull/95235), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery]
-- The v1alpha1 PodPreset API and admission plugin has been removed with no built-in replacement. Admission webhooks can be used to modify pods on creation. ([#94090](https://github.com/kubernetes/kubernetes/pull/94090), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Apps, CLI, Cloud Provider, Scalability and Testing]
-
+- Aborting the drain command in a list of nodes will be deprecated. The new behavior will make the drain command go through all nodes even if one or more nodes failed during the drain. For now, users can try this behavior by enabling the --ignore-errors flag. ([#98203](https://github.com/kubernetes/kubernetes/pull/98203), [@yuzhiquan](https://github.com/yuzhiquan))
+- Delete the deprecated `service.beta.kubernetes.io/azure-load-balancer-mixed-protocols` mixed protocol annotation in favor of the MixedProtocolLBService feature ([#97096](https://github.com/kubernetes/kubernetes/pull/97096), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
+- Deprecate the `topologyKeys` field in Service. This capability will be replaced with upcoming work around Topology Aware Subsetting and Service Internal Traffic Policy. ([#96736](https://github.com/kubernetes/kubernetes/pull/96736), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps]
+- Kube-proxy: remove the deprecated --cleanup-ipvs flag of kube-proxy, and make the --cleanup flag always flush IPVS ([#97336](https://github.com/kubernetes/kubernetes/pull/97336), [@maaoBit](https://github.com/maaoBit)) [SIG Network]
+- Kubeadm: the deprecated command "alpha selfhosting pivot" is now removed. ([#97627](https://github.com/kubernetes/kubernetes/pull/97627), [@knight42](https://github.com/knight42))
+- Kubeadm: graduate the command `kubeadm alpha kubeconfig user` to `kubeadm kubeconfig user`. The `kubeadm alpha kubeconfig user` command is deprecated now. ([#97583](https://github.com/kubernetes/kubernetes/pull/97583), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle]
+- Kubeadm: the "kubeadm alpha certs" command is now removed; please use "kubeadm certs" instead. ([#97706](https://github.com/kubernetes/kubernetes/pull/97706), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle]
+- Kubeadm: the deprecated kube-dns is no longer supported as an option. If "ClusterConfiguration.dns.type" is set to "kube-dns", kubeadm will now throw an error. ([#99646](https://github.com/kubernetes/kubernetes/pull/99646), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle]
+- Kubectl: The deprecated `kubectl alpha debug` command is removed. Use `kubectl debug` instead. ([#98111](https://github.com/kubernetes/kubernetes/pull/98111), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI]
+- Official support to build kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97935](https://github.com/kubernetes/kubernetes/pull/97935), [@adeniyistephen](https://github.com/adeniyistephen)) [SIG Release and Testing]
+- Remove the deprecated `--generator, --replicas, --service-generator, --service-overrides, --schedule` flags from `kubectl run`.
+  Deprecate `--serviceaccount, --hostport, --requests, --limits` in `kubectl run` ([#99732](https://github.com/kubernetes/kubernetes/pull/99732), [@soltysh](https://github.com/soltysh))
+- Remove the deprecated metrics "scheduling_algorithm_preemption_evaluation_seconds" and "binding_duration_seconds"; use "scheduler_framework_extension_point_duration_seconds" instead. ([#96447](https://github.com/kubernetes/kubernetes/pull/96447), [@chendave](https://github.com/chendave)) [SIG Cluster Lifecycle, Instrumentation, Scheduling and Testing]
+- Removing experimental Windows container hyper-v support with Docker ([#97141](https://github.com/kubernetes/kubernetes/pull/97141), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows]
+- Rename the metric `etcd_object_counts` to `apiserver_storage_object_counts` and mark it as stable. The original `etcd_object_counts` metric name is marked as "Deprecated" and will be removed in the future. ([#99785](https://github.com/kubernetes/kubernetes/pull/99785), [@erain](https://github.com/erain)) [SIG API Machinery, Instrumentation and Testing]
+- The GA TokenRequest and TokenRequestProjection feature gates have been removed and are unconditionally enabled. Remove explicit use of those feature gates in CLI invocations. ([#97148](https://github.com/kubernetes/kubernetes/pull/97148), [@wawa0210](https://github.com/wawa0210)) [SIG Node]
+- The PodSecurityPolicy API is deprecated in 1.21, and will no longer be served starting in 1.25. ([#97171](https://github.com/kubernetes/kubernetes/pull/97171), [@deads2k](https://github.com/deads2k)) [SIG Auth and CLI]
+- The `batch/v2alpha1` CronJob type definitions and clients are deprecated and removed. ([#96987](https://github.com/kubernetes/kubernetes/pull/96987), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing]
+- The `export` query parameter (inconsistently supported by API resources and deprecated in v1.14) is fully removed. Requests setting this query parameter will now receive a 400 status response. ([#98312](https://github.com/kubernetes/kubernetes/pull/98312), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth and Testing]
+- `audit.k8s.io/v1beta1` and `audit.k8s.io/v1alpha1` audit policy configuration and audit events are deprecated in favor of `audit.k8s.io/v1`, available since v1.13. kube-apiserver invocations that specify alpha or beta policy configurations with `--audit-policy-file`, or explicitly request alpha or beta audit events with `--audit-log-version` / `--audit-webhook-version`, must update to use `audit.k8s.io/v1` and accept `audit.k8s.io/v1` events prior to v1.24 (see the policy sketch after these entries). ([#98858](https://github.com/kubernetes/kubernetes/pull/98858), [@carlory](https://github.com/carlory)) [SIG Auth]
+- `discovery.k8s.io/v1beta1` EndpointSlices are deprecated in favor of `discovery.k8s.io/v1`, and will no longer be served in Kubernetes v1.25. ([#100472](https://github.com/kubernetes/kubernetes/pull/100472), [@liggitt](https://github.com/liggitt))
+- The `diskformat` storage class parameter for the in-tree vSphere volume plugin is deprecated as of the v1.21 release. Please consider updating the StorageClass and removing the `diskformat` parameter. The vSphere CSI Driver does not support the `diskformat` storageclass parameter.
+
+  vSphere releases older than 67u3 are deprecated as of v1.21. Please consider upgrading vSphere to 67u3 or above. The vSphere CSI Driver requires vSphere 67u3 at minimum.
+
+  VM hardware versions below 15 are deprecated as of v1.21. Please consider upgrading the Node VM hardware version to 15 or above. The vSphere CSI Driver recommends that Node VMs' hardware version be set to at least vmx-15.
+
+  Multi-vCenter support is deprecated as of v1.21. If you have a Kubernetes cluster spanning multiple vCenter servers, please consider moving all k8s nodes to a single vCenter server. The vSphere CSI Driver does not support Kubernetes deployments spanning multiple vCenter servers.
+
+  Support for these deprecations will be available until Kubernetes v1.24. ([#98546](https://github.com/kubernetes/kubernetes/pull/98546), [@divyenpatel](https://github.com/divyenpatel))
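As a migration illustration for the audit entry above, a minimal sketch of a policy file that already uses the GA API group; for simple policies only the `apiVersion` needs to change, though any version-specific fields should be reviewed:

```yaml
# was: apiVersion: audit.k8s.io/v1beta1 (or v1alpha1)
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: Metadata          # record metadata for all requests
```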
### API Change

-- `TokenRequest` and `TokenRequestProjection` features have been promoted to GA. This feature allows generating service account tokens that are not visible in Secret objects and are tied to the lifetime of a Pod object. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection for details on configuring and using this feature. The `TokenRequest` and `TokenRequestProjection` feature gates will be removed in v1.21.
-  - kubeadm's kube-apiserver Pod manifest now includes the following flags by default: "--service-account-key-file", "--service-account-signing-key-file", "--service-account-issuer". ([#93258](https://github.com/kubernetes/kubernetes/pull/93258), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle, Storage and Testing]
-- A new `nofuzz` go build tag now disables gofuzz support. Release binaries enable this. ([#92491](https://github.com/kubernetes/kubernetes/pull/92491), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery]
-- Add WindowsContainerResources and Annotations to the CRI-API UpdateContainerResourcesRequest ([#95741](https://github.com/kubernetes/kubernetes/pull/95741), [@katiewasnothere](https://github.com/katiewasnothere)) [SIG Node]
-- Add a `serving` and `terminating` condition to the EndpointSlice API. `serving` tracks the readiness of endpoints regardless of their terminating state; this is distinct from `ready` since `ready` is only true when pods are not terminating. `terminating` is true when an endpoint is terminating; for pods this is any endpoint with a deletion timestamp. ([#92968](https://github.com/kubernetes/kubernetes/pull/92968), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps and Network]
-- Add dual-stack Services (alpha). This is a BREAKING CHANGE to an alpha API. It changes the dual-stack API with respect to Service from a single ipFamily field to 3 fields: ipFamilyPolicy (SingleStack, PreferDualStack, RequireDualStack), ipFamilies (a list of families assigned), and clusterIPs (inclusive of clusterIP). Most users do not need to set anything at all; defaulting will handle it for them. Services are single-stack unless the user asks for dual-stack. This is all gated by the "IPv6DualStack" feature gate. ([#91824](https://github.com/kubernetes/kubernetes/pull/91824), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing]
-- Add support for hugepages to the downward API ([#86102](https://github.com/kubernetes/kubernetes/pull/86102), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing]
-- Adds the kubelet alpha feature `GracefulNodeShutdown`, which makes the kubelet aware of node system shutdowns and results in graceful termination of pods during a system shutdown. ([#96129](https://github.com/kubernetes/kubernetes/pull/96129), [@bobbypage](https://github.com/bobbypage)) [SIG Node]
-- AppProtocol is now GA for Endpoints and Services. The ServiceAppProtocol feature gate will be deprecated in 1.21. ([#96327](https://github.com/kubernetes/kubernetes/pull/96327), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
-- Automatic allocation of NodePorts for services with type LoadBalancer can now be disabled by setting the (new) parameter Service.spec.allocateLoadBalancerNodePorts=false. The default is to allocate NodePorts for services with type LoadBalancer, which is the existing behavior. ([#92744](https://github.com/kubernetes/kubernetes/pull/92744), [@uablrek](https://github.com/uablrek)) [SIG Apps and Network]
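A minimal sketch of the new parameter from the entry above, disabling NodePort allocation for a LoadBalancer Service; the name and selector are hypothetical, and in this release the field sits behind the relevant alpha feature gate:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: lb-without-nodeports             # hypothetical
spec:
  type: LoadBalancer
  allocateLoadBalancerNodePorts: false   # default is true, matching the existing behavior
  selector:
    app: demo                            # hypothetical
  ports:
  - port: 443
    protocol: TCP
```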
-- Certain fields on Service objects will be automatically cleared when changing the service's `type` to a mode that does not need those fields. For example, changing from type=LoadBalancer to type=ClusterIP will clear the NodePort assignments, rather than forcing the user to clear them. ([#95196](https://github.com/kubernetes/kubernetes/pull/95196), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, Network and Testing]
-- Document that the ServiceTopology feature is required to use `service.spec.topologyKeys`. ([#96528](https://github.com/kubernetes/kubernetes/pull/96528), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps]
-- EndpointSlice has a new NodeName field guarded by the EndpointSliceNodeName feature gate.
-  - The EndpointSlice topology field will be deprecated in an upcoming release.
-  - The EndpointSlice "IP" address type is formally removed after being deprecated in Kubernetes 1.17.
-  - The discovery.k8s.io/v1alpha1 API is deprecated and will be removed in Kubernetes 1.21. ([#96440](https://github.com/kubernetes/kubernetes/pull/96440), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps and Network]
-- The external-facing podresources API is now available under k8s.io/kubelet/pkg/apis/ ([#92632](https://github.com/kubernetes/kubernetes/pull/92632), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node and Testing]
-- Fewer candidates are enumerated for preemption to improve performance in large clusters. ([#94814](https://github.com/kubernetes/kubernetes/pull/94814), [@adtac](https://github.com/adtac))
-- Fix conversions for custom metrics. ([#94481](https://github.com/kubernetes/kubernetes/pull/94481), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation]
-- GPU metrics provided by kubelet are now disabled by default. ([#95184](https://github.com/kubernetes/kubernetes/pull/95184), [@RenaudWasTaken](https://github.com/RenaudWasTaken))
-- If BoundServiceAccountTokenVolume is enabled, cluster admins can use the metric `serviceaccount_stale_tokens_total` to monitor workloads that are depending on the extended tokens. If there are no such workloads, turn off extended tokens by starting `kube-apiserver` with the flag `--service-account-extend-token-expiration=false` ([#96273](https://github.com/kubernetes/kubernetes/pull/96273), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Auth]
-- Introduce alpha support for exec-based container registry credential provider plugins in the kubelet. ([#94196](https://github.com/kubernetes/kubernetes/pull/94196), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Release]
-- Introduces a metric source for HPAs which allows scaling based on container resource usage. ([#90691](https://github.com/kubernetes/kubernetes/pull/90691), [@arjunrn](https://github.com/arjunrn)) [SIG API Machinery, Apps, Autoscaling and CLI]
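To illustrate the container-resource metric source from the entry above, a hedged sketch of an HPA scaling on a single container's CPU usage; object names are hypothetical, and the field requires the corresponding alpha feature gate on the API server:

```yaml
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: app-hpa                   # hypothetical
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: app                     # hypothetical
  minReplicas: 2
  maxReplicas: 10
  metrics:
  - type: ContainerResource       # scale on one container, not the whole pod
    containerResource:
      name: cpu
      container: app              # hypothetical container name
      target:
        type: Utilization
        averageUtilization: 60
```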
-- Kube-apiserver now deletes expired kube-apiserver Lease objects:
-  - The feature is under feature gate `APIServerIdentity`.
-  - A flag is added to kube-apiserver: `identity-lease-garbage-collection-check-period-seconds` ([#95895](https://github.com/kubernetes/kubernetes/pull/95895), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Apps, Auth and Testing]
-- Kube-controller-manager: volume plugins can be restricted from contacting local and loopback addresses by setting `--volume-host-allow-local-loopback=false`, or from contacting specific CIDR ranges by setting `--volume-host-cidr-denylist` (for example, `--volume-host-cidr-denylist=127.0.0.1/28,feed::/16`) ([#91785](https://github.com/kubernetes/kubernetes/pull/91785), [@mattcary](https://github.com/mattcary)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing]
-- Migrate scheduler, controller-manager and cloud-controller-manager to use LeaseLock ([#94603](https://github.com/kubernetes/kubernetes/pull/94603), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery, Apps, Cloud Provider and Scheduling]
-- Modify DNS-1123 error messages to indicate that RFC 1123 is not followed exactly ([#94182](https://github.com/kubernetes/kubernetes/pull/94182), [@mattfenwick](https://github.com/mattfenwick)) [SIG API Machinery, Apps, Auth, Network and Node]
-- Move the configurable fsgroup change policy for pods to beta ([#96376](https://github.com/kubernetes/kubernetes/pull/96376), [@gnufied](https://github.com/gnufied)) [SIG Apps and Storage]
-- A new flag is introduced: --topology-manager-scope=container|pod. The default value is the "container" scope. ([#92967](https://github.com/kubernetes/kubernetes/pull/92967), [@cezaryzukowski](https://github.com/cezaryzukowski)) [SIG Instrumentation, Node and Testing]
-- The new parameter `defaultingType` for the `PodTopologySpread` plugin allows using k8s-defined or user-provided default constraints ([#95048](https://github.com/kubernetes/kubernetes/pull/95048), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling]
-- The NodeAffinity plugin can be configured with AddedAffinity. ([#96202](https://github.com/kubernetes/kubernetes/pull/96202), [@alculquicondor](https://github.com/alculquicondor)) [SIG Node, Scheduling and Testing]
-- Promote the RuntimeClass feature to GA. Promote the node.k8s.io API groups from v1beta1 to v1. ([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing]
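With node.k8s.io promoted to v1 (see the entry above), a RuntimeClass can now be written against the GA group; the class name and CRI handler below are hypothetical:

```yaml
apiVersion: node.k8s.io/v1   # promoted from node.k8s.io/v1beta1
kind: RuntimeClass
metadata:
  name: gvisor               # hypothetical
handler: runsc               # hypothetical handler configured in the CRI runtime
```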
-- Reminder: The labels "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" are deprecated in favor of "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" respectively. All users of the "failure-domain.beta..." labels should switch to the "topology..." equivalents. ([#96033](https://github.com/kubernetes/kubernetes/pull/96033), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, CLI, Cloud Provider, Network, Node, Scheduling, Storage and Testing]
-- Server Side Apply now treats LabelSelector fields as atomic (meaning the entire selector is managed by a single writer and updated together), since they contain interrelated and inseparable fields that do not merge in intuitive ways. ([#93901](https://github.com/kubernetes/kubernetes/pull/93901), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Storage and Testing]
-- Services will now have a `clusterIPs` field to go with `clusterIP`. `clusterIPs[0]` is a synonym for `clusterIP` and will be synchronized on create and update operations. ([#95894](https://github.com/kubernetes/kubernetes/pull/95894), [@thockin](https://github.com/thockin)) [SIG Network]
-- The ServiceAccountIssuerDiscovery feature gate is now Beta and enabled by default. ([#91921](https://github.com/kubernetes/kubernetes/pull/91921), [@mtaufen](https://github.com/mtaufen)) [SIG Auth]
-- The status of v1beta1 CRDs without "preserveUnknownFields:false" now shows a violation, "spec.preserveUnknownFields: Invalid value: true: must be false". ([#93078](https://github.com/kubernetes/kubernetes/pull/93078), [@vareti](https://github.com/vareti))
-- The usage of mixed protocol values in the same LoadBalancer Service is possible if the new feature gate MixedProtocolLBService is enabled. The feature gate is disabled by default. The user has to enable it for the API Server. ([#94028](https://github.com/kubernetes/kubernetes/pull/94028), [@janosi](https://github.com/janosi)) [SIG API Machinery and Apps]
-- Introduces a feature gate CSIServiceAccountToken with two additional fields in `CSIDriverSpec`. ([#93130](https://github.com/kubernetes/kubernetes/pull/93130), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing]
-- Users can try the cronjob controller v2 using the feature gate. This will be the default controller in future releases. ([#93370](https://github.com/kubernetes/kubernetes/pull/93370), [@alaypatel07](https://github.com/alaypatel07)) [SIG API Machinery, Apps, Auth and Testing]
-- VolumeSnapshotDataSource moves to GA in the 1.20 release ([#95282](https://github.com/kubernetes/kubernetes/pull/95282), [@xing-yang](https://github.com/xing-yang)) [SIG Apps]
-- The WinOverlay feature graduated to beta ([#94807](https://github.com/kubernetes/kubernetes/pull/94807), [@ksubrmnn](https://github.com/ksubrmnn)) [SIG Windows]
+- 1. PodAffinityTerm includes a namespaceSelector field to allow selecting eligible namespaces based on their labels.
+  2. A new CrossNamespacePodAffinity quota scope API that allows restricting which namespaces are allowed to use PodAffinityTerm with cross-namespace references via the namespaceSelector or namespaces fields. ([#98582](https://github.com/kubernetes/kubernetes/pull/98582), [@ahg-g](https://github.com/ahg-g)) [SIG API Machinery, Apps, Auth and Testing]
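A hedged sketch of the namespaceSelector field from the entry above, co-locating a pod with pods in namespaces labeled by team; all names and labels are hypothetical, and the field is alpha and gated in this release:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: with-cross-namespace-affinity          # hypothetical
spec:
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            app: cache                         # hypothetical
        namespaceSelector:                     # selects eligible namespaces by label
          matchLabels:
            team: platform                     # hypothetical
        topologyKey: kubernetes.io/hostname
  containers:
  - name: app
    image: k8s.gcr.io/pause:3.4.1              # placeholder workload
```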
+- Add a Probe-level terminationGracePeriodSeconds field ([#99375](https://github.com/kubernetes/kubernetes/pull/99375), [@ehashman](https://github.com/ehashman)) [SIG API Machinery, Apps, Node and Testing]
+- Added a `.spec.completionMode` field to Job, with accepted values `NonIndexed` (default) and `Indexed`. This is an alpha field and is only honored by servers with the `IndexedJob` feature gate enabled (see the Job sketch after this group of entries). ([#98441](https://github.com/kubernetes/kubernetes/pull/98441), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI]
+- Adds support for the endPort field in NetworkPolicy ([#97058](https://github.com/kubernetes/kubernetes/pull/97058), [@rikatz](https://github.com/rikatz)) [SIG Apps and Network]
+- CSIServiceAccountToken graduates to Beta and is enabled by default. ([#99298](https://github.com/kubernetes/kubernetes/pull/99298), [@zshihang](https://github.com/zshihang))
+- Cluster admins can now turn off the `/debug/pprof` and `/debug/flags/v` endpoints in the kubelet by setting `enableProfilingHandler` and `enableDebugFlagsHandler` to `false` in the Kubelet configuration file. The options `enableProfilingHandler` and `enableDebugFlagsHandler` can be set to `true` only when `enableDebuggingHandlers` is also set to `true`. ([#98458](https://github.com/kubernetes/kubernetes/pull/98458), [@SaranBalaji90](https://github.com/SaranBalaji90))
+- DaemonSets accept a MaxSurge integer or percent on their rolling update strategy that will launch the updated pod on nodes and wait for those pods to go ready before marking the old out-of-date pods as deleted. This allows workloads to avoid downtime during upgrades when deployed using DaemonSets. This feature is alpha and is behind the DaemonSetUpdateSurge feature gate. ([#96441](https://github.com/kubernetes/kubernetes/pull/96441), [@smarterclayton](https://github.com/smarterclayton)) [SIG Apps and Testing]
+- Enable SPDY pings to keep connections alive, so that `kubectl exec` and `kubectl port-forward` won't be interrupted. ([#97083](https://github.com/kubernetes/kubernetes/pull/97083), [@knight42](https://github.com/knight42)) [SIG API Machinery and CLI]
+- FieldManager no longer owns fields that get reset before the object is persisted (e.g. "status wiping"). ([#99661](https://github.com/kubernetes/kubernetes/pull/99661), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery, Auth and Testing]
+- Fixes server-side apply for APIService resources. ([#98576](https://github.com/kubernetes/kubernetes/pull/98576), [@kevindelgado](https://github.com/kevindelgado))
+- Generic ephemeral volumes are beta. ([#99643](https://github.com/kubernetes/kubernetes/pull/99643), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, CLI, Node, Storage and Testing]
+- Hugepages request values are limited to integer multiples of the page size. ([#98515](https://github.com/kubernetes/kubernetes/pull/98515), [@lala123912](https://github.com/lala123912)) [SIG Apps]
+- Implement GetAvailableResources in the podresources API. ([#95734](https://github.com/kubernetes/kubernetes/pull/95734), [@fromanirh](https://github.com/fromanirh)) [SIG Instrumentation, Node and Testing]
+- The IngressClass resource can now reference a resource in a specific namespace for implementation-specific configuration (previously only cluster-level resources were allowed). This feature can be enabled using the IngressClassNamespacedParams feature gate. ([#99275](https://github.com/kubernetes/kubernetes/pull/99275), [@hbagdi](https://github.com/hbagdi))
+- The Jobs API has a new `.spec.suspend` field that can be used to suspend and resume Jobs. This is an alpha field which is only honored by servers with the `SuspendJob` feature gate enabled. ([#98727](https://github.com/kubernetes/kubernetes/pull/98727), [@adtac](https://github.com/adtac))
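As promised above, a hedged Job sketch combining the new `.spec.completionMode` and `.spec.suspend` fields; the names are hypothetical, and both fields are alpha, requiring the IndexedJob and SuspendJob feature gates respectively:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: indexed-demo                    # hypothetical
spec:
  completions: 5
  parallelism: 2
  completionMode: Indexed               # pods receive indexes 0..4; alpha, IndexedJob gate
  suspend: true                         # created suspended; set to false to start; alpha, SuspendJob gate
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: worker
        image: k8s.gcr.io/pause:3.4.1   # placeholder workload
```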
+- The kubelet Graceful Node Shutdown feature graduates to Beta and is enabled by default. ([#99735](https://github.com/kubernetes/kubernetes/pull/99735), [@bobbypage](https://github.com/bobbypage))
+- Kubernetes is now built using go1.15.7 ([#98363](https://github.com/kubernetes/kubernetes/pull/98363), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Node, Release and Testing]
+- Namespace API objects now have a `kubernetes.io/metadata.name` label matching their metadata.name field to allow selecting any namespace by its name using a label selector. ([#96968](https://github.com/kubernetes/kubernetes/pull/96968), [@jayunit100](https://github.com/jayunit100)) [SIG API Machinery, Apps, Cloud Provider, Storage and Testing]
+- One new field, "InternalTrafficPolicy", is added to Service. It specifies whether cluster-internal traffic should be routed to all endpoints or to node-local endpoints only. "Cluster" routes internal traffic to a Service to all endpoints. "Local" routes traffic to node-local endpoints only, and traffic is dropped if no node-local endpoints are ready. The default value is "Cluster". ([#96600](https://github.com/kubernetes/kubernetes/pull/96600), [@maplain](https://github.com/maplain)) [SIG API Machinery, Apps and Network]
+- PodDisruptionBudget API objects can now contain conditions in status. ([#98127](https://github.com/kubernetes/kubernetes/pull/98127), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation]
+- PodSecurityPolicy only stores "generic" as an allowed volume type if the GenericEphemeralVolume feature gate is enabled ([#98918](https://github.com/kubernetes/kubernetes/pull/98918), [@pohly](https://github.com/pohly)) [SIG Auth and Security]
+- Promote CronJobs to batch/v1 ([#99423](https://github.com/kubernetes/kubernetes/pull/99423), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing]
+- Promote the Immutable Secrets/ConfigMaps feature to Stable. This allows setting the `immutable` field in a Secret or ConfigMap object to mark their contents as immutable. ([#97615](https://github.com/kubernetes/kubernetes/pull/97615), [@wojtek-t](https://github.com/wojtek-t)) [SIG Apps, Architecture, Node and Testing]
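A minimal sketch of the now-stable `immutable` field from the entry above (the name and data are hypothetical); once set, the contents cannot be updated, only deleted and recreated:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config     # hypothetical
data:
  mode: production
immutable: true        # marks the contents as immutable
```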
+- Remove support for building Kubernetes with bazel. ([#99561](https://github.com/kubernetes/kubernetes/pull/99561), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery, Apps, Architecture, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Release, Scalability, Scheduling, Storage, Testing and Windows]
+- The scheduler extender filter interface can now report unresolvable failed nodes in the new field `FailedAndUnresolvableNodes` of the `ExtenderFilterResult` struct. Nodes in this map will be skipped in the preemption phase. ([#92866](https://github.com/kubernetes/kubernetes/pull/92866), [@cofyc](https://github.com/cofyc)) [SIG Scheduling]
+- Services can specify loadBalancerClass to use a custom load balancer ([#98277](https://github.com/kubernetes/kubernetes/pull/98277), [@XudongLiuHarold](https://github.com/XudongLiuHarold))
+- Storage capacity tracking (the CSIStorageCapacity feature) graduates to Beta and is enabled by default; storage.k8s.io/v1alpha1 VolumeAttachment and CSIStorageCapacity objects are deprecated ([#99641](https://github.com/kubernetes/kubernetes/pull/99641), [@pohly](https://github.com/pohly))
+- Support for Indexed Job: a Job that is considered completed when Pods associated to indexes from 0 to (.spec.completions-1) have succeeded. ([#98812](https://github.com/kubernetes/kubernetes/pull/98812), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI]
+- The BoundServiceAccountTokenVolume feature has been promoted to beta, and enabled by default.
+  - This changes the tokens provided to containers at `/var/run/secrets/kubernetes.io/serviceaccount/token` to be time-limited, auto-refreshed, and invalidated when the containing pod is deleted.
+  - Clients should reload the token from disk periodically (once per minute is recommended) to ensure they continue to use a valid token. `k8s.io/client-go` version v11.0.0+ and v0.15.0+ reload tokens automatically.
+  - By default, injected tokens are given an extended lifetime so they remain valid even after a new refreshed token is provided. The metric `serviceaccount_stale_tokens_total` can be used to monitor for workloads that are depending on the extended lifetime and are continuing to use tokens even after a refreshed token is provided to the container. If that metric indicates no existing workloads are depending on extended lifetimes, injected token lifetime can be shortened to 1 hour by starting `kube-apiserver` with `--service-account-extend-token-expiration=false`. ([#95667](https://github.com/kubernetes/kubernetes/pull/95667), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle and Testing]
+- The EndpointSlice Controllers are now GA. The `EndpointSliceController` will not populate the `deprecatedTopology` field and will only provide topology information through the `zone` and `nodeName` fields. ([#99870](https://github.com/kubernetes/kubernetes/pull/99870), [@swetharepakula](https://github.com/swetharepakula))
+- The Endpoints controller will now set the `endpoints.kubernetes.io/over-capacity` annotation to "warning" when an Endpoints resource contains more than 1000 addresses. In a future release, the controller will truncate Endpoints that exceed this limit. The EndpointSlice API can be used to support a significantly larger number of addresses. ([#99975](https://github.com/kubernetes/kubernetes/pull/99975), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
+- The PodDisruptionBudget API has been promoted to policy/v1 with no schema changes. The only functional change is that an empty selector (`{}`) written to a policy/v1 PodDisruptionBudget now selects all pods in the namespace. The behavior of the policy/v1beta1 API remains unchanged. The policy/v1beta1 PodDisruptionBudget API is deprecated and will no longer be served in 1.25+. ([#99290](https://github.com/kubernetes/kubernetes/pull/99290), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Scheduling and Testing]
+- The `EndpointSlice` API is now GA. The `EndpointSlice` topology field has been removed from the GA API and will be replaced by a new per-Endpoint zone field. If the topology field was previously used, it will be converted into an annotation in the v1 Resource. The `discovery.k8s.io/v1alpha1` API is removed. ([#99662](https://github.com/kubernetes/kubernetes/pull/99662), [@swetharepakula](https://github.com/swetharepakula))
+- The `controller.kubernetes.io/pod-deletion-cost` annotation can be set to offer a hint on the cost of deleting a `Pod` compared to other pods belonging to the same ReplicaSet. Pods with lower deletion cost are deleted first. This is an alpha feature (see the sketch after these entries). ([#99163](https://github.com/kubernetes/kubernetes/pull/99163), [@ahg-g](https://github.com/ahg-g))
+- The kube-apiserver now resets `managedFields` that got corrupted by a mutating admission controller. ([#98074](https://github.com/kubernetes/kubernetes/pull/98074), [@kwiesmueller](https://github.com/kwiesmueller))
+- Topology Aware Hints are now available in alpha and can be enabled with the `TopologyAwareHints` feature gate. ([#99522](https://github.com/kubernetes/kubernetes/pull/99522), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, Auth, Instrumentation, Network and Testing]
+- Users might specify the `kubectl.kubernetes.io/default-exec-container` annotation in a Pod to preselect a container for kubectl commands. ([#97099](https://github.com/kubernetes/kubernetes/pull/97099), [@pacoxu](https://github.com/pacoxu)) [SIG CLI]
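The sketch referenced in the pod-deletion-cost entry above; the annotation value is a string-encoded integer, and pods with lower cost are preferred for deletion on ReplicaSet scale-down. The pod name and value are hypothetical, and the feature is alpha:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: replica-a                                        # hypothetical; usually set on a controller-managed pod
  annotations:
    controller.kubernetes.io/pod-deletion-cost: "-100"   # lower cost = deleted first
spec:
  containers:
  - name: app
    image: k8s.gcr.io/pause:3.4.1                        # placeholder workload
```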
### Feature

-- **Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**:
-- A new metric `apiserver_request_filter_duration_seconds` has been introduced that measures request filter latency in seconds. ([#95207](https://github.com/kubernetes/kubernetes/pull/95207), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Instrumentation]
-- A new set of alpha metrics are reported by the Kubernetes scheduler under the `/metrics/resources` endpoint that allow administrators to easily see the resource consumption (requests and limits for all resources on the pods) and compare it to actual pod usage or node capacity. ([#94866](https://github.com/kubernetes/kubernetes/pull/94866), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Instrumentation, Node and Scheduling]
-- Add the --experimental-logging-sanitization flag enabling runtime protection from leaking sensitive data in logs ([#96370](https://github.com/kubernetes/kubernetes/pull/96370), [@serathius](https://github.com/serathius)) [SIG API Machinery, Cluster Lifecycle and Instrumentation]
-- Add a StorageVersionAPI feature gate that makes the API server update storageversions before serving certain write requests. This feature allows the storage migrator to manage storage migration for built-in resources. Enabling the internal.apiserver.k8s.io/v1alpha1 API and the APIServerIdentity feature gate are required to use this feature. ([#93873](https://github.com/kubernetes/kubernetes/pull/93873), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Auth and Testing]
-- Add a metric for the time taken to perform a recursive permission change ([#95866](https://github.com/kubernetes/kubernetes/pull/95866), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage]
-- Add a new `vSphere` metric: `cloudprovider_vsphere_vcenter_versions`. Its content shows `vCenter` hostnames with the associated server version. ([#94526](https://github.com/kubernetes/kubernetes/pull/94526), [@Danil-Grigorev](https://github.com/Danil-Grigorev)) [SIG Cloud Provider and Instrumentation]
-- Add a new flag to set the priority for the kubelet on Windows nodes so that workloads cannot overwhelm the node, thereby disrupting the kubelet process. ([#96051](https://github.com/kubernetes/kubernetes/pull/96051), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node and Windows]
-- Add a feature to size memory backed volumes ([#94444](https://github.com/kubernetes/kubernetes/pull/94444), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Storage and Testing]
-- Add foreground cascading deletion to kubectl with the new `kubectl delete foreground|background|orphan` option. ([#93384](https://github.com/kubernetes/kubernetes/pull/93384), [@zhouya0](https://github.com/zhouya0))
-- Add metrics for azure service operations (route and loadbalancer). ([#94124](https://github.com/kubernetes/kubernetes/pull/94124), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider and Instrumentation]
-- Add network rule support in Azure account creation. ([#94239](https://github.com/kubernetes/kubernetes/pull/94239), [@andyzhangx](https://github.com/andyzhangx))
-- Add the node_authorizer_actions_duration_seconds metric that can be used to estimate load on the node authorizer. ([#92466](https://github.com/kubernetes/kubernetes/pull/92466), [@mborsz](https://github.com/mborsz)) [SIG API Machinery, Auth and Instrumentation]
-- Add pod_ based CPU and memory metrics to the kubelet's /metrics/resource endpoint ([#95839](https://github.com/kubernetes/kubernetes/pull/95839), [@egernst](https://github.com/egernst)) [SIG Instrumentation, Node and Testing]
-- Added `get-users` and `delete-user` to the `kubectl config` subcommand ([#89840](https://github.com/kubernetes/kubernetes/pull/89840), [@eddiezane](https://github.com/eddiezane)) [SIG CLI]
-- Added the counter metric "apiserver_request_self" to count API server self-requests, with labels for verb, resource, and subresource. ([#94288](https://github.com/kubernetes/kubernetes/pull/94288), [@LogicalShark](https://github.com/LogicalShark)) [SIG API Machinery, Auth, Instrumentation and Scheduling]
-- Added the new k8s.io/component-helpers repository providing shared helper code for (core) components. ([#92507](https://github.com/kubernetes/kubernetes/pull/92507), [@ingvagabund](https://github.com/ingvagabund)) [SIG Apps, Node, Release and Scheduling]
-- Adds the `create ingress` command to `kubectl` ([#78153](https://github.com/kubernetes/kubernetes/pull/78153), [@amimof](https://github.com/amimof)) [SIG CLI and Network]
-- Adds a headless service on the node-local-cache addon. ([#88412](https://github.com/kubernetes/kubernetes/pull/88412), [@stafot](https://github.com/stafot)) [SIG Cloud Provider and Network]
-- Allow cross compilation of kubernetes on different platforms. ([#94403](https://github.com/kubernetes/kubernetes/pull/94403), [@bnrjee](https://github.com/bnrjee)) [SIG Release]
-- Azure: Support multiple services sharing one IP address ([#94991](https://github.com/kubernetes/kubernetes/pull/94991), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
-- CRDs: For structural schemas, non-nullable null map fields will now be dropped and defaulted if a default is available. null items in lists will continue being preserved, and fail validation if not nullable. ([#95423](https://github.com/kubernetes/kubernetes/pull/95423), [@apelisse](https://github.com/apelisse)) [SIG API Machinery]
-- Changed: a default "Accept: */*" header is added to HTTP probes. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes (https://github.com/kubernetes/website/pull/24756) ([#95641](https://github.com/kubernetes/kubernetes/pull/95641), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node]
-- Client-go credential plugins can now be passed the current cluster information via the KUBERNETES_EXEC_INFO environment variable. ([#95489](https://github.com/kubernetes/kubernetes/pull/95489), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery and Auth]
-- The command to start the network proxy changes from 'KUBE_ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE ./cluster/kube-up.sh' to 'KUBE_ENABLE_KONNECTIVITY_SERVICE=true ./hack/kube-up.sh' ([#92669](https://github.com/kubernetes/kubernetes/pull/92669), [@Jefftree](https://github.com/Jefftree)) [SIG Cloud Provider]
-- Configure the AWS LoadBalancer health check protocol via service annotations. ([#94546](https://github.com/kubernetes/kubernetes/pull/94546), [@kishorj](https://github.com/kishorj))
-- DefaultPodTopologySpread graduated to Beta. The feature gate is enabled by default. ([#95631](https://github.com/kubernetes/kubernetes/pull/95631), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling and Testing]
-- E2e test for PodFsGroupChangePolicy ([#96247](https://github.com/kubernetes/kubernetes/pull/96247), [@saikat-royc](https://github.com/saikat-royc)) [SIG Storage and Testing]
-- Ephemeral containers now apply the same API defaults as initContainers and containers ([#94896](https://github.com/kubernetes/kubernetes/pull/94896), [@wawa0210](https://github.com/wawa0210)) [SIG Apps and CLI]
-- Graduate the Pod Resources API to GA. Introduces the pod_resources_endpoint_requests_total metric which tracks the total number of requests to the pod resources API ([#92165](https://github.com/kubernetes/kubernetes/pull/92165), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Instrumentation, Node and Testing]
-- In dual-stack bare-metal clusters, you can now pass dual-stack IPs to `kubelet --node-ip`, e.g. `kubelet --node-ip 10.1.0.5,fd01::0005`. This is not yet supported for non-bare-metal clusters. In dual-stack clusters where nodes have dual-stack addresses, hostNetwork pods will now get dual-stack PodIPs. ([#95239](https://github.com/kubernetes/kubernetes/pull/95239), [@danwinship](https://github.com/danwinship)) [SIG Network and Node]
-- Introduce the api-extensions category, which will return mutating admission configs, validating admission configs, CRDs and APIServices when used in kubectl get, for example. ([#95603](https://github.com/kubernetes/kubernetes/pull/95603), [@soltysh](https://github.com/soltysh)) [SIG API Machinery]
-- Introduces a new GCE specific cluster creation variable KUBE_PROXY_DISABLE. When set to true, this will skip over the creation of kube-proxy (whether the daemonset or static pod). This can be used to control the lifecycle of kube-proxy separately from the lifecycle of the nodes. ([#91977](https://github.com/kubernetes/kubernetes/pull/91977), [@varunmar](https://github.com/varunmar)) [SIG Cloud Provider]
-- Kube-apiserver now maintains a Lease object to identify itself:
-  - The feature is under feature gate `APIServerIdentity`.
-  - Two flags are added to kube-apiserver: `identity-lease-duration-seconds`, `identity-lease-renew-interval-seconds` ([#95533](https://github.com/kubernetes/kubernetes/pull/95533), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery]
-- Kube-apiserver: The timeout used when making health check calls to etcd can now be configured with `--etcd-healthcheck-timeout`. The default timeout is 2 seconds, matching the previous behavior. ([#93244](https://github.com/kubernetes/kubernetes/pull/93244), [@Sh4d1](https://github.com/Sh4d1)) [SIG API Machinery]
-- Kube-apiserver: added support for compressing rotated audit log files with `--audit-log-compress` ([#94066](https://github.com/kubernetes/kubernetes/pull/94066), [@lojies](https://github.com/lojies)) [SIG API Machinery and Auth]
-- Kubeadm now prints warnings instead of throwing errors if the current system time is outside of the NotBefore and NotAfter bounds of a loaded certificate. ([#94504](https://github.com/kubernetes/kubernetes/pull/94504), [@neolit123](https://github.com/neolit123))
-- Kubeadm: Add a preflight check that the control-plane node has at least 1700MB of RAM ([#93275](https://github.com/kubernetes/kubernetes/pull/93275), [@xlgao-zju](https://github.com/xlgao-zju)) [SIG Cluster Lifecycle]
-- Kubeadm: add the "--cluster-name" flag to "kubeadm alpha kubeconfig user" to allow configuring the cluster name in the generated kubeconfig file ([#93992](https://github.com/kubernetes/kubernetes/pull/93992), [@prabhu43](https://github.com/prabhu43)) [SIG Cluster Lifecycle]
-- Kubeadm: add the "--kubeconfig" flag to the "kubeadm init phase upload-certs" command to allow users to pass a custom location for a kubeconfig file. ([#94765](https://github.com/kubernetes/kubernetes/pull/94765), [@zhanw15](https://github.com/zhanw15)) [SIG Cluster Lifecycle]
-- Kubeadm: make the etcd pod request 100m CPU, 100Mi memory and 100Mi ephemeral_storage by default ([#94479](https://github.com/kubernetes/kubernetes/pull/94479), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle]
-- Kubeadm: make the command "kubeadm alpha kubeconfig user" accept a "--config" flag and remove the following flags:
-  - apiserver-advertise-address / apiserver-bind-port: use either localAPIEndpoint from InitConfiguration or controlPlaneEndpoint from ClusterConfiguration.
-  - cluster-name: use clusterName from ClusterConfiguration.
-  - cert-dir: use certificatesDir from ClusterConfiguration. ([#94879](https://github.com/kubernetes/kubernetes/pull/94879), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle]
-- Kubectl create now supports creating ingress objects. ([#94327](https://github.com/kubernetes/kubernetes/pull/94327), [@rikatz](https://github.com/rikatz)) [SIG CLI and Network]
-- Kubectl rollout history sts/sts-name --revision=some-revision will start showing the detailed view of the sts on that specified revision ([#86506](https://github.com/kubernetes/kubernetes/pull/86506), [@dineshba](https://github.com/dineshba)) [SIG CLI]
-- Kubectl: Previously, users could not provide arguments to an external diff tool via the KUBECTL_EXTERNAL_DIFF env var. This release allows users to specify args to the KUBECTL_EXTERNAL_DIFF env var. ([#95292](https://github.com/kubernetes/kubernetes/pull/95292), [@dougsland](https://github.com/dougsland)) [SIG CLI]
-- Kubemark now supports both real and hollow nodes in a single cluster. ([#93201](https://github.com/kubernetes/kubernetes/pull/93201), [@ellistarn](https://github.com/ellistarn)) [SIG Scalability]
-- Kubernetes E2E test image manifest lists now contain Windows images. ([#77398](https://github.com/kubernetes/kubernetes/pull/77398), [@claudiubelu](https://github.com/claudiubelu)) [SIG Testing and Windows]
-- Kubernetes is now built using go1.15.2
-  - build: Update to k/repo-infra@v0.1.1 (supports go1.15.2)
-  - build: Use go-runner:buster-v2.0.1 (built using go1.15.1)
-  - bazel: Replace --features with Starlark build settings flag
-  - hack/lib/util.sh: some bash cleanups
-    - switched one spot to use kube::logging
-    - make kube::util::find-binary return an error when it doesn't find anything so that hack scripts fail fast instead of with '' binary not found errors.
-    - this required deleting some genfeddoc stuff. the binary no longer exists in the k/k repo since we removed federation/, and I don't see it in https://github.com/kubernetes-sigs/kubefed/ either. I'm assuming that it's gone for good now.
-  - bazel: output go_binary rule directly from go_binary_conditional_pure. From [@mikedanese](https://github.com/mikedanese): Instead of aliasing. Aliases are annoying in a number of ways. This is specifically bugging me now because they make the action graph harder to analyze programmatically. By using aliases here, we would need to handle potentially aliased go_binary targets and dereference to the effective target. The comment references an issue with `pure = select(...)` which appears to be resolved considering this now builds.
-  - make kube::util::find-binary not dependent on bazel-out/ structure. Implement an aspect that outputs go_build_mode metadata for go binaries, and use that during binary selection. ([#94449](https://github.com/kubernetes/kubernetes/pull/94449), [@justaugustus](https://github.com/justaugustus)) [SIG Architecture, CLI, Cluster Lifecycle, Node, Release and Testing]
-- Kubernetes is now built using go1.15.5. build: Update to k/repo-infra@v0.1.2 (supports go1.15.5) ([#95776](https://github.com/kubernetes/kubernetes/pull/95776), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing]
-- A new default scheduling plugins order reduces scheduling and preemption latency when taints and node affinity are used ([#95539](https://github.com/kubernetes/kubernetes/pull/95539), [@soulxu](https://github.com/soulxu)) [SIG Scheduling]
-- Only update Azure data disks when attaching/detaching ([#94265](https://github.com/kubernetes/kubernetes/pull/94265), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
-- Promote SupportNodePidsLimit to GA to provide node-to-pod PID isolation. Promote SupportPodPidsLimit to GA to provide the ability to limit PIDs per pod. ([#94140](https://github.com/kubernetes/kubernetes/pull/94140), [@derekwaynecarr](https://github.com/derekwaynecarr))
-- SCTP support in API objects (Pod, Service, NetworkPolicy) is now GA. Note that this has no effect on whether SCTP is enabled on nodes at the kernel level, and note that some cloud platforms and network plugins do not support SCTP traffic. ([#95566](https://github.com/kubernetes/kubernetes/pull/95566), [@danwinship](https://github.com/danwinship)) [SIG Apps and Network]
-- The scheduler now ignores Pod update events if the resourceVersion of old and new Pods are identical. ([#96071](https://github.com/kubernetes/kubernetes/pull/96071), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling]
-- Scheduling Framework: expose the Run[Pre]ScorePlugins functions to PreemptionHandle, which can be used in the PostFilter extension point. ([#93534](https://github.com/kubernetes/kubernetes/pull/93534), [@everpeace](https://github.com/everpeace)) [SIG Scheduling and Testing]
-- SelectorSpreadPriority maps to the PodTopologySpread plugin when the DefaultPodTopologySpread feature is enabled ([#95448](https://github.com/kubernetes/kubernetes/pull/95448), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling]
-- Send GCE node startup script logs to console and journal. ([#95311](https://github.com/kubernetes/kubernetes/pull/95311), [@karan](https://github.com/karan))
-- SetHostnameAsFQDN has been graduated to Beta and is therefore enabled by default. ([#95267](https://github.com/kubernetes/kubernetes/pull/95267), [@javidiaz](https://github.com/javidiaz)) [SIG Node]
-- Support the [service.beta.kubernetes.io/azure-pip-ip-tags] annotations to allow customers to specify ip-tags to influence public-ip creation in Azure [Tag1=Value1, Tag2=Value2, etc.] ([#94114](https://github.com/kubernetes/kubernetes/pull/94114), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider]
([#94114](https://github.com/kubernetes/kubernetes/pull/94114), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider] -- Support custom tags for cloud provider managed resources ([#96450](https://github.com/kubernetes/kubernetes/pull/96450), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Support customizing the load balancer health probe protocol and request path ([#96338](https://github.com/kubernetes/kubernetes/pull/96338), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Support for Windows container images (OS Versions: 1809, 1903, 1909, 2004) was added to the pause:3.4 image. ([#91452](https://github.com/kubernetes/kubernetes/pull/91452), [@claudiubelu](https://github.com/claudiubelu)) [SIG Node, Release and Windows] -- Support multiple standard load balancers in one cluster ([#96111](https://github.com/kubernetes/kubernetes/pull/96111), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- The beta `RootCAConfigMap` feature gate is enabled by default and causes kube-controller-manager to publish a "kube-root-ca.crt" ConfigMap to every namespace. This ConfigMap contains a CA bundle used for verifying connections to the kube-apiserver. ([#96197](https://github.com/kubernetes/kubernetes/pull/96197), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth and Testing] -- The kubelet_runtime_operations_duration_seconds metric buckets were set to 0.005 0.0125 0.03125 0.078125 0.1953125 0.48828125 1.220703125 3.0517578125 7.62939453125 19.073486328125 47.6837158203125 119.20928955078125 298.0232238769531 and 745.0580596923828 seconds ([#96054](https://github.com/kubernetes/kubernetes/pull/96054), [@alvaroaleman](https://github.com/alvaroaleman)) [SIG Instrumentation and Node] -- There is a new pv_collector_total_pv_count metric that counts persistent volumes by the volume plugin name and volume mode. ([#95719](https://github.com/kubernetes/kubernetes/pull/95719), [@tsmetana](https://github.com/tsmetana)) [SIG Apps, Instrumentation, Storage and Testing] -- Volume snapshot e2e test to validate PVC and VolumeSnapshotContent finalizer ([#95863](https://github.com/kubernetes/kubernetes/pull/95863), [@RaunakShah](https://github.com/RaunakShah)) [SIG Cloud Provider, Storage and Testing] -- Warns the user when executing kubectl apply/diff against a resource currently being deleted. ([#95544](https://github.com/kubernetes/kubernetes/pull/95544), [@SaiHarshaK](https://github.com/SaiHarshaK)) [SIG CLI] -- `kubectl alpha debug` has graduated to beta and is now `kubectl debug`. ([#96138](https://github.com/kubernetes/kubernetes/pull/96138), [@verb](https://github.com/verb)) [SIG CLI and Testing] -- `kubectl debug` gains support for changing container images when copying a pod for debugging, similar to how `kubectl set image` works (see the sketch below). See `kubectl help debug` for more information. ([#96058](https://github.com/kubernetes/kubernetes/pull/96058), [@verb](https://github.com/verb)) [SIG CLI] +- A client-go metric, rest_client_exec_plugin_call_total, has been added to track total calls to client-go credential plugins. ([#98892](https://github.com/kubernetes/kubernetes/pull/98892), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery, Auth, Cluster Lifecycle and Instrumentation]
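A short sketch of the `kubectl debug` entries above (pod and image names are placeholders):

```shell
# Copy a running pod for debugging, replacing every container image
# with busybox, similar to how `kubectl set image` works.
kubectl debug my-pod --copy-to=debug-pod --set-image='*=busybox'
```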
+- A new histogram metric tracks how long it takes the `TTLAfterFinished` controller to delete a job ([#98676](https://github.com/kubernetes/kubernetes/pull/98676), [@ahg-g](https://github.com/ahg-g)) +- AWS cloud provider supports auto-discovering subnets without any `kubernetes.io/cluster/<clusterName>` tags. It also supports the additional service annotation `service.beta.kubernetes.io/aws-load-balancer-subnets` to manually configure the subnets. ([#97431](https://github.com/kubernetes/kubernetes/pull/97431), [@kishorj](https://github.com/kishorj)) +- Aborting the drain command in a list of nodes will be deprecated. The new behavior will make the drain command go through all nodes even if draining one or more of them fails. For now, users can try this behavior by enabling the --ignore-errors flag (see the sketch below). ([#98203](https://github.com/kubernetes/kubernetes/pull/98203), [@yuzhiquan](https://github.com/yuzhiquan)) +- Add --permit-address-sharing flag to `kube-apiserver` to listen with `SO_REUSEADDR`. While allowing parallel listeners on wildcard IPs like 0.0.0.0 and on specific IPs, it avoids waiting for the kernel to release sockets in the `TIME_WAIT` state, and hence considerably reduces `kube-apiserver` restart times under certain conditions. ([#93861](https://github.com/kubernetes/kubernetes/pull/93861), [@sttts](https://github.com/sttts)) +- Add `csi_operations_seconds` metric on kubelet that exposes CSI operations duration and status for node CSI operations. ([#98979](https://github.com/kubernetes/kubernetes/pull/98979), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Instrumentation and Storage] +- Add `migrated` field into `storage_operation_duration_seconds` metric ([#99050](https://github.com/kubernetes/kubernetes/pull/99050), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Instrumentation and Storage] +- Add flag --lease-reuse-duration-seconds for kube-apiserver to configure the etcd lease reuse duration. ([#97009](https://github.com/kubernetes/kubernetes/pull/97009), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery and Scalability] +- Add metric etcd_lease_object_counts for kube-apiserver to observe the maximum number of objects attached to a single etcd lease. ([#97480](https://github.com/kubernetes/kubernetes/pull/97480), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery, Instrumentation and Scalability] +- Add support to generate client-side binaries for the new darwin/arm64 platform ([#97743](https://github.com/kubernetes/kubernetes/pull/97743), [@dims](https://github.com/dims)) [SIG Release and Testing] +- Added `ephemeral_volume_controller_create[_failures]_total` counters to kube-controller-manager metrics ([#99115](https://github.com/kubernetes/kubernetes/pull/99115), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Cluster Lifecycle, Instrumentation and Storage] +- Added support for installing `arm64` node artifacts. ([#99242](https://github.com/kubernetes/kubernetes/pull/99242), [@liu-cong](https://github.com/liu-cong)) +- Adds alpha feature `VolumeCapacityPriority` which makes the scheduler prioritize nodes based on the best matching size of statically provisioned PVs across multiple topologies. ([#96347](https://github.com/kubernetes/kubernetes/pull/96347), [@cofyc](https://github.com/cofyc)) [SIG Apps, Network, Scheduling, Storage and Testing]
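A sketch of the multi-node drain behavior described above (node names are placeholders):

```shell
# With --ignore-errors, the drain proceeds through all listed nodes
# even if draining one of them fails, instead of aborting early.
kubectl drain node-1 node-2 node-3 --ignore-errors --ignore-daemonsets
```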
+- Adds the ability to pass --strict-transport-security-directives to the kube-apiserver to set the HSTS header appropriately. Be sure you understand the consequences to browsers before setting this field. ([#96502](https://github.com/kubernetes/kubernetes/pull/96502), [@249043822](https://github.com/249043822)) [SIG Auth] +- Adds two new metrics to cronjobs: a histogram to track the difference between the time a job is created and the time it was expected to be created, and a gauge for the missed schedules of a cronjob ([#99341](https://github.com/kubernetes/kubernetes/pull/99341), [@alaypatel07](https://github.com/alaypatel07)) +- Alpha implementation of Kubectl Command Headers (SIG CLI KEP 859), enabled when the KUBECTL_COMMAND_HEADERS environment variable is set on the client command line. ([#98952](https://github.com/kubernetes/kubernetes/pull/98952), [@seans3](https://github.com/seans3)) +- Base-images: Update to debian-iptables:buster-v1.4.0 + - Uses iptables 1.8.5 + - base-images: Update to debian-base:buster-v1.3.0 + - cluster/images/etcd: Build etcd:3.4.13-2 image + - Uses debian-base:buster-v1.3.0 ([#98401](https://github.com/kubernetes/kubernetes/pull/98401), [@pacoxu](https://github.com/pacoxu)) [SIG Testing] +- CRIContainerLogRotation graduates to GA and is unconditionally enabled. ([#99651](https://github.com/kubernetes/kubernetes/pull/99651), [@umohnani8](https://github.com/umohnani8)) +- Component owners can configure the allowlist of metric labels with the flag '--allow-metric-labels'. ([#99385](https://github.com/kubernetes/kubernetes/pull/99385), [@YoyinZyc](https://github.com/YoyinZyc)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Release] +- Component owners can configure the allowlist of metric labels with the flag '--allow-metric-labels'. ([#99738](https://github.com/kubernetes/kubernetes/pull/99738), [@YoyinZyc](https://github.com/YoyinZyc)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] +- EmptyDir memory backed volumes are sized as the minimum of pod allocatable memory on a host and an optional explicit user provided value. ([#100319](https://github.com/kubernetes/kubernetes/pull/100319), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node] +- Enables Kubelet to check volume condition and log events to corresponding pods. ([#99284](https://github.com/kubernetes/kubernetes/pull/99284), [@fengzixu](https://github.com/fengzixu)) [SIG Apps, Instrumentation, Node and Storage] +- EndpointSliceNodeName graduates to GA and thus will be unconditionally enabled -- NodeName will always be available in the v1beta1 API. ([#99746](https://github.com/kubernetes/kubernetes/pull/99746), [@swetharepakula](https://github.com/swetharepakula)) +- Export `NewDebuggingRoundTripper` function and `DebugLevel` options in the k8s.io/client-go/transport package.
([#98324](https://github.com/kubernetes/kubernetes/pull/98324), [@atosatto](https://github.com/atosatto)) +- Kube-proxy iptables: new metric sync_proxy_rules_iptables_total that exposes the number of rules programmed per table in each iteration ([#99653](https://github.com/kubernetes/kubernetes/pull/99653), [@aojea](https://github.com/aojea)) [SIG Instrumentation and Network] +- Kube-scheduler now logs plugin scoring summaries at --v=4 ([#99411](https://github.com/kubernetes/kubernetes/pull/99411), [@damemi](https://github.com/damemi)) [SIG Scheduling] +- Kubeadm now includes CoreDNS v1.8.0. ([#96429](https://github.com/kubernetes/kubernetes/pull/96429), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Kubeadm: IPv6DualStack feature gate graduates to Beta and is enabled by default ([#99294](https://github.com/kubernetes/kubernetes/pull/99294), [@pacoxu](https://github.com/pacoxu)) +- Kubeadm: print a warning to the user, as IPv6 site-local addresses are deprecated ([#99574](https://github.com/kubernetes/kubernetes/pull/99574), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle and Network] +- Kubeadm: add support for certificate chain validation. When using kubeadm in external CA mode, this allows an intermediate CA to be used to sign the certificates. The intermediate CA certificate must be appended to each signed certificate for this to work correctly. ([#97266](https://github.com/kubernetes/kubernetes/pull/97266), [@robbiemcmichael](https://github.com/robbiemcmichael)) [SIG Cluster Lifecycle] +- Kubeadm: amend the node kernel validation to treat CGROUP_PIDS, FAIR_GROUP_SCHED as required and CFS_BANDWIDTH, CGROUP_HUGETLB as optional ([#96378](https://github.com/kubernetes/kubernetes/pull/96378), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle and Node] +- Kubeadm: apply the "node.kubernetes.io/exclude-from-external-load-balancers" label on control plane nodes during "init", "join" and "upgrade" to preserve backwards compatibility with the legacy LB mode where nodes labeled as "master" were excluded. To opt out, you can remove the label from a node. See #97543 and the linked KEP for more details. ([#98269](https://github.com/kubernetes/kubernetes/pull/98269), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: if the user has customized their image repository via the kubeadm configuration, pass the custom pause image repository and tag to the kubelet via --pod-infra-container-image not only for Docker but for all container runtimes. This flag tells the kubelet that it should not garbage collect the image. ([#99476](https://github.com/kubernetes/kubernetes/pull/99476), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: perform pre-flight validation on host/node name upon `kubeadm init` and `kubeadm join`, showing warnings on non-compliant names ([#99194](https://github.com/kubernetes/kubernetes/pull/99194), [@pacoxu](https://github.com/pacoxu)) +- `kubectl version` now writes a warning message to stderr if the client and server version difference exceeds the supported version skew of +/-1 minor version. ([#98250](https://github.com/kubernetes/kubernetes/pull/98250), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] +- Kubectl: Add `--use-protocol-buffers` flag to kubectl top pods and nodes. ([#96655](https://github.com/kubernetes/kubernetes/pull/96655), [@serathius](https://github.com/serathius))
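The `--use-protocol-buffers` flag above can be exercised directly:

```shell
# Request metrics in protobuf instead of JSON, which is cheaper to
# decode in large clusters.
kubectl top pods --use-protocol-buffers
kubectl top nodes --use-protocol-buffers
```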
+- Kubectl: `kubectl get` will omit managed fields by default now. Users can set `--show-managed-fields` to true to show managedFields when the output format is either `json` or `yaml`. ([#96878](https://github.com/kubernetes/kubernetes/pull/96878), [@knight42](https://github.com/knight42)) [SIG CLI and Testing] +- Kubectl: a Pod can be preselected as the default container using the `kubectl.kubernetes.io/default-container` annotation (see the sketch below) ([#99833](https://github.com/kubernetes/kubernetes/pull/99833), [@mengjiao-liu](https://github.com/mengjiao-liu)) +- Kubectl: add bash-completion for comma separated list on `kubectl get` ([#98301](https://github.com/kubernetes/kubernetes/pull/98301), [@phil9909](https://github.com/phil9909)) +- Kubernetes is now built using go1.15.8 ([#98834](https://github.com/kubernetes/kubernetes/pull/98834), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Release and Testing] +- Kubernetes is now built with Golang 1.16 ([#98572](https://github.com/kubernetes/kubernetes/pull/98572), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node, Release and Testing] +- Kubernetes is now built with Golang 1.16.1 ([#100106](https://github.com/kubernetes/kubernetes/pull/100106), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing] +- Metrics can now be disabled explicitly via a command line flag (e.g. '--disabled-metrics=metric1,metric2') ([#99217](https://github.com/kubernetes/kubernetes/pull/99217), [@logicalhan](https://github.com/logicalhan)) +- New admission controller `DenyServiceExternalIPs` is available. Clusters which do not *need* the Service `externalIPs` feature should enable this controller and be more secure. ([#97395](https://github.com/kubernetes/kubernetes/pull/97395), [@thockin](https://github.com/thockin)) +- Overall, enabling `PreferNominatedNode` will improve scheduling performance in clusters where preemption happens frequently; in theory, however, with `PreferNominatedNode` enabled the pod might not be scheduled to the best candidate node in the cluster. ([#93179](https://github.com/kubernetes/kubernetes/pull/93179), [@chendave](https://github.com/chendave)) [SIG Scheduling and Testing] +- Persistent Volumes formatted with the btrfs filesystem will now automatically resize when expanded. ([#99361](https://github.com/kubernetes/kubernetes/pull/99361), [@Novex](https://github.com/Novex)) [SIG Storage] +- Port the devicemanager to Windows nodes to allow device plugins like directx ([#93285](https://github.com/kubernetes/kubernetes/pull/93285), [@aarnaud](https://github.com/aarnaud)) [SIG Node, Testing and Windows] +- Removes cAdvisor JSON metrics (/stats/container, /stats/<podname>/<containername>, /stats/<namespace>/<podname>/<poduid>/<containername>) from the kubelet. ([#99236](https://github.com/kubernetes/kubernetes/pull/99236), [@pacoxu](https://github.com/pacoxu)) +- Rename metric `etcd_object_counts` to `apiserver_storage_object_counts` and mark it as stable. The original `etcd_object_counts` metric name is marked as "Deprecated" and will be removed in the future. ([#99785](https://github.com/kubernetes/kubernetes/pull/99785), [@erain](https://github.com/erain)) [SIG API Machinery, Instrumentation and Testing]
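A sketch of the `kubectl.kubernetes.io/default-container` annotation above (pod and container names are placeholders):

```shell
# Preselect the "app" container, then kubectl commands such as logs
# and exec target it without an explicit -c/--container flag.
kubectl annotate pod my-pod kubectl.kubernetes.io/default-container=app
kubectl logs my-pod
```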
+- Sysctls graduates to General Availability and is thus unconditionally enabled. ([#99158](https://github.com/kubernetes/kubernetes/pull/99158), [@wgahnagl](https://github.com/wgahnagl)) +- The Kubernetes pause image manifest list now contains an image for Windows Server 20H2. ([#97322](https://github.com/kubernetes/kubernetes/pull/97322), [@claudiubelu](https://github.com/claudiubelu)) [SIG Windows] +- The NodeAffinity plugin implements the PreFilter extension, offering enhanced performance for Filter. ([#99213](https://github.com/kubernetes/kubernetes/pull/99213), [@AliceZhang2016](https://github.com/AliceZhang2016)) [SIG Scheduling] +- The `CronJobControllerV2` feature flag graduates to Beta and is enabled by default. ([#98878](https://github.com/kubernetes/kubernetes/pull/98878), [@soltysh](https://github.com/soltysh)) +- The `EndpointSlice` mirroring controller mirrors endpoints annotations and labels to the generated endpoint slices, and also ensures that updates on any of these fields are mirrored. + The well-known annotation `endpoints.kubernetes.io/last-change-trigger-time` is skipped and not mirrored. ([#98116](https://github.com/kubernetes/kubernetes/pull/98116), [@aojea](https://github.com/aojea)) +- The `RunAsGroup` feature has been promoted to GA in this release. ([#94641](https://github.com/kubernetes/kubernetes/pull/94641), [@krmayankk](https://github.com/krmayankk)) [SIG Auth and Node] +- The `ServiceAccountIssuerDiscovery` feature has graduated to GA, and is unconditionally enabled. The `ServiceAccountIssuerDiscovery` feature-gate will be removed in 1.22. ([#98553](https://github.com/kubernetes/kubernetes/pull/98553), [@mtaufen](https://github.com/mtaufen)) [SIG API Machinery, Auth and Testing] +- The `TTLAfterFinished` feature flag is now beta and enabled by default (see the sketch below) ([#98678](https://github.com/kubernetes/kubernetes/pull/98678), [@ahg-g](https://github.com/ahg-g)) +- The apimachinery util/net function used to detect the bind address, `ResolveBindAddress()`, takes global IP addresses on loopback interfaces into consideration when 1) the host has default routes, or 2) there are no global IPs on those interfaces, in order to support more complex network scenarios like BGP Unnumbered RFC 5549 ([#95790](https://github.com/kubernetes/kubernetes/pull/95790), [@aojea](https://github.com/aojea)) [SIG Network] +- The feature gate `RootCAConfigMap` graduated to GA in v1.21 and therefore will be unconditionally enabled. The flag will be removed in the v1.22 release. ([#98033](https://github.com/kubernetes/kubernetes/pull/98033), [@zshihang](https://github.com/zshihang)) +- The pause image has been upgraded to `v3.4.1` in kubelet and kubeadm for both Linux and Windows. ([#98205](https://github.com/kubernetes/kubernetes/pull/98205), [@pacoxu](https://github.com/pacoxu)) +- Update pause container to run as pseudo user and group `65535:65535`. This implies the release of version 3.5 of the container images. ([#97963](https://github.com/kubernetes/kubernetes/pull/97963), [@saschagrunert](https://github.com/saschagrunert)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Node, Release, Security and Testing]
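Since `TTLAfterFinished` above is now on by default, a Job can opt in to automatic cleanup; a minimal sketch (names and values are illustrative):

```shell
# The Job object is garbage-collected 100 seconds after it finishes.
kubectl apply -f - <<'EOF'
apiVersion: batch/v1
kind: Job
metadata:
  name: ttl-demo
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: worker
        image: busybox
        command: ["sh", "-c", "echo done"]
EOF
```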
+- Update the latest validated version of Docker to 20.10 ([#98977](https://github.com/kubernetes/kubernetes/pull/98977), [@neolit123](https://github.com/neolit123)) [SIG CLI, Cluster Lifecycle and Node] +- Upgrade node local dns to 1.17.0 for better IPv6 support ([#99749](https://github.com/kubernetes/kubernetes/pull/99749), [@pacoxu](https://github.com/pacoxu)) [SIG Cloud Provider and Network] +- Upgrades `IPv6Dualstack` to `Beta` and turns it on by default. New or existing clusters are not affected until an actor starts adding secondary Pod and Service CIDR CLI flags as described here: [IPv4/IPv6 Dual-stack](https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/563-dual-stack) ([#98969](https://github.com/kubernetes/kubernetes/pull/98969), [@khenidak](https://github.com/khenidak)) +- Users can specify the `kubectl.kubernetes.io/default-container` annotation in a Pod to preselect a container for kubectl commands. ([#99581](https://github.com/kubernetes/kubernetes/pull/99581), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG CLI] +- When downscaling ReplicaSets, ready and creation timestamps are compared in a logarithmic scale. ([#99212](https://github.com/kubernetes/kubernetes/pull/99212), [@damemi](https://github.com/damemi)) [SIG Apps and Testing] +- When the kubelet is watching a ConfigMap or Secret purely in the context of setting environment variables + for containers, only hold that watch for a defined duration before cancelling it. This change reduces the CPU + and memory usage of the kube-apiserver in large clusters. ([#99393](https://github.com/kubernetes/kubernetes/pull/99393), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery, Node and Testing] +- WindowsEndpointSliceProxying feature gate has graduated to beta and is enabled by default. This means kube-proxy will read from EndpointSlices instead of Endpoints on Windows by default. ([#99794](https://github.com/kubernetes/kubernetes/pull/99794), [@robscott](https://github.com/robscott)) [SIG Network] +- `kubectl wait` ensures that observedGeneration >= generation to prevent stale state reporting (see the sketch below). An example scenario is CRD updates. ([#97408](https://github.com/kubernetes/kubernetes/pull/97408), [@KnicKnic](https://github.com/KnicKnic)) ### Documentation -- Fake dynamic client: document that List does not preserve TypeMeta in UnstructuredList ([#95117](https://github.com/kubernetes/kubernetes/pull/95117), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] -- Kubelet: remove alpha warnings for CNI flags. ([#94508](https://github.com/kubernetes/kubernetes/pull/94508), [@andrewsykim](https://github.com/andrewsykim)) [SIG Network and Node]
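A brief sketch of the `kubectl wait` fix noted above (the deployment name is a placeholder):

```shell
# kubectl wait now also checks that .status.observedGeneration has
# caught up with .metadata.generation, so it no longer reports success
# based on status left over from an older generation of the object.
kubectl wait --for=condition=Available deployment/my-app --timeout=120s
```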
-- Updates docs and guidance on cloud provider InstancesV2 and Zones interface for external cloud providers: - - removes experimental warning for InstancesV2 - - document that implementation of InstancesV2 will disable calls to Zones - - deprecate Zones in favor of InstancesV2 ([#96397](https://github.com/kubernetes/kubernetes/pull/96397), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider] +- Azure file migration graduates to beta, with the CSIMigrationAzureFile flag off by default + as it requires installation of the AzureFile CSI Driver. Users should enable the CSIMigration and + CSIMigrationAzureFile features and install the [AzureFile CSI Driver](https://github.com/kubernetes-sigs/azurefile-csi-driver) + to avoid disruption to existing Pod and PVC objects at that time. The Azure File CSI driver does not support using the same persistent + volume with different fsGroups; when CSI migration is enabled for the azurefile driver, such a case is not supported + (there is one supported case, where the volume is mounted with 0777 and is then readable/writable by everyone). ([#96293](https://github.com/kubernetes/kubernetes/pull/96293), [@andyzhangx](https://github.com/andyzhangx)) +- Official support to build kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97935](https://github.com/kubernetes/kubernetes/pull/97935), [@adeniyistephen](https://github.com/adeniyistephen)) [SIG Release and Testing] +- Set the kubelet option `--volume-stats-agg-period` to a negative value to disable volume calculations. ([#96675](https://github.com/kubernetes/kubernetes/pull/96675), [@pacoxu](https://github.com/pacoxu)) [SIG Node] ### Failing Test -- Resolves an issue running Ingress conformance tests on clusters which use finalizers on Ingress objects to manage releasing load balancer resources ([#96742](https://github.com/kubernetes/kubernetes/pull/96742), [@spencerhance](https://github.com/spencerhance)) [SIG Network and Testing] -- The Conformance test "validates that there is no conflict between pods with same hostPort but different hostIP and protocol" now validates the connectivity to each hostPort, in addition to the functionality. ([#96627](https://github.com/kubernetes/kubernetes/pull/96627), [@aojea](https://github.com/aojea)) [SIG Scheduling and Testing] +- Escape special characters like `[`, `]` and ` ` that exist in the vSphere Windows path ([#98830](https://github.com/kubernetes/kubernetes/pull/98830), [@liyanhui1228](https://github.com/liyanhui1228)) [SIG Storage and Windows] +- Kube-proxy: fix a bug on UDP `NodePort` Services where stale connection tracking entries may blackhole the traffic directed to the `NodePort` ([#98305](https://github.com/kubernetes/kubernetes/pull/98305), [@aojea](https://github.com/aojea)) +- Kubelet: fixes a bug in the HostPort dockershim implementation that caused the conformance test "HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol" to fail.
([#98755](https://github.com/kubernetes/kubernetes/pull/98755), [@aojea](https://github.com/aojea)) [SIG Cloud Provider, Network and Node] ### Bug or Regression -- Add kubectl wait --ignore-not-found flag ([#90969](https://github.com/kubernetes/kubernetes/pull/90969), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- Added support to kube-proxy for externalTrafficPolicy=Local setting via Direct Server Return (DSR) load balancers on Windows. ([#93166](https://github.com/kubernetes/kubernetes/pull/93166), [@elweb9858](https://github.com/elweb9858)) [SIG Network] -- Alter wording to describe pods using a pvc ([#95635](https://github.com/kubernetes/kubernetes/pull/95635), [@RaunakShah](https://github.com/RaunakShah)) [SIG CLI] -- An issue preventing the volume expand controller from annotating the PVC with `volume.kubernetes.io/storage-resizer` when the PVC StorageClass is already updated to the out-of-tree provisioner is now fixed. ([#94489](https://github.com/kubernetes/kubernetes/pull/94489), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG API Machinery, Apps and Storage] -- Azure ARM client: don't segfault on empty response and http error ([#94078](https://github.com/kubernetes/kubernetes/pull/94078), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- Azure armclient backoff step defaults to 1 (no retry). ([#94180](https://github.com/kubernetes/kubernetes/pull/94180), [@feiskyer](https://github.com/feiskyer)) -- Azure: fix a bug that kube-controller-manager would panic if a wrong Azure VMSS name is configured ([#94306](https://github.com/kubernetes/kubernetes/pull/94306), [@knight42](https://github.com/knight42)) [SIG Cloud Provider] -- Both the apiserver_request_duration_seconds metric and the RequestReceivedTimestamp field of an audit event now take into account the time a request spends in the apiserver request filters. ([#94903](https://github.com/kubernetes/kubernetes/pull/94903), [@tkashem](https://github.com/tkashem)) -- Build/lib/release: Explicitly use '--platform' in building server images - - When we switched to go-runner for building the apiserver, controller-manager, and scheduler server components, we no longer reference the individual architectures in the image names, specifically in the 'FROM' directive of the server image Dockerfiles. - - As a result, server images for non-amd64 images copy in the go-runner amd64 binary instead of the go-runner that matches that architecture. - - This commit explicitly sets the '--platform=linux/${arch}' to ensure we're pulling the correct go-runner arch from the manifest list. - - Before: `FROM ${base_image}` - - After: `FROM --platform=linux/${arch} ${base_image}` ([#94552](https://github.com/kubernetes/kubernetes/pull/94552), [@justaugustus](https://github.com/justaugustus)) [SIG Release] -- Bump node-problem-detector version to v0.8.5 to fix OOM detection with Linux kernels 5.1+ ([#96716](https://github.com/kubernetes/kubernetes/pull/96716), [@tosi3k](https://github.com/tosi3k)) [SIG Cloud Provider, Scalability and Testing] -- CSIDriver object can be deployed during volume attachment. ([#93710](https://github.com/kubernetes/kubernetes/pull/93710), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Node, Storage and Testing] -- Ceph RBD volume expansion now works even when ceph.conf was not provided.
([#92027](https://github.com/kubernetes/kubernetes/pull/92027), [@juliantaylor](https://github.com/juliantaylor)) -- Change plugin name in fsgroupapplymetrics of csi and flexvolume to distinguish different drivers ([#95892](https://github.com/kubernetes/kubernetes/pull/95892), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation, Storage and Testing] -- Change the calculation of pod UIDs so that static pods get a unique value; this will cause all containers to be killed and recreated after an in-place upgrade. ([#87461](https://github.com/kubernetes/kubernetes/pull/87461), [@bboreham](https://github.com/bboreham)) [SIG Node] -- Change the mount method from systemd to a normal mount, except for the ceph and glusterfs in-tree volumes. ([#94916](https://github.com/kubernetes/kubernetes/pull/94916), [@smileusd](https://github.com/smileusd)) [SIG Apps, Cloud Provider, Network, Node, Storage and Testing] -- Changes to timeout parameter handling in 1.20.0-beta.2 have been reverted to avoid breaking backwards compatibility with existing clients. ([#96727](https://github.com/kubernetes/kubernetes/pull/96727), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Testing] -- Clear UDP conntrack entry on endpoint changes when using nodeport ([#71573](https://github.com/kubernetes/kubernetes/pull/71573), [@JacobTanenbaum](https://github.com/JacobTanenbaum)) [SIG Network] -- Cloud node controller: handle empty providerID from getProviderID ([#95342](https://github.com/kubernetes/kubernetes/pull/95342), [@nicolehanjing](https://github.com/nicolehanjing)) [SIG Cloud Provider] -- Disable watchcache for events ([#96052](https://github.com/kubernetes/kubernetes/pull/96052), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery] -- A disabled `LocalStorageCapacityIsolation` feature gate is now honored during scheduling. ([#96092](https://github.com/kubernetes/kubernetes/pull/96092), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] -- Do not fail sorting empty elements. ([#94666](https://github.com/kubernetes/kubernetes/pull/94666), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- Dual-stack: make nodeipam compatible with existing single-stack clusters when the dual-stack feature gate becomes enabled by default ([#90439](https://github.com/kubernetes/kubernetes/pull/90439), [@SataQiu](https://github.com/SataQiu)) [SIG API Machinery] -- Duplicate owner reference entries in create/update/patch requests now get deduplicated by the API server. The client sending the request now receives a warning header in the API response. Clients should stop sending requests with duplicate owner references. The API server may reject such requests as early as 1.24. ([#96185](https://github.com/kubernetes/kubernetes/pull/96185), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery and Testing] -- Endpoint slice controller now mirrors parent's service label to its corresponding endpoint slices.
([#94443](https://github.com/kubernetes/kubernetes/pull/94443), [@aojea](https://github.com/aojea)) -- Ensure getPrimaryInterfaceID does not panic when network interfaces for Azure VMSS are null ([#94355](https://github.com/kubernetes/kubernetes/pull/94355), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Exposes and sets a default timeout for the SubjectAccessReview client for DelegatingAuthorizationOptions ([#95725](https://github.com/kubernetes/kubernetes/pull/95725), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider] -- Exposes and sets a default timeout for the TokenReview client for DelegatingAuthenticationOptions ([#96217](https://github.com/kubernetes/kubernetes/pull/96217), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider] -- Fix CVE-2020-8555 for Quobyte client connections. ([#95206](https://github.com/kubernetes/kubernetes/pull/95206), [@misterikkit](https://github.com/misterikkit)) [SIG Storage] -- Fix "IP fragmentation of UDP and TCP packets not supported" issues on LoadBalancer rules ([#96464](https://github.com/kubernetes/kubernetes/pull/96464), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Fix a bug where the DefaultPreemption plugin was disabled when using (legacy) scheduler policy. ([#96439](https://github.com/kubernetes/kubernetes/pull/96439), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing] -- Fix a bug where loadbalancer deletion gets stuck because of missing resource group. ([#93962](https://github.com/kubernetes/kubernetes/pull/93962), [@phiphi282](https://github.com/phiphi282)) -- Fix a concurrent map writes error in kubelet ([#93773](https://github.com/kubernetes/kubernetes/pull/93773), [@knight42](https://github.com/knight42)) [SIG Node] -- Fix a panic in `kubectl debug` when a pod has multiple init or ephemeral containers. ([#94580](https://github.com/kubernetes/kubernetes/pull/94580), [@kiyoshim55](https://github.com/kiyoshim55)) -- Fix a regression where kubeadm bails out with a fatal error when an optional version command line argument is supplied to the "kubeadm upgrade plan" command ([#94421](https://github.com/kubernetes/kubernetes/pull/94421), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle] -- Fix azure disk attach failure for disk sizes larger than 4TB ([#95463](https://github.com/kubernetes/kubernetes/pull/95463), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix azure disk data loss issue on Windows when unmounting a disk ([#95456](https://github.com/kubernetes/kubernetes/pull/95456), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix azure file migration panic ([#94853](https://github.com/kubernetes/kubernetes/pull/94853), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix bug in JSON path parser where an error occurs when a range is empty ([#95933](https://github.com/kubernetes/kubernetes/pull/95933), [@brianpursley](https://github.com/brianpursley)) [SIG API Machinery] -- Fix client-go prometheus metrics to correctly present the API path accessed in some environments.
([#74363](https://github.com/kubernetes/kubernetes/pull/74363), [@aanm](https://github.com/aanm)) [SIG API Machinery] -- Fix detach azure disk issue when the VM does not exist ([#95177](https://github.com/kubernetes/kubernetes/pull/95177), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix etcd_object_counts metric reported by kube-apiserver ([#94773](https://github.com/kubernetes/kubernetes/pull/94773), [@tkashem](https://github.com/tkashem)) [SIG API Machinery] -- Fix incorrectly reported verbs for kube-apiserver metrics for CRD objects ([#93523](https://github.com/kubernetes/kubernetes/pull/93523), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation] -- Fix k8s.io/apimachinery/pkg/api/meta.SetStatusCondition to update ObservedGeneration ([#95961](https://github.com/kubernetes/kubernetes/pull/95961), [@KnicKnic](https://github.com/KnicKnic)) [SIG API Machinery] -- Fix kubectl SchemaError on CRDs with schema using x-kubernetes-preserve-unknown-fields on array types. ([#94888](https://github.com/kubernetes/kubernetes/pull/94888), [@sttts](https://github.com/sttts)) [SIG API Machinery] -- Fix memory leak in kube-apiserver when the underlying time goes back and forth. ([#96266](https://github.com/kubernetes/kubernetes/pull/96266), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery] -- Fix missing csi annotations on node during parallel csinode update. ([#94389](https://github.com/kubernetes/kubernetes/pull/94389), [@pacoxu](https://github.com/pacoxu)) [SIG Storage] -- Fix network_programming_latency metric reporting for Endpoints/EndpointSlice deletions, where we don't have a correct timestamp ([#95363](https://github.com/kubernetes/kubernetes/pull/95363), [@wojtek-t](https://github.com/wojtek-t)) [SIG Network and Scalability] -- Fix paging issues when Azure API returns empty values with non-empty nextLink ([#96211](https://github.com/kubernetes/kubernetes/pull/96211), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fix pull image error from multiple ACRs using azure managed identity ([#96355](https://github.com/kubernetes/kubernetes/pull/96355), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix race condition on timeCache locks. ([#94751](https://github.com/kubernetes/kubernetes/pull/94751), [@auxten](https://github.com/auxten)) -- Fix regression on `kubectl portforward` when TCP and UDP services were configured on the same port. ([#94728](https://github.com/kubernetes/kubernetes/pull/94728), [@amorenoz](https://github.com/amorenoz)) -- Fix scheduler cache snapshot when a Node is deleted before its Pods ([#95130](https://github.com/kubernetes/kubernetes/pull/95130), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Fix the `cloudprovider_azure_api_request_duration_seconds` metric buckets to correctly capture the latency metrics. Previously, the majority of the calls would fall in the "+Inf" bucket.
([#94873](https://github.com/kubernetes/kubernetes/pull/94873), [@marwanad](https://github.com/marwanad)) [SIG Cloud Provider and Instrumentation] -- Fix vSphere volumes that could be erroneously attached to the wrong node ([#96224](https://github.com/kubernetes/kubernetes/pull/96224), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fix verb & scope reporting for kube-apiserver metrics (LIST reported instead of GET) ([#95562](https://github.com/kubernetes/kubernetes/pull/95562), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Fix vsphere detach failure for static PVs ([#95447](https://github.com/kubernetes/kubernetes/pull/95447), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fix: azure disk resize error if source does not exist ([#93011](https://github.com/kubernetes/kubernetes/pull/93011), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: detach azure disk broken on Azure Stack ([#94885](https://github.com/kubernetes/kubernetes/pull/94885), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: resize Azure disk issue when it's in attached state ([#96705](https://github.com/kubernetes/kubernetes/pull/96705), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: smb valid path error ([#95583](https://github.com/kubernetes/kubernetes/pull/95583), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] -- Fix: use sensitiveOptions on Windows mount ([#94126](https://github.com/kubernetes/kubernetes/pull/94126), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fixed a bug causing incorrect formatting of `kubectl describe ingress`. ([#94985](https://github.com/kubernetes/kubernetes/pull/94985), [@howardjohn](https://github.com/howardjohn)) [SIG CLI and Network] -- Fixed a bug in client-go where new clients with customized `Dial`, `Proxy`, `GetCert` config may get stale HTTP transports. ([#95427](https://github.com/kubernetes/kubernetes/pull/95427), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] -- Fixed a bug that prevented kubectl from validating CRDs with schema using x-kubernetes-preserve-unknown-fields on object fields. ([#96369](https://github.com/kubernetes/kubernetes/pull/96369), [@gautierdelorme](https://github.com/gautierdelorme)) [SIG API Machinery and Testing] -- Fixed a bug that prevented the use of ephemeral containers in the presence of a validating admission webhook. ([#94685](https://github.com/kubernetes/kubernetes/pull/94685), [@verb](https://github.com/verb)) [SIG Node and Testing] -- Fixed a bug where aggregator_unavailable_apiservice metrics were reported for deleted apiservices.
([#96421](https://github.com/kubernetes/kubernetes/pull/96421), [@dgrisonnet](https://github.com/dgrisonnet)) [SIG API Machinery and Instrumentation] -- Fixed a bug where improper storage and comparison of endpoints led to excessive API traffic from the endpoints controller ([#94112](https://github.com/kubernetes/kubernetes/pull/94112), [@damemi](https://github.com/damemi)) [SIG Apps, Network and Testing] -- Fixed a regression which prevented pods with `docker/default` seccomp annotations from being created in 1.19 if a PodSecurityPolicy was in place which did not allow `runtime/default` seccomp profiles. ([#95985](https://github.com/kubernetes/kubernetes/pull/95985), [@saschagrunert](https://github.com/saschagrunert)) [SIG Auth] -- Fixed bug in reflector that couldn't recover from "Too large resource version" errors with API servers 1.17.0-1.18.5 ([#94316](https://github.com/kubernetes/kubernetes/pull/94316), [@janeczku](https://github.com/janeczku)) [SIG API Machinery] -- Fixed bug where kubectl top pod output was not sorted when --sort-by and --containers flags were used together ([#93692](https://github.com/kubernetes/kubernetes/pull/93692), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] -- Fixed kubelet creating extra sandbox for pods with RestartPolicyOnFailure after all containers succeeded ([#92614](https://github.com/kubernetes/kubernetes/pull/92614), [@tnqn](https://github.com/tnqn)) [SIG Node and Testing] -- Fixes an issue proxying to ipv6 pods without specifying a port ([#94834](https://github.com/kubernetes/kubernetes/pull/94834), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Network] -- Fixes code generation for non-namespaced create subresources fake client test. ([#96586](https://github.com/kubernetes/kubernetes/pull/96586), [@Doude](https://github.com/Doude)) [SIG API Machinery] -- Fixes high CPU usage in kubectl drain ([#95260](https://github.com/kubernetes/kubernetes/pull/95260), [@amandahla](https://github.com/amandahla)) [SIG CLI] -- For the vSphere Cloud Provider, if the VM of a worker node is deleted, the node will also be deleted by the node controller ([#92608](https://github.com/kubernetes/kubernetes/pull/92608), [@lubronzhan](https://github.com/lubronzhan)) [SIG Cloud Provider] -- Gracefully delete nodes when their parent scale set went missing ([#95289](https://github.com/kubernetes/kubernetes/pull/95289), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- HTTP/2 connection health check is enabled by default in all Kubernetes clients. The feature should work out-of-the-box. If needed, users can tune the feature via the HTTP2_READ_IDLE_TIMEOUT_SECONDS and HTTP2_PING_TIMEOUT_SECONDS environment variables (see the sketch below). The feature is disabled if HTTP2_READ_IDLE_TIMEOUT_SECONDS is set to 0. ([#95981](https://github.com/kubernetes/kubernetes/pull/95981), [@caesarxuchao](https://github.com/caesarxuchao)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] -- If the user specifies an invalid timeout in the request URL, the request will be aborted with an HTTP 400. - - If the user specifies a timeout in the request URL that exceeds the maximum request deadline allowed by the apiserver, the request will be aborted with an HTTP 400. ([#96061](https://github.com/kubernetes/kubernetes/pull/96061), [@tkashem](https://github.com/tkashem)) [SIG API Machinery, Network and Testing]
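The HTTP/2 health check entry above can be tuned on any client built on client-go, for example (timeout values are illustrative):

```shell
# Send an HTTP/2 ping after 20s of read inactivity and close the
# connection if no response arrives within 10s; setting
# HTTP2_READ_IDLE_TIMEOUT_SECONDS=0 disables the health check.
HTTP2_READ_IDLE_TIMEOUT_SECONDS=20 HTTP2_PING_TIMEOUT_SECONDS=10 kubectl get pods
```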
-- If we set SelectPolicy MinPolicySelect on the scaleUp or scaleDown behavior, the Horizontal Pod Autoscaler doesn't automatically scale the number of pods correctly ([#95647](https://github.com/kubernetes/kubernetes/pull/95647), [@JoshuaAndrew](https://github.com/JoshuaAndrew)) [SIG Apps and Autoscaling] -- Ignore apparmor for non-linux operating systems ([#93220](https://github.com/kubernetes/kubernetes/pull/93220), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- Ignore root user check when windows pod starts ([#92355](https://github.com/kubernetes/kubernetes/pull/92355), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- Improve error messages related to conntrack entry cleanup on nodePort endpoint changes. ([#96251](https://github.com/kubernetes/kubernetes/pull/96251), [@ravens](https://github.com/ravens)) [SIG Network] -- In dual-stack clusters, kubelet will now set up both IPv4 and IPv6 iptables rules, which may - fix some problems, e.g. with HostPorts. ([#94474](https://github.com/kubernetes/kubernetes/pull/94474), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] -- Increase maximum IOPS of AWS EBS io1 volume to current maximum (64,000). ([#90014](https://github.com/kubernetes/kubernetes/pull/90014), [@jacobmarble](https://github.com/jacobmarble)) -- Ipvs: ensure selected scheduler kernel modules are loaded ([#93040](https://github.com/kubernetes/kubernetes/pull/93040), [@cmluciano](https://github.com/cmluciano)) [SIG Network] -- K8s.io/apimachinery: runtime.DefaultUnstructuredConverter.FromUnstructured now handles converting integer fields to typed float values ([#93250](https://github.com/kubernetes/kubernetes/pull/93250), [@liggitt](https://github.com/liggitt)) [SIG API Machinery] -- Kube-proxy now trims extra spaces found in loadBalancerSourceRanges to match Service validation. ([#94107](https://github.com/kubernetes/kubernetes/pull/94107), [@robscott](https://github.com/robscott)) [SIG Network] -- Kubeadm ensures "kubeadm reset" does not unmount the root "/var/lib/kubelet" directory if it is mounted by the user. ([#93702](https://github.com/kubernetes/kubernetes/pull/93702), [@thtanaka](https://github.com/thtanaka)) -- Kubeadm now makes sure the etcd manifest is regenerated upon upgrade even when no etcd version change takes place ([#94395](https://github.com/kubernetes/kubernetes/pull/94395), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle] -- Kubeadm now warns (instead of erroring out) on missing "ca.key" files for root CA, front-proxy CA and etcd CA, during "kubeadm join --control-plane" if the user has provided all certificates, keys and kubeconfig files which require signing with the given CA keys.
([#94988](https://github.com/kubernetes/kubernetes/pull/94988), [@neolit123](https://github.com/neolit123)) -- Kubeadm: add missing "--experimental-patches" flag to "kubeadm init phase control-plane" ([#95786](https://github.com/kubernetes/kubernetes/pull/95786), [@Sh4d1](https://github.com/Sh4d1)) [SIG Cluster Lifecycle] -- Kubeadm: avoid a panic when determining if the running version of CoreDNS is supported during upgrades ([#94299](https://github.com/kubernetes/kubernetes/pull/94299), [@zouyee](https://github.com/zouyee)) [SIG Cluster Lifecycle] -- Kubeadm: ensure the etcd data directory is created with 0700 permissions during control-plane init and join ([#94102](https://github.com/kubernetes/kubernetes/pull/94102), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: fix the bug that CoreDNS migration was not triggered when there were new default configs during kubeadm upgrade ([#96907](https://github.com/kubernetes/kubernetes/pull/96907), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] -- Kubeadm: fix the bug that kubeadm tries to call 'docker info' even if the CRI socket was for another CRI ([#94555](https://github.com/kubernetes/kubernetes/pull/94555), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- Kubeadm: for Docker as the container runtime, make the "kubeadm reset" command stop containers before removing them ([#94586](https://github.com/kubernetes/kubernetes/pull/94586), [@BedivereZero](https://github.com/BedivereZero)) [SIG Cluster Lifecycle] -- Kubeadm: make the kubeconfig files for the kube-controller-manager and kube-scheduler use the LocalAPIEndpoint instead of the ControlPlaneEndpoint. This makes kubeadm clusters more resilient to version skew problems during immutable upgrades: https://kubernetes.io/docs/setup/release/version-skew-policy/#kube-controller-manager-kube-scheduler-and-cloud-controller-manager ([#94398](https://github.com/kubernetes/kubernetes/pull/94398), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: relax the validation of kubeconfig server URLs. Allow the user to define custom kubeconfig server URLs without erroring out during validation of existing kubeconfig files (e.g. when using external CA mode). ([#94816](https://github.com/kubernetes/kubernetes/pull/94816), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubectl: print error if users place flags before plugin name ([#92343](https://github.com/kubernetes/kubernetes/pull/92343), [@knight42](https://github.com/knight42)) [SIG CLI] -- Kubelet: assume that swap is disabled when `/proc/swaps` does not exist ([#93931](https://github.com/kubernetes/kubernetes/pull/93931), [@SataQiu](https://github.com/SataQiu)) [SIG Node] -- New Azure instance types now have correct max data disk count information. ([#94340](https://github.com/kubernetes/kubernetes/pull/94340), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Cloud Provider and Storage] -- Port mapping now allows the same `containerPort` of different containers to map to different `hostPort` values without naming the mapping explicitly.
([#94494](https://github.com/kubernetes/kubernetes/pull/94494), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) -- Print go stack traces at -v=4 and not -v=2 ([#94663](https://github.com/kubernetes/kubernetes/pull/94663), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- Recreate EndpointSlices on rapid Service creation. ([#94730](https://github.com/kubernetes/kubernetes/pull/94730), [@robscott](https://github.com/robscott)) -- Reduce volume name length for vsphere volumes ([#96533](https://github.com/kubernetes/kubernetes/pull/96533), [@gnufied](https://github.com/gnufied)) [SIG Storage] -- Remove ready file and its directory (which is created during volume SetUp) during emptyDir volume TearDown. ([#95770](https://github.com/kubernetes/kubernetes/pull/95770), [@jingxu97](https://github.com/jingxu97)) [SIG Storage] -- Reorganized iptables rules to fix a performance issue ([#95252](https://github.com/kubernetes/kubernetes/pull/95252), [@tssurya](https://github.com/tssurya)) [SIG Network] -- Require feature flag CustomCPUCFSQuotaPeriod if setting a non-default cpuCFSQuotaPeriod in kubelet config. ([#94687](https://github.com/kubernetes/kubernetes/pull/94687), [@karan](https://github.com/karan)) [SIG Node] -- Resolves a regression in 1.19+ with workloads targeting deprecated beta os/arch labels getting stuck in NodeAffinity status on node startup. ([#96810](https://github.com/kubernetes/kubernetes/pull/96810), [@liggitt](https://github.com/liggitt)) [SIG Node] -- Resolves non-deterministic behavior of the garbage collection controller when ownerReferences with incorrect data are encountered. Events with a reason of `OwnerRefInvalidNamespace` are recorded when namespace mismatches between child and owner objects are detected. The [kubectl-check-ownerreferences](https://github.com/kubernetes-sigs/kubectl-check-ownerreferences) tool can be run prior to upgrading to locate existing objects with invalid ownerReferences. - - A namespaced object with an ownerReference referencing a uid of a namespaced kind which does not exist in the same namespace is now consistently treated as though that owner does not exist, and the child object is deleted. - - A cluster-scoped object with an ownerReference referencing a uid of a namespaced kind is now consistently treated as though that owner is not resolvable, and the child object is ignored by the garbage collector. ([#92743](https://github.com/kubernetes/kubernetes/pull/92743), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Testing] -- Skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:162]: Driver azure-disk doesn't support snapshot type DynamicSnapshot -- skipping - skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:185]: Driver azure-disk doesn't support ntfs -- skipping ([#96144](https://github.com/kubernetes/kubernetes/pull/96144), [@qinpingli](https://github.com/qinpingli)) [SIG Storage and Testing] -- StatefulSet Controller now waits for PersistentVolumeClaim deletion before creating pods. ([#93457](https://github.com/kubernetes/kubernetes/pull/93457), [@ymmt2005](https://github.com/ymmt2005)) -- StreamWatcher now calls HandleCrash at appropriate sequence. 
([#93108](https://github.com/kubernetes/kubernetes/pull/93108), [@lixiaobing1](https://github.com/lixiaobing1)) -- Support the node label `node.kubernetes.io/exclude-from-external-load-balancers` ([#95542](https://github.com/kubernetes/kubernetes/pull/95542), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- The AWS network load balancer attributes can now be specified during service creation ([#95247](https://github.com/kubernetes/kubernetes/pull/95247), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] -- The `/debug/api_priority_and_fairness/dump_requests` path at an apiserver will no longer return a phantom line for each exempt priority level. ([#93406](https://github.com/kubernetes/kubernetes/pull/93406), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery] -- The kube-apiserver will no longer serve APIs that should have been deleted in GA non-alpha levels. Alpha levels will continue to serve the removed APIs so that CI doesn't immediately break. ([#96525](https://github.com/kubernetes/kubernetes/pull/96525), [@deads2k](https://github.com/deads2k)) [SIG API Machinery] -- The kubelet recognizes the --containerd-namespace flag to configure the namespace used by cadvisor. ([#87054](https://github.com/kubernetes/kubernetes/pull/87054), [@changyaowei](https://github.com/changyaowei)) [SIG Node] -- Unhealthy pods covered by PDBs can be successfully evicted if enough healthy pods are available. ([#94381](https://github.com/kubernetes/kubernetes/pull/94381), [@michaelgugino](https://github.com/michaelgugino)) [SIG Apps] -- Update Calico to v3.15.2 ([#94241](https://github.com/kubernetes/kubernetes/pull/94241), [@lmm](https://github.com/lmm)) [SIG Cloud Provider] -- Update default etcd server version to 3.4.13 ([#94287](https://github.com/kubernetes/kubernetes/pull/94287), [@jingyih](https://github.com/jingyih)) [SIG API Machinery, Cloud Provider, Cluster Lifecycle and Testing] -- Update max azure data disk count map ([#96308](https://github.com/kubernetes/kubernetes/pull/96308), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Update the PIP when it is not in the Succeeded provisioning state during the LB update. ([#95748](https://github.com/kubernetes/kubernetes/pull/95748), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Update the frontend IP config when the service's `pipName` annotation is changed ([#95813](https://github.com/kubernetes/kubernetes/pull/95813), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Update the route table tag in the route reconcile loop ([#96545](https://github.com/kubernetes/kubernetes/pull/96545), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Use NLB Subnet CIDRs instead of VPC CIDRs in Health Check SG Rules ([#93515](https://github.com/kubernetes/kubernetes/pull/93515), [@t0rr3sp3dr0](https://github.com/t0rr3sp3dr0)) [SIG Cloud Provider] -- Users will see an increase in pod deletion time, and in return a guarantee that removing a pod from the API server means all of its resources have been deleted from the container runtime.
-- Very large patches may now be specified to `kubectl patch` with the `--patch-file` flag instead of including them directly on the command line. The `--patch` and `--patch-file` flags are mutually exclusive. ([#93548](https://github.com/kubernetes/kubernetes/pull/93548), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI]
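For illustration, a minimal sketch of the new flag; the deployment name `my-app` and the patch file path are placeholders:

```bash
# Write a (potentially very large) patch to a file instead of passing it inline.
cat > /tmp/big-patch.json <<'EOF'
{"spec":{"template":{"metadata":{"labels":{"patched":"true"}}}}}
EOF

# --patch-file replaces --patch here; the two flags are mutually exclusive.
kubectl patch deployment my-app --type merge --patch-file /tmp/big-patch.json
```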
-- Volume binding: report UnschedulableAndUnresolvable status instead of an error when bound PVs are not found ([#95541](https://github.com/kubernetes/kubernetes/pull/95541), [@cofyc](https://github.com/cofyc)) [SIG Apps, Scheduling and Storage]
-- Warn instead of fail when creating Roles and ClusterRoles with custom verbs via kubectl ([#92492](https://github.com/kubernetes/kubernetes/pull/92492), [@eddiezane](https://github.com/eddiezane)) [SIG CLI]
-- When creating a PVC with the volume.beta.kubernetes.io/storage-provisioner annotation already set, the PV controller might have incorrectly deleted the newly provisioned PV instead of binding it to the PVC, depending on timing and system load. ([#95909](https://github.com/kubernetes/kubernetes/pull/95909), [@pohly](https://github.com/pohly)) [SIG Apps and Storage]
-- [kubectl] Fail when the local source file doesn't exist ([#90333](https://github.com/kubernetes/kubernetes/pull/90333), [@bamarni](https://github.com/bamarni)) [SIG CLI]
+- AcceleratorStats will be available in the Summary API of kubelet when cri_stats_provider is used. ([#96873](https://github.com/kubernetes/kubernetes/pull/96873), [@ruiwen-zhao](https://github.com/ruiwen-zhao)) [SIG Node]
+- All data is no longer automatically deleted when a failure is detected during creation of the volume data file on a CSI volume. Now only the data file and volume path are removed. ([#96021](https://github.com/kubernetes/kubernetes/pull/96021), [@huffmanca](https://github.com/huffmanca))
+- Clean up ReplicaSets by revision instead of creation timestamp in the deployment controller ([#97407](https://github.com/kubernetes/kubernetes/pull/97407), [@waynepeking348](https://github.com/waynepeking348)) [SIG Apps]
+- Clean up the subnet in frontend IP configs to prevent huge subnet request bodies in some scenarios. ([#98133](https://github.com/kubernetes/kubernetes/pull/98133), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
+- Client-go exec credential plugins will pass stdin only when an interactive terminal is detected on stdin. This fixes a bug where previously it was checking whether **stdout** is an interactive terminal. ([#99654](https://github.com/kubernetes/kubernetes/pull/99654), [@ankeesler](https://github.com/ankeesler))
+- Cloud-controller-manager: the routes controller should not depend on --allocate-node-cidrs ([#97029](https://github.com/kubernetes/kubernetes/pull/97029), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider and Testing]
+- Cluster Autoscaler version bump to v1.20.0 ([#97011](https://github.com/kubernetes/kubernetes/pull/97011), [@towca](https://github.com/towca))
+- Creating a PVC with DataSource should fail for non-CSI plugins. ([#97086](https://github.com/kubernetes/kubernetes/pull/97086), [@xing-yang](https://github.com/xing-yang)) [SIG Apps and Storage]
+- EndpointSlice controller is now less likely to emit FailedToUpdateEndpointSlices events. ([#99345](https://github.com/kubernetes/kubernetes/pull/99345), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
+- EndpointSlice controllers are less likely to create duplicate EndpointSlices. ([#100103](https://github.com/kubernetes/kubernetes/pull/100103), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
+- EndpointSliceMirroring controller is now less likely to emit FailedToUpdateEndpointSlices events. ([#99756](https://github.com/kubernetes/kubernetes/pull/99756), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
+- Ensure all vSphere nodes are tracked by the volume attach-detach controller ([#96689](https://github.com/kubernetes/kubernetes/pull/96689), [@gnufied](https://github.com/gnufied))
+- Ensure empty string annotations are copied over in rollbacks. ([#94858](https://github.com/kubernetes/kubernetes/pull/94858), [@waynepeking348](https://github.com/waynepeking348))
+- Ensure only one LoadBalancer rule is created when HA mode is enabled ([#99825](https://github.com/kubernetes/kubernetes/pull/99825), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider]
+- Ensure that client-go's EventBroadcaster is safe (non-racy) during shutdown. ([#95664](https://github.com/kubernetes/kubernetes/pull/95664), [@DirectXMan12](https://github.com/DirectXMan12)) [SIG API Machinery]
+- Explicitly pass `KUBE_BUILD_CONFORMANCE=y` in `package-tarballs` to re-enable building the conformance tarballs. ([#100571](https://github.com/kubernetes/kubernetes/pull/100571), [@puerco](https://github.com/puerco))
+- Fix Azure file migration e2e test failure when CSIMigration is turned on. ([#97877](https://github.com/kubernetes/kubernetes/pull/97877), [@andyzhangx](https://github.com/andyzhangx))
+- Fix CSI-migrated inline EBS volumes failing to mount if their volumeID is prefixed by aws:// ([#96821](https://github.com/kubernetes/kubernetes/pull/96821), [@wongma7](https://github.com/wongma7)) [SIG Storage]
+- Fix CVE-2020-8555 for Gluster client connections. ([#97922](https://github.com/kubernetes/kubernetes/pull/97922), [@liggitt](https://github.com/liggitt)) [SIG Storage]
+- Fix NPE in ephemeral storage eviction ([#98261](https://github.com/kubernetes/kubernetes/pull/98261), [@wzshiming](https://github.com/wzshiming)) [SIG Node]
+- Fix PermissionDenied issue on SMB mount for Windows ([#99550](https://github.com/kubernetes/kubernetes/pull/99550), [@andyzhangx](https://github.com/andyzhangx))
+- Fix a bug that would let the Horizontal Pod Autoscaler scale down despite at least one metric being unavailable/invalid ([#99514](https://github.com/kubernetes/kubernetes/pull/99514), [@mikkeloscar](https://github.com/mikkeloscar)) [SIG Apps and Autoscaling]
+- Fix cgroup handling for systemd with cgroup v2 ([#98365](https://github.com/kubernetes/kubernetes/pull/98365), [@odinuge](https://github.com/odinuge)) [SIG Node]
+- Fix a counting error in the service/nodeport/loadbalancer quota check ([#97451](https://github.com/kubernetes/kubernetes/pull/97451), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery, Network and Testing]
+- Fix errors when accessing Windows container stats for Dockershim ([#98510](https://github.com/kubernetes/kubernetes/pull/98510), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node and Windows]
+- Fix the kube-proxy container image architecture for non-amd64 images. ([#98526](https://github.com/kubernetes/kubernetes/pull/98526), [@saschagrunert](https://github.com/saschagrunert))
+- Fix missing cadvisor machine metrics. ([#97006](https://github.com/kubernetes/kubernetes/pull/97006), [@lingsamuel](https://github.com/lingsamuel)) [SIG Node]
+- Fix nil VMSS name when setting the service to auto mode ([#97366](https://github.com/kubernetes/kubernetes/pull/97366), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
+- Fix the privileged config of the Pod Sandbox, which was previously ignored. ([#96877](https://github.com/kubernetes/kubernetes/pull/96877), [@xeniumlee](https://github.com/xeniumlee))
+- Fix the panic when the kubelet registers if a node object already exists with no Status.Capacity or Status.Allocatable ([#95269](https://github.com/kubernetes/kubernetes/pull/95269), [@SataQiu](https://github.com/SataQiu)) [SIG Node]
+- Fix a regression that slowed pod termination; before this fix, pods could take up to an additional minute to terminate. This reverses the change that ensured CNI resources were cleaned up when the pod was removed from the API server. ([#97980](https://github.com/kubernetes/kubernetes/pull/97980), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Node]
+- Fix to recover CSI volumes from certain dangling attachments ([#96617](https://github.com/kubernetes/kubernetes/pull/96617), [@yuga711](https://github.com/yuga711)) [SIG Apps and Storage]
+- Fix: Azure file latency issue for metadata-heavy workloads ([#97082](https://github.com/kubernetes/kubernetes/pull/97082), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
+- Fixed Cinder volume IDs on OpenStack Train ([#96673](https://github.com/kubernetes/kubernetes/pull/96673), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider]
+- Fixed the FibreChannel volume plugin corrupting filesystems on detach of multipath volumes. ([#97013](https://github.com/kubernetes/kubernetes/pull/97013), [@jsafrane](https://github.com/jsafrane)) [SIG Storage]
+- Fixed a bug in the kubelet that would saturate CPU utilization after containerd was restarted. ([#97174](https://github.com/kubernetes/kubernetes/pull/97174), [@hanlins](https://github.com/hanlins)) [SIG Node]
+- Fixed a bug that caused a smaller conntrack-max value to be used under the CPU static policy. (#99225, @xh4n3) ([#99613](https://github.com/kubernetes/kubernetes/pull/99613), [@xh4n3](https://github.com/xh4n3)) [SIG Network]
+- Fixed a bug where, on Kubernetes nodes, the healthcheck nodeport would not work when the policy of the INPUT chain in the filter table is not ACCEPT.
+  Added iptables rules to allow healthcheck nodeport traffic. ([#97824](https://github.com/kubernetes/kubernetes/pull/97824), [@hanlins](https://github.com/hanlins)) [SIG Network]
+- Fixed a bug where the kubelet could not start on Btrfs. ([#98042](https://github.com/kubernetes/kubernetes/pull/98042), [@gjkim42](https://github.com/gjkim42)) [SIG Node]
+- Fixed a race condition on API server startup, ensuring previously created webhook configurations are effective before the first write request is admitted. ([#95783](https://github.com/kubernetes/kubernetes/pull/95783), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery]
+- Fixed an issue with garbage collection failing to clean up namespaced children of an object also referenced incorrectly by cluster-scoped children ([#98068](https://github.com/kubernetes/kubernetes/pull/98068), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Apps]
+- Fixed the authentication_duration_seconds metric scope. Previously, it included the whole apiserver request duration, which yielded inaccurate results. ([#99944](https://github.com/kubernetes/kubernetes/pull/99944), [@marseel](https://github.com/marseel))
+- Fixed a bug in CPUManager with a race on container map access ([#97427](https://github.com/kubernetes/kubernetes/pull/97427), [@klueska](https://github.com/klueska)) [SIG Node]
+- Fixed a bug that caused cAdvisor to incorrectly detect single-socket multi-NUMA topology. ([#99315](https://github.com/kubernetes/kubernetes/pull/99315), [@iwankgb](https://github.com/iwankgb)) [SIG Node]
+- Fixed cleanup of block devices when /var/lib/kubelet is a symlink. ([#96889](https://github.com/kubernetes/kubernetes/pull/96889), [@jsafrane](https://github.com/jsafrane)) [SIG Storage]
+- Fixed the namespace flag having no effect when exposing a deployment with --dry-run=client. ([#97492](https://github.com/kubernetes/kubernetes/pull/97492), [@masap](https://github.com/masap)) [SIG CLI]
+- Fixed provisioning of Cinder volumes migrated to CSI when a StorageClass with AllowedTopologies was used. ([#98311](https://github.com/kubernetes/kubernetes/pull/98311), [@jsafrane](https://github.com/jsafrane)) [SIG Storage]
+- Fixes a bug in identifying the correct containerd process. ([#97888](https://github.com/kubernetes/kubernetes/pull/97888), [@pacoxu](https://github.com/pacoxu))
+- Fixes add-on manager leader election to use leases instead of endpoints, similar to what kube-controller-manager does in 1.20 ([#98968](https://github.com/kubernetes/kubernetes/pull/98968), [@liggitt](https://github.com/liggitt))
+- Fixes connection errors when using `--volume-host-cidr-denylist` or `--volume-host-allow-local-loopback` ([#98436](https://github.com/kubernetes/kubernetes/pull/98436), [@liggitt](https://github.com/liggitt)) [SIG Network and Storage]
+- Fixes a problem where an invalid selector on a `PodDisruptionBudget` led to a nil pointer dereference that caused the controller manager to crash loop. ([#98750](https://github.com/kubernetes/kubernetes/pull/98750), [@mortent](https://github.com/mortent))
+- Fixes spurious errors about IPv6 in `kube-proxy` logs on nodes with IPv6 disabled. ([#99127](https://github.com/kubernetes/kubernetes/pull/99127), [@danwinship](https://github.com/danwinship))
+- Fixes a bug where a failed node may not have the NoExecute taint set correctly ([#96876](https://github.com/kubernetes/kubernetes/pull/96876), [@howieyuen](https://github.com/howieyuen)) [SIG Apps and Node]
+- GCE Internal LoadBalancer sync loop will now release the ILB IP address upon sync failure. An error in ILB forwarding rule creation will no longer leak IP addresses. ([#97740](https://github.com/kubernetes/kubernetes/pull/97740), [@prameshj](https://github.com/prameshj)) [SIG Cloud Provider and Network]
+- Ignore pod updates with no new images in the AlwaysPullImages admission controller ([#96668](https://github.com/kubernetes/kubernetes/pull/96668), [@pacoxu](https://github.com/pacoxu)) [SIG Apps, Auth and Node]
+- Improve the speed of vSphere PV provisioning and reduce the number of API calls ([#100054](https://github.com/kubernetes/kubernetes/pull/100054), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage]
+- KUBECTL_EXTERNAL_DIFF now accepts the equal sign for additional parameters. ([#98158](https://github.com/kubernetes/kubernetes/pull/98158), [@dougsland](https://github.com/dougsland)) [SIG CLI]
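For illustration, additional parameters (including `=`-style ones) can now be passed through the environment variable; the manifest name is a placeholder:

```bash
# Extra arguments after the program name are forwarded to the external differ.
KUBECTL_EXTERNAL_DIFF="diff -u --color=always" kubectl diff -f deployment.yaml
```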
+- Kube-apiserver: an update of a pod with a generic ephemeral volume dropped that volume if the feature had been disabled since creating the pod with such a volume ([#99446](https://github.com/kubernetes/kubernetes/pull/99446), [@pohly](https://github.com/pohly)) [SIG Apps, Node and Storage]
+- Kube-proxy: remove the deprecated --cleanup-ipvs flag of kube-proxy, and make the --cleanup flag always flush IPVS ([#97336](https://github.com/kubernetes/kubernetes/pull/97336), [@maaoBit](https://github.com/maaoBit)) [SIG Network]
+- Kubeadm installs etcd v3.4.13 when creating a v1.19 cluster ([#97244](https://github.com/kubernetes/kubernetes/pull/97244), [@pacoxu](https://github.com/pacoxu))
+- Kubeadm: Fixes a kubeadm upgrade bug that could cause a custom CoreDNS configuration to be replaced with the default. ([#97016](https://github.com/kubernetes/kubernetes/pull/97016), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle]
+- Kubeadm: Some text in the `kubeadm upgrade plan` output has changed. If you have scripts or other automation that parses this output, please review these changes and update your scripts to account for the new output. ([#98728](https://github.com/kubernetes/kubernetes/pull/98728), [@stmcginnis](https://github.com/stmcginnis)) [SIG Cluster Lifecycle]
+- Kubeadm: fix a bug in the host memory detection code on 32-bit Linux platforms ([#97403](https://github.com/kubernetes/kubernetes/pull/97403), [@abelbarrera15](https://github.com/abelbarrera15)) [SIG Cluster Lifecycle]
+- Kubeadm: fix a bug where "kubeadm join" would not properly handle missing names for existing etcd members. ([#97372](https://github.com/kubernetes/kubernetes/pull/97372), [@ihgann](https://github.com/ihgann)) [SIG Cluster Lifecycle]
+- Kubeadm: fix a bug where "kubeadm upgrade" commands can fail if CoreDNS v1.8.0 is installed. ([#97919](https://github.com/kubernetes/kubernetes/pull/97919), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
+- Kubeadm: fix a bug where external credentials in an existing admin.conf prevented the CA certificate from being written in the cluster-info ConfigMap. ([#98882](https://github.com/kubernetes/kubernetes/pull/98882), [@kvaps](https://github.com/kvaps)) [SIG Cluster Lifecycle]
+- Kubeadm: get k8s CI version markers from the k8s infra bucket ([#98836](https://github.com/kubernetes/kubernetes/pull/98836), [@hasheddan](https://github.com/hasheddan)) [SIG Cluster Lifecycle and Release]
+- Kubeadm: skip validating the pod subnet against node-cidr-mask when allocate-node-cidrs is set to false ([#98984](https://github.com/kubernetes/kubernetes/pull/98984), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle]
+- Kubectl logs: `--ignore-errors` is now honored by all containers, maintaining consistency with parallelConsumeRequest behavior. ([#97686](https://github.com/kubernetes/kubernetes/pull/97686), [@wzshiming](https://github.com/wzshiming))
+- Kubectl-convert: Fix the `no kind "Ingress" is registered for version` error ([#97754](https://github.com/kubernetes/kubernetes/pull/97754), [@wzshiming](https://github.com/wzshiming))
+- Kubectl: Fixed a panic when describing an ingress backend without an API Group ([#100505](https://github.com/kubernetes/kubernetes/pull/100505), [@lauchokyip](https://github.com/lauchokyip)) [SIG CLI]
+- Kubelet now cleans up orphaned volume directories automatically ([#95301](https://github.com/kubernetes/kubernetes/pull/95301), [@lorenz](https://github.com/lorenz)) [SIG Node and Storage]
+- Kubelet.exe on Windows now checks that the process is running as administrator and that the executing user account is listed in the built-in administrators group. This is the equivalent of checking that the process is running as uid 0. ([#96616](https://github.com/kubernetes/kubernetes/pull/96616), [@perithompson](https://github.com/perithompson)) [SIG Node and Windows]
+- Kubelet: Fix the kubelet panicking after receiving an unexpected signal ([#98200](https://github.com/kubernetes/kubernetes/pull/98200), [@wzshiming](https://github.com/wzshiming)) [SIG Node]
+- Kubelet: Fix repeatedly acquiring the inhibit lock ([#98088](https://github.com/kubernetes/kubernetes/pull/98088), [@wzshiming](https://github.com/wzshiming)) [SIG Node]
+- Kubelet: Fixed a bug in getting the CPU count on Windows when the number of logical processors is more than 64 ([#97378](https://github.com/kubernetes/kubernetes/pull/97378), [@hwdef](https://github.com/hwdef)) [SIG Node and Windows]
+- Limit a lease to a maximum of 1000 attached objects. ([#98257](https://github.com/kubernetes/kubernetes/pull/98257), [@lingsamuel](https://github.com/lingsamuel))
+- Mitigate CVE-2020-8555 for kube-up using GCE by preventing local loopback volume hosts. ([#97934](https://github.com/kubernetes/kubernetes/pull/97934), [@mattcary](https://github.com/mattcary)) [SIG Cloud Provider and Storage]
+- On single-stack configured (IPv4 or IPv6, but not both) clusters, Services which are both headless (no clusterIP) and selectorless (empty or undefined selector) will report `ipFamilyPolicy RequireDualStack` and will have entries in `ipFamilies[]` for both IPv4 and IPv6. This is a change from alpha, but does not have any impact on the manually-specified Endpoints and EndpointSlices for the Service. ([#99555](https://github.com/kubernetes/kubernetes/pull/99555), [@thockin](https://github.com/thockin)) [SIG Apps and Network]
+- Performance regression #97685 has been fixed. ([#97860](https://github.com/kubernetes/kubernetes/pull/97860), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery]
+- Pod log stats for Windows now report metrics ([#99221](https://github.com/kubernetes/kubernetes/pull/99221), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node, Storage, Testing and Windows]
+- Pod status updates faster when reacting to probe results. The first readiness probe will be called sooner once startup probes have succeeded, which marks the Pod as ready sooner. ([#98376](https://github.com/kubernetes/kubernetes/pull/98376), [@matthyx](https://github.com/matthyx))
+- Readjust `kubelet_containers_per_pod_count` buckets to only show metrics greater than 1. ([#98169](https://github.com/kubernetes/kubernetes/pull/98169), [@wawa0210](https://github.com/wawa0210))
+- Remove CSI topology from the migrated in-tree gcepd volume. ([#97823](https://github.com/kubernetes/kubernetes/pull/97823), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider and Storage]
+- Requests with invalid timeout parameters in the request URL now appear in the audit log correctly. ([#96901](https://github.com/kubernetes/kubernetes/pull/96901), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Testing]
+- Resolve a "concurrent map read and map write" crashing error in the kubelet ([#95111](https://github.com/kubernetes/kubernetes/pull/95111), [@choury](https://github.com/choury)) [SIG Node]
+- Resolves spurious `Failed to list *v1.Secret` or `Failed to list *v1.ConfigMap` messages in kubelet logs. ([#99538](https://github.com/kubernetes/kubernetes/pull/99538), [@liggitt](https://github.com/liggitt)) [SIG Auth and Node]
+- ResourceQuota of an entity now inclusively calculates Pod overhead ([#99600](https://github.com/kubernetes/kubernetes/pull/99600), [@gjkim42](https://github.com/gjkim42))
+- Return the zero time (midnight on Jan. 1, 1970) instead of a negative number when reporting startedAt and finishedAt of a not-yet-started or running Pod when using `dockershim` as the runtime. ([#99585](https://github.com/kubernetes/kubernetes/pull/99585), [@Iceber](https://github.com/Iceber))
+- Reverts a breaking change to inline AzureFile volumes; referenced secrets are now searched for in the same namespace as the pod, as in previous releases. ([#100563](https://github.com/kubernetes/kubernetes/pull/100563), [@msau42](https://github.com/msau42))
+- Scores from InterPodAffinity have stronger differentiation. ([#98096](https://github.com/kubernetes/kubernetes/pull/98096), [@leileiwan](https://github.com/leileiwan)) [SIG Scheduling]
+- Specifying the KUBE_TEST_REPO environment variable when e2e tests are executed will instruct the test infrastructure to load that image from a location within the specified repo, using a predefined pattern. ([#93510](https://github.com/kubernetes/kubernetes/pull/93510), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing]
+- Static pods will be deleted gracefully. ([#98103](https://github.com/kubernetes/kubernetes/pull/98103), [@gjkim42](https://github.com/gjkim42)) [SIG Node]
+- Sync node status during kubelet node shutdown.
+  Adds a pod admission handler that rejects new pods while the node is in the process of shutting down. ([#98005](https://github.com/kubernetes/kubernetes/pull/98005), [@wzshiming](https://github.com/wzshiming)) [SIG Node]
+- The calculation of pod UIDs for static pods has changed to ensure each static pod gets a unique value - this will cause all static pod containers to be recreated/restarted if an in-place kubelet upgrade from 1.20 to 1.21 is performed. Note that draining pods before upgrading the kubelet across minor versions is the supported upgrade path. ([#87461](https://github.com/kubernetes/kubernetes/pull/87461), [@bboreham](https://github.com/bboreham)) [SIG Node]
+- The maximum number of ports allowed in EndpointSlices has been increased from 100 to 20,000 ([#99795](https://github.com/kubernetes/kubernetes/pull/99795), [@robscott](https://github.com/robscott)) [SIG Network]
+- Truncate a message if it hits the `NoteLengthLimit` when the scheduler records an event for a pod indicating that the pod has failed to schedule. ([#98715](https://github.com/kubernetes/kubernetes/pull/98715), [@carlory](https://github.com/carlory))
+- Updated k8s.gcr.io/ingress-gce-404-server-with-metrics-amd64 to a version that serves the /metrics endpoint on a non-default port. ([#97621](https://github.com/kubernetes/kubernetes/pull/97621), [@vbannai](https://github.com/vbannai)) [SIG Cloud Provider]
+- Updates the commands
+  - `kubectl kustomize {arg}`
+  - `kubectl apply -k {arg}`
+  to use the same code as the kustomize CLI [v4.0.5](https://github.com/kubernetes-sigs/kustomize/releases/tag/kustomize%2Fv4.0.5) ([#98946](https://github.com/kubernetes/kubernetes/pull/98946), [@monopole](https://github.com/monopole))
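For illustration, both invocations now run the same kustomize v4.0.5 code; `./overlay` is a placeholder kustomization directory:

```bash
# Render the kustomization to stdout.
kubectl kustomize ./overlay

# Render and apply it in one step.
kubectl apply -k ./overlay
```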
+- Use force unmount for NFS volumes if a regular mount fails after a 1-minute timeout ([#96844](https://github.com/kubernetes/kubernetes/pull/96844), [@gnufied](https://github.com/gnufied)) [SIG Storage]
+- Use network.Interface.VirtualMachine.ID to get the bound VM.
+  Skip standalone VMs when reconciling the LoadBalancer. ([#97635](https://github.com/kubernetes/kubernetes/pull/97635), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
+- Using exec auth plugins with kubectl no longer results in warnings about constructing many client instances from the same exec auth config. ([#97857](https://github.com/kubernetes/kubernetes/pull/97857), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Auth]
+- When a CNI plugin returns dual-stack pod IPs, kubelet will now try to respect the
+  "primary IP family" of the cluster by picking a primary pod IP of the same family
+  as the (primary) node IP, rather than assuming that the CNI plugin returned the IPs
+  in the order the administrator wanted (since some CNI plugins don't allow
+  configuring this). ([#97979](https://github.com/kubernetes/kubernetes/pull/97979), [@danwinship](https://github.com/danwinship)) [SIG Network and Node]
+- When dynamically provisioning Azure File volumes for a premium account, the requested size will be set to 100GB if the request is initially lower than this value, to accommodate Azure File requirements. ([#99122](https://github.com/kubernetes/kubernetes/pull/99122), [@huffmanca](https://github.com/huffmanca)) [SIG Cloud Provider and Storage]
+- When using `Containerd` on Windows, the `C:\Windows\System32\drivers\etc\hosts` file will now be managed by kubelet. ([#83730](https://github.com/kubernetes/kubernetes/pull/83730), [@claudiubelu](https://github.com/claudiubelu))
+- `VolumeBindingArgs` now allows `BindTimeoutSeconds` to be set to zero; a value of zero indicates no waiting for the volume binding check. ([#99835](https://github.com/kubernetes/kubernetes/pull/99835), [@chendave](https://github.com/chendave)) [SIG Scheduling and Storage]
+- `kubectl exec` and `kubectl attach` now honor the `--quiet` flag, which suppresses output from the local binary that could be confused by a script with the remote command output (all non-failure output is hidden). In addition, exec and attach now print the list of alternate containers inline when we default to the first spec.container. ([#99004](https://github.com/kubernetes/kubernetes/pull/99004), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI]
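For illustration, a minimal sketch of the `--quiet` behavior; the pod name is a placeholder:

```bash
# Only the remote command's output is printed; local kubectl chatter is suppressed.
kubectl exec --quiet my-pod -- uname -r
```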
### Other (Cleanup or Flake)

-- **Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**:
-  - ([#96443](https://github.com/kubernetes/kubernetes/pull/96443), [@alaypatel07](https://github.com/alaypatel07)) [SIG Apps]
-- --redirect-container-streaming is no longer functional. The flag will be removed in v1.22 ([#95935](https://github.com/kubernetes/kubernetes/pull/95935), [@tallclair](https://github.com/tallclair)) [SIG Node]
-- A new metric `requestAbortsTotal` has been introduced that counts aborted requests for each `group`, `version`, `verb`, `resource`, `subresource` and `scope`. ([#95002](https://github.com/kubernetes/kubernetes/pull/95002), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery, Cloud Provider, Instrumentation and Scheduling]
-- API priority and fairness metrics use snake_case in label names ([#96236](https://github.com/kubernetes/kubernetes/pull/96236), [@adtac](https://github.com/adtac)) [SIG API Machinery, Cluster Lifecycle, Instrumentation and Testing]
-- Add fine-grained debugging to the intra-pod conformance test to troubleshoot networking issues for potentially unhealthy nodes when running conformance or sonobuoy tests. ([#93837](https://github.com/kubernetes/kubernetes/pull/93837), [@jayunit100](https://github.com/jayunit100))
-- Add the following metrics:
-  - network_plugin_operations_total
-  - network_plugin_operations_errors_total ([#93066](https://github.com/kubernetes/kubernetes/pull/93066), [@AnishShah](https://github.com/AnishShah))
-- Adds a bootstrapping ClusterRole, ClusterRoleBinding and group for the /metrics, /livez/*, /readyz/*, & /healthz/- endpoints. ([#93311](https://github.com/kubernetes/kubernetes/pull/93311), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Auth, Cloud Provider and Instrumentation]
-- AdmissionReview objects sent for the creation of Namespace API objects now populate the `namespace` attribute consistently (previously the `namespace` attribute was empty for Namespace creation via POST requests, and populated for Namespace creation via server-side-apply PATCH requests) ([#95012](https://github.com/kubernetes/kubernetes/pull/95012), [@nodo](https://github.com/nodo)) [SIG API Machinery and Testing]
-- Applies translations on all command descriptions ([#95439](https://github.com/kubernetes/kubernetes/pull/95439), [@HerrNaN](https://github.com/HerrNaN)) [SIG CLI]
-- Base-images: Update to debian-iptables:buster-v1.3.0
-  - Uses iptables 1.8.5
-  - base-images: Update to debian-base:buster-v1.2.0
-  - cluster/images/etcd: Build etcd:3.4.13-1 image
-  - Uses debian-base:buster-v1.2.0 ([#94733](https://github.com/kubernetes/kubernetes/pull/94733), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing]
-- Changed: the default "Accept-Encoding" header is removed from HTTP probes. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes ([#96127](https://github.com/kubernetes/kubernetes/pull/96127), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node]
-- Client-go header logging (at verbosity levels >= 9) now masks `Authorization` header contents ([#95316](https://github.com/kubernetes/kubernetes/pull/95316), [@sfowl](https://github.com/sfowl)) [SIG API Machinery]
-- Decrease the warning message frequency on setting volume ownership for configmap/secret. ([#92878](https://github.com/kubernetes/kubernetes/pull/92878), [@jvanz](https://github.com/jvanz))
-- Enhance the log information of verifyRunAsNonRoot; add pod and container information ([#94911](https://github.com/kubernetes/kubernetes/pull/94911), [@wawa0210](https://github.com/wawa0210)) [SIG Node]
-- Fix the function name NewCreateCreateDeploymentOptions ([#91931](https://github.com/kubernetes/kubernetes/pull/91931), [@lixiaobing1](https://github.com/lixiaobing1)) [SIG CLI]
-- Fix the kubelet to properly log when a container is started. Previously, the kubelet might log that a container was dead and restarted when it was actually started for the first time. This behavior only happened on pods with initContainers and regular containers. ([#91469](https://github.com/kubernetes/kubernetes/pull/91469), [@rata](https://github.com/rata))
-- Fixes the message about no auth for metrics in the scheduler. ([#94035](https://github.com/kubernetes/kubernetes/pull/94035), [@zhouya0](https://github.com/zhouya0)) [SIG Scheduling]
-- Generators for services are removed from kubectl ([#95256](https://github.com/kubernetes/kubernetes/pull/95256), [@Git-Jiro](https://github.com/Git-Jiro)) [SIG CLI]
-- Introduce the kubectl-convert plugin. ([#96190](https://github.com/kubernetes/kubernetes/pull/96190), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing]
-- Kube-scheduler now logs the processed component config at startup ([#96426](https://github.com/kubernetes/kubernetes/pull/96426), [@damemi](https://github.com/damemi)) [SIG Scheduling]
-- Kubeadm: Separate argument key/value in log messages ([#94016](https://github.com/kubernetes/kubernetes/pull/94016), [@mrueg](https://github.com/mrueg)) [SIG Cluster Lifecycle]
-- Kubeadm: remove the CoreDNS check for known image digests when applying the addon ([#94506](https://github.com/kubernetes/kubernetes/pull/94506), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubeadm: update the default pause image version to 1.4.0 on Windows. With this update the image supports Windows versions 1809 (2019LTS), 1903, 1909, 2004 ([#95419](https://github.com/kubernetes/kubernetes/pull/95419), [@jsturtevant](https://github.com/jsturtevant)) [SIG Cluster Lifecycle and Windows]
-- Kubectl: the `generator` flag of `kubectl autoscale` has been deprecated and has no effect; it will be removed in a feature release ([#92998](https://github.com/kubernetes/kubernetes/pull/92998), [@SataQiu](https://github.com/SataQiu)) [SIG CLI]
-- Lock ExternalPolicyForExternalIP to default; this feature gate will be removed in 1.22. ([#94581](https://github.com/kubernetes/kubernetes/pull/94581), [@knabben](https://github.com/knabben)) [SIG Network]
-- Mask Ceph RBD adminSecrets in logs when logLevel >= 4. ([#95245](https://github.com/kubernetes/kubernetes/pull/95245), [@sfowl](https://github.com/sfowl))
-- Remove offensive words from the kubectl cluster-info command. ([#95202](https://github.com/kubernetes/kubernetes/pull/95202), [@rikatz](https://github.com/rikatz))
-- Remove support for the "ci/k8s-master" version label in kubeadm; use "ci/latest" instead. See [kubernetes/test-infra#18517](https://github.com/kubernetes/test-infra/pull/18517). ([#93626](https://github.com/kubernetes/kubernetes/pull/93626), [@vikkyomkar](https://github.com/vikkyomkar))
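For illustration, a minimal sketch of the replacement label, assuming a kubeadm setup that pulls CI builds; all other flags are omitted:

```bash
# "ci/latest" resolves to the most recent CI build; "ci/k8s-master" is no longer accepted.
kubeadm init --kubernetes-version=ci/latest
```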
-- Remove the dependency of the csi-translation-lib module on apiserver/cloud-provider/controller-manager ([#95543](https://github.com/kubernetes/kubernetes/pull/95543), [@wawa0210](https://github.com/wawa0210)) [SIG Release]
-- The scheduler framework interface moved from pkg/scheduler/framework/v1alpha to pkg/scheduler/framework ([#95069](https://github.com/kubernetes/kubernetes/pull/95069), [@farah](https://github.com/farah)) [SIG Scheduling, Storage and Testing]
-- Service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset is removed. All Standard load balancers will always enable TCP resets. ([#94297](https://github.com/kubernetes/kubernetes/pull/94297), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider]
-- Stop propagating SelfLink (deprecated in 1.16) in kube-apiserver ([#94397](https://github.com/kubernetes/kubernetes/pull/94397), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing]
-- Strip unnecessary security contexts on Windows ([#93475](https://github.com/kubernetes/kubernetes/pull/93475), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node, Testing and Windows]
-- To make the code more robust, add a unit test for GetAddressAndDialer ([#93180](https://github.com/kubernetes/kubernetes/pull/93180), [@FreeZhang61](https://github.com/FreeZhang61)) [SIG Node]
-- The UDP and SCTP protocols can leave stale connections that need to be cleared to avoid service disruption, but they can cause problems that are hard to debug.
-  Kubernetes components using a log level greater than or equal to 4 will log the conntrack operations and their output, to show the entries that were deleted. ([#95694](https://github.com/kubernetes/kubernetes/pull/95694), [@aojea](https://github.com/aojea)) [SIG Network]
-- Update CNI plugins to v0.8.7 ([#94367](https://github.com/kubernetes/kubernetes/pull/94367), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Network, Node, Release and Testing]
-- Update cri-tools to [v1.19.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.19.0) ([#94307](https://github.com/kubernetes/kubernetes/pull/94307), [@xmudrii](https://github.com/xmudrii)) [SIG Cloud Provider]
-- Update the etcd client side to v3.4.13 ([#94259](https://github.com/kubernetes/kubernetes/pull/94259), [@jingyih](https://github.com/jingyih)) [SIG API Machinery and Cloud Provider]
-- Users will now be able to configure all supported values for AWS NLB health check interval and thresholds for new resources. ([#96312](https://github.com/kubernetes/kubernetes/pull/96312), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider]
-- V1helpers.MatchNodeSelectorTerms now accepts just a Node and a list of Terms ([#95871](https://github.com/kubernetes/kubernetes/pull/95871), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage]
-- Vsphere: improve the logging message on node cache refresh events ([#95236](https://github.com/kubernetes/kubernetes/pull/95236), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider]
-- The `MatchNodeSelectorTerms` function moved to `k8s.io/component-helpers` ([#95531](https://github.com/kubernetes/kubernetes/pull/95531), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage]
-- `kubectl api-resources` now prints the API version (as 'API group/version', same as the output of `kubectl api-versions`). The column APIGROUP is now APIVERSION ([#95253](https://github.com/kubernetes/kubernetes/pull/95253), [@sallyom](https://github.com/sallyom)) [SIG CLI]
-- `kubectl get ingress` now prefers the `networking.k8s.io/v1` over `extensions/v1beta1` (deprecated since v1.14). To explicitly request the deprecated version, use `kubectl get ingress.v1beta1.extensions`. ([#94309](https://github.com/kubernetes/kubernetes/pull/94309), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and CLI]
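For illustration, the renamed column and the explicit group/version request side by side:

```bash
# The APIVERSION column now shows group/version, matching `kubectl api-versions`.
kubectl api-resources --api-group=networking.k8s.io

# Explicitly request the deprecated extensions/v1beta1 Ingress.
kubectl get ingress.v1beta1.extensions
```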
+- APIs for kubelet annotations and labels from `k8s.io/kubernetes/pkg/kubelet/apis` are now moved under `k8s.io/kubelet/pkg/apis/` ([#98931](https://github.com/kubernetes/kubernetes/pull/98931), [@michaelbeaumont](https://github.com/michaelbeaumont))
+- Apiserver_request_duration_seconds is promoted to stable status. ([#99925](https://github.com/kubernetes/kubernetes/pull/99925), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing]
+- Bump github.com/Azure/go-autorest/autorest to v0.11.12 ([#97033](https://github.com/kubernetes/kubernetes/pull/97033), [@patrickshan](https://github.com/patrickshan)) [SIG API Machinery, CLI, Cloud Provider and Cluster Lifecycle]
+- Clients are required to use go1.15.8+ or go1.16+ if the kube-apiserver has the goaway feature enabled, to avoid an unexpected data race condition. ([#98809](https://github.com/kubernetes/kubernetes/pull/98809), [@answer1991](https://github.com/answer1991))
+- Delete the deprecated `service.beta.kubernetes.io/azure-load-balancer-mixed-protocols` mixed protocol annotation in favor of the MixedProtocolLBService feature ([#97096](https://github.com/kubernetes/kubernetes/pull/97096), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
+- EndpointSlice generation is now incremented when labels change. ([#99750](https://github.com/kubernetes/kubernetes/pull/99750), [@robscott](https://github.com/robscott)) [SIG Network]
+- Feature gate AllowInsecureBackendProxy graduates to GA and is unconditionally enabled. ([#99658](https://github.com/kubernetes/kubernetes/pull/99658), [@deads2k](https://github.com/deads2k))
+- Increase the timeout for the pod lifecycle test to reach pod status=ready ([#96691](https://github.com/kubernetes/kubernetes/pull/96691), [@hh](https://github.com/hh))
+- Increased `CSINodeIDMaxLength` from 128 bytes to 192 bytes. ([#98753](https://github.com/kubernetes/kubernetes/pull/98753), [@Jiawei0227](https://github.com/Jiawei0227))
+- Kube-apiserver: The OIDC authenticator no longer waits 10 seconds before attempting to fetch the metadata required to verify tokens. ([#97693](https://github.com/kubernetes/kubernetes/pull/97693), [@enj](https://github.com/enj)) [SIG API Machinery and Auth]
+- Kube-proxy: Traffic from the cluster directed to ExternalIPs is always sent directly to the Service. ([#96296](https://github.com/kubernetes/kubernetes/pull/96296), [@aojea](https://github.com/aojea)) [SIG Network and Testing]
+- Kubeadm: change the default image repository for CI images from 'gcr.io/kubernetes-ci-images' to 'gcr.io/k8s-staging-ci-images' ([#97087](https://github.com/kubernetes/kubernetes/pull/97087), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle]
+- Kubectl: The deprecated `kubectl alpha debug` command is removed. Use `kubectl debug` instead. ([#98111](https://github.com/kubernetes/kubernetes/pull/98111), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI]
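For illustration, the non-alpha invocation; the pod name and debug image are placeholders:

```bash
# Attach an ephemeral debug container to a running pod.
kubectl debug -it my-pod --image=busybox
```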
+- Kubelet command line flags related to dockershim now show a deprecation message, as they will be removed along with dockershim in a future release. ([#98730](https://github.com/kubernetes/kubernetes/pull/98730), [@dims](https://github.com/dims))
+- Official support for building kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97618](https://github.com/kubernetes/kubernetes/pull/97618), [@jherrera123](https://github.com/jherrera123)) [SIG Release and Testing]
+- Process start time on Windows now uses current process information ([#97491](https://github.com/kubernetes/kubernetes/pull/97491), [@jsturtevant](https://github.com/jsturtevant)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Windows]
+- Resolves flakes in the Ingress conformance tests due to conflicts with controllers updating the Ingress object ([#98430](https://github.com/kubernetes/kubernetes/pull/98430), [@liggitt](https://github.com/liggitt)) [SIG Network and Testing]
+- The `AttachVolumeLimit` feature gate (GA since v1.17) has been removed and the feature is now unconditionally enabled. ([#96539](https://github.com/kubernetes/kubernetes/pull/96539), [@ialidzhikov](https://github.com/ialidzhikov))
+- The `CSINodeInfo` feature gate, GA since v1.17, is unconditionally enabled and can no longer be specified via the `--feature-gates` argument. ([#96561](https://github.com/kubernetes/kubernetes/pull/96561), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Apps, Auth, Scheduling, Storage and Testing]
+- The `apiserver_request_total` metric is promoted to stable status and no longer has a content-type dimension, so any alerts/charts which presume the existence of this dimension will fail. This is, however, unlikely to be the case since it was effectively an unbounded dimension in the first place. ([#99788](https://github.com/kubernetes/kubernetes/pull/99788), [@logicalhan](https://github.com/logicalhan))
+- The default delegating authorization options now allow unauthenticated access to healthz, readyz, and livez. A system:masters user connecting to an authz delegator will not perform an authz check. ([#98325](https://github.com/kubernetes/kubernetes/pull/98325), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth, Cloud Provider and Scheduling]
+- The deprecated feature gates `CSIDriverRegistry`, `BlockVolume` and `CSIBlockVolume` are now unconditionally enabled and can no longer be specified in component invocations. ([#98021](https://github.com/kubernetes/kubernetes/pull/98021), [@gavinfish](https://github.com/gavinfish)) [SIG Storage]
+- The deprecated feature gates `RotateKubeletClientCertificate`, `AttachVolumeLimit`, `VolumePVCDataSource` and `EvenPodsSpread` are now unconditionally enabled and can no longer be specified in component invocations. ([#97306](https://github.com/kubernetes/kubernetes/pull/97306), [@gavinfish](https://github.com/gavinfish)) [SIG Node, Scheduling and Storage]
+- The e2e suite can be instructed not to wait for pods in kube-system to be ready or for all nodes to be ready by passing `--allowed-not-ready-nodes=-1` when invoking the e2e.test program. This allows callers to run subsets of the e2e suite in scenarios other than perfectly healthy clusters. ([#98781](https://github.com/kubernetes/kubernetes/pull/98781), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing]
+- The feature gates `WindowsGMSA` and `WindowsRunAsUserName` that are GA since v1.18 are now removed. ([#96531](https://github.com/kubernetes/kubernetes/pull/96531), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Node and Windows]
+- The new `-gce-zones` flag on the `e2e.test` binary instructs tests that check for information about how the cluster interacts with the cloud to limit their queries to the provided zone list. If not specified, the current behavior of asking the cloud provider for all available zones in multi zone clusters is preserved. ([#98787](https://github.com/kubernetes/kubernetes/pull/98787), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Cluster Lifecycle and Testing]
+- Update cri-tools to [v1.20.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.20.0) ([#97967](https://github.com/kubernetes/kubernetes/pull/97967), [@rajibmitra](https://github.com/rajibmitra)) [SIG Cloud Provider]
+- Windows nodes on GCE will take longer to start due to dependencies installed at node creation time. ([#98284](https://github.com/kubernetes/kubernetes/pull/98284), [@pjh](https://github.com/pjh)) [SIG Cloud Provider]
+- `apiserver_storage_objects` (a newer version of `etcd_object_counts`) is promoted and marked as stable. ([#100082](https://github.com/kubernetes/kubernetes/pull/100082), [@logicalhan](https://github.com/logicalhan))
+
+### Uncategorized
+
+- GCE L4 Loadbalancers now handle > 5 ports in the service spec correctly. ([#99595](https://github.com/kubernetes/kubernetes/pull/99595), [@prameshj](https://github.com/prameshj)) [SIG Cloud Provider]
+- The DownwardAPIHugePages feature is beta. Users may use the feature if all workers in their cluster are at version 1.20 or newer. The feature will be enabled by default in all installations in 1.22. ([#99610](https://github.com/kubernetes/kubernetes/pull/99610), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node]

## Dependencies

### Added
-- cloud.google.com/go/firestore: v1.1.0
-- github.com/Azure/go-autorest: [v14.2.0+incompatible](https://github.com/Azure/go-autorest/tree/v14.2.0)
-- github.com/armon/go-metrics: [f0300d1](https://github.com/armon/go-metrics/tree/f0300d1)
-- github.com/armon/go-radix: [7fddfc3](https://github.com/armon/go-radix/tree/7fddfc3)
-- github.com/bketelsen/crypt: [5cbc8cc](https://github.com/bketelsen/crypt/tree/5cbc8cc)
-- github.com/form3tech-oss/jwt-go: [v3.2.2+incompatible](https://github.com/form3tech-oss/jwt-go/tree/v3.2.2)
-- github.com/fvbommel/sortorder: [v1.0.1](https://github.com/fvbommel/sortorder/tree/v1.0.1)
-- github.com/hashicorp/consul/api: [v1.1.0](https://github.com/hashicorp/consul/api/tree/v1.1.0)
-- github.com/hashicorp/consul/sdk: [v0.1.1](https://github.com/hashicorp/consul/sdk/tree/v0.1.1)
-- github.com/hashicorp/errwrap: [v1.0.0](https://github.com/hashicorp/errwrap/tree/v1.0.0)
-- github.com/hashicorp/go-cleanhttp: [v0.5.1](https://github.com/hashicorp/go-cleanhttp/tree/v0.5.1)
-- github.com/hashicorp/go-immutable-radix: [v1.0.0](https://github.com/hashicorp/go-immutable-radix/tree/v1.0.0)
-- github.com/hashicorp/go-msgpack: [v0.5.3](https://github.com/hashicorp/go-msgpack/tree/v0.5.3)
-- github.com/hashicorp/go-multierror: [v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0)
-- github.com/hashicorp/go-rootcerts: [v1.0.0](https://github.com/hashicorp/go-rootcerts/tree/v1.0.0)
-- github.com/hashicorp/go-sockaddr: [v1.0.0](https://github.com/hashicorp/go-sockaddr/tree/v1.0.0)
-- github.com/hashicorp/go-uuid: [v1.0.1](https://github.com/hashicorp/go-uuid/tree/v1.0.1)
-- github.com/hashicorp/go.net: [v0.0.1](https://github.com/hashicorp/go.net/tree/v0.0.1)
-- github.com/hashicorp/logutils: [v1.0.0](https://github.com/hashicorp/logutils/tree/v1.0.0)
-- github.com/hashicorp/mdns: [v1.0.0](https://github.com/hashicorp/mdns/tree/v1.0.0)
-- github.com/hashicorp/memberlist: [v0.1.3](https://github.com/hashicorp/memberlist/tree/v0.1.3)
-- github.com/hashicorp/serf: [v0.8.2](https://github.com/hashicorp/serf/tree/v0.8.2)
-- github.com/jmespath/go-jmespath/internal/testify: [v1.5.1](https://github.com/jmespath/go-jmespath/internal/testify/tree/v1.5.1)
-- github.com/mitchellh/cli: [v1.0.0](https://github.com/mitchellh/cli/tree/v1.0.0)
-- github.com/mitchellh/go-testing-interface: [v1.0.0](https://github.com/mitchellh/go-testing-interface/tree/v1.0.0)
-- github.com/mitchellh/gox: [v0.4.0](https://github.com/mitchellh/gox/tree/v0.4.0)
-- github.com/mitchellh/iochan: [v1.0.0](https://github.com/mitchellh/iochan/tree/v1.0.0)
-- github.com/pascaldekloe/goe: [57f6aae](https://github.com/pascaldekloe/goe/tree/57f6aae)
-- github.com/posener/complete: [v1.1.1](https://github.com/posener/complete/tree/v1.1.1)
-- github.com/ryanuber/columnize: [9b3edd6](https://github.com/ryanuber/columnize/tree/9b3edd6)
-- github.com/sean-/seed: [e2103e2](https://github.com/sean-/seed/tree/e2103e2)
-- github.com/subosito/gotenv: [v1.2.0](https://github.com/subosito/gotenv/tree/v1.2.0)
-- github.com/willf/bitset: [d5bec33](https://github.com/willf/bitset/tree/d5bec33)
-- gopkg.in/ini.v1: v1.51.0
-- gopkg.in/yaml.v3: 9f266ea
-- rsc.io/quote/v3: v3.1.0
-- rsc.io/sampler: v1.3.0
+- github.com/go-errors/errors: [v1.0.1](https://github.com/go-errors/errors/tree/v1.0.1)
+- github.com/gobuffalo/here: [v0.6.0](https://github.com/gobuffalo/here/tree/v0.6.0)
+- github.com/google/shlex: [e7afc7f](https://github.com/google/shlex/tree/e7afc7f)
+- github.com/markbates/pkger: [v0.17.1](https://github.com/markbates/pkger/tree/v0.17.1)
+- github.com/moby/spdystream: [v0.2.0](https://github.com/moby/spdystream/tree/v0.2.0)
+- github.com/monochromegane/go-gitignore: [205db1a](https://github.com/monochromegane/go-gitignore/tree/205db1a)
+- github.com/niemeyer/pretty: [a10e7ca](https://github.com/niemeyer/pretty/tree/a10e7ca)
+- github.com/xlab/treeprint: [a009c39](https://github.com/xlab/treeprint/tree/a009c39)
+- go.starlark.net: 8dd3e2e
+- golang.org/x/term: 6a3ed07
+- sigs.k8s.io/kustomize/api: v0.8.5
+- sigs.k8s.io/kustomize/cmd/config: v0.9.7
+- sigs.k8s.io/kustomize/kustomize/v4: v4.0.5
+- sigs.k8s.io/kustomize/kyaml: v0.10.15

### Changed
-- cloud.google.com/go/bigquery: v1.0.1 → v1.4.0
-- cloud.google.com/go/datastore: v1.0.0 → v1.1.0
-- cloud.google.com/go/pubsub: v1.0.1 → v1.2.0
-- cloud.google.com/go/storage: v1.0.0 → v1.6.0
-- cloud.google.com/go: v0.51.0 → v0.54.0
-- github.com/Azure/go-autorest/autorest/adal: [v0.8.2 → v0.9.5](https://github.com/Azure/go-autorest/autorest/adal/compare/v0.8.2...v0.9.5)
-- github.com/Azure/go-autorest/autorest/date: [v0.2.0 → v0.3.0](https://github.com/Azure/go-autorest/autorest/date/compare/v0.2.0...v0.3.0)
-- github.com/Azure/go-autorest/autorest/mocks: [v0.3.0 → v0.4.1](https://github.com/Azure/go-autorest/autorest/mocks/compare/v0.3.0...v0.4.1)
-- github.com/Azure/go-autorest/autorest: [v0.9.6 → v0.11.1](https://github.com/Azure/go-autorest/autorest/compare/v0.9.6...v0.11.1)
-- github.com/Azure/go-autorest/logger: [v0.1.0 → v0.2.0](https://github.com/Azure/go-autorest/logger/compare/v0.1.0...v0.2.0)
-- github.com/Azure/go-autorest/tracing: [v0.5.0 → v0.6.0](https://github.com/Azure/go-autorest/tracing/compare/v0.5.0...v0.6.0)
-- github.com/Microsoft/go-winio: [fc70bd9 → v0.4.15](https://github.com/Microsoft/go-winio/compare/fc70bd9...v0.4.15)
-- github.com/aws/aws-sdk-go: [v1.28.2 → v1.35.24](https://github.com/aws/aws-sdk-go/compare/v1.28.2...v1.35.24)
-- github.com/blang/semver: [v3.5.0+incompatible → v3.5.1+incompatible](https://github.com/blang/semver/compare/v3.5.0...v3.5.1)
-- github.com/checkpoint-restore/go-criu/v4: [v4.0.2 → v4.1.0](https://github.com/checkpoint-restore/go-criu/v4/compare/v4.0.2...v4.1.0)
-- github.com/containerd/containerd: [v1.3.3 → v1.4.1](https://github.com/containerd/containerd/compare/v1.3.3...v1.4.1)
-- github.com/containerd/ttrpc: [v1.0.0 → v1.0.2](https://github.com/containerd/ttrpc/compare/v1.0.0...v1.0.2)
-- github.com/containerd/typeurl: [v1.0.0 → v1.0.1](https://github.com/containerd/typeurl/compare/v1.0.0...v1.0.1)
-- github.com/coreos/etcd: [v3.3.10+incompatible → v3.3.13+incompatible](https://github.com/coreos/etcd/compare/v3.3.10...v3.3.13)
-- github.com/docker/docker: [aa6a989 → bd33bbf](https://github.com/docker/docker/compare/aa6a989...bd33bbf)
-- github.com/go-gl/glfw/v3.3/glfw: [12ad95a → 6f7a984](https://github.com/go-gl/glfw/v3.3/glfw/compare/12ad95a...6f7a984)
-- github.com/golang/groupcache: [215e871 → 8c9f03a](https://github.com/golang/groupcache/compare/215e871...8c9f03a)
-- github.com/golang/mock: [v1.3.1 → v1.4.1](https://github.com/golang/mock/compare/v1.3.1...v1.4.1)
-- github.com/golang/protobuf: [v1.4.2 → v1.4.3](https://github.com/golang/protobuf/compare/v1.4.2...v1.4.3)
-- github.com/google/cadvisor: [v0.37.0 → v0.38.5](https://github.com/google/cadvisor/compare/v0.37.0...v0.38.5)
-- github.com/google/go-cmp: [v0.4.0 → v0.5.2](https://github.com/google/go-cmp/compare/v0.4.0...v0.5.2)
-- github.com/google/pprof: [d4f498a → 1ebb73c](https://github.com/google/pprof/compare/d4f498a...1ebb73c)
-- github.com/google/uuid: [v1.1.1 → v1.1.2](https://github.com/google/uuid/compare/v1.1.1...v1.1.2)
-- github.com/gorilla/mux: [v1.7.3 → v1.8.0](https://github.com/gorilla/mux/compare/v1.7.3...v1.8.0)
-- github.com/gorilla/websocket: [v1.4.0 → v1.4.2](https://github.com/gorilla/websocket/compare/v1.4.0...v1.4.2)
-- github.com/jmespath/go-jmespath: [c2b33e8 → v0.4.0](https://github.com/jmespath/go-jmespath/compare/c2b33e8...v0.4.0)
-- github.com/karrick/godirwalk: [v1.7.5 → v1.16.1](https://github.com/karrick/godirwalk/compare/v1.7.5...v1.16.1)
-- github.com/opencontainers/go-digest: [v1.0.0-rc1 → v1.0.0](https://github.com/opencontainers/go-digest/compare/v1.0.0-rc1...v1.0.0)
-- github.com/opencontainers/runc: [819fcc6 → v1.0.0-rc92](https://github.com/opencontainers/runc/compare/819fcc6...v1.0.0-rc92)
-- github.com/opencontainers/runtime-spec: [237cc4f → 4d89ac9](https://github.com/opencontainers/runtime-spec/compare/237cc4f...4d89ac9)
-- github.com/opencontainers/selinux: [v1.5.2 → v1.6.0](https://github.com/opencontainers/selinux/compare/v1.5.2...v1.6.0)
-- github.com/prometheus/procfs: [v0.1.3 → v0.2.0](https://github.com/prometheus/procfs/compare/v0.1.3...v0.2.0)
-- github.com/quobyte/api: [v0.1.2 → v0.1.8](https://github.com/quobyte/api/compare/v0.1.2...v0.1.8)
-- github.com/spf13/cobra: [v1.0.0 → v1.1.1](https://github.com/spf13/cobra/compare/v1.0.0...v1.1.1)
-- github.com/spf13/viper: [v1.4.0 → v1.7.0](https://github.com/spf13/viper/compare/v1.4.0...v1.7.0)
-- github.com/storageos/go-api: [343b3ef → v2.2.0+incompatible](https://github.com/storageos/go-api/compare/343b3ef...v2.2.0)
-- github.com/stretchr/testify: [v1.4.0 → v1.6.1](https://github.com/stretchr/testify/compare/v1.4.0...v1.6.1)
-- github.com/vishvananda/netns: [52d707b → db3c7e5](https://github.com/vishvananda/netns/compare/52d707b...db3c7e5)
-- go.etcd.io/etcd: 17cef6e → dd1b699
-- go.opencensus.io: v0.22.2 → v0.22.3
-- golang.org/x/crypto: 75b2880 → 7f63de1
-- golang.org/x/exp: da58074 → 6cc2880
-- golang.org/x/lint: fdd1cda → 738671d
golang.org/x/net: ab34263 → 69a7880 -- golang.org/x/oauth2: 858c2ad → bf48bf1 -- golang.org/x/sys: ed371f2 → 5cba982 -- golang.org/x/text: v0.3.3 → v0.3.4 -- golang.org/x/time: 555d28b → 3af7569 -- golang.org/x/xerrors: 9bdfabe → 5ec99f8 -- google.golang.org/api: v0.15.1 → v0.20.0 -- google.golang.org/genproto: cb27e3a → 8816d57 -- google.golang.org/grpc: v1.27.0 → v1.27.1 -- google.golang.org/protobuf: v1.24.0 → v1.25.0 -- honnef.co/go/tools: v0.0.1-2019.2.3 → v0.0.1-2020.1.3 -- k8s.io/gengo: 8167cfd → 83324d8 -- k8s.io/klog/v2: v2.2.0 → v2.4.0 -- k8s.io/kube-openapi: 6aeccd4 → d219536 -- k8s.io/system-validators: v1.1.2 → v1.2.0 -- k8s.io/utils: d5654de → 67b214c -- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.9 → v0.0.14 -- sigs.k8s.io/structured-merge-diff/v4: v4.0.1 → v4.0.2 +- dmitri.shuralyov.com/gpu/mtl: 666a987 → 28db891 +- github.com/Azure/go-autorest/autorest: [v0.11.1 → v0.11.12](https://github.com/Azure/go-autorest/autorest/compare/v0.11.1...v0.11.12) +- github.com/NYTimes/gziphandler: [56545f4 → v1.1.1](https://github.com/NYTimes/gziphandler/compare/56545f4...v1.1.1) +- github.com/cilium/ebpf: [1c8d4c9 → v0.2.0](https://github.com/cilium/ebpf/compare/1c8d4c9...v0.2.0) +- github.com/container-storage-interface/spec: [v1.2.0 → v1.3.0](https://github.com/container-storage-interface/spec/compare/v1.2.0...v1.3.0) +- github.com/containerd/console: [v1.0.0 → v1.0.1](https://github.com/containerd/console/compare/v1.0.0...v1.0.1) +- github.com/containerd/containerd: [v1.4.1 → v1.4.4](https://github.com/containerd/containerd/compare/v1.4.1...v1.4.4) +- github.com/coredns/corefile-migration: [v1.0.10 → v1.0.11](https://github.com/coredns/corefile-migration/compare/v1.0.10...v1.0.11) +- github.com/creack/pty: [v1.1.7 → v1.1.11](https://github.com/creack/pty/compare/v1.1.7...v1.1.11) +- github.com/docker/docker: [bd33bbf → v20.10.2+incompatible](https://github.com/docker/docker/compare/bd33bbf...v20.10.2) +- github.com/go-logr/logr: [v0.2.0 → v0.4.0](https://github.com/go-logr/logr/compare/v0.2.0...v0.4.0) +- github.com/go-openapi/spec: [v0.19.3 → v0.19.5](https://github.com/go-openapi/spec/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/strfmt: [v0.19.3 → v0.19.5](https://github.com/go-openapi/strfmt/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/validate: [v0.19.5 → v0.19.8](https://github.com/go-openapi/validate/compare/v0.19.5...v0.19.8) +- github.com/gogo/protobuf: [v1.3.1 → v1.3.2](https://github.com/gogo/protobuf/compare/v1.3.1...v1.3.2) +- github.com/golang/mock: [v1.4.1 → v1.4.4](https://github.com/golang/mock/compare/v1.4.1...v1.4.4) +- github.com/google/cadvisor: [v0.38.5 → v0.39.0](https://github.com/google/cadvisor/compare/v0.38.5...v0.39.0) +- github.com/heketi/heketi: [c2e2a4a → v10.2.0+incompatible](https://github.com/heketi/heketi/compare/c2e2a4a...v10.2.0) +- github.com/kisielk/errcheck: [v1.2.0 → v1.5.0](https://github.com/kisielk/errcheck/compare/v1.2.0...v1.5.0) +- github.com/konsorten/go-windows-terminal-sequences: [v1.0.3 → v1.0.2](https://github.com/konsorten/go-windows-terminal-sequences/compare/v1.0.3...v1.0.2) +- github.com/kr/text: [v0.1.0 → v0.2.0](https://github.com/kr/text/compare/v0.1.0...v0.2.0) +- 
github.com/mattn/go-runewidth: [v0.0.2 → v0.0.7](https://github.com/mattn/go-runewidth/compare/v0.0.2...v0.0.7) +- github.com/miekg/dns: [v1.1.4 → v1.1.35](https://github.com/miekg/dns/compare/v1.1.4...v1.1.35) +- github.com/moby/sys/mountinfo: [v0.1.3 → v0.4.0](https://github.com/moby/sys/mountinfo/compare/v0.1.3...v0.4.0) +- github.com/moby/term: [672ec06 → df9cb8a](https://github.com/moby/term/compare/672ec06...df9cb8a) +- github.com/mrunalp/fileutils: [abd8a0e → v0.5.0](https://github.com/mrunalp/fileutils/compare/abd8a0e...v0.5.0) +- github.com/olekukonko/tablewriter: [a0225b3 → v0.0.4](https://github.com/olekukonko/tablewriter/compare/a0225b3...v0.0.4) +- github.com/opencontainers/runc: [v1.0.0-rc92 → v1.0.0-rc93](https://github.com/opencontainers/runc/compare/v1.0.0-rc92...v1.0.0-rc93) +- github.com/opencontainers/runtime-spec: [4d89ac9 → e6143ca](https://github.com/opencontainers/runtime-spec/compare/4d89ac9...e6143ca) +- github.com/opencontainers/selinux: [v1.6.0 → v1.8.0](https://github.com/opencontainers/selinux/compare/v1.6.0...v1.8.0) +- github.com/sergi/go-diff: [v1.0.0 → v1.1.0](https://github.com/sergi/go-diff/compare/v1.0.0...v1.1.0) +- github.com/sirupsen/logrus: [v1.6.0 → v1.7.0](https://github.com/sirupsen/logrus/compare/v1.6.0...v1.7.0) +- github.com/syndtr/gocapability: [d983527 → 42c35b4](https://github.com/syndtr/gocapability/compare/d983527...42c35b4) +- github.com/willf/bitset: [d5bec33 → v1.1.11](https://github.com/willf/bitset/compare/d5bec33...v1.1.11) +- github.com/yuin/goldmark: [v1.1.27 → v1.2.1](https://github.com/yuin/goldmark/compare/v1.1.27...v1.2.1) +- golang.org/x/crypto: 7f63de1 → 5ea612d +- golang.org/x/exp: 6cc2880 → 85be41e +- golang.org/x/mobile: d2bd2a2 → e6ae53a +- golang.org/x/mod: v0.3.0 → ce943fd +- golang.org/x/net: 69a7880 → 3d97a24 +- golang.org/x/sync: cd5d95a → 67f06af +- golang.org/x/sys: 5cba982 → a50acf3 +- golang.org/x/time: 3af7569 → f8bda1e +- golang.org/x/tools: c1934b7 → v0.1.0 +- gopkg.in/check.v1: 41f04d3 → 8fa4692 +- gopkg.in/yaml.v2: v2.2.8 → v2.4.0 +- gotest.tools/v3: v3.0.2 → v3.0.3 +- k8s.io/gengo: 83324d8 → b6c5ce2 +- k8s.io/klog/v2: v2.4.0 → v2.8.0 +- k8s.io/kube-openapi: d219536 → 591a79e +- k8s.io/system-validators: v1.2.0 → v1.4.0 +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.14 → v0.0.15 +- sigs.k8s.io/structured-merge-diff/v4: v4.0.2 → v4.1.0 ### Removed -- github.com/armon/consul-api: [eb2c6b5](https://github.com/armon/consul-api/tree/eb2c6b5) -- github.com/go-ini/ini: [v1.9.0](https://github.com/go-ini/ini/tree/v1.9.0) -- github.com/ugorji/go: [v1.1.4](https://github.com/ugorji/go/tree/v1.1.4) -- github.com/xlab/handysort: [fb3537e](https://github.com/xlab/handysort/tree/fb3537e) -- github.com/xordataexchange/crypt: [b2862e3](https://github.com/xordataexchange/crypt/tree/b2862e3) -- vbom.ml/util: db5cfe1 - - -## Dependencies - -### Added -- cloud.google.com/go/firestore: v1.1.0 -- github.com/Azure/go-autorest: [v14.2.0+incompatible](https://github.com/Azure/go-autorest/tree/v14.2.0) -- github.com/armon/go-metrics: [f0300d1](https://github.com/armon/go-metrics/tree/f0300d1) -- github.com/armon/go-radix: 
[7fddfc3](https://github.com/armon/go-radix/tree/7fddfc3) -- github.com/bketelsen/crypt: [5cbc8cc](https://github.com/bketelsen/crypt/tree/5cbc8cc) -- github.com/form3tech-oss/jwt-go: [v3.2.2+incompatible](https://github.com/form3tech-oss/jwt-go/tree/v3.2.2) -- github.com/fvbommel/sortorder: [v1.0.1](https://github.com/fvbommel/sortorder/tree/v1.0.1) -- github.com/hashicorp/consul/api: [v1.1.0](https://github.com/hashicorp/consul/api/tree/v1.1.0) -- github.com/hashicorp/consul/sdk: [v0.1.1](https://github.com/hashicorp/consul/sdk/tree/v0.1.1) -- github.com/hashicorp/errwrap: [v1.0.0](https://github.com/hashicorp/errwrap/tree/v1.0.0) -- github.com/hashicorp/go-cleanhttp: [v0.5.1](https://github.com/hashicorp/go-cleanhttp/tree/v0.5.1) -- github.com/hashicorp/go-immutable-radix: [v1.0.0](https://github.com/hashicorp/go-immutable-radix/tree/v1.0.0) -- github.com/hashicorp/go-msgpack: [v0.5.3](https://github.com/hashicorp/go-msgpack/tree/v0.5.3) -- github.com/hashicorp/go-multierror: [v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) -- github.com/hashicorp/go-rootcerts: [v1.0.0](https://github.com/hashicorp/go-rootcerts/tree/v1.0.0) -- github.com/hashicorp/go-sockaddr: [v1.0.0](https://github.com/hashicorp/go-sockaddr/tree/v1.0.0) -- github.com/hashicorp/go-uuid: [v1.0.1](https://github.com/hashicorp/go-uuid/tree/v1.0.1) -- github.com/hashicorp/go.net: [v0.0.1](https://github.com/hashicorp/go.net/tree/v0.0.1) -- github.com/hashicorp/logutils: [v1.0.0](https://github.com/hashicorp/logutils/tree/v1.0.0) -- github.com/hashicorp/mdns: [v1.0.0](https://github.com/hashicorp/mdns/tree/v1.0.0) -- github.com/hashicorp/memberlist: [v0.1.3](https://github.com/hashicorp/memberlist/tree/v0.1.3) -- github.com/hashicorp/serf: [v0.8.2](https://github.com/hashicorp/serf/tree/v0.8.2) -- github.com/jmespath/go-jmespath/internal/testify: [v1.5.1](https://github.com/jmespath/go-jmespath/internal/testify/tree/v1.5.1) -- github.com/mitchellh/cli: [v1.0.0](https://github.com/mitchellh/cli/tree/v1.0.0) -- github.com/mitchellh/go-testing-interface: [v1.0.0](https://github.com/mitchellh/go-testing-interface/tree/v1.0.0) -- github.com/mitchellh/gox: [v0.4.0](https://github.com/mitchellh/gox/tree/v0.4.0) -- github.com/mitchellh/iochan: [v1.0.0](https://github.com/mitchellh/iochan/tree/v1.0.0) -- github.com/pascaldekloe/goe: [57f6aae](https://github.com/pascaldekloe/goe/tree/57f6aae) -- github.com/posener/complete: [v1.1.1](https://github.com/posener/complete/tree/v1.1.1) -- github.com/ryanuber/columnize: [9b3edd6](https://github.com/ryanuber/columnize/tree/9b3edd6) -- github.com/sean-/seed: [e2103e2](https://github.com/sean-/seed/tree/e2103e2) -- github.com/subosito/gotenv: [v1.2.0](https://github.com/subosito/gotenv/tree/v1.2.0) -- github.com/willf/bitset: [d5bec33](https://github.com/willf/bitset/tree/d5bec33) -- gopkg.in/ini.v1: v1.51.0 -- gopkg.in/yaml.v3: 9f266ea +- github.com/codegangsta/negroni: [v1.0.0](https://github.com/codegangsta/negroni/tree/v1.0.0) +- github.com/docker/spdystream: 
[449fdfc](https://github.com/docker/spdystream/tree/449fdfc) +- github.com/golangplus/bytes: [45c989f](https://github.com/golangplus/bytes/tree/45c989f) +- github.com/golangplus/fmt: [2a5d6d7](https://github.com/golangplus/fmt/tree/2a5d6d7) +- github.com/gorilla/context: [v1.1.1](https://github.com/gorilla/context/tree/v1.1.1) +- github.com/kr/pty: [v1.1.5](https://github.com/kr/pty/tree/v1.1.5) - rsc.io/quote/v3: v3.1.0 - rsc.io/sampler: v1.3.0 - -### Changed -- cloud.google.com/go/bigquery: v1.0.1 → v1.4.0 -- cloud.google.com/go/datastore: v1.0.0 → v1.1.0 -- cloud.google.com/go/pubsub: v1.0.1 → v1.2.0 -- cloud.google.com/go/storage: v1.0.0 → v1.6.0 -- cloud.google.com/go: v0.51.0 → v0.54.0 -- github.com/Azure/go-autorest/autorest/adal: [v0.8.2 → v0.9.5](https://github.com/Azure/go-autorest/autorest/adal/compare/v0.8.2...v0.9.5) -- github.com/Azure/go-autorest/autorest/date: [v0.2.0 → v0.3.0](https://github.com/Azure/go-autorest/autorest/date/compare/v0.2.0...v0.3.0) -- github.com/Azure/go-autorest/autorest/mocks: [v0.3.0 → v0.4.1](https://github.com/Azure/go-autorest/autorest/mocks/compare/v0.3.0...v0.4.1) -- github.com/Azure/go-autorest/autorest: [v0.9.6 → v0.11.1](https://github.com/Azure/go-autorest/autorest/compare/v0.9.6...v0.11.1) -- github.com/Azure/go-autorest/logger: [v0.1.0 → v0.2.0](https://github.com/Azure/go-autorest/logger/compare/v0.1.0...v0.2.0) -- github.com/Azure/go-autorest/tracing: [v0.5.0 → v0.6.0](https://github.com/Azure/go-autorest/tracing/compare/v0.5.0...v0.6.0) -- github.com/Microsoft/go-winio: [fc70bd9 → v0.4.15](https://github.com/Microsoft/go-winio/compare/fc70bd9...v0.4.15) -- github.com/aws/aws-sdk-go: [v1.28.2 → v1.35.24](https://github.com/aws/aws-sdk-go/compare/v1.28.2...v1.35.24) -- github.com/blang/semver: [v3.5.0+incompatible → v3.5.1+incompatible](https://github.com/blang/semver/compare/v3.5.0...v3.5.1) -- github.com/checkpoint-restore/go-criu/v4: [v4.0.2 → v4.1.0](https://github.com/checkpoint-restore/go-criu/v4/compare/v4.0.2...v4.1.0) -- github.com/containerd/containerd: [v1.3.3 → v1.4.1](https://github.com/containerd/containerd/compare/v1.3.3...v1.4.1) -- github.com/containerd/ttrpc: [v1.0.0 → v1.0.2](https://github.com/containerd/ttrpc/compare/v1.0.0...v1.0.2) -- github.com/containerd/typeurl: [v1.0.0 → v1.0.1](https://github.com/containerd/typeurl/compare/v1.0.0...v1.0.1) -- github.com/coreos/etcd: [v3.3.10+incompatible → v3.3.13+incompatible](https://github.com/coreos/etcd/compare/v3.3.10...v3.3.13) -- github.com/docker/docker: [aa6a989 → bd33bbf](https://github.com/docker/docker/compare/aa6a989...bd33bbf) -- github.com/go-gl/glfw/v3.3/glfw: [12ad95a → 6f7a984](https://github.com/go-gl/glfw/v3.3/glfw/compare/12ad95a...6f7a984) -- github.com/golang/groupcache: [215e871 → 8c9f03a](https://github.com/golang/groupcache/compare/215e871...8c9f03a) -- github.com/golang/mock: [v1.3.1 → v1.4.1](https://github.com/golang/mock/compare/v1.3.1...v1.4.1) -- github.com/golang/protobuf: [v1.4.2 → v1.4.3](https://github.com/golang/protobuf/compare/v1.4.2...v1.4.3) -- github.com/google/cadvisor: [v0.37.0 → v0.38.5](https://github.com/google/cadvisor/compare/v0.37.0...v0.38.5) 
-- github.com/google/go-cmp: [v0.4.0 → v0.5.2](https://github.com/google/go-cmp/compare/v0.4.0...v0.5.2) -- github.com/google/pprof: [d4f498a → 1ebb73c](https://github.com/google/pprof/compare/d4f498a...1ebb73c) -- github.com/google/uuid: [v1.1.1 → v1.1.2](https://github.com/google/uuid/compare/v1.1.1...v1.1.2) -- github.com/gorilla/mux: [v1.7.3 → v1.8.0](https://github.com/gorilla/mux/compare/v1.7.3...v1.8.0) -- github.com/gorilla/websocket: [v1.4.0 → v1.4.2](https://github.com/gorilla/websocket/compare/v1.4.0...v1.4.2) -- github.com/jmespath/go-jmespath: [c2b33e8 → v0.4.0](https://github.com/jmespath/go-jmespath/compare/c2b33e8...v0.4.0) -- github.com/karrick/godirwalk: [v1.7.5 → v1.16.1](https://github.com/karrick/godirwalk/compare/v1.7.5...v1.16.1) -- github.com/opencontainers/go-digest: [v1.0.0-rc1 → v1.0.0](https://github.com/opencontainers/go-digest/compare/v1.0.0-rc1...v1.0.0) -- github.com/opencontainers/runc: [819fcc6 → v1.0.0-rc92](https://github.com/opencontainers/runc/compare/819fcc6...v1.0.0-rc92) -- github.com/opencontainers/runtime-spec: [237cc4f → 4d89ac9](https://github.com/opencontainers/runtime-spec/compare/237cc4f...4d89ac9) -- github.com/opencontainers/selinux: [v1.5.2 → v1.6.0](https://github.com/opencontainers/selinux/compare/v1.5.2...v1.6.0) -- github.com/prometheus/procfs: [v0.1.3 → v0.2.0](https://github.com/prometheus/procfs/compare/v0.1.3...v0.2.0) -- github.com/quobyte/api: [v0.1.2 → v0.1.8](https://github.com/quobyte/api/compare/v0.1.2...v0.1.8) -- github.com/spf13/cobra: [v1.0.0 → v1.1.1](https://github.com/spf13/cobra/compare/v1.0.0...v1.1.1) -- github.com/spf13/viper: [v1.4.0 → v1.7.0](https://github.com/spf13/viper/compare/v1.4.0...v1.7.0) -- github.com/storageos/go-api: [343b3ef → v2.2.0+incompatible](https://github.com/storageos/go-api/compare/343b3ef...v2.2.0) -- github.com/stretchr/testify: [v1.4.0 → v1.6.1](https://github.com/stretchr/testify/compare/v1.4.0...v1.6.1) -- github.com/vishvananda/netns: [52d707b → db3c7e5](https://github.com/vishvananda/netns/compare/52d707b...db3c7e5) -- go.etcd.io/etcd: 17cef6e → dd1b699 -- go.opencensus.io: v0.22.2 → v0.22.3 -- golang.org/x/crypto: 75b2880 → 7f63de1 -- golang.org/x/exp: da58074 → 6cc2880 -- golang.org/x/lint: fdd1cda → 738671d -- golang.org/x/net: ab34263 → 69a7880 -- golang.org/x/oauth2: 858c2ad → bf48bf1 -- golang.org/x/sys: ed371f2 → 5cba982 -- golang.org/x/text: v0.3.3 → v0.3.4 -- golang.org/x/time: 555d28b → 3af7569 -- golang.org/x/xerrors: 9bdfabe → 5ec99f8 -- google.golang.org/api: v0.15.1 → v0.20.0 -- google.golang.org/genproto: cb27e3a → 8816d57 -- google.golang.org/grpc: v1.27.0 → v1.27.1 -- google.golang.org/protobuf: v1.24.0 → v1.25.0 -- honnef.co/go/tools: v0.0.1-2019.2.3 → v0.0.1-2020.1.3 -- k8s.io/gengo: 8167cfd → 83324d8 -- k8s.io/klog/v2: v2.2.0 → v2.4.0 -- k8s.io/kube-openapi: 6aeccd4 → d219536 -- k8s.io/system-validators: v1.1.2 → v1.2.0 -- k8s.io/utils: d5654de → 67b214c -- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.9 → v0.0.14 -- sigs.k8s.io/structured-merge-diff/v4: v4.0.1 → v4.0.2 - -### Removed -- github.com/armon/consul-api: [eb2c6b5](https://github.com/armon/consul-api/tree/eb2c6b5) -- github.com/go-ini/ini: 
[v1.9.0](https://github.com/go-ini/ini/tree/v1.9.0) -- github.com/ugorji/go: [v1.1.4](https://github.com/ugorji/go/tree/v1.1.4) -- github.com/xlab/handysort: [fb3537e](https://github.com/xlab/handysort/tree/fb3537e) -- github.com/xordataexchange/crypt: [b2862e3](https://github.com/xordataexchange/crypt/tree/b2862e3) -- vbom.ml/util: db5cfe1 +- sigs.k8s.io/kustomize: v2.0.3+incompatible -# v1.20.0-rc.0 +# v1.21.0-rc.0 -## Downloads for v1.20.0-rc.0 +## Downloads for v1.21.0-rc.0 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes.tar.gz) | acfee8658831f9503fccda0904798405434f17be7064a361a9f34c6ed04f1c0f685e79ca40cef5fcf34e3193bacbf467665e8dc277e0562ebdc929170034b5ae -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-src.tar.gz) | 9d962f8845e1fa221649cf0c0e178f0f03808486c49ea15ab5ec67861ec5aa948cf18bc0ee9b2067643c8332227973dd592e6a4457456a9d9d80e8ef28d5f7c3 +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes.tar.gz) | ef53a41955d6f8a8d2a94636af98b55d633fb8a5081517559039e019b3dd65c9d10d4e7fa297ab88a7865d772f3eecf72e7b0eeba5e87accb4000c91da33e148 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-src.tar.gz) | 9335a01b50d351776d3b8d00c07a5233844c51d307e361fa7e55a0620c1cb8b699e43eacf45ae9cafd8cbc44752e6987450c528a5bede8204706b7673000b5fc ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-darwin-amd64.tar.gz) | 062b57f1a450fe01d6184f104d81d376bdf5720010412821e315fd9b1b622a400ac91f996540daa66cee172006f3efade4eccc19265494f1a1d7cc9450f0b50a -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-386.tar.gz) | 86e96d2c2046c5e62e02bef30a6643f25e01f1b3eba256cab7dd61252908540c26cb058490e9cecc5a9bad97d2b577f5968884e9f1a90237e302419f39e068bc -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-amd64.tar.gz) | 619d3afb9ce902368390e71633396010e88e87c5fd848e3adc71571d1d4a25be002588415e5f83afee82460f8a7c9e0bd968335277cb8f8cb51e58d4bb43e64e -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-arm.tar.gz) | 60965150a60ab3d05a248339786e0c7da4b89a04539c3719737b13d71302bac1dd9bcaa427d8a1f84a7b42d0c67801dce2de0005e9e47d21122868b32ac3d40f -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-arm64.tar.gz) | 688e064f4ef6a17189dbb5af468c279b9de35e215c40500fb97b1d46692d222747023f9e07a7f7ba006400f9532a8912e69d7c5143f956b1dadca144c67ee711 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-ppc64le.tar.gz) | 47b8abc02b42b3b1de67da184921b5801d7e3cb09befac840c85913193fc5ac4e5e3ecfcb57da6b686ff21af9a3bd42ae6949d4744dbe6ad976794340e328b83 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-s390x.tar.gz) | 971b41d3169f30e6c412e0254c180636abb7ccc8dcee6641b0e9877b69752fc61aa30b76c19c108969df654fe385da3cb3a44dd59d3c28dc45561392d7e08874 -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-windows-386.tar.gz) | 2d34e8387e31531d9aca5655f2f0d18e75b01825dc1c39b7beb73a7b7b610e2ba429e5ca97d5c41a71b67e75e7096c86ab63fda9baab4c0878c1ccb3a1aefac8 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-windows-amd64.tar.gz) | 
f909640f4140693bb871936f10a40e79b43502105d0adb318b35bb7a64a770ad9d05a3a732368ccd3d15d496d75454789165bd1f5c2571da9a00569b3e6c007c +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-darwin-amd64.tar.gz) | 964135e43234cee275c452f5f06fb6d2bcd3cff3211a0d50fa35fff1cc4446bc5a0ac5125405dadcfb6596cb152afe29fabf7aad5b35b100e1288db890b70f8e +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-darwin-arm64.tar.gz) | 50d782abaa4ded5e706b3192d87effa953ceabbd7d91e3d48b0c1fa2206a1963a909c14b923560f5d09cac2c7392edc5f38a13fbf1e9a40bc94e3afe8de10622 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-386.tar.gz) | 72af5562f24184a2d7c27f95fa260470da979fbdcacce39a372f8f3add2991d7af8bc78f4e1dbe7a0f97e3f559b149b72a51491d3b13008da81872ee50f02f37 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-amd64.tar.gz) | 1eddb8f6b51e005bc6f7b519d036cbe3d2f6d97dbf7d212dd933fb56354c29f222d050519115a9bcf94555aef095db7cf763469e47bb4ae3c6c07f97edf437cb +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-arm.tar.gz) | 670f8ca60ea3cf0bb3262a772715e0ea735fccda6a92f3186299361dc455b304ae177d4017e0b67bbfa4a95e36f4cc3f7eb335e2a5130c93ac3fba2aff4519bf +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-arm64.tar.gz) | a69a47907cff138ba393d8c87044fd95d97f3ca8f35d301b50742e2801ad7c229d99d6667971091f65825eb51854d585be0dd7421670110b1aa567e67e7ab4b3 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-ppc64le.tar.gz) | b929feade94b71c81908abdcd4343b1e1e20098fd65e10d4d02585ad649d292d06f52c7ddc349efa188ce5b093e703c7aa9582c6ae5a69699adb87bbf5350243 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-s390x.tar.gz) | 899d1470e412282cf289d8e24806d1a08c62ec0151f345ae3c9e497cc7bc0feab76498de4dd897d6adcdfa0c422e6b1a37e25d928669030f53457fd69d6e7df7 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-windows-386.tar.gz) | 9f0bc90a269eabd06fe4f637b5172a3a6a7d3de26de0d66504c2e1f2093083c584ea39031db6075a7da7a86b98c48bed25aa88d4ac09060b38692c6a5b637078 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-windows-amd64.tar.gz) | 05c8cc10188a1294b0d51d052942742a9b26411a08ec73494bf0e728a8a167e0a7863bdfc8864e76a371b584380098381805341e18b4b283b5d0cf298d5f7c7c ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-amd64.tar.gz) | 0ea4458ae34108c633b4d48f1f128c6274dbc82b613492e78b3e0a2f656ac0df0bb9a75124e15d67c8e81850adcecf19f4ab0234c17247ee7ddf84f2df3e5eaa -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-arm.tar.gz) | aef6a4d457faa29936603370f29a8523bb274211c3cb5101bd31aaf469c91ba6bd149ea99a4ccdd83352cf37e4d6508c5ee475ec10292bccd2f77ceea31e1c28 -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-arm64.tar.gz) | 4829f473e9d60f9929ad17c70fdc2b6b6509ed75418be0b23a75b28580949736cb5b0bd6382070f93aa0a2a8863f0b1596daf965186ca749996c29d03ef7d8b8 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-ppc64le.tar.gz) | 
9ab0790d382a3e28df1c013762c09da0085449cfd09d176d80be932806c24a715ea829be0075c3e221a2ad9cf06e726b4c39ab41987c1fb0fee2563e48206763 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-s390x.tar.gz) | 98670b587e299856dd9821b7517a35f9a65835b915b153de08b66c54d82160438b66f774bf5306c07bc956d70ff709860bc23162225da5e89f995d3fdc1f0122 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-amd64.tar.gz) | 355f278728ef7ac7eb2f5568c99c1429543c6302bbd0ed3bd0378c08116075e56ae850a49241313f078e2392702672ec6c9b70c8d97b4f2f5f4bee36828a63ba +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-arm.tar.gz) | 9ac02c2825e2fd4e92f0c0f67180c67c24e32841ccbabc82284bf6293727ffecfae65e8a42b527c2a7ca482752384928eb65c2a1706144ae7819a6b3a1ab291c +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-arm64.tar.gz) | eb412453da03c82a9248412c8ccf4d4baa1fbfa81edd8d4f81d28969b40a3727e18934accc68f643d253446c58ffd2623292402495480b3d4b2a837b5318b957 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-ppc64le.tar.gz) | 07da2812c35bbc427ee5b4a0b601c3ae271e0d50ab0dd4c5c25399f43506fa2a187642eb9d4d2085df7b90264d48ea2f31088af87d9efa7eb2e87f91e1fdbde4 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-s390x.tar.gz) | 3b79442a3d6e389c4ff105922a8e49994c0b6c088d2c501bd8c78d9f9e814902f5bb72c8f9c89380b750fda9b3a336759b9b68f11d70bef4f0e984564a95c29e ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-amd64.tar.gz) | 699e9c8d1837198312eade8eb6fec390f6a2fea9e08207d2f58e8bb6e3e799028aca69e4670aac0a4ba7cf0af683aee2c158bf78cc520c80edc876c8d94d521a -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-arm.tar.gz) | f3b5eab0669490e3cd7e802693daf3555d08323dfff6e73a881fce00fed4690e8bdaf1610278d9de74036ca37631016075e5695a02158b7d3e7582b20ef7fa35 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-arm64.tar.gz) | e5012f77363561a609aaf791baaa17d09009819c4085a57132e5feb5366275a54640094e6ed1cba527f42b586c6d62999c2a5435edf5665ff0e114db4423c2ae -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-ppc64le.tar.gz) | 2a6d6501620b1a9838dff05c66a40260cc22154a28027813346eb16e18c386bc3865298a46a0f08da71cd55149c5e7d07c4c4c431b4fd231486dd9d716548adb -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-s390x.tar.gz) | 5eca02777519e31428a1e5842fe540b813fb8c929c341bbc71dcfd60d98deb89060f8f37352e8977020e21e053379eead6478eb2d54ced66fb9d38d5f3142bf0 -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-windows-amd64.tar.gz) | 8ace02e7623dff894e863a2e0fa7dfb916368431d1723170713fe82e334c0ae0481b370855b71e2561de0fb64fed124281be604761ec08607230b66fb9ed1c03 +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-amd64.tar.gz) | f12edf1faf5f07de1ebc5a8626601c12927902e10aca3f11e398637382fdf55365dbd9a0ef38858553fb7569495ae2cf68f155dd2e49b85b27d76fb599bb92e4 +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-arm.tar.gz) | 4fba8fc4e2102f07fb778aab597ec7231ea65c35e1aa618fe98b707b64a931237bd842c173e9120326e4d9deb983bb3917176762bba2212612bbc09d6e2105c4 
+[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-arm64.tar.gz) | a2e1be5459a8346839970faf4e7ebdb8ab9f3273e02babf1f3199b06bdb67434a2d18fcd1628cf1b989756e99d8dad6624a455b9db11d50f51f509f4df5c27da +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-ppc64le.tar.gz) | 16d2c1cc295474fc49fe9a827ddd73e81bdd6b76af7074987b90250023f99b6d70bf474e204c7d556802111984fcb3a330740b150bdc7970d0e3634eb94a1665 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-s390x.tar.gz) | 9dc6faa6cd007b13dfce703f3e271f80adcc4e029c90a4a9b4f2f143b9756f2893f8af3d7c2cf813f2bd6731cffd87d15d4229456c1685939f65bf467820ec6e +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-windows-amd64.tar.gz) | f8bac2974c9142bfb80cd5eadeda79f79f27b78899a4e6e71809b795c708824ba442be83fdbadb98e01c3823dd8350776358258a205e851ed045572923cacba7 -## Changelog since v1.20.0-beta.2 +## Changelog since v1.21.0-beta.1 +## Urgent Upgrade Notes + +### (No, really, you MUST read this before you upgrade) + + - Migrated pkg/kubelet/cm/cpuset/cpuset.go to structured logging. Exit code changed from 255 to 1. ([#100007](https://github.com/kubernetes/kubernetes/pull/100007), [@utsavoza](https://github.com/utsavoza)) [SIG Instrumentation and Node] + ## Changes by Kind -### Feature +### API Change -- Kubernetes is now built using go1.15.5 - - build: Update to k/repo-infra@v0.1.2 (supports go1.15.5) ([#95776](https://github.com/kubernetes/kubernetes/pull/95776), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing] +- Add Probe-level terminationGracePeriodSeconds field ([#99375](https://github.com/kubernetes/kubernetes/pull/99375), [@ehashman](https://github.com/ehashman)) [SIG API Machinery, Apps, Node and Testing] +- CSIServiceAccountToken is Beta now ([#99298](https://github.com/kubernetes/kubernetes/pull/99298), [@zshihang](https://github.com/zshihang)) [SIG Auth, Storage and Testing] +- Discovery.k8s.io/v1beta1 EndpointSlices are deprecated in favor of discovery.k8s.io/v1, and will no longer be served in Kubernetes v1.25. ([#100472](https://github.com/kubernetes/kubernetes/pull/100472), [@liggitt](https://github.com/liggitt)) [SIG Network] +- FieldManager no longer owns fields that get reset before the object is persisted (e.g. "status wiping"). ([#99661](https://github.com/kubernetes/kubernetes/pull/99661), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery, Auth and Testing] +- Generic ephemeral volumes are beta. ([#99643](https://github.com/kubernetes/kubernetes/pull/99643), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, CLI, Node, Storage and Testing] +- Implement the GetAvailableResources in the podresources API. ([#95734](https://github.com/kubernetes/kubernetes/pull/95734), [@fromanirh](https://github.com/fromanirh)) [SIG Instrumentation, Node and Testing] +- The Endpoints controller will now set the `endpoints.kubernetes.io/over-capacity` annotation to "warning" when an Endpoints resource contains more than 1000 addresses. In a future release, the controller will truncate Endpoints that exceed this limit. The EndpointSlice API can be used to support significantly larger number of addresses. 
([#99975](https://github.com/kubernetes/kubernetes/pull/99975), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- The PodDisruptionBudget API has been promoted to policy/v1 with no schema changes. The only functional change is that an empty selector (`{}`) written to a policy/v1 PodDisruptionBudget now selects all pods in the namespace (see the client-go sketch after this list). The behavior of the policy/v1beta1 API remains unchanged. The policy/v1beta1 PodDisruptionBudget API is deprecated and will no longer be served in 1.25+. ([#99290](https://github.com/kubernetes/kubernetes/pull/99290), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Scheduling and Testing] +- Topology Aware Hints are now available in alpha and can be enabled with the `TopologyAwareHints` feature gate. ([#99522](https://github.com/kubernetes/kubernetes/pull/99522), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, Auth, Instrumentation, Network and Testing]
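To make the probe and PodDisruptionBudget API changes above concrete, here is a minimal client-go sketch, not taken from the release itself. It assumes the 1.21-era modules (k8s.io/api and k8s.io/client-go at v0.21, where the `PolicyV1()` client and the probe-level field first appear); the namespace, object names, kubeconfig path, and numeric values are invented for illustration.

```go
// Hypothetical sketch of two v1.21 API changes; names and values are assumptions.
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/utils/pointer"
)

// livenessWithProbeGrace builds a liveness probe using the new Probe-level
// terminationGracePeriodSeconds field (alpha in 1.21, behind the
// ProbeTerminationGracePeriod feature gate). On probe failure it overrides
// the pod-level grace period, so a stuck container is killed after 10s.
func livenessWithProbeGrace() corev1.Probe {
	return corev1.Probe{
		Handler: corev1.Handler{ // the field is named Handler in the 1.21 core/v1 types
			HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
		},
		FailureThreshold:              3,
		TerminationGracePeriodSeconds: pointer.Int64Ptr(10),
	}
}

// createNamespacePDB creates a policy/v1 PodDisruptionBudget whose empty
// selector covers every pod in the namespace -- the one functional change
// relative to policy/v1beta1, where an empty selector matched no pods.
func createNamespacePDB(ctx context.Context, cs kubernetes.Interface, ns string) error {
	minAvailable := intstr.FromInt(1)
	pdb := &policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "all-pods-pdb", Namespace: ns},
		Spec: policyv1.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
			Selector:     &metav1.LabelSelector{}, // empty selector: all pods in the namespace
		},
	}
	_, err := cs.PolicyV1().PodDisruptionBudgets(ns).Create(ctx, pdb, metav1.CreateOptions{})
	return err
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	if err := createNamespacePDB(context.Background(), cs, "default"); err != nil {
		panic(err)
	}
	fmt.Printf("probe with per-probe grace period: %+v\n", livenessWithProbeGrace())
}
```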
-### Failing Test +### Feature -- Resolves an issue running Ingress conformance tests on clusters which use finalizers on Ingress objects to manage releasing load balancer resources ([#96742](https://github.com/kubernetes/kubernetes/pull/96742), [@spencerhance](https://github.com/spencerhance)) [SIG Network and Testing] -- The Conformance test "validates that there is no conflict between pods with same hostPort but different hostIP and protocol" now validates the connectivity to each hostPort, in addition to the functionality. ([#96627](https://github.com/kubernetes/kubernetes/pull/96627), [@aojea](https://github.com/aojea)) [SIG Scheduling and Testing] +- Add e2e test to validate performance metrics of volume lifecycle operations ([#94334](https://github.com/kubernetes/kubernetes/pull/94334), [@RaunakShah](https://github.com/RaunakShah)) [SIG Storage and Testing] +- EmptyDir memory-backed volumes are sized as the minimum of pod allocatable memory on a host and an optional explicit user-provided value. ([#100319](https://github.com/kubernetes/kubernetes/pull/100319), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node] +- Enables Kubelet to check volume condition and log events to corresponding pods. ([#99284](https://github.com/kubernetes/kubernetes/pull/99284), [@fengzixu](https://github.com/fengzixu)) [SIG Apps, Instrumentation, Node and Storage] +- Introduce a churn operator to scheduler perf testing framework. ([#98900](https://github.com/kubernetes/kubernetes/pull/98900), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing] +- Kubernetes is now built with Golang 1.16.1 ([#100106](https://github.com/kubernetes/kubernetes/pull/100106), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing] +- Migrated pkg/kubelet/cm/devicemanager to structured logging (see the klog sketch after this list) ([#99976](https://github.com/kubernetes/kubernetes/pull/99976), [@knabben](https://github.com/knabben)) [SIG Instrumentation and Node] +- Migrated pkg/kubelet/cm/memorymanager to structured logging ([#99974](https://github.com/kubernetes/kubernetes/pull/99974), [@knabben](https://github.com/knabben)) [SIG Instrumentation and Node] +- Migrated pkg/kubelet/cm/topologymanager to structured logging ([#99969](https://github.com/kubernetes/kubernetes/pull/99969), [@knabben](https://github.com/knabben)) [SIG Instrumentation and Node] +- Rename metrics `etcd_object_counts` to `apiserver_storage_object_counts` and mark it as stable. The original `etcd_object_counts` metric name is marked as "Deprecated" and will be removed in the future. ([#99785](https://github.com/kubernetes/kubernetes/pull/99785), [@erain](https://github.com/erain)) [SIG API Machinery, Instrumentation and Testing] +- Update pause container to run as pseudo user and group `65535:65535`. This implies the release of version 3.5 of the container images. ([#97963](https://github.com/kubernetes/kubernetes/pull/97963), [@saschagrunert](https://github.com/saschagrunert)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Node, Release, Security and Testing] +- Users might specify the `kubectl.kubernetes.io/default-exec-container` annotation in a Pod to preselect a container for kubectl commands. ([#99833](https://github.com/kubernetes/kubernetes/pull/99833), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG CLI]
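Several entries above (and more in the Other section below) migrate kubelet packages from printf-style klog calls to structured logging. As a rough sketch of what that migration looks like, assuming nothing beyond the public k8s.io/klog/v2 API; the message text, keys, and pod/node names are invented here:

```go
// Hypothetical before/after sketch of a structured-logging migration.
package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	podName, nodeName := "nginx-6799fc88d8-xkzrm", "node-1"

	// Before: printf-style logging; the values are baked into the string.
	klog.Infof("Pod %s scheduled to node %s", podName, nodeName)

	// After: a constant message plus key=value pairs that log backends can
	// index and query without parsing free-form text.
	klog.InfoS("Pod scheduled", "pod", podName, "node", nodeName)

	// Errors travel as a first-class argument instead of a %v verb.
	klog.ErrorS(errors.New("image pull backoff"), "Pod failed to start", "pod", podName)

	klog.Flush()
}
```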
### Bug or Regression -- Bump node-problem-detector version to v0.8.5 to fix OOM detection in with Linux kernels 5.1+ ([#96716](https://github.com/kubernetes/kubernetes/pull/96716), [@tosi3k](https://github.com/tosi3k)) [SIG Cloud Provider, Scalability and Testing] -- Changes to timeout parameter handling in 1.20.0-beta.2 have been reverted to avoid breaking backwards compatibility with existing clients. ([#96727](https://github.com/kubernetes/kubernetes/pull/96727), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Testing] -- Duplicate owner reference entries in create/update/patch requests now get deduplicated by the API server. The client sending the request now receives a warning header in the API response. Clients should stop sending requests with duplicate owner references. The API server may reject such requests as early as 1.24. ([#96185](https://github.com/kubernetes/kubernetes/pull/96185), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery and Testing] -- Fix: resize Azure disk issue when it's in attached state ([#96705](https://github.com/kubernetes/kubernetes/pull/96705), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fixed a bug where aggregator_unavailable_apiservice metrics were reported for deleted apiservices. ([#96421](https://github.com/kubernetes/kubernetes/pull/96421), [@dgrisonnet](https://github.com/dgrisonnet)) [SIG API Machinery and Instrumentation] -- Fixes code generation for non-namespaced create subresources fake client test. ([#96586](https://github.com/kubernetes/kubernetes/pull/96586), [@Doude](https://github.com/Doude)) [SIG API Machinery] -- HTTP/2 connection health check is enabled by default in all Kubernetes clients. The feature should work out-of-the-box. If needed, users can tune the feature via the HTTP2_READ_IDLE_TIMEOUT_SECONDS and HTTP2_PING_TIMEOUT_SECONDS environment variables. The feature is disabled if HTTP2_READ_IDLE_TIMEOUT_SECONDS is set to 0. ([#95981](https://github.com/kubernetes/kubernetes/pull/95981), [@caesarxuchao](https://github.com/caesarxuchao)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] -- Kubeadm: fix coredns migration should be triggered when there are newdefault configs during kubeadm upgrade ([#96907](https://github.com/kubernetes/kubernetes/pull/96907), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] -- Reduce volume name length for vsphere volumes ([#96533](https://github.com/kubernetes/kubernetes/pull/96533), [@gnufied](https://github.com/gnufied)) [SIG Storage] -- Resolves a regression in 1.19+ with workloads targeting deprecated beta os/arch labels getting stuck in NodeAffinity status on node startup. ([#96810](https://github.com/kubernetes/kubernetes/pull/96810), [@liggitt](https://github.com/liggitt)) [SIG Node] +- Add ability to skip OpenAPI handler installation to the GenericAPIServer ([#100341](https://github.com/kubernetes/kubernetes/pull/100341), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery] +- Count pod overhead against an entity's ResourceQuota ([#99600](https://github.com/kubernetes/kubernetes/pull/99600), [@gjkim42](https://github.com/gjkim42)) [SIG API Machinery and Node] +- EndpointSlice controllers are less likely to create duplicate EndpointSlices. ([#100103](https://github.com/kubernetes/kubernetes/pull/100103), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- Ensure only one LoadBalancer rule is created when HA mode is enabled ([#99825](https://github.com/kubernetes/kubernetes/pull/99825), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- Fixed a race condition on API server startup ensuring previously created webhook configurations are effective before the first write request is admitted. ([#95783](https://github.com/kubernetes/kubernetes/pull/95783), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] +- Fixed authentication_duration_seconds metric. Previously it included whole apiserver request duration. ([#99944](https://github.com/kubernetes/kubernetes/pull/99944), [@marseel](https://github.com/marseel)) [SIG API Machinery, Instrumentation and Scalability] +- Fixes issue where inline AzureFile secrets could not be accessed from the pod's namespace.
([#100563](https://github.com/kubernetes/kubernetes/pull/100563), [@msau42](https://github.com/msau42)) [SIG Storage] +- Improve speed of vSphere PV provisioning and reduce number of API calls ([#100054](https://github.com/kubernetes/kubernetes/pull/100054), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] +- Kubectl: Fixed panic when describing an ingress backend without an API Group ([#100505](https://github.com/kubernetes/kubernetes/pull/100505), [@lauchokyip](https://github.com/lauchokyip)) [SIG CLI] +- Kubectl: fix case of age column in describe node ([#96963](https://github.com/kubernetes/kubernetes/pull/96963), [@bl-ue](https://github.com/bl-ue)) [SIG CLI] +- Kubelet.exe on Windows now checks that the process is running as administrator and that the executing user account is listed in the built-in administrators group. This is equivalent to checking that the process is running as uid 0. ([#96616](https://github.com/kubernetes/kubernetes/pull/96616), [@perithompson](https://github.com/perithompson)) [SIG Node and Windows] +- Kubelet: Fixed the bug of getting the number of CPUs on Windows when the number of logical processors is more than 64 ([#97378](https://github.com/kubernetes/kubernetes/pull/97378), [@hwdef](https://github.com/hwdef)) [SIG Node and Windows] +- Pass `KUBE_BUILD_CONFORMANCE=y` to the package-tarballs to re-enable building the conformance tarballs. ([#100571](https://github.com/kubernetes/kubernetes/pull/100571), [@puerco](https://github.com/puerco)) [SIG Release] +- Pod log stats for Windows now report metrics ([#99221](https://github.com/kubernetes/kubernetes/pull/99221), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node, Storage, Testing and Windows] + +### Other (Cleanup or Flake) + +- A new storage E2E testsuite covers CSIStorageCapacity publishing if a driver opts into the test. ([#100537](https://github.com/kubernetes/kubernetes/pull/100537), [@pohly](https://github.com/pohly)) [SIG Storage and Testing] +- Convert cmd/kubelet/app/server.go to structured logging ([#98334](https://github.com/kubernetes/kubernetes/pull/98334), [@wawa0210](https://github.com/wawa0210)) [SIG Node] +- If the kube-apiserver has the goaway feature enabled, clients require golang 1.15.8 or 1.16+ to avoid an unexpected data race issue. ([#98809](https://github.com/kubernetes/kubernetes/pull/98809), [@answer1991](https://github.com/answer1991)) [SIG API Machinery] +- Increased CSINodeIDMaxLength from 128 bytes to 192 bytes.
([#98753](https://github.com/kubernetes/kubernetes/pull/98753), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps and Storage] +- Migrate `pkg/kubelet/pluginmanager` to structured logging ([#99885](https://github.com/kubernetes/kubernetes/pull/99885), [@qingwave](https://github.com/qingwave)) [SIG Node] +- Migrate `pkg/kubelet/preemption/preemption.go` and `pkg/kubelet/logs/container_log_manager.go` to structured logging ([#99848](https://github.com/kubernetes/kubernetes/pull/99848), [@qingwave](https://github.com/qingwave)) [SIG Node] +- Migrate `pkg/kubelet/(cri)` to structured logging ([#99006](https://github.com/kubernetes/kubernetes/pull/99006), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node] +- Migrate `pkg/kubelet/(node, pod)` to structured logging ([#98847](https://github.com/kubernetes/kubernetes/pull/98847), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node] +- Migrate `pkg/kubelet/(volume,container)` to structured logging ([#98850](https://github.com/kubernetes/kubernetes/pull/98850), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node] +- Migrate `pkg/kubelet/kubelet_node_status.go` to structured logging ([#98154](https://github.com/kubernetes/kubernetes/pull/98154), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node and Release] +- Migrate `pkg/kubelet/lifecycle,oom` to structured logging ([#99479](https://github.com/kubernetes/kubernetes/pull/99479), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG Instrumentation and Node] +- Migrate cmd/kubelet, pkg/kubelet/cadvisor/cadvisor_linux.go, pkg/kubelet/cri/remote/util/util_unix.go and pkg/kubelet/images/image_manager.go to structured logging ([#99994](https://github.com/kubernetes/kubernetes/pull/99994), [@AfrouzMashayekhi](https://github.com/AfrouzMashayekhi)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/cm/container_manager_linux.go and pkg/kubelet/cm/container_manager_stub.go to structured logging ([#100001](https://github.com/kubernetes/kubernetes/pull/100001), [@shiyajuan123](https://github.com/shiyajuan123)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/cm/cpumanager/{topology/topology.go, policy_none.go, cpu_assignment.go} to structured logging ([#100163](https://github.com/kubernetes/kubernetes/pull/100163), [@lala123912](https://github.com/lala123912)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/cm/cpumanager/state to structured logging ([#99563](https://github.com/kubernetes/kubernetes/pull/99563), [@jmguzik](https://github.com/jmguzik)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/config to structured logging ([#100002](https://github.com/kubernetes/kubernetes/pull/100002), [@AfrouzMashayekhi](https://github.com/AfrouzMashayekhi)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/kubelet.go to structured logging ([#99861](https://github.com/kubernetes/kubernetes/pull/99861), [@navidshaikh](https://github.com/navidshaikh)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/kubeletconfig to structured logging ([#100265](https://github.com/kubernetes/kubernetes/pull/100265), [@ehashman](https://github.com/ehashman)) [SIG Node] +- Migrate pkg/kubelet/kuberuntime to structured logging
([#99970](https://github.com/kubernetes/kubernetes/pull/99970), [@krzysiekg](https://github.com/krzysiekg)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/prober to structured logging ([#99830](https://github.com/kubernetes/kubernetes/pull/99830), [@krzysiekg](https://github.com/krzysiekg)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/winstats to structured logging ([#99855](https://github.com/kubernetes/kubernetes/pull/99855), [@hexxdump](https://github.com/hexxdump)) [SIG Instrumentation and Node] +- Migrate probe log messages to structured logging ([#97093](https://github.com/kubernetes/kubernetes/pull/97093), [@aldudko](https://github.com/aldudko)) [SIG Instrumentation and Node] +- Migrate remaining kubelet files to structured logging ([#100196](https://github.com/kubernetes/kubernetes/pull/100196), [@ehashman](https://github.com/ehashman)) [SIG Instrumentation and Node] +- `apiserver_storage_objects` (a newer version of `etcd_object_counts`) is promoted and marked as stable. ([#100082](https://github.com/kubernetes/kubernetes/pull/100082), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing] ## Dependencies @@ -967,411 +685,411 @@ filename | sha512 hash _Nothing has changed._ ### Changed -- github.com/google/cadvisor: [v0.38.4 → v0.38.5](https://github.com/google/cadvisor/compare/v0.38.4...v0.38.5) +- github.com/cilium/ebpf: [1c8d4c9 → v0.2.0](https://github.com/cilium/ebpf/compare/1c8d4c9...v0.2.0) +- github.com/containerd/console: [v1.0.0 → v1.0.1](https://github.com/containerd/console/compare/v1.0.0...v1.0.1) +- github.com/containerd/containerd: [v1.4.1 → v1.4.4](https://github.com/containerd/containerd/compare/v1.4.1...v1.4.4) +- github.com/creack/pty: [v1.1.9 → v1.1.11](https://github.com/creack/pty/compare/v1.1.9...v1.1.11) +- github.com/docker/docker: [bd33bbf → v20.10.2+incompatible](https://github.com/docker/docker/compare/bd33bbf...v20.10.2) +- github.com/google/cadvisor: [v0.38.8 → v0.39.0](https://github.com/google/cadvisor/compare/v0.38.8...v0.39.0) +- github.com/konsorten/go-windows-terminal-sequences: [v1.0.3 → v1.0.2](https://github.com/konsorten/go-windows-terminal-sequences/compare/v1.0.3...v1.0.2) +- github.com/moby/sys/mountinfo: [v0.1.3 → v0.4.0](https://github.com/moby/sys/mountinfo/compare/v0.1.3...v0.4.0) +- github.com/moby/term: [672ec06 → df9cb8a](https://github.com/moby/term/compare/672ec06...df9cb8a) +- github.com/mrunalp/fileutils: [abd8a0e → v0.5.0](https://github.com/mrunalp/fileutils/compare/abd8a0e...v0.5.0) +- github.com/opencontainers/runc: [v1.0.0-rc92 → v1.0.0-rc93](https://github.com/opencontainers/runc/compare/v1.0.0-rc92...v1.0.0-rc93) +- github.com/opencontainers/runtime-spec: [4d89ac9 → e6143ca](https://github.com/opencontainers/runtime-spec/compare/4d89ac9...e6143ca) +- github.com/opencontainers/selinux: [v1.6.0 → v1.8.0](https://github.com/opencontainers/selinux/compare/v1.6.0...v1.8.0) +- github.com/sirupsen/logrus: [v1.6.0 → v1.7.0](https://github.com/sirupsen/logrus/compare/v1.6.0...v1.7.0) +- github.com/syndtr/gocapability: [d983527 → 42c35b4](https://github.com/syndtr/gocapability/compare/d983527...42c35b4) +- 
github.com/willf/bitset: [d5bec33 → v1.1.11](https://github.com/willf/bitset/compare/d5bec33...v1.1.11) +- gotest.tools/v3: v3.0.2 → v3.0.3 +- k8s.io/klog/v2: v2.5.0 → v2.8.0 +- sigs.k8s.io/structured-merge-diff/v4: v4.0.3 → v4.1.0 ### Removed _Nothing has changed._ -# v1.20.0-beta.2 +# v1.21.0-beta.1 -## Downloads for v1.20.0-beta.2 +## Downloads for v1.21.0-beta.1 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes.tar.gz) | fe769280aa623802a949b6a35fbddadbba1d6f9933a54132a35625683719595ecf58096a9aa0f7456f8d4931774df21bfa98e148bc3d85913f1da915134f77bd -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-src.tar.gz) | ce1c8d97c52e5189af335d673bd7e99c564816f6adebf249838f7e3f0e920f323b4e398a5d163ea767091497012ec38843c59ff14e6fdd07683b682135eed645 +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes.tar.gz) | c9f4f25242e319e5d90f49d26f239a930aad69677c0f3c2387c56bb13482648a26ed234be2bfe2352508f35010e3eb6d3b127c31a9f24fa1e53ac99c38520fe4 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-src.tar.gz) | 255357db8fa160cab2187658906b674a8b0d9b9a5b5f688cc7b69dc124f5da00362c6cc18ae9b80f7ddb3da6f64c2ab2f12fb9b63a4e063c7366a5375b175cda ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-darwin-amd64.tar.gz) | d6c14bd0f6702f4bbdf14a6abdfa4e5936de5b4efee38aa86c2bd7272967ec6d7868b88fc00ad4a7c3a20717a35e6be2b84e56dec04154fd702315f641409f7c -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-386.tar.gz) | b923c44cb0acb91a8f6fd442c2168aa6166c848f5d037ce50a7cb11502be3698db65836b373c916f75b648d6ac8d9158807a050eecc4e1c77cffa25b386c8cdb -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-amd64.tar.gz) | 8cae14146a9034dcd4e9d69d5d700f195a77aac35f629a148960ae028ed8b4fe12213993fe3e6e464b4b3e111adebe6f3dd7ca0accc70c738ed5cfd8993edd7c -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-arm.tar.gz) | 1f54e5262a0432945ead57fcb924e6bfedd9ea76db1dd9ebd946787a2923c247cf16e10505307b47e365905a1b398678dac5af0f433c439c158a33e08362d97b -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-arm64.tar.gz) | 31cf79c01e4878a231b4881fe3ed5ef790bd5fb5419388438d3f8c6a2129e655aba9e00b8e1d77e0bc5d05ecc75cf4ae02cf8266788822d0306c49c85ee584ed -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-ppc64le.tar.gz) | 2527948c40be2e16724d939316ad5363f15aa22ebf42d59359d8b6f757d30cfef6447434cc93bc5caa5a23a6a00a2da8d8191b6441e06bba469d9d4375989a97 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-s390x.tar.gz) | b777ad764b3a46651ecb0846e5b7f860bb2c1c4bd4d0fcc468c6ccffb7d3b8dcb6dcdd73b13c16ded7219f91bba9f1e92f9258527fd3bb162b54d7901ac303ff -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-windows-386.tar.gz) | 8a2f58aaab01be9fe298e4d01456536047cbdd39a37d3e325c1f69ceab3a0504998be41a9f41a894735dfc4ed22bed02591eea5f3c75ce12d9e95ba134e72ec5 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-windows-amd64.tar.gz) | 2f69cda177a178df149f5de66b7dba7f5ce14c1ffeb7c8d7dc4130c701b47d89bb2fbe74e7a262f573e4d21dee2c92414d050d7829e7c6fc3637a9d6b0b9c5c1 
+[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-darwin-amd64.tar.gz) | 02efd389c8126456416fd2c7ea25c3cc30f612649ad91f631f068d6c0e5e539484d3763cb9a8645ad6b8077e4fcd1552a659d7516ebc4ce6828cf823b65c3016 +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-darwin-arm64.tar.gz) | ac90dcd1699d1d7ff9c8342d481f6d0d97ccdc3ec501a56dc7c9e1898a8f77f712bf66942d304bfe581b5494f13e3efa211865de88f89749780e9e26e673dbdb +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-386.tar.gz) | cce5fb84cc7a1ee664f89d8ad3064307c51c044e9ddd2ae5a004939b69d3b3ef6f29acc5782e27d0c8f0d6d3d9c96e922f5d1b99d210ca3e754666d775df9f0c +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-amd64.tar.gz) | 2e93bbd2e60ad7cd8fe495115e96c55b1dc8facd100a827ef9c197a732679b60cceb9ea7bf92a1f5e328c3b8adfa8d3922cbc5d8370e374f3381b83f5b877b4f +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-arm.tar.gz) | 23f03b6a8fa9decce9b89a2c1bd3dae6d0b2f9e533e35a79e2c5a29326a165259677594ae83c877219a21bdb95557a284e55f4eec12954742794579c89a7d7e5 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-arm64.tar.gz) | 3acf3101b46568b0ded6b90f13df0e918870d6812dc1a584903ddb8ba146484a204b9e442f863df47c7d4dab043fd9f7294c5510d3eb09004993d6d3b1e9e13c +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-ppc64le.tar.gz) | f749198df69577f62872d3096138a1b8969ec6b1636eb68eb56640bf33cf5f97a11df4363462749a1c0dc3ccbb8ae76c5d66864bf1c5cf7e52599caaf498e504 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-s390x.tar.gz) | 3f6c0189d59fca22cdded3a02c672ef703d17e6ab0831e173a870e14ccec436c142600e9fc35b403571b6906f2be8d18d38d33330f7caada971bbe1187b388f6 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-windows-386.tar.gz) | 03d92371c425cf331c80807c0ac56f953be304fc6719057258a363d527d186d610e1d4b4d401b34128062983265c2e21f2d2389231aa66a6f5787eee78142cf6 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-windows-amd64.tar.gz) | 489ece0c886a025ca3a25d28518637a5a824ea6544e7ef8778321036f13c8909a978ad4ceca966cec1e1cda99f25ca78bfd37460d1231c77436d216d43c872ad ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-amd64.tar.gz) | 3ecaac0213d369eab691ac55376821a80df5013cb12e1263f18d1c236a9e49d42b3cea422175556d8f929cdf3109b22c0b6212ac0f2e80cc7a5f4afa3aba5f24 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-arm.tar.gz) | 580030b57ff207e177208fec0801a43389cae10cc2c9306327d354e7be6a055390184531d54b6742e0983550b7a76693cc4a705c2d2f4ac30495cf63cef26b9b -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-arm64.tar.gz) | 3e3286bd54671549fbef0dfdaaf1da99bc5c3efb32cc8d1e1985d9926520cea0c43bcf7cbcbbc8b1c1a95eab961255693008af3bb1ba743362998b5f0017d6d7 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-ppc64le.tar.gz) | 9fa051e7e97648e97e26b09ab6d26be247b41b1a5938d2189204c9e6688e455afe76612bbcdd994ed5692935d0d960bd96dc222bce4b83f61d62557752b9d75b 
-[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-s390x.tar.gz) | fa85d432eff586f30975c95664ac130b9f5ae02dc52b97613ed7a41324496631ea11d1a267daba564cf2485a9e49707814d86bbd3175486c7efc8b58a9314af5 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-amd64.tar.gz) | 2e95cb31d5afcb6842c41d25b7d0c18dd7e65693b2d93c8aa44e5275f9c6201e1a67685c7a8ddefa334babb04cb559d26e39b6a18497695a07dc270568cae108 +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-arm.tar.gz) | 2927e82b98404c077196ce3968f3afd51a7576aa56d516019bd3976771c0213ba01e78da5b77478528e770da0d334e9457995fafb98820ed68b2ee34beb68856 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-arm64.tar.gz) | e0f7aea3ea598214a9817bc04949389cb7e4e7b9503141a590ef48c0b681fe44a4243ebc6280752fa41aa1093149b3ee1bcef7664edb746097a342281825430b +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-ppc64le.tar.gz) | c011f7eb01294e9ba5d5ced719068466f88ed595dcb8d554a36a4dd5118fb6b3d6bafe8bf89aa2d42988e69793ed777ba77b8876c6ec74f898a43cfce1f61bf4 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-s390x.tar.gz) | 15f6683e7f16caab7eebead2b7c15799460abbf035a43de0b75f96b0be19908f58add98a777a0cca916230d60cf6bfe3fee92b9dcff50274b1e37c243c157969 ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-amd64.tar.gz) | 86e631f95fe670b467ead2b88d34e0364eaa275935af433d27cc378d82dcaa22041ccce40f5fa9561b9656dadaa578dc018ad458a59b1690d35f86dca4776b5c -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-arm.tar.gz) | a8754ff58a0e902397056b8615ab49af07aca347ba7cc4a812c238e3812234862270f25106b6a94753b157bb153b8eae8b39a01ed67384774d798598c243583b -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-arm64.tar.gz) | 28d727d7d08e2c856c9b4a574ef2dbf9e37236a0555f7ec5258b4284fa0582fb94b06783aaf50bf661f7503d101fbd70808aba6de02a2f0af94db7d065d25947 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-ppc64le.tar.gz) | a1283449f1a0b155c11449275e9371add544d0bdd4609d6dc737ed5f7dd228e84e24ff249613a2a153691627368dd894ad64f4e6c0010eecc6efd2c13d4fb133 -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-s390x.tar.gz) | 5806028ba15a6a9c54a34f90117bc3181428dbb0e7ced30874c9f4a953ea5a0e9b2c73e6b1e2545e1b4e5253e9c7691588538b44cdfa666ce6865964b92d2fa8 -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-windows-amd64.tar.gz) | d5327e3b7916c78777b9b69ba0f3758c3a8645c67af80114a0ae52babd7af27bb504febbaf51b1bfe5bd2d74c8c5c573471e1cb449f2429453f4b1be9d5e682a +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-amd64.tar.gz) | ed58679561197110f366b9109f7afd62c227bfc271918ccf3eea203bb2ab6428eb5db4dd6c965f202a8a636f66da199470269b863815809b99d53d2fa47af2ea +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-arm.tar.gz) | 7e6c7f1957fcdecec8fef689c5019edbc0d0c11d22dafbfef0a07121d10d8f6273644f73511bd06a9a88b04d81a940bd6645ffb5711422af64af547a45c76273 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-arm64.tar.gz) | 
a3618f29967e7a1574917a67f0296e65780321eda484b99aa32bfd4dc9b35acdefce33da952ac52dfb509fbac5bf700cf177431fad2ab4adcab0544538939faa +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-ppc64le.tar.gz) | 326d3eb521b41bdf489912177f70b8cdd7cd828bb9b3d847ed3694eb27e457f24e0a88b8e51b726eee39800a3c5a40c1b30e3a8ec4a34d8041b3d8ef05d1b749 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-s390x.tar.gz) | 022d05ebaa66a0332c4fe18cdaf23d14c2c7e4d1f2af7f27baaf1eb042e6890dc3434b4ac8ba58c35d590717956f8c3458112685aff4938b94b18e263c3f4256 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-windows-amd64.tar.gz) | fa691ed93f07af6bc1cf57e20a30580d6c528f88e5fea3c14f39c1820969dc5a0eb476c5b87b288593d0c086c4dd93aff6165082393283c3f46c210f9bb66d61 -## Changelog since v1.20.0-beta.1 +## Changelog since v1.21.0-beta.0 ## Urgent Upgrade Notes ### (No, really, you MUST read this before you upgrade) - - A bug was fixed in kubelet where exec probe timeouts were not respected. Ensure that pods relying on this behavior are updated to correctly handle probe timeouts. - - This change in behavior may be unexpected for some clusters and can be disabled by turning off the ExecProbeTimeout feature gate. This gate will be locked and removed in future releases so that exec probe timeouts are always respected. ([#94115](https://github.com/kubernetes/kubernetes/pull/94115), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Testing] - - For CSI drivers, kubelet no longer creates the target_path for NodePublishVolume in accordance with the CSI spec. Kubelet also no longer checks if staging and target paths are mounts or corrupted. CSI drivers need to be idempotent and do any necessary mount verification. ([#88759](https://github.com/kubernetes/kubernetes/pull/88759), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] - - Kubeadm: - - The label applied to control-plane nodes "node-role.kubernetes.io/master" is now deprecated and will be removed in a future release after a GA deprecation period. - - Introduce a new label "node-role.kubernetes.io/control-plane" that will be applied in parallel to "node-role.kubernetes.io/master" until the removal of the "node-role.kubernetes.io/master" label. - - Make "kubeadm upgrade apply" add the "node-role.kubernetes.io/control-plane" label on existing nodes that only have the "node-role.kubernetes.io/master" label during upgrade. - - Please adapt your tooling built on top of kubeadm to use the "node-role.kubernetes.io/control-plane" label. - - - The taint applied to control-plane nodes "node-role.kubernetes.io/master:NoSchedule" is now deprecated and will be removed in a future release after a GA deprecation period. - - Apply toleration for a new, future taint "node-role.kubernetes.io/control-plane:NoSchedule" to the kubeadm CoreDNS / kube-dns managed manifests. Note that this taint is not yet applied to kubeadm control-plane nodes. - - Please adapt your workloads to tolerate the same future taint preemptively. 
- - For more details see: http://git.k8s.io/enhancements/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md ([#95382](https://github.com/kubernetes/kubernetes/pull/95382), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] + - Kubeadm: during "init" an empty cgroupDriver value in the KubeletConfiguration is now always set to "systemd" unless the user is explicit about it. This requires existing machine setups to configure the container runtime to use the "systemd" driver. Documentation on this topic can be found here: https://kubernetes.io/docs/setup/production-environment/container-runtimes/. When upgrading existing clusters / nodes using "kubeadm upgrade" the old cgroupDriver value is preserved, but in 1.22 this change will also apply to "upgrade". For more information on migrating to the "systemd" driver or remaining on the "cgroupfs" driver see: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/. ([#99471](https://github.com/kubernetes/kubernetes/pull/99471), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] + - Migrate `pkg/kubelet/(dockershim, network)` to structured logging + Exit code changed from 255 to 1 ([#98939](https://github.com/kubernetes/kubernetes/pull/98939), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Network and Node] + - Migrate `pkg/kubelet/certificate` to structured logging + Exit code changed from 255 to 1 ([#98993](https://github.com/kubernetes/kubernetes/pull/98993), [@SataQiu](https://github.com/SataQiu)) [SIG Auth and Node] + - Newly provisioned PVs by the EBS plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels. They will use the "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead. ([#99130](https://github.com/kubernetes/kubernetes/pull/99130), [@ayberk](https://github.com/ayberk)) [SIG Cloud Provider, Storage and Testing] + - Newly provisioned PVs by the OpenStack Cinder plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels. They will use the "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead. ([#99719](https://github.com/kubernetes/kubernetes/pull/99719), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider and Storage] + - OpenStack Cinder CSI migration is on by default; the Cinder CSI driver must be installed on clusters on OpenStack for Cinder volumes to work. ([#98538](https://github.com/kubernetes/kubernetes/pull/98538), [@dims](https://github.com/dims)) [SIG Storage] + - Package pkg/kubelet/server migrated to structured logging + Exit code changed from 255 to 1 ([#99838](https://github.com/kubernetes/kubernetes/pull/99838), [@adisky](https://github.com/adisky)) [SIG Node] + - Pkg/kubelet/kuberuntime/kuberuntime_manager.go migrated to structured logging + Exit code changed from 255 to 1 ([#99841](https://github.com/kubernetes/kubernetes/pull/99841), [@adisky](https://github.com/adisky)) [SIG Instrumentation and Node] ## Changes by Kind ### Deprecation -- Docker support in the kubelet is now deprecated and will be removed in a future release. The kubelet uses a module called "dockershim" which implements CRI support for Docker and it has seen maintenance issues in the Kubernetes community.
We encourage you to evaluate moving to a container runtime that is a full-fledged implementation of CRI (v1alpha1 or v1 compliant) as they become available. ([#94624](https://github.com/kubernetes/kubernetes/pull/94624), [@dims](https://github.com/dims)) [SIG Node] -- Kubectl: deprecate --delete-local-data ([#95076](https://github.com/kubernetes/kubernetes/pull/95076), [@dougsland](https://github.com/dougsland)) [SIG CLI, Cloud Provider and Scalability] +- Kubeadm: the deprecated kube-dns is no longer supported as an option. If "ClusterConfiguration.dns.type" is set to "kube-dns", kubeadm will now throw an error. ([#99646](https://github.com/kubernetes/kubernetes/pull/99646), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Remove deprecated --generator --replicas --service-generator --service-overrides --schedule from kubectl run + Deprecate --serviceaccount --hostport --requests --limits in kubectl run ([#99732](https://github.com/kubernetes/kubernetes/pull/99732), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] +- `audit.k8s.io/v1beta1` and `audit.k8s.io/v1alpha1` audit policy configuration and audit events are deprecated in favor of `audit.k8s.io/v1`, available since v1.13. kube-apiserver invocations that specify alpha or beta policy configurations with `--audit-policy-file`, or explicitly request alpha or beta audit events with `--audit-log-version` / `--audit-webhook-version` must update to use `audit.k8s.io/v1` and accept `audit.k8s.io/v1` events prior to v1.24. ([#98858](https://github.com/kubernetes/kubernetes/pull/98858), [@carlory](https://github.com/carlory)) [SIG Auth] +- The `diskformat` storage class parameter for the in-tree vSphere volume plugin is deprecated as of the v1.21 release. Please consider updating the storage class and removing the `diskformat` parameter. The vSphere CSI Driver does not support the `diskformat` storage class parameter. + + vSphere releases less than 67u3 are deprecated as of v1.21. Please consider upgrading vSphere to 67u3 or above. vSphere CSI Driver requires minimum vSphere 67u3. + + VM Hardware version less than 15 is deprecated as of v1.21. Please consider upgrading the Node VM Hardware version to 15 or above. vSphere CSI Driver recommends Node VM's Hardware version set to at least vmx-15. + + Multi vCenter support is deprecated as of v1.21. If you have a Kubernetes cluster spanning across multiple vCenter servers, please consider moving all k8s nodes to a single vCenter Server. vSphere CSI Driver does not support Kubernetes deployment spanning across multiple vCenter servers. + + Support for these deprecations will be available until Kubernetes v1.24. ([#98546](https://github.com/kubernetes/kubernetes/pull/98546), [@divyenpatel](https://github.com/divyenpatel)) [SIG Cloud Provider and Storage] ### API Change -- API priority and fairness graduated to beta - 1.19 servers with APF turned on should not be run in a multi-server cluster with 1.20+ servers.
([#96527](https://github.com/kubernetes/kubernetes/pull/96527), [@adtac](https://github.com/adtac)) [SIG API Machinery and Testing] -- Add LoadBalancerIPMode feature gate ([#92312](https://github.com/kubernetes/kubernetes/pull/92312), [@Sh4d1](https://github.com/Sh4d1)) [SIG Apps, CLI, Cloud Provider and Network] -- Add WindowsContainerResources and Annotations to CRI-API UpdateContainerResourcesRequest ([#95741](https://github.com/kubernetes/kubernetes/pull/95741), [@katiewasnothere](https://github.com/katiewasnothere)) [SIG Node] -- Add a 'serving' and `terminating` condition to the EndpointSlice API. - - `serving` tracks the readiness of endpoints regardless of their terminating state. This is distinct from `ready` since `ready` is only true when pods are not terminating. - `terminating` is true when an endpoint is terminating. For pods this is any endpoint with a deletion timestamp. ([#92968](https://github.com/kubernetes/kubernetes/pull/92968), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps and Network] -- Add support for hugepages to downward API ([#86102](https://github.com/kubernetes/kubernetes/pull/86102), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- Adds kubelet alpha feature, `GracefulNodeShutdown` which makes kubelet aware of node system shutdowns and result in graceful termination of pods during a system shutdown. ([#96129](https://github.com/kubernetes/kubernetes/pull/96129), [@bobbypage](https://github.com/bobbypage)) [SIG Node] -- AppProtocol is now GA for Endpoints and Services. The ServiceAppProtocol feature gate will be deprecated in 1.21. ([#96327](https://github.com/kubernetes/kubernetes/pull/96327), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- Automatic allocation of NodePorts for services with type LoadBalancer can now be disabled by setting the (new) parameter - Service.spec.allocateLoadBalancerNodePorts=false. The default is to allocate NodePorts for services with type LoadBalancer which is the existing behavior. ([#92744](https://github.com/kubernetes/kubernetes/pull/92744), [@uablrek](https://github.com/uablrek)) [SIG Apps and Network] -- Document that ServiceTopology feature is required to use `service.spec.topologyKeys`. ([#96528](https://github.com/kubernetes/kubernetes/pull/96528), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] -- EndpointSlice has a new NodeName field guarded by the EndpointSliceNodeName feature gate. - - EndpointSlice topology field will be deprecated in an upcoming release. - - EndpointSlice "IP" address type is formally removed after being deprecated in Kubernetes 1.17. - - The discovery.k8s.io/v1alpha1 API is deprecated and will be removed in Kubernetes 1.21. ([#96440](https://github.com/kubernetes/kubernetes/pull/96440), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps and Network] -- Fewer candidates are enumerated for preemption to improve performance in large clusters ([#94814](https://github.com/kubernetes/kubernetes/pull/94814), [@adtac](https://github.com/adtac)) [SIG Scheduling] -- If BoundServiceAccountTokenVolume is enabled, cluster admins can use metric `serviceaccount_stale_tokens_total` to monitor workloads that are depending on the extended tokens. 
If there are no such workloads, turn off extended tokens by starting `kube-apiserver` with flag `--service-account-extend-token-expiration=false` ([#96273](https://github.com/kubernetes/kubernetes/pull/96273), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Auth] -- Introduce alpha support for exec-based container registry credential provider plugins in the kubelet. ([#94196](https://github.com/kubernetes/kubernetes/pull/94196), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Release] -- Kube-apiserver now deletes expired kube-apiserver Lease objects: - - The feature is under feature gate `APIServerIdentity`. - - A flag is added to kube-apiserver: `identity-lease-garbage-collection-check-period-seconds` ([#95895](https://github.com/kubernetes/kubernetes/pull/95895), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Apps, Auth and Testing] -- Move configurable fsgroup change policy for pods to beta ([#96376](https://github.com/kubernetes/kubernetes/pull/96376), [@gnufied](https://github.com/gnufied)) [SIG Apps and Storage] -- New flag is introduced, i.e. --topology-manager-scope=container|pod. - The default value is the "container" scope. ([#92967](https://github.com/kubernetes/kubernetes/pull/92967), [@cezaryzukowski](https://github.com/cezaryzukowski)) [SIG Instrumentation, Node and Testing] -- NodeAffinity plugin can be configured with AddedAffinity. ([#96202](https://github.com/kubernetes/kubernetes/pull/96202), [@alculquicondor](https://github.com/alculquicondor)) [SIG Node, Scheduling and Testing] -- Promote RuntimeClass feature to GA. - Promote node.k8s.io API groups from v1beta1 to v1. ([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing] -- Reminder: The labels "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" are deprecated in favor of "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" respectively. All users of the "failure-domain.beta..." labels should switch to the "topology..." equivalents. ([#96033](https://github.com/kubernetes/kubernetes/pull/96033), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, CLI, Cloud Provider, Network, Node, Scheduling, Storage and Testing] -- The usage of mixed protocol values in the same LoadBalancer Service is possible if the new feature gate MixedProtocolLBSVC is enabled. - "action required" - The feature gate is disabled by default. The user has to enable it for the API Server. ([#94028](https://github.com/kubernetes/kubernetes/pull/94028), [@janosi](https://github.com/janosi)) [SIG API Machinery and Apps] -- This PR will introduce a feature gate CSIServiceAccountToken with two additional fields in `CSIDriverSpec`. ([#93130](https://github.com/kubernetes/kubernetes/pull/93130), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing] -- Users can try the cronjob controller v2 using the feature gate. This will be the default controller in future releases. 
([#93370](https://github.com/kubernetes/kubernetes/pull/93370), [@alaypatel07](https://github.com/alaypatel07)) [SIG API Machinery, Apps, Auth and Testing] -- VolumeSnapshotDataSource moves to GA in 1.20 release ([#95282](https://github.com/kubernetes/kubernetes/pull/95282), [@xing-yang](https://github.com/xing-yang)) [SIG Apps] +- 1. PodAffinityTerm includes a namespaceSelector field to allow selecting eligible namespaces based on their labels. + 2. A new CrossNamespacePodAffinity quota scope API that allows restricting which namespaces are allowed to use PodAffinityTerm with cross-namespace references via the namespaceSelector or namespaces fields. ([#98582](https://github.com/kubernetes/kubernetes/pull/98582), [@ahg-g](https://github.com/ahg-g)) [SIG API Machinery, Apps, Auth and Testing] +- Add a default metadata name label for selecting any namespace by its name. ([#96968](https://github.com/kubernetes/kubernetes/pull/96968), [@jayunit100](https://github.com/jayunit100)) [SIG API Machinery, Apps, Cloud Provider, Storage and Testing] +- Added `.spec.completionMode` field to Job, with accepted values `NonIndexed` (default) and `Indexed` ([#98441](https://github.com/kubernetes/kubernetes/pull/98441), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI] +- Clarified NetworkPolicy policyTypes documentation ([#97216](https://github.com/kubernetes/kubernetes/pull/97216), [@joejulian](https://github.com/joejulian)) [SIG Network] +- DaemonSets accept a MaxSurge integer or percent on their rolling update strategy that will launch the updated pod on nodes and wait for those pods to go ready before marking the old out-of-date pods as deleted. This allows workloads to avoid downtime during upgrades when deployed using DaemonSets. This feature is alpha and is behind the DaemonSetUpdateSurge feature gate. ([#96441](https://github.com/kubernetes/kubernetes/pull/96441), [@smarterclayton](https://github.com/smarterclayton)) [SIG Apps and Testing] +- EndpointSlice API is now GA. The EndpointSlice topology field has been removed from the GA API and will be replaced by a new per Endpoint Zone field. If the topology field was previously used, it will be converted into an annotation in the v1 Resource. The discovery.k8s.io/v1alpha1 API is removed. ([#99662](https://github.com/kubernetes/kubernetes/pull/99662), [@swetharepakula](https://github.com/swetharepakula)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network and Testing] +- EndpointSlice Controllers are now GA. The EndpointSlice Controller will not populate the `deprecatedTopology` field and will only provide topology information through the `zone` and `nodeName` fields. ([#99870](https://github.com/kubernetes/kubernetes/pull/99870), [@swetharepakula](https://github.com/swetharepakula)) [SIG API Machinery, Apps, Auth, Network and Testing] +- IngressClass resource can now reference a resource in a specific namespace + for implementation-specific configuration (previously only Cluster-level resources were allowed). + This feature can be enabled using the IngressClassNamespacedParams feature gate.
([#99275](https://github.com/kubernetes/kubernetes/pull/99275), [@hbagdi](https://github.com/hbagdi)) [SIG API Machinery, CLI and Network] +- Introduce conditions for PodDisruptionBudget ([#98127](https://github.com/kubernetes/kubernetes/pull/98127), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation] +- Jobs API has a new .spec.suspend field that can be used to suspend and resume Jobs ([#98727](https://github.com/kubernetes/kubernetes/pull/98727), [@adtac](https://github.com/adtac)) [SIG API Machinery, Apps, Node, Scheduling and Testing] +- Kubelet Graceful Node Shutdown feature is now beta. ([#99735](https://github.com/kubernetes/kubernetes/pull/99735), [@bobbypage](https://github.com/bobbypage)) [SIG Node] +- Limit the request value of hugepages to an integer multiple of the page size. ([#98515](https://github.com/kubernetes/kubernetes/pull/98515), [@lala123912](https://github.com/lala123912)) [SIG Apps] +- One new field "InternalTrafficPolicy" in Service is added. + It specifies whether cluster-internal traffic should be routed to all endpoints or node-local endpoints only. + "Cluster" routes internal traffic to a Service to all endpoints. + "Local" routes traffic to node-local endpoints only, and traffic is dropped if no node-local endpoints are ready. + The default value is "Cluster". ([#96600](https://github.com/kubernetes/kubernetes/pull/96600), [@maplain](https://github.com/maplain)) [SIG API Machinery, Apps and Network] +- PodSecurityPolicy only stores "generic" as allowed volume type if the GenericEphemeralVolume feature gate is enabled ([#98918](https://github.com/kubernetes/kubernetes/pull/98918), [@pohly](https://github.com/pohly)) [SIG Auth and Security] +- Promote CronJobs to batch/v1 ([#99423](https://github.com/kubernetes/kubernetes/pull/99423), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing] +- Remove support for building Kubernetes with bazel. ([#99561](https://github.com/kubernetes/kubernetes/pull/99561), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery, Apps, Architecture, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Release, Scalability, Scheduling, Storage, Testing and Windows] +- Setting loadBalancerClass on a Service of type LoadBalancer is now supported. + Users who want to use a custom load balancer can specify loadBalancerClass to achieve it. ([#98277](https://github.com/kubernetes/kubernetes/pull/98277), [@XudongLiuHarold](https://github.com/XudongLiuHarold)) [SIG API Machinery, Apps, Cloud Provider and Network] +- Storage capacity tracking (= the CSIStorageCapacity feature) is beta, storage.k8s.io/v1alpha1/VolumeAttachment and storage.k8s.io/v1alpha1/CSIStorageCapacity objects are deprecated ([#99641](https://github.com/kubernetes/kubernetes/pull/99641), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, Scheduling, Storage and Testing] +- Support for Indexed Job: a Job that is considered completed when Pods associated with indexes from 0 to (.spec.completions-1) have succeeded.
([#98812](https://github.com/kubernetes/kubernetes/pull/98812), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI] +- The apiserver now resets managedFields that got corrupted by a mutating admission controller. ([#98074](https://github.com/kubernetes/kubernetes/pull/98074), [@kwiesmueller](https://github.com/kwiesmueller)) [SIG API Machinery and Testing] +- `controller.kubernetes.io/pod-deletion-cost` annotation can be set to offer a hint on the cost of deleting a pod compared to other pods belonging to the same ReplicaSet. Pods with lower deletion cost are deleted first. This is an alpha feature. ([#99163](https://github.com/kubernetes/kubernetes/pull/99163), [@ahg-g](https://github.com/ahg-g)) [SIG Apps] ### Feature -- **Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**: ([#95896](https://github.com/kubernetes/kubernetes/pull/95896), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Cluster Lifecycle] -- A new set of alpha metrics are reported by the Kubernetes scheduler under the `/metrics/resources` endpoint that allow administrators to easily see the resource consumption (requests and limits for all resources on the pods) and compare it to actual pod usage or node capacity. ([#94866](https://github.com/kubernetes/kubernetes/pull/94866), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Instrumentation, Node and Scheduling] -- Add --experimental-logging-sanitization flag enabling runtime protection from leaking sensitive data in logs ([#96370](https://github.com/kubernetes/kubernetes/pull/96370), [@serathius](https://github.com/serathius)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] -- Add a StorageVersionAPI feature gate that makes API server update storageversions before serving certain write requests. - This feature allows the storage migrator to manage storage migration for built-in resources. - Enabling internal.apiserver.k8s.io/v1alpha1 API and APIServerIdentity feature gate are required to use this feature. ([#93873](https://github.com/kubernetes/kubernetes/pull/93873), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Auth and Testing] -- Add a new `vSphere` metric: `cloudprovider_vsphere_vcenter_versions`. It's content show `vCenter` hostnames with the associated server version. ([#94526](https://github.com/kubernetes/kubernetes/pull/94526), [@Danil-Grigorev](https://github.com/Danil-Grigorev)) [SIG Cloud Provider and Instrumentation] -- Add feature to size memory backed volumes ([#94444](https://github.com/kubernetes/kubernetes/pull/94444), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Storage and Testing] -- Add node_authorizer_actions_duration_seconds metric that can be used to estimate load to node authorizer. ([#92466](https://github.com/kubernetes/kubernetes/pull/92466), [@mborsz](https://github.com/mborsz)) [SIG API Machinery, Auth and Instrumentation] -- Add pod_ based CPU and memory metrics to Kubelet's /metrics/resource endpoint ([#95839](https://github.com/kubernetes/kubernetes/pull/95839), [@egernst](https://github.com/egernst)) [SIG Instrumentation, Node and Testing] -- Adds a headless service on node-local-cache addon. 
([#88412](https://github.com/kubernetes/kubernetes/pull/88412), [@stafot](https://github.com/stafot)) [SIG Cloud Provider and Network] -- CRDs: For structural schemas, non-nullable null map fields will now be dropped and defaulted if a default is available. null items in list will continue being preserved, and fail validation if not nullable. ([#95423](https://github.com/kubernetes/kubernetes/pull/95423), [@apelisse](https://github.com/apelisse)) [SIG API Machinery] -- E2e test for PodFsGroupChangePolicy ([#96247](https://github.com/kubernetes/kubernetes/pull/96247), [@saikat-royc](https://github.com/saikat-royc)) [SIG Storage and Testing] -- Gradudate the Pod Resources API to G.A - Introduces the pod_resources_endpoint_requests_total metric which tracks the total number of requests to the pod resources API ([#92165](https://github.com/kubernetes/kubernetes/pull/92165), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Instrumentation, Node and Testing] -- Introduce api-extensions category which will return: mutating admission configs, validating admission configs, CRDs and APIServices when used in kubectl get, for example. ([#95603](https://github.com/kubernetes/kubernetes/pull/95603), [@soltysh](https://github.com/soltysh)) [SIG API Machinery] -- Kube-apiserver now maintains a Lease object to identify itself: - - The feature is under feature gate `APIServerIdentity`. - - Two flags are added to kube-apiserver: `identity-lease-duration-seconds`, `identity-lease-renew-interval-seconds` ([#95533](https://github.com/kubernetes/kubernetes/pull/95533), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] -- Kube-apiserver: The timeout used when making health check calls to etcd can now be configured with `--etcd-healthcheck-timeout`. The default timeout is 2 seconds, matching the previous behavior. ([#93244](https://github.com/kubernetes/kubernetes/pull/93244), [@Sh4d1](https://github.com/Sh4d1)) [SIG API Machinery] -- Kubectl: Previously users cannot provide arguments to a external diff tool via KUBECTL_EXTERNAL_DIFF env. This release now allow users to specify args to KUBECTL_EXTERNAL_DIFF env. ([#95292](https://github.com/kubernetes/kubernetes/pull/95292), [@dougsland](https://github.com/dougsland)) [SIG CLI] -- Scheduler now ignores Pod update events if the resourceVersion of old and new Pods are identical. ([#96071](https://github.com/kubernetes/kubernetes/pull/96071), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] -- Support custom tags for cloud provider managed resources ([#96450](https://github.com/kubernetes/kubernetes/pull/96450), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Support customize load balancer health probe protocol and request path ([#96338](https://github.com/kubernetes/kubernetes/pull/96338), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Support multiple standard load balancers in one cluster ([#96111](https://github.com/kubernetes/kubernetes/pull/96111), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- The beta `RootCAConfigMap` feature gate is enabled by default and causes kube-controller-manager to publish a "kube-root-ca.crt" ConfigMap to every namespace. This ConfigMap contains a CA bundle used for verifying connections to the kube-apiserver. 
([#96197](https://github.com/kubernetes/kubernetes/pull/96197), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth and Testing] -- The kubelet_runtime_operations_duration_seconds metric got additional buckets of 60, 300, 600, 900 and 1200 seconds ([#96054](https://github.com/kubernetes/kubernetes/pull/96054), [@alvaroaleman](https://github.com/alvaroaleman)) [SIG Instrumentation and Node] -- There is a new pv_collector_total_pv_count metric that counts persistent volumes by the volume plugin name and volume mode. ([#95719](https://github.com/kubernetes/kubernetes/pull/95719), [@tsmetana](https://github.com/tsmetana)) [SIG Apps, Instrumentation, Storage and Testing] -- Volume snapshot e2e test to validate PVC and VolumeSnapshotContent finalizer ([#95863](https://github.com/kubernetes/kubernetes/pull/95863), [@RaunakShah](https://github.com/RaunakShah)) [SIG Cloud Provider, Storage and Testing] -- Warns user when executing kubectl apply/diff to resource currently being deleted. ([#95544](https://github.com/kubernetes/kubernetes/pull/95544), [@SaiHarshaK](https://github.com/SaiHarshaK)) [SIG CLI] -- `kubectl alpha debug` has graduated to beta and is now `kubectl debug`. ([#96138](https://github.com/kubernetes/kubernetes/pull/96138), [@verb](https://github.com/verb)) [SIG CLI and Testing] -- `kubectl debug` gains support for changing container images when copying a pod for debugging, similar to how `kubectl set image` works. See `kubectl help debug` for more information. ([#96058](https://github.com/kubernetes/kubernetes/pull/96058), [@verb](https://github.com/verb)) [SIG CLI] - -### Documentation - -- Updates docs and guidance on cloud provider InstancesV2 and Zones interface for external cloud providers: - - removes experimental warning for InstancesV2 - - document that implementation of InstancesV2 will disable calls to Zones - - deprecate Zones in favor of InstancesV2 ([#96397](https://github.com/kubernetes/kubernetes/pull/96397), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider] +- A client-go metric, rest_client_exec_plugin_call_total, has been added to track total calls to client-go credential plugins. ([#98892](https://github.com/kubernetes/kubernetes/pull/98892), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery, Auth, Cluster Lifecycle and Instrumentation] +- Add --use-protocol-buffers flag to kubectl top pods and nodes ([#96655](https://github.com/kubernetes/kubernetes/pull/96655), [@serathius](https://github.com/serathius)) [SIG CLI] +- Add support to generate client-side binaries for new darwin/arm64 platform ([#97743](https://github.com/kubernetes/kubernetes/pull/97743), [@dims](https://github.com/dims)) [SIG Release and Testing] +- Added `ephemeral_volume_controller_create[_failures]_total` counters to kube-controller-manager metrics ([#99115](https://github.com/kubernetes/kubernetes/pull/99115), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Cluster Lifecycle, Instrumentation and Storage] +- Adds alpha feature `VolumeCapacityPriority` which makes the scheduler prioritize nodes based on the best matching size of statically provisioned PVs across multiple topologies. 
([#96347](https://github.com/kubernetes/kubernetes/pull/96347), [@cofyc](https://github.com/cofyc)) [SIG Apps, Network, Scheduling, Storage and Testing] +- Adds two new metrics to cronjobs: a histogram to track the difference between the time a job is created and the expected time when it should be created, and a gauge for the missed schedules of a cronjob ([#99341](https://github.com/kubernetes/kubernetes/pull/99341), [@alaypatel07](https://github.com/alaypatel07)) [SIG Apps and Instrumentation] +- Alpha implementation of Kubectl Command Headers: SIG CLI KEP 859 is enabled when the KUBECTL_COMMAND_HEADERS environment variable is set on the client command line. + - To enable: export KUBECTL_COMMAND_HEADERS=1; kubectl ... ([#98952](https://github.com/kubernetes/kubernetes/pull/98952), [@seans3](https://github.com/seans3)) [SIG API Machinery and CLI] +- Component owners can configure the allowlist of metric labels with the flag '--allow-metric-labels'. ([#99738](https://github.com/kubernetes/kubernetes/pull/99738), [@YoyinZyc](https://github.com/YoyinZyc)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] +- Disruption controller only sends one event per PodDisruptionBudget if scale can't be computed ([#98128](https://github.com/kubernetes/kubernetes/pull/98128), [@mortent](https://github.com/mortent)) [SIG Apps] +- EndpointSliceNodeName will always be enabled, so NodeName will always be available in the v1beta1 API. ([#99746](https://github.com/kubernetes/kubernetes/pull/99746), [@swetharepakula](https://github.com/swetharepakula)) [SIG Apps and Network] +- Graduate CRIContainerLogRotation feature gate to GA. ([#99651](https://github.com/kubernetes/kubernetes/pull/99651), [@umohnani8](https://github.com/umohnani8)) [SIG Node and Testing] +- Kube-proxy iptables: new metric sync_proxy_rules_iptables_total that exposes the number of rules programmed per table in each iteration ([#99653](https://github.com/kubernetes/kubernetes/pull/99653), [@aojea](https://github.com/aojea)) [SIG Instrumentation and Network] +- Kube-scheduler now logs plugin scoring summaries at --v=4 ([#99411](https://github.com/kubernetes/kubernetes/pull/99411), [@damemi](https://github.com/damemi)) [SIG Scheduling] +- Kubeadm: print a warning to the user, as ipv6 site-local is deprecated ([#99574](https://github.com/kubernetes/kubernetes/pull/99574), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle and Network] +- Kubeadm: apply the "node.kubernetes.io/exclude-from-external-load-balancers" label on control plane nodes during "init", "join" and "upgrade" to preserve backwards compatibility with the legacy LB mode where nodes labeled as "master" were excluded. To opt out, you can remove the label from a node. See #97543 and the linked KEP for more details. ([#98269](https://github.com/kubernetes/kubernetes/pull/98269), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: if the user has customized their image repository via the kubeadm configuration, pass the custom pause image repository and tag to the kubelet via --pod-infra-container-image not only for Docker but for all container runtimes. This flag tells the kubelet that it should not garbage collect the image.
([#99476](https://github.com/kubernetes/kubernetes/pull/99476), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: promote IPv6DualStack feature gate to Beta ([#99294](https://github.com/kubernetes/kubernetes/pull/99294), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubectl version changed to write a warning message to stderr if the client and server version difference exceeds the supported version skew of +/-1 minor version. ([#98250](https://github.com/kubernetes/kubernetes/pull/98250), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] +- Kubernetes is now built with Golang 1.16 ([#98572](https://github.com/kubernetes/kubernetes/pull/98572), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node, Release and Testing] +- Persistent Volumes formatted with the btrfs filesystem will now automatically resize when expanded. ([#99361](https://github.com/kubernetes/kubernetes/pull/99361), [@Novex](https://github.com/Novex)) [SIG Storage] +- Remove cAdvisor json metrics api collected by Kubelet ([#99236](https://github.com/kubernetes/kubernetes/pull/99236), [@pacoxu](https://github.com/pacoxu)) [SIG Node] +- Sysctls is now GA and locked to default ([#99158](https://github.com/kubernetes/kubernetes/pull/99158), [@wgahnagl](https://github.com/wgahnagl)) [SIG Node] +- The NodeAffinity plugin implements the PreFilter extension, offering enhanced performance for Filter. ([#99213](https://github.com/kubernetes/kubernetes/pull/99213), [@AliceZhang2016](https://github.com/AliceZhang2016)) [SIG Scheduling] +- The endpointslice mirroring controller mirrors endpoints annotations and labels to the generated endpoint slices, and it also ensures that updates on any of these fields are mirrored. + The well-known annotation endpoints.kubernetes.io/last-change-trigger-time is skipped and not mirrored. ([#98116](https://github.com/kubernetes/kubernetes/pull/98116), [@aojea](https://github.com/aojea)) [SIG Apps, Network and Testing] +- Update the latest validated version of Docker to 20.10 ([#98977](https://github.com/kubernetes/kubernetes/pull/98977), [@neolit123](https://github.com/neolit123)) [SIG CLI, Cluster Lifecycle and Node] +- Upgrade node local dns to 1.17.0 for better IPv6 support ([#99749](https://github.com/kubernetes/kubernetes/pull/99749), [@pacoxu](https://github.com/pacoxu)) [SIG Cloud Provider and Network] +- Users can specify the `kubectl.kubernetes.io/default-exec-container` annotation in a Pod to preselect a container for kubectl commands. ([#99581](https://github.com/kubernetes/kubernetes/pull/99581), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG CLI] +- When downscaling ReplicaSets, ready and creation timestamps are compared on a logarithmic scale. ([#99212](https://github.com/kubernetes/kubernetes/pull/99212), [@damemi](https://github.com/damemi)) [SIG Apps and Testing] +- When the kubelet is watching a ConfigMap or Secret purely in the context of setting environment variables + for containers, only hold that watch for a defined duration before cancelling it. This change reduces the CPU + and memory usage of the kube-apiserver in large clusters.
([#99393](https://github.com/kubernetes/kubernetes/pull/99393), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery, Node and Testing] +- WindowsEndpointSliceProxying feature gate has graduated to beta and is enabled by default. This means kube-proxy will read from EndpointSlices instead of Endpoints on Windows by default. ([#99794](https://github.com/kubernetes/kubernetes/pull/99794), [@robscott](https://github.com/robscott)) [SIG Network] ### Bug or Regression -- Change plugin name in fsgroupapplymetrics of csi and flexvolume to distinguish different driver ([#95892](https://github.com/kubernetes/kubernetes/pull/95892), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation, Storage and Testing] -- Clear UDP conntrack entry on endpoint changes when using nodeport ([#71573](https://github.com/kubernetes/kubernetes/pull/71573), [@JacobTanenbaum](https://github.com/JacobTanenbaum)) [SIG Network] -- Exposes and sets a default timeout for the TokenReview client for DelegatingAuthenticationOptions ([#96217](https://github.com/kubernetes/kubernetes/pull/96217), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider] -- Fix CVE-2020-8555 for Quobyte client connections. ([#95206](https://github.com/kubernetes/kubernetes/pull/95206), [@misterikkit](https://github.com/misterikkit)) [SIG Storage] -- Fix IP fragmentation of UDP and TCP packets not supported issues on LoadBalancer rules ([#96464](https://github.com/kubernetes/kubernetes/pull/96464), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Fix a bug that DefaultPreemption plugin is disabled when using (legacy) scheduler policy. ([#96439](https://github.com/kubernetes/kubernetes/pull/96439), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing] -- Fix bug in JSON path parser where an error occurs when a range is empty ([#95933](https://github.com/kubernetes/kubernetes/pull/95933), [@brianpursley](https://github.com/brianpursley)) [SIG API Machinery] -- Fix client-go prometheus metrics to correctly present the API path accessed in some environments. ([#74363](https://github.com/kubernetes/kubernetes/pull/74363), [@aanm](https://github.com/aanm)) [SIG API Machinery] -- Fix memory leak in kube-apiserver when underlying time goes forth and back. ([#96266](https://github.com/kubernetes/kubernetes/pull/96266), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery] -- Fix paging issues when Azure API returns empty values with non-empty nextLink ([#96211](https://github.com/kubernetes/kubernetes/pull/96211), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fix pull image error from multiple ACRs using azure managed identity ([#96355](https://github.com/kubernetes/kubernetes/pull/96355), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix vSphere volumes that could be erroneously attached to wrong node ([#96224](https://github.com/kubernetes/kubernetes/pull/96224), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fixed a bug that prevents kubectl to validate CRDs with schema using x-kubernetes-preserve-unknown-fields on object fields. 
([#96369](https://github.com/kubernetes/kubernetes/pull/96369), [@gautierdelorme](https://github.com/gautierdelorme)) [SIG API Machinery and Testing] -- For vSphere Cloud Provider, If VM of worker node is deleted, the node will also be deleted by node controller ([#92608](https://github.com/kubernetes/kubernetes/pull/92608), [@lubronzhan](https://github.com/lubronzhan)) [SIG Cloud Provider] -- HTTP/2 connection health check is enabled by default in all Kubernetes clients. The feature should work out-of-the-box. If needed, users can tune the feature via the HTTP2_READ_IDLE_TIMEOUT_SECONDS and HTTP2_PING_TIMEOUT_SECONDS environment variables. The feature is disabled if HTTP2_READ_IDLE_TIMEOUT_SECONDS is set to 0. ([#95981](https://github.com/kubernetes/kubernetes/pull/95981), [@caesarxuchao](https://github.com/caesarxuchao)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] -- If the user specifies an invalid timeout in the request URL, the request will be aborted with an HTTP 400. - - If the user specifies a timeout in the request URL that exceeds the maximum request deadline allowed by the apiserver, the request will be aborted with an HTTP 400. ([#96061](https://github.com/kubernetes/kubernetes/pull/96061), [@tkashem](https://github.com/tkashem)) [SIG API Machinery, Network and Testing] -- Improve error messages related to nodePort endpoint changes conntrack entries cleanup. ([#96251](https://github.com/kubernetes/kubernetes/pull/96251), [@ravens](https://github.com/ravens)) [SIG Network] -- Print go stack traces at -v=4 and not -v=2 ([#94663](https://github.com/kubernetes/kubernetes/pull/94663), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- Remove ready file and its directory (which is created during volume SetUp) during emptyDir volume TearDown. ([#95770](https://github.com/kubernetes/kubernetes/pull/95770), [@jingxu97](https://github.com/jingxu97)) [SIG Storage] -- Resolves non-deterministic behavior of the garbage collection controller when ownerReferences with incorrect data are encountered. Events with a reason of `OwnerRefInvalidNamespace` are recorded when namespace mismatches between child and owner objects are detected. - - A namespaced object with an ownerReference referencing a uid of a namespaced kind which does not exist in the same namespace is now consistently treated as though that owner does not exist, and the child object is deleted. - - A cluster-scoped object with an ownerReference referencing a uid of a namespaced kind is now consistently treated as though that owner is not resolvable, and the child object is ignored by the garbage collector. 
([#92743](https://github.com/kubernetes/kubernetes/pull/92743), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Testing] -- Skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:162]: Driver azure-disk doesn't support snapshot type DynamicSnapshot -- skipping - skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:185]: Driver azure-disk doesn't support ntfs -- skipping ([#96144](https://github.com/kubernetes/kubernetes/pull/96144), [@qinpingli](https://github.com/qinpingli)) [SIG Storage and Testing] -- The AWS network load balancer attributes can now be specified during service creation ([#95247](https://github.com/kubernetes/kubernetes/pull/95247), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] -- The kube-apiserver will no longer serve APIs that should have been deleted in GA non-alpha levels. Alpha levels will continue to serve the removed APIs so that CI doesn't immediately break. ([#96525](https://github.com/kubernetes/kubernetes/pull/96525), [@deads2k](https://github.com/deads2k)) [SIG API Machinery] -- Update max azure data disk count map ([#96308](https://github.com/kubernetes/kubernetes/pull/96308), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Update the route table tag in the route reconcile loop ([#96545](https://github.com/kubernetes/kubernetes/pull/96545), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Volume binding: report UnschedulableAndUnresolvable status instead of an error when bound PVs not found ([#95541](https://github.com/kubernetes/kubernetes/pull/95541), [@cofyc](https://github.com/cofyc)) [SIG Apps, Scheduling and Storage] -- [kubectl] Fail when local source file doesn't exist ([#90333](https://github.com/kubernetes/kubernetes/pull/90333), [@bamarni](https://github.com/bamarni)) [SIG CLI] +- Creating a PVC with DataSource should fail for non-CSI plugins. ([#97086](https://github.com/kubernetes/kubernetes/pull/97086), [@xing-yang](https://github.com/xing-yang)) [SIG Apps and Storage] +- EndpointSlice controller is now less likely to emit FailedToUpdateEndpointSlices events. ([#99345](https://github.com/kubernetes/kubernetes/pull/99345), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- EndpointSliceMirroring controller is now less likely to emit FailedToUpdateEndpointSlices events. 
([#99756](https://github.com/kubernetes/kubernetes/pull/99756), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- Fix `--ignore-errors` not taking effect if multiple logs are printed and unfollowed ([#97686](https://github.com/kubernetes/kubernetes/pull/97686), [@wzshiming](https://github.com/wzshiming)) [SIG CLI] +- Fix bug that would let the Horizontal Pod Autoscaler scale down despite at least one metric being unavailable/invalid ([#99514](https://github.com/kubernetes/kubernetes/pull/99514), [@mikkeloscar](https://github.com/mikkeloscar)) [SIG Apps and Autoscaling] +- Fix cgroup handling for systemd with cgroup v2 ([#98365](https://github.com/kubernetes/kubernetes/pull/98365), [@odinuge](https://github.com/odinuge)) [SIG Node] +- Fix smb mount PermissionDenied issue on Windows ([#99550](https://github.com/kubernetes/kubernetes/pull/99550), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider, Storage and Windows] +- Fixed a bug that caused a smaller conntrack-max value to be used under the CPU static policy. (#99225, @xh4n3) ([#99613](https://github.com/kubernetes/kubernetes/pull/99613), [@xh4n3](https://github.com/xh4n3)) [SIG Network] +- Fixed bug that caused cAdvisor to incorrectly detect single-socket multi-NUMA topology. ([#99315](https://github.com/kubernetes/kubernetes/pull/99315), [@iwankgb](https://github.com/iwankgb)) [SIG Node] +- Fixes add-on manager leader election ([#98968](https://github.com/kubernetes/kubernetes/pull/98968), [@liggitt](https://github.com/liggitt)) [SIG Cloud Provider] +- Improved update time of pod statuses following new probe results. ([#98376](https://github.com/kubernetes/kubernetes/pull/98376), [@matthyx](https://github.com/matthyx)) [SIG Node and Testing] +- Kube-apiserver: an update of a pod with a generic ephemeral volume dropped that volume if the feature had been disabled since creating the pod with such a volume ([#99446](https://github.com/kubernetes/kubernetes/pull/99446), [@pohly](https://github.com/pohly)) [SIG Apps, Node and Storage] +- Kubeadm: skip validating pod subnet against node-cidr-mask when allocate-node-cidrs is set to false ([#98984](https://github.com/kubernetes/kubernetes/pull/98984), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- On single-stack configured (IPv4 or IPv6, but not both) clusters, Services which are both headless (no clusterIP) and selectorless (empty or undefined selector) will report `ipFamilyPolicy RequireDualStack` and will have entries in `ipFamilies[]` for both IPv4 and IPv6. This is a change from alpha, but does not have any impact on the manually-specified Endpoints and EndpointSlices for the Service. ([#99555](https://github.com/kubernetes/kubernetes/pull/99555), [@thockin](https://github.com/thockin)) [SIG Apps and Network] +- Resolves spurious `Failed to list *v1.Secret` or `Failed to list *v1.ConfigMap` messages in kubelet logs. ([#99538](https://github.com/kubernetes/kubernetes/pull/99538), [@liggitt](https://github.com/liggitt)) [SIG Auth and Node] +- Return zero time (midnight on Jan. 1, 1970) instead of a negative number when reporting startedAt and finishedAt of a not-yet-started or running Pod when using dockershim as a runtime.
([#99585](https://github.com/kubernetes/kubernetes/pull/99585), [@Iceber](https://github.com/Iceber)) [SIG Node] +- Stdin is now only passed to client-go exec credential plugins when it is detected to be an interactive terminal. Previously, it was passed to client-go exec plugins when **stdout** was detected to be an interactive terminal. ([#99654](https://github.com/kubernetes/kubernetes/pull/99654), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery and Auth] +- The maximum number of ports allowed in EndpointSlices has been increased from 100 to 20,000 ([#99795](https://github.com/kubernetes/kubernetes/pull/99795), [@robscott](https://github.com/robscott)) [SIG Network] +- Updates the commands + - kubectl kustomize {arg} + - kubectl apply -k {arg} + to use the same code as kustomize CLI v4.0.5 + - [v4.0.5]: https://github.com/kubernetes-sigs/kustomize/releases/tag/kustomize%2Fv4.0.5 ([#98946](https://github.com/kubernetes/kubernetes/pull/98946), [@monopole](https://github.com/monopole)) [SIG API Machinery, Architecture, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node and Storage] +- When a CNI plugin returns dual-stack pod IPs, kubelet will now try to respect the + "primary IP family" of the cluster by picking a primary pod IP of the same family + as the (primary) node IP, rather than assuming that the CNI plugin returned the IPs + in the order the administrator wanted (since some CNI plugins don't allow + configuring this). ([#97979](https://github.com/kubernetes/kubernetes/pull/97979), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] +- When using Containerd on Windows, the "C:\Windows\System32\drivers\etc\hosts" file will now be managed by kubelet. ([#83730](https://github.com/kubernetes/kubernetes/pull/83730), [@claudiubelu](https://github.com/claudiubelu)) [SIG Node and Windows] +- `VolumeBindingArgs` now allows `BindTimeoutSeconds` to be set to zero, where zero indicates no waiting for the volume binding check. ([#99835](https://github.com/kubernetes/kubernetes/pull/99835), [@chendave](https://github.com/chendave)) [SIG Scheduling and Storage] +- `kubectl exec` and `kubectl attach` now honor the `--quiet` flag which suppresses output from the local binary that could be confused by a script with the remote command output (all non-failure output is hidden). In addition, exec and attach now print inline the list of alternate containers when they default to the first spec.container. ([#99004](https://github.com/kubernetes/kubernetes/pull/99004), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI] ### Other (Cleanup or Flake) -- Handle slow cronjob lister in cronjob controller v2 and improve memory footprint. ([#96443](https://github.com/kubernetes/kubernetes/pull/96443), [@alaypatel07](https://github.com/alaypatel07)) [SIG Apps] -- --redirect-container-streaming is no longer functional. The flag will be removed in v1.22 ([#95935](https://github.com/kubernetes/kubernetes/pull/95935), [@tallclair](https://github.com/tallclair)) [SIG Node] -- A new metric `requestAbortsTotal` has been introduced that counts aborted requests for each `group`, `version`, `verb`, `resource`, `subresource` and `scope`.
([#95002](https://github.com/kubernetes/kubernetes/pull/95002), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery, Cloud Provider, Instrumentation and Scheduling] -- API priority and fairness metrics use snake_case in label names ([#96236](https://github.com/kubernetes/kubernetes/pull/96236), [@adtac](https://github.com/adtac)) [SIG API Machinery, Cluster Lifecycle, Instrumentation and Testing] -- Applies translations on all command descriptions ([#95439](https://github.com/kubernetes/kubernetes/pull/95439), [@HerrNaN](https://github.com/HerrNaN)) [SIG CLI] -- Changed: default "Accept-Encoding" header removed from HTTP probes. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes ([#96127](https://github.com/kubernetes/kubernetes/pull/96127), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node] -- Generators for services are removed from kubectl ([#95256](https://github.com/kubernetes/kubernetes/pull/95256), [@Git-Jiro](https://github.com/Git-Jiro)) [SIG CLI] -- Introduce kubectl-convert plugin. ([#96190](https://github.com/kubernetes/kubernetes/pull/96190), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] -- Kube-scheduler now logs processed component config at startup ([#96426](https://github.com/kubernetes/kubernetes/pull/96426), [@damemi](https://github.com/damemi)) [SIG Scheduling] -- NONE ([#96179](https://github.com/kubernetes/kubernetes/pull/96179), [@bbyrne5](https://github.com/bbyrne5)) [SIG Network] -- Users will now be able to configure all supported values for AWS NLB health check interval and thresholds for new resources. ([#96312](https://github.com/kubernetes/kubernetes/pull/96312), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] +- `apiserver_request_duration_seconds` is promoted to stable status. ([#99925](https://github.com/kubernetes/kubernetes/pull/99925), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing] +- `apiserver_request_total` is promoted to stable status and no longer has a content-type dimension, so any alerts/charts which presume the existence of this dimension will fail. This is, however, unlikely to be the case, since it was effectively an unbounded dimension in the first place. ([#99788](https://github.com/kubernetes/kubernetes/pull/99788), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing] +- EndpointSlice generation is now incremented when labels change. ([#99750](https://github.com/kubernetes/kubernetes/pull/99750), [@robscott](https://github.com/robscott)) [SIG Network] +- Feature gate AllowInsecureBackendProxy is promoted to GA ([#99658](https://github.com/kubernetes/kubernetes/pull/99658), [@deads2k](https://github.com/deads2k)) [SIG API Machinery] +- Migrate `pkg/kubelet/(eviction)` to structured logging ([#99032](https://github.com/kubernetes/kubernetes/pull/99032), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node] (the sketch after this list illustrates the logging style these migrations adopt) +- Migrate deployment controller log messages to structured logging ([#97507](https://github.com/kubernetes/kubernetes/pull/97507), [@aldudko](https://github.com/aldudko)) [SIG Apps] +- Migrate pkg/kubelet/cloudresource to structured logging ([#98999](https://github.com/kubernetes/kubernetes/pull/98999), [@sladyn98](https://github.com/sladyn98)) [SIG Node] +- Migrate pkg/kubelet/cri/remote logs to structured logging ([#98589](https://github.com/kubernetes/kubernetes/pull/98589), [@chenyw1990](https://github.com/chenyw1990)) [SIG Node] +- Migrate pkg/kubelet/kuberuntime/kuberuntime_container.go logs to structured logging ([#96973](https://github.com/kubernetes/kubernetes/pull/96973), [@chenyw1990](https://github.com/chenyw1990)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/status to structured logging ([#99836](https://github.com/kubernetes/kubernetes/pull/99836), [@navidshaikh](https://github.com/navidshaikh)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/token to structured logging ([#99264](https://github.com/kubernetes/kubernetes/pull/99264), [@palnabarun](https://github.com/palnabarun)) [SIG Auth, Instrumentation and Node] +- Migrate pkg/kubelet/util to structured logging ([#99823](https://github.com/kubernetes/kubernetes/pull/99823), [@navidshaikh](https://github.com/navidshaikh)) [SIG Instrumentation and Node] +- Migrate proxy/userspace/proxier.go logs to structured logging ([#97837](https://github.com/kubernetes/kubernetes/pull/97837), [@JornShen](https://github.com/JornShen)) [SIG Network] +- Migrate some kubelet/metrics log messages to structured logging ([#98627](https://github.com/kubernetes/kubernetes/pull/98627), [@jialaijun](https://github.com/jialaijun)) [SIG Instrumentation and Node] +- Process start time on Windows now uses current process information ([#97491](https://github.com/kubernetes/kubernetes/pull/97491), [@jsturtevant](https://github.com/jsturtevant)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Windows]
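The structured-logging migration entries above all follow the same mechanical pattern. A minimal sketch of that pattern is below, assuming klog v2; the message strings, keys, and values are illustrative, not the exact ones from the linked PRs:

```go
package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	podName := "example-pod" // hypothetical value, for illustration only

	// Before migration: printf-style, free-form message text.
	klog.Infof("failed to update status for pod %q", podName)

	// After migration: a constant message plus key/value pairs, which
	// log backends can parse, index, and filter on.
	klog.InfoS("Failed to update pod status", "pod", podName)

	// Errors are passed as the first argument instead of being
	// formatted into the message.
	klog.ErrorS(errors.New("connection refused"), "Failed to evict pod", "pod", podName)

	klog.Flush()
}
```

The point of the migration is the constant message plus named keys: downstream pipelines can then query on `pod` rather than grepping free-form text.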
+ +### Uncategorized + +- Migrate pkg/kubelet/stats to structured logging ([#99607](https://github.com/kubernetes/kubernetes/pull/99607), [@krzysiekg](https://github.com/krzysiekg)) [SIG Node] +- The DownwardAPIHugePages feature is beta. Users may use the feature if all workers in their cluster are at version 1.20 or newer. The feature will be enabled by default in all installations in 1.22.
([#99610](https://github.com/kubernetes/kubernetes/pull/99610), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node] ## Dependencies ### Added -- cloud.google.com/go/firestore: v1.1.0 -- github.com/armon/go-metrics: [f0300d1](https://github.com/armon/go-metrics/tree/f0300d1) -- github.com/armon/go-radix: [7fddfc3](https://github.com/armon/go-radix/tree/7fddfc3) -- github.com/bketelsen/crypt: [5cbc8cc](https://github.com/bketelsen/crypt/tree/5cbc8cc) -- github.com/hashicorp/consul/api: [v1.1.0](https://github.com/hashicorp/consul/api/tree/v1.1.0) -- github.com/hashicorp/consul/sdk: [v0.1.1](https://github.com/hashicorp/consul/sdk/tree/v0.1.1) -- github.com/hashicorp/errwrap: [v1.0.0](https://github.com/hashicorp/errwrap/tree/v1.0.0) -- github.com/hashicorp/go-cleanhttp: [v0.5.1](https://github.com/hashicorp/go-cleanhttp/tree/v0.5.1) -- github.com/hashicorp/go-immutable-radix: [v1.0.0](https://github.com/hashicorp/go-immutable-radix/tree/v1.0.0) -- github.com/hashicorp/go-msgpack: [v0.5.3](https://github.com/hashicorp/go-msgpack/tree/v0.5.3) -- github.com/hashicorp/go-multierror: [v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) -- github.com/hashicorp/go-rootcerts: [v1.0.0](https://github.com/hashicorp/go-rootcerts/tree/v1.0.0) -- github.com/hashicorp/go-sockaddr: [v1.0.0](https://github.com/hashicorp/go-sockaddr/tree/v1.0.0) -- github.com/hashicorp/go-uuid: [v1.0.1](https://github.com/hashicorp/go-uuid/tree/v1.0.1) -- github.com/hashicorp/go.net: [v0.0.1](https://github.com/hashicorp/go.net/tree/v0.0.1) -- github.com/hashicorp/logutils: [v1.0.0](https://github.com/hashicorp/logutils/tree/v1.0.0) -- github.com/hashicorp/mdns: [v1.0.0](https://github.com/hashicorp/mdns/tree/v1.0.0) -- github.com/hashicorp/memberlist: [v0.1.3](https://github.com/hashicorp/memberlist/tree/v0.1.3) -- github.com/hashicorp/serf: [v0.8.2](https://github.com/hashicorp/serf/tree/v0.8.2) -- github.com/mitchellh/cli: [v1.0.0](https://github.com/mitchellh/cli/tree/v1.0.0) -- github.com/mitchellh/go-testing-interface: [v1.0.0](https://github.com/mitchellh/go-testing-interface/tree/v1.0.0) -- github.com/mitchellh/gox: [v0.4.0](https://github.com/mitchellh/gox/tree/v0.4.0) -- github.com/mitchellh/iochan: [v1.0.0](https://github.com/mitchellh/iochan/tree/v1.0.0) -- github.com/pascaldekloe/goe: [57f6aae](https://github.com/pascaldekloe/goe/tree/57f6aae) -- github.com/posener/complete: [v1.1.1](https://github.com/posener/complete/tree/v1.1.1) -- github.com/ryanuber/columnize: [9b3edd6](https://github.com/ryanuber/columnize/tree/9b3edd6) -- github.com/sean-/seed: [e2103e2](https://github.com/sean-/seed/tree/e2103e2) -- github.com/subosito/gotenv: [v1.2.0](https://github.com/subosito/gotenv/tree/v1.2.0) -- github.com/willf/bitset: [d5bec33](https://github.com/willf/bitset/tree/d5bec33) -- gopkg.in/ini.v1: v1.51.0 -- gopkg.in/yaml.v3: 9f266ea -- rsc.io/quote/v3: v3.1.0 -- rsc.io/sampler: v1.3.0 +- github.com/go-errors/errors: [v1.0.1](https://github.com/go-errors/errors/tree/v1.0.1) +- github.com/gobuffalo/here: 
[v0.6.0](https://github.com/gobuffalo/here/tree/v0.6.0) +- github.com/google/shlex: [e7afc7f](https://github.com/google/shlex/tree/e7afc7f) +- github.com/markbates/pkger: [v0.17.1](https://github.com/markbates/pkger/tree/v0.17.1) +- github.com/monochromegane/go-gitignore: [205db1a](https://github.com/monochromegane/go-gitignore/tree/205db1a) +- github.com/niemeyer/pretty: [a10e7ca](https://github.com/niemeyer/pretty/tree/a10e7ca) +- github.com/xlab/treeprint: [a009c39](https://github.com/xlab/treeprint/tree/a009c39) +- go.starlark.net: 8dd3e2e +- golang.org/x/term: 6a3ed07 +- sigs.k8s.io/kustomize/api: v0.8.5 +- sigs.k8s.io/kustomize/cmd/config: v0.9.7 +- sigs.k8s.io/kustomize/kustomize/v4: v4.0.5 +- sigs.k8s.io/kustomize/kyaml: v0.10.15 ### Changed -- cloud.google.com/go/bigquery: v1.0.1 → v1.4.0 -- cloud.google.com/go/datastore: v1.0.0 → v1.1.0 -- cloud.google.com/go/pubsub: v1.0.1 → v1.2.0 -- cloud.google.com/go/storage: v1.0.0 → v1.6.0 -- cloud.google.com/go: v0.51.0 → v0.54.0 -- github.com/Microsoft/go-winio: [fc70bd9 → v0.4.15](https://github.com/Microsoft/go-winio/compare/fc70bd9...v0.4.15) -- github.com/aws/aws-sdk-go: [v1.35.5 → v1.35.24](https://github.com/aws/aws-sdk-go/compare/v1.35.5...v1.35.24) -- github.com/blang/semver: [v3.5.0+incompatible → v3.5.1+incompatible](https://github.com/blang/semver/compare/v3.5.0...v3.5.1) -- github.com/checkpoint-restore/go-criu/v4: [v4.0.2 → v4.1.0](https://github.com/checkpoint-restore/go-criu/v4/compare/v4.0.2...v4.1.0) -- github.com/containerd/containerd: [v1.3.3 → v1.4.1](https://github.com/containerd/containerd/compare/v1.3.3...v1.4.1) -- github.com/containerd/ttrpc: [v1.0.0 → v1.0.2](https://github.com/containerd/ttrpc/compare/v1.0.0...v1.0.2) -- github.com/containerd/typeurl: [v1.0.0 → v1.0.1](https://github.com/containerd/typeurl/compare/v1.0.0...v1.0.1) -- github.com/coreos/etcd: [v3.3.10+incompatible → v3.3.13+incompatible](https://github.com/coreos/etcd/compare/v3.3.10...v3.3.13) -- github.com/docker/docker: [aa6a989 → bd33bbf](https://github.com/docker/docker/compare/aa6a989...bd33bbf) -- github.com/go-gl/glfw/v3.3/glfw: [12ad95a → 6f7a984](https://github.com/go-gl/glfw/v3.3/glfw/compare/12ad95a...6f7a984) -- github.com/golang/groupcache: [215e871 → 8c9f03a](https://github.com/golang/groupcache/compare/215e871...8c9f03a) -- github.com/golang/mock: [v1.3.1 → v1.4.1](https://github.com/golang/mock/compare/v1.3.1...v1.4.1) -- github.com/golang/protobuf: [v1.4.2 → v1.4.3](https://github.com/golang/protobuf/compare/v1.4.2...v1.4.3) -- github.com/google/cadvisor: [v0.37.0 → v0.38.4](https://github.com/google/cadvisor/compare/v0.37.0...v0.38.4) -- github.com/google/go-cmp: [v0.4.0 → v0.5.2](https://github.com/google/go-cmp/compare/v0.4.0...v0.5.2) -- github.com/google/pprof: [d4f498a → 1ebb73c](https://github.com/google/pprof/compare/d4f498a...1ebb73c) -- github.com/google/uuid: [v1.1.1 → v1.1.2](https://github.com/google/uuid/compare/v1.1.1...v1.1.2) -- github.com/gorilla/mux: [v1.7.3 → v1.8.0](https://github.com/gorilla/mux/compare/v1.7.3...v1.8.0) -- github.com/gorilla/websocket: [v1.4.0 → 
v1.4.2](https://github.com/gorilla/websocket/compare/v1.4.0...v1.4.2) -- github.com/karrick/godirwalk: [v1.7.5 → v1.16.1](https://github.com/karrick/godirwalk/compare/v1.7.5...v1.16.1) -- github.com/opencontainers/runc: [819fcc6 → v1.0.0-rc92](https://github.com/opencontainers/runc/compare/819fcc6...v1.0.0-rc92) -- github.com/opencontainers/runtime-spec: [237cc4f → 4d89ac9](https://github.com/opencontainers/runtime-spec/compare/237cc4f...4d89ac9) -- github.com/opencontainers/selinux: [v1.5.2 → v1.6.0](https://github.com/opencontainers/selinux/compare/v1.5.2...v1.6.0) -- github.com/prometheus/procfs: [v0.1.3 → v0.2.0](https://github.com/prometheus/procfs/compare/v0.1.3...v0.2.0) -- github.com/quobyte/api: [v0.1.2 → v0.1.8](https://github.com/quobyte/api/compare/v0.1.2...v0.1.8) -- github.com/spf13/cobra: [v1.0.0 → v1.1.1](https://github.com/spf13/cobra/compare/v1.0.0...v1.1.1) -- github.com/spf13/viper: [v1.4.0 → v1.7.0](https://github.com/spf13/viper/compare/v1.4.0...v1.7.0) -- github.com/stretchr/testify: [v1.4.0 → v1.6.1](https://github.com/stretchr/testify/compare/v1.4.0...v1.6.1) -- github.com/vishvananda/netns: [52d707b → db3c7e5](https://github.com/vishvananda/netns/compare/52d707b...db3c7e5) -- go.opencensus.io: v0.22.2 → v0.22.3 -- golang.org/x/exp: da58074 → 6cc2880 -- golang.org/x/lint: fdd1cda → 738671d -- golang.org/x/net: ab34263 → 69a7880 -- golang.org/x/oauth2: 858c2ad → bf48bf1 -- golang.org/x/sys: ed371f2 → 5cba982 -- golang.org/x/text: v0.3.3 → v0.3.4 -- golang.org/x/time: 555d28b → 3af7569 -- golang.org/x/xerrors: 9bdfabe → 5ec99f8 -- google.golang.org/api: v0.15.1 → v0.20.0 -- google.golang.org/genproto: cb27e3a → 8816d57 -- google.golang.org/grpc: v1.27.0 → v1.27.1 -- google.golang.org/protobuf: v1.24.0 → v1.25.0 -- honnef.co/go/tools: v0.0.1-2019.2.3 → v0.0.1-2020.1.3 -- k8s.io/gengo: 8167cfd → 83324d8 -- k8s.io/klog/v2: v2.2.0 → v2.4.0 -- k8s.io/kube-openapi: 8b50664 → d219536 -- k8s.io/utils: d5654de → 67b214c -- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.12 → v0.0.14 -- sigs.k8s.io/structured-merge-diff/v4: b3cf1e8 → v4.0.2 +- dmitri.shuralyov.com/gpu/mtl: 666a987 → 28db891 +- github.com/creack/pty: [v1.1.7 → v1.1.9](https://github.com/creack/pty/compare/v1.1.7...v1.1.9) +- github.com/go-openapi/spec: [v0.19.3 → v0.19.5](https://github.com/go-openapi/spec/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/strfmt: [v0.19.3 → v0.19.5](https://github.com/go-openapi/strfmt/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/validate: [v0.19.5 → v0.19.8](https://github.com/go-openapi/validate/compare/v0.19.5...v0.19.8) +- github.com/google/cadvisor: [v0.38.7 → v0.38.8](https://github.com/google/cadvisor/compare/v0.38.7...v0.38.8) +- github.com/kr/text: [v0.1.0 → v0.2.0](https://github.com/kr/text/compare/v0.1.0...v0.2.0) +- github.com/mattn/go-runewidth: [v0.0.2 → v0.0.7](https://github.com/mattn/go-runewidth/compare/v0.0.2...v0.0.7) +- github.com/olekukonko/tablewriter: [a0225b3 → v0.0.4](https://github.com/olekukonko/tablewriter/compare/a0225b3...v0.0.4) +- github.com/sergi/go-diff: [v1.0.0 → v1.1.0](https://github.com/sergi/go-diff/compare/v1.0.0...v1.1.0) +- golang.org/x/crypto: 7f63de1 → 5ea612d +- golang.org/x/exp: 6cc2880 → 85be41e +- 
golang.org/x/mobile: d2bd2a2 → e6ae53a +- golang.org/x/mod: v0.3.0 → ce943fd +- golang.org/x/net: 69a7880 → 3d97a24 +- golang.org/x/sys: 5cba982 → a50acf3 +- golang.org/x/time: 3af7569 → f8bda1e +- golang.org/x/tools: 113979e → v0.1.0 +- gopkg.in/check.v1: 41f04d3 → 8fa4692 +- gopkg.in/yaml.v2: v2.2.8 → v2.4.0 +- k8s.io/kube-openapi: d219536 → 591a79e +- k8s.io/system-validators: v1.3.0 → v1.4.0 ### Removed -- github.com/armon/consul-api: [eb2c6b5](https://github.com/armon/consul-api/tree/eb2c6b5) -- github.com/go-ini/ini: [v1.9.0](https://github.com/go-ini/ini/tree/v1.9.0) -- github.com/ugorji/go: [v1.1.4](https://github.com/ugorji/go/tree/v1.1.4) -- github.com/xordataexchange/crypt: [b2862e3](https://github.com/xordataexchange/crypt/tree/b2862e3) +- github.com/codegangsta/negroni: [v1.0.0](https://github.com/codegangsta/negroni/tree/v1.0.0) +- github.com/golangplus/bytes: [45c989f](https://github.com/golangplus/bytes/tree/45c989f) +- github.com/golangplus/fmt: [2a5d6d7](https://github.com/golangplus/fmt/tree/2a5d6d7) +- github.com/gorilla/context: [v1.1.1](https://github.com/gorilla/context/tree/v1.1.1) +- github.com/kr/pty: [v1.1.5](https://github.com/kr/pty/tree/v1.1.5) +- sigs.k8s.io/kustomize: v2.0.3+incompatible -# v1.20.0-beta.1 +# v1.21.0-beta.0 -## Downloads for v1.20.0-beta.1 +## Downloads for v1.21.0-beta.0 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes.tar.gz) | 4eddf4850c2d57751696f352d0667309339090aeb30ff93e8db8a22c6cdebf74cb2d5dc78d4ae384c4e25491efc39413e2e420a804b76b421a9ad934e56b0667 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-src.tar.gz) | 59de5221162e9b6d88f5abbdb99765cb2b2e501498ea853fb65f2abe390211e28d9f21e0d87be3ade550a5ea6395d04552cf093d2ce2f99fd45ad46545dd13cb +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes.tar.gz) | 69b73a03b70b0ed006e9fef3f5b9bc68f0eb8dc40db6cc04777c03a2cb83a008c783012ca186b1c48357fb192403dbcf6960f120924785e2076e215b9012d546 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-src.tar.gz) | 9620fb6d37634271bdd423c09f33f3bd29e74298aa82c47dffc8cb6bd2ff44fa8987a53c53bc529db4ca96ec41503aa81cc8d0c3ac106f3b06c4720de933a8e6 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-darwin-amd64.tar.gz) | d69ffed19b034a4221fc084e43ac293cf392e98febf5bf580f8d92307a8421d8b3aab18f9ca70608937e836b42c7a34e829f88eba6e040218a4486986e2fca21 -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-386.tar.gz) | 1b542e165860c4adcd4550adc19b86c3db8cd75d2a1b8db17becc752da78b730ee48f1b0aaf8068d7bfbb1d8e023741ec293543bc3dd0f4037172a6917db8169 -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-amd64.tar.gz) | 90ad52785eecb43a6f9035b92b6ba39fc84e67f8bc91cf098e70f8cfdd405c4b9d5c02dccb21022f21bb5b6ce92fdef304def1da0a7255c308e2c5fb3a9cdaab -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-arm.tar.gz) | d0cb3322b056e1821679afa70728ffc0d3375e8f3326dabbe8185be2e60f665ab8985b13a1a432e10281b84a929e0f036960253ac0dd6e0b44677d539e98e61b -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-arm64.tar.gz) | 
3aecc8197e0aa368408624add28a2dd5e73f0d8a48e5e33c19edf91d5323071d16a27353a6f3e22df4f66ed7bfbae8e56e0a9050f7bbdf927ce6aeb29bba6374 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-ppc64le.tar.gz) | 6ff145058f62d478b98f1e418e272555bfb5c7861834fbbf10a8fb334cc7ff09b32f2666a54b230932ba71d2fc7d3b1c1f5e99e6fe6d6ec83926a9b931cd2474 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-s390x.tar.gz) | ff7b8bb894076e05a3524f6327a4a6353b990466f3292e84c92826cb64b5c82b3855f48b8e297ccadc8bcc15552bc056419ff6ff8725fc4e640828af9cc1331b -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-windows-386.tar.gz) | 6c6dcac9c725605763a130b5a975f2b560aa976a5c809d4e0887900701b707baccb9ca1aebc10a03cfa7338a6f42922bbf838ccf6800fc2a3e231686a72568b6 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-windows-amd64.tar.gz) | d12e3a29c960f0ddd1b9aabf5426ac1259863ac6c8f2be1736ebeb57ddca6b1c747ee2c363be19e059e38cf71488c5ea3509ad4d0e67fd5087282a5ad0ae9a48 +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-darwin-amd64.tar.gz) | 2a6f3fcd6b571f5ccde56b91e6e179a01899244be496dae16a2a16e0405c9437b75c6dc853b56f9a4876a7c0a60ec624ccd28400bf8fb960258263172f6860ba +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-386.tar.gz) | 78fe9ad9f9a9bc043293327223f0038a2c087ca65e87187a6dcae7a24aef9565fe498d295a4639b0b90524469a04930022fcecd815d0afc742eb87ddd8eb7ef5 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-amd64.tar.gz) | c025f5e5bd132355e7dd1296cf2ec752264e7f754c4d95fc34b076bd75bef2f571d30872bcb3d138ce95c592111353d275a80eb31f82c07000874b4c56282dbd +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-arm.tar.gz) | 9975cd2f08fbc202575fb15ba6fc51dab23155ca4d294ebb48516a81efa51f58bab3a87d41c865103756189b554c020371d729ad42880ba788f25047ffc46910 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-arm64.tar.gz) | 56a6836e24471e42e9d9a8488453f2d55598d70c8aca0a307d5116139c930c25c469fd0d1ab5060fbe88dad75a9b5209a08dc11d644af5f3ebebfbcb6c16266c +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-ppc64le.tar.gz) | b6a6cc9baad0ad85ed079ee80e6d6acc905095cfb440998bbc0f553b94fa80077bd58b8692754de477517663d51161705e6e89a1b6d04aa74819800db3517722 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-s390x.tar.gz) | 7b743481b340f510bf9ae28ea8ea91150aa1e8c37fe104b66d7b3aff62f5e6db3c590d2c13d14dbb5c928de31c7613372def2496075853611d10d6b5fa5b60bd +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-windows-386.tar.gz) | df06c7a524ce84c1f8d7836aa960c550c88dbca0ec4854df4dd0a85b3c84b8ecbc41b54e8c4669ce28ac670659ff0fad795deb1bc539f3c3b3aa885381265f5a +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-windows-amd64.tar.gz) | 4568497b684564f2a94fbea6cbfd778b891231470d9a6956c3b7a3268643d13b855c0fc5ebea5f769300cc0c7719c2c331c387f468816f182f63e515adeaa7a0 ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-amd64.tar.gz) | 
904e8c049179e071c6caa65f525f465260bb4d4318a6dd9cc05be2172f39f7cfc69d1672736e01d926045764fe8872e806444e3af77ffef823ede769537b7d20 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-arm.tar.gz) | 5934959374868aed8d4294de84411972660bca7b2e952201a9403f37e40c60a5c53eaea8001344d0bf4a00c8cd27de6324d88161388de27f263a5761357cb82b -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-arm64.tar.gz) | 4c884585970f80dc5462d9a734d7d5be9558b36c6e326a8a3139423efbd7284fa9f53fb077983647e17e19f03f5cb9bf26201450c78daecf10afa5a1ab5f9efc -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-ppc64le.tar.gz) | 235b78b08440350dcb9f13b63f7722bd090c672d8e724ca5d409256e5a5d4f46d431652a1aa908c3affc5b1e162318471de443d38b93286113e79e7f90501a9b -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-s390x.tar.gz) | 220fc9351702b3ecdcf79089892ceb26753a8a1deaf46922ffb3d3b62b999c93fef89440e779ca6043372b963081891b3a966d1a5df0cf261bdd44395fd28dce +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-amd64.tar.gz) | 42883cca2d312153baf693fc6024a295359a421e74fd70eefc927413be4e0353debe634e7cca6b9a8f7d8a0cee3717e03ba5d29a306e93139b1c2f3027535a6d +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-arm.tar.gz) | e0042215e84c769ba4fc4d159ccf67b2c4a26206bfffb0ec5152723dc813ff9c1426aa0e9b963d7bfa2efb266ca43561b596b459152882ebb42102ccf60bd8eb +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-arm64.tar.gz) | bfad29d43e14152cb9bc7c4df6aa77929c6eca64a294bb832215bdba9fa0ee2195a2b709c0267dc7426bb371b547ee80bb8461a8c678c9bffa0819aa7db96289 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-ppc64le.tar.gz) | ca67674c01c6cebdc8160c85b449eab1a23bb0557418665246e0208543fa2eaaf97679685c7b49bee3a4300904c0399c3d762ae34dc3e279fd69ce792c4b07ff +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-s390x.tar.gz) | 285352b628ec754b01b8ad4ef1427223a142d58ebcb46f6861df14d68643133b32330460b213b1ba5bc5362ff2b6dacd8e0c2d20cce6e760fa1954af8a60df8b ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-amd64.tar.gz) | fe59d3a1f21c47bab126f689687657f77fbcb46a2caeef48eecd073b2b22879f997a466911b5c5c829e9cf27e68a36ecdf18686d42714839d4b97d6c7281578d -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-arm.tar.gz) | 93e545aa963cfd11e0b2c6d47669b5ef70c5a86ef80c3353c1a074396bff1e8e7371dda25c39d78c7a9e761f2607b8b5ab843fa0c10b8ff9663098fae8d25725 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-arm64.tar.gz) | 5e0f177f9bec406a668d4b37e69b191208551fdf289c82b5ec898959da4f8a00a2b0695cbf1d2de5acb809321c6e5604f5483d33556543d92b96dcf80e814dd3 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-ppc64le.tar.gz) | 574412059e4d257eb904cd4892a075b6a2cde27adfa4976ee64c46d6768facece338475f1b652ad94c8df7cfcbb70ebdf0113be109c7099ab76ffdb6f023eefd -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-s390x.tar.gz) | 
b1ffaa6d7f77d89885c642663cb14a86f3e2ec2afd223e3bb2000962758cf0f15320969ffc4be93b5826ff22d54fdbae0dbea09f9d8228eda6da50b6fdc88758 -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-windows-amd64.tar.gz) | 388983765213cf3bdc1f8b27103ed79e39028767e5f1571e35ed1f91ed100e49f3027f7b7ff19b53fab7fbb6d723c0439f21fc6ed62be64532c25f5bfa7ee265 +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-amd64.tar.gz) | d92d9b30e7e44134a0cd9db4c01924d365991ea16b3131200b02a82cff89c8701f618cd90e7f1c65427bd4bb5f78b10d540b2262de2c143b401fa44e5b25627b +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-arm.tar.gz) | 551092f23c27fdea4bb2d0547f6075892534892a96fc2be7786f82b58c93bffdb5e1c20f8f11beb8bed46c24f36d4c18ec5ac9755435489efa28e6ae775739bd +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-arm64.tar.gz) | 26ae7f4163e527349b8818ee38b9ee062314ab417f307afa49c146df8f5a2bd689509b128bd4a1efd3896fd89571149a9955ada91f8ca0c2f599cd863d613c86 +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-ppc64le.tar.gz) | 821fa953f6cebc69d2d481e489f3e90899813d20e2eefbabbcadd019d004108e7540f741fabe60e8e7c6adbb1053ac97898bbdddec3ca19f34a71aa3312e0d4e +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-s390x.tar.gz) | 22197d4f66205d5aa9de83dfddcc4f2bb3195fd7067cdb5c21e61dbeae217bc112fb7ecff8a539579b60ad92298c2b4c87b9b7c7e6ec1ee1ffa0c6e4bc4412c1 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-windows-amd64.tar.gz) | 7e22e0d9603562a04dee16a513579f06b1ff6354d97d669bd68f8777ec7f89f6ef027fb23ab0445d7bba0bb689352f0cc748ce90e3f597c6ebe495464a96b860 -## Changelog since v1.20.0-beta.0 +## Changelog since v1.21.0-alpha.3 + +## Urgent Upgrade Notes +### (No, really, you MUST read this before you upgrade) + + - The metric `storage_operation_errors_total` is not removed, but is marked deprecated, and the metric `storage_operation_status_count` is marked deprecated. In both cases, the `storage_operation_duration_seconds` metric can be used to recover equivalent counts (using `status=fail-unknown` in the case of `storage_operation_errors_total`). ([#99045](https://github.com/kubernetes/kubernetes/pull/99045), [@mattcary](https://github.com/mattcary)) [SIG Instrumentation and Storage] + ## Changes by Kind ### Deprecation -- ACTION REQUIRED: The kube-apiserver ability to serve on an insecure port, deprecated since v1.10, has been removed. The insecure address flags `--address` and `--insecure-bind-address` have no effect in kube-apiserver and will be removed in v1.24. The insecure port flags `--port` and `--insecure-port` may only be set to 0 and will be removed in v1.24. ([#95856](https://github.com/kubernetes/kubernetes/pull/95856), [@knight42](https://github.com/knight42)) [SIG API Machinery, Node and Testing] +- The `batch/v2alpha1` CronJob type definitions and clients are deprecated and removed. ([#96987](https://github.com/kubernetes/kubernetes/pull/96987), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing] ### API Change -- + `TokenRequest` and `TokenRequestProjection` features have been promoted to GA. This feature allows generating service account tokens that are not visible in Secret objects and are tied to the lifetime of a Pod object.
See https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection for details on configuring and using this feature. The `TokenRequest` and `TokenRequestProjection` feature gates will be removed in v1.21. - + kubeadm's kube-apiserver Pod manifest now includes the following flags by default "--service-account-key-file", "--service-account-signing-key-file", "--service-account-issuer". ([#93258](https://github.com/kubernetes/kubernetes/pull/93258), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle, Storage and Testing] -- Certain fields on Service objects will be automatically cleared when changing the service's `type` to a mode that does not need those fields. For example, changing from type=LoadBalancer to type=ClusterIP will clear the NodePort assignments, rather than forcing the user to clear them. ([#95196](https://github.com/kubernetes/kubernetes/pull/95196), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, Network and Testing] -- Services will now have a `clusterIPs` field to go with `clusterIP`. `clusterIPs[0]` is a synonym for `clusterIP` and will be syncronized on create and update operations. ([#95894](https://github.com/kubernetes/kubernetes/pull/95894), [@thockin](https://github.com/thockin)) [SIG Network] +- Cluster admins can now turn off the /debug/pprof and /debug/flags/v endpoints in the kubelet by setting enableProfilingHandler and enableDebugFlagsHandler to false in their kubelet configuration file. enableProfilingHandler and enableDebugFlagsHandler can be set to true only when enableDebuggingHandlers is also set to true. ([#98458](https://github.com/kubernetes/kubernetes/pull/98458), [@SaranBalaji90](https://github.com/SaranBalaji90)) [SIG Node] +- The BoundServiceAccountTokenVolume feature has been promoted to beta, and enabled by default. + - This changes the tokens provided to containers at `/var/run/secrets/kubernetes.io/serviceaccount/token` to be time-limited, auto-refreshed, and invalidated when the containing pod is deleted. + - Clients should reload the token from disk periodically (once per minute is recommended) to ensure they continue to use a valid token; a brief reload sketch follows this list. `k8s.io/client-go` versions v11.0.0+ and v0.15.0+ reload tokens automatically. + - By default, injected tokens are given an extended lifetime so they remain valid even after a new refreshed token is provided. The metric `serviceaccount_stale_tokens_total` can be used to monitor for workloads that are depending on the extended lifetime and are continuing to use tokens even after a refreshed token is provided to the container. If that metric indicates no existing workloads are depending on extended lifetimes, injected token lifetime can be shortened to 1 hour by starting `kube-apiserver` with `--service-account-extend-token-expiration=false`. ([#95667](https://github.com/kubernetes/kubernetes/pull/95667), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle and Testing]
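As the BoundServiceAccountTokenVolume entry above recommends, clients that read the projected token themselves should re-read it periodically rather than caching it for the life of the process. A minimal sketch of that pattern, assuming the standard projected path; the loop structure and error handling are illustrative only (client-go v0.15.0+ already does this for you):

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// tokenPath is the standard projected location mentioned in the note above.
const tokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token"

// loadToken re-reads the projected service account token from disk. Bound
// tokens are rotated by the kubelet, so the value must not be cached forever.
func loadToken() (string, error) {
	b, err := os.ReadFile(tokenPath)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

func main() {
	// Re-read roughly once per minute, per the recommendation above.
	for {
		token, err := loadToken()
		if err != nil {
			fmt.Fprintln(os.Stderr, "reading token:", err)
		} else {
			_ = token // use the fresh token in Authorization headers here
		}
		time.Sleep(time.Minute)
	}
}
```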
### Feature -- A new metric `apiserver_request_filter_duration_seconds` has been introduced that - measures request filter latency in seconds. ([#95207](https://github.com/kubernetes/kubernetes/pull/95207), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Instrumentation] -- Add a new flag to set priority for the kubelet on Windows nodes so that workloads cannot overwhelm the node there by disrupting kubelet process. ([#96051](https://github.com/kubernetes/kubernetes/pull/96051), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node and Windows] -- Changed: default "Accept: */*" header added to HTTP probes. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes (https://github.com/kubernetes/website/pull/24756) ([#95641](https://github.com/kubernetes/kubernetes/pull/95641), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node] -- Client-go credential plugins can now be passed in the current cluster information via the KUBERNETES_EXEC_INFO environment variable. ([#95489](https://github.com/kubernetes/kubernetes/pull/95489), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery and Auth] -- Kube-apiserver: added support for compressing rotated audit log files with `--audit-log-compress` ([#94066](https://github.com/kubernetes/kubernetes/pull/94066), [@lojies](https://github.com/lojies)) [SIG API Machinery and Auth] +- A new histogram metric tracks the time the ttl-after-finished controller takes to delete a Job ([#98676](https://github.com/kubernetes/kubernetes/pull/98676), [@ahg-g](https://github.com/ahg-g)) [SIG Apps and Instrumentation] +- AWS cloudprovider supports auto-discovering subnets without any kubernetes.io/cluster/ tags. It also supports additional service annotation service.beta.kubernetes.io/aws-load-balancer-subnets to manually configure the subnets. ([#97431](https://github.com/kubernetes/kubernetes/pull/97431), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] +- Add --permit-address-sharing flag to kube-apiserver to listen with SO_REUSEADDR. Besides allowing listening on wildcard IPs like 0.0.0.0 and on specific IPs in parallel, this avoids waiting for the kernel to release sockets in the TIME_WAIT state, considerably reducing kube-apiserver restart times under certain conditions. ([#93861](https://github.com/kubernetes/kubernetes/pull/93861), [@sttts](https://github.com/sttts)) [SIG API Machinery] +- Add `csi_operations_seconds` metric on kubelet that exposes CSI operations duration and status for node CSI operations. ([#98979](https://github.com/kubernetes/kubernetes/pull/98979), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Instrumentation and Storage] +- Add `migrated` field into `storage_operation_duration_seconds` metric ([#99050](https://github.com/kubernetes/kubernetes/pull/99050), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Instrumentation and Storage] +- Add bash-completion for comma-separated lists on `kubectl get` ([#98301](https://github.com/kubernetes/kubernetes/pull/98301), [@phil9909](https://github.com/phil9909)) [SIG CLI] +- Added support for installing arm64 node artifacts. ([#99242](https://github.com/kubernetes/kubernetes/pull/99242), [@liu-cong](https://github.com/liu-cong)) [SIG Cloud Provider] +- Feature gate RootCAConfigMap is graduated to GA in 1.21 and will be removed in 1.22.
([#98033](https://github.com/kubernetes/kubernetes/pull/98033), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Auth] +- Kubeadm: during "init" and "join" perform preflight validation on the host / node name and throw warnings if a name is not compliant ([#99194](https://github.com/kubernetes/kubernetes/pull/99194), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubectl: `kubectl get` now omits managed fields by default. Users can set `--show-managed-fields` to true to show managedFields when the output format is either `json` or `yaml`. ([#96878](https://github.com/kubernetes/kubernetes/pull/96878), [@knight42](https://github.com/knight42)) [SIG CLI and Testing] +- Metrics can now be disabled explicitly via a command line flag (e.g. `--disabled-metrics=bad_metric1,bad_metric2`) ([#99217](https://github.com/kubernetes/kubernetes/pull/99217), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] +- TTLAfterFinished is now beta and enabled by default ([#98678](https://github.com/kubernetes/kubernetes/pull/98678), [@ahg-g](https://github.com/ahg-g)) [SIG Apps and Auth] +- The `RunAsGroup` feature has been promoted to GA in this release. ([#94641](https://github.com/kubernetes/kubernetes/pull/94641), [@krmayankk](https://github.com/krmayankk)) [SIG Auth and Node] +- Turn CronJobControllerV2 on by default. ([#98878](https://github.com/kubernetes/kubernetes/pull/98878), [@soltysh](https://github.com/soltysh)) [SIG Apps] +- UDP protocol support for Agnhost connect subcommand ([#98639](https://github.com/kubernetes/kubernetes/pull/98639), [@knabben](https://github.com/knabben)) [SIG Testing] +- Upgrades `IPv6Dualstack` to `Beta` and turns it on by default. New and existing clusters are not affected until the user starts adding the secondary Pod and Service CIDR CLI flags described here: https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/563-dual-stack ([#98969](https://github.com/kubernetes/kubernetes/pull/98969), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, Cloud Provider, Network and Node] ### Documentation -- Fake dynamic client: document that List does not preserve TypeMeta in UnstructuredList ([#95117](https://github.com/kubernetes/kubernetes/pull/95117), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] +- Fix ALPHA stability level reference link ([#98641](https://github.com/kubernetes/kubernetes/pull/98641), [@Jeffwan](https://github.com/Jeffwan)) [SIG Auth, Cloud Provider, Instrumentation and Storage] + +### Failing Test + +- Escape the special characters like `[`, `]` and ` ` that exist in vsphere windows path ([#98830](https://github.com/kubernetes/kubernetes/pull/98830), [@liyanhui1228](https://github.com/liyanhui1228)) [SIG Storage and Windows] +- Kube-proxy: fix a bug on UDP NodePort Services where stale conntrack entries may blackhole the traffic directed to the NodePort. ([#98305](https://github.com/kubernetes/kubernetes/pull/98305), [@aojea](https://github.com/aojea)) [SIG Network] ### Bug or Regression -- Added support to kube-proxy for externalTrafficPolicy=Local setting via Direct Server Return (DSR) load balancers on Windows.
([#93166](https://github.com/kubernetes/kubernetes/pull/93166), [@elweb9858](https://github.com/elweb9858)) [SIG Network] -- Disable watchcache for events ([#96052](https://github.com/kubernetes/kubernetes/pull/96052), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery] -- Disabled `LocalStorageCapacityIsolation` feature gate is honored during scheduling. ([#96092](https://github.com/kubernetes/kubernetes/pull/96092), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] -- Fix bug in JSON path parser where an error occurs when a range is empty ([#95933](https://github.com/kubernetes/kubernetes/pull/95933), [@brianpursley](https://github.com/brianpursley)) [SIG API Machinery] -- Fix k8s.io/apimachinery/pkg/api/meta.SetStatusCondition to update ObservedGeneration ([#95961](https://github.com/kubernetes/kubernetes/pull/95961), [@KnicKnic](https://github.com/KnicKnic)) [SIG API Machinery] -- Fixed a regression which prevented pods with `docker/default` seccomp annotations from being created in 1.19 if a PodSecurityPolicy was in place which did not allow `runtime/default` seccomp profiles. ([#95985](https://github.com/kubernetes/kubernetes/pull/95985), [@saschagrunert](https://github.com/saschagrunert)) [SIG Auth] -- Kubectl: print error if users place flags before plugin name ([#92343](https://github.com/kubernetes/kubernetes/pull/92343), [@knight42](https://github.com/knight42)) [SIG CLI] -- When creating a PVC with the volume.beta.kubernetes.io/storage-provisioner annotation already set, the PV controller might have incorrectly deleted the newly provisioned PV instead of binding it to the PVC, depending on timing and system load. ([#95909](https://github.com/kubernetes/kubernetes/pull/95909), [@pohly](https://github.com/pohly)) [SIG Apps and Storage] +- Add missing --kube-api-content-type in kubemark hollow template ([#98911](https://github.com/kubernetes/kubernetes/pull/98911), [@Jeffwan](https://github.com/Jeffwan)) [SIG Scalability and Testing] +- Avoid duplicate error messages when running kubectl edit quota ([#98201](https://github.com/kubernetes/kubernetes/pull/98201), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery and Apps] +- Cleanup subnet in frontend IP configs to prevent huge subnet request bodies in some scenarios. ([#98133](https://github.com/kubernetes/kubernetes/pull/98133), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Fix errors when accessing Windows container stats for Dockershim ([#98510](https://github.com/kubernetes/kubernetes/pull/98510), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node and Windows] +- Fixes spurious errors about IPv6 in kube-proxy logs on nodes with IPv6 disabled. ([#99127](https://github.com/kubernetes/kubernetes/pull/99127), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] +- Fixed a bug in identifying the containerd process in the method that ensures docker and containerd run in the correct containers with the proper OOM score set up.
([#97888](https://github.com/kubernetes/kubernetes/pull/97888), [@pacoxu](https://github.com/pacoxu)) [SIG Node] +- Kubelet now cleans up orphaned volume directories automatically ([#95301](https://github.com/kubernetes/kubernetes/pull/95301), [@lorenz](https://github.com/lorenz)) [SIG Node and Storage] +- When dynamically provisioning Azure File volumes for a premium account, the requested size will be set to 100GB if the request is initially lower than this value to accommodate Azure File requirements. ([#99122](https://github.com/kubernetes/kubernetes/pull/99122), [@huffmanca](https://github.com/huffmanca)) [SIG Cloud Provider and Storage] ### Other (Cleanup or Flake) -- Kubectl: the `generator` flag of `kubectl autoscale` has been deprecated and has no effect, it will be removed in a feature release ([#92998](https://github.com/kubernetes/kubernetes/pull/92998), [@SataQiu](https://github.com/SataQiu)) [SIG CLI] -- V1helpers.MatchNodeSelectorTerms now accepts just a Node and a list of Terms ([#95871](https://github.com/kubernetes/kubernetes/pull/95871), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage] -- `MatchNodeSelectorTerms` function moved to `k8s.io/component-helpers` ([#95531](https://github.com/kubernetes/kubernetes/pull/95531), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage] +- APIs for kubelet annotations and labels from k8s.io/kubernetes/pkg/kubelet/apis are now available under k8s.io/kubelet/pkg/apis/ ([#98931](https://github.com/kubernetes/kubernetes/pull/98931), [@michaelbeaumont](https://github.com/michaelbeaumont)) [SIG Apps, Auth and Node] +- Migrate `pkg/kubelet/(pod, pleg)` to structured logging ([#98990](https://github.com/kubernetes/kubernetes/pull/98990), [@gjkim42](https://github.com/gjkim42)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/nodestatus to structured logging ([#99001](https://github.com/kubernetes/kubernetes/pull/99001), [@QiWang19](https://github.com/QiWang19)) [SIG Node] +- Migrate pkg/kubelet/server logs to structured logging ([#98643](https://github.com/kubernetes/kubernetes/pull/98643), [@chenyw1990](https://github.com/chenyw1990)) [SIG Node] +- Migrate proxy/winkernel/proxier.go logs to structured logging ([#98001](https://github.com/kubernetes/kubernetes/pull/98001), [@JornShen](https://github.com/JornShen)) [SIG Network and Windows] +- Migrate scheduling_queue.go to structured logging ([#98358](https://github.com/kubernetes/kubernetes/pull/98358), [@tanjing2020](https://github.com/tanjing2020)) [SIG Scheduling] +- Several kubelet command line flags related to the deprecated dockershim are now deprecated. ([#98730](https://github.com/kubernetes/kubernetes/pull/98730), [@dims](https://github.com/dims)) [SIG Node] +- The deprecated feature gates `CSIDriverRegistry`, `BlockVolume` and `CSIBlockVolume` are now unconditionally enabled and can no longer be specified in component invocations.
([#98021](https://github.com/kubernetes/kubernetes/pull/98021), [@gavinfish](https://github.com/gavinfish)) [SIG Storage] ## Dependencies @@ -1379,763 +1097,530 @@ filename | sha512 hash _Nothing has changed._ ### Changed -_Nothing has changed._ +- sigs.k8s.io/structured-merge-diff/v4: v4.0.2 → v4.0.3 ### Removed _Nothing has changed._ -# v1.20.0-beta.0 +# v1.21.0-alpha.3 -## Downloads for v1.20.0-beta.0 +## Downloads for v1.21.0-alpha.3 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes.tar.gz) | 385e49e32bbd6996f07bcadbf42285755b8a8ef9826ee1ba42bd82c65827cf13f63e5634b834451b263a93b708299cbb4b4b0b8ddbc688433deaf6bec240aa67 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-src.tar.gz) | 842e80f6dcad461426fb699de8a55fde8621d76a94e54288fe9939cc1a3bbd0f4799abadac2c59bcf3f91d743726dbd17e1755312ae7fec482ef560f336dbcbb +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes.tar.gz) | 704ec916a1dbd134c54184d2652671f80ae09274f9d23dbbed312944ebeccbc173e2e6b6949b38bdbbfdaf8aa032844deead5efeda1b3150f9751386d9184bc8 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-src.tar.gz) | 57db9e7560cfc9c10e7059cb5faf9c4bd5eb8f9b7964f44f000a417021cf80873184b774e7c66c80d4aba84c14080c6bc335618db3d2e5f276436ae065e25408 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-darwin-amd64.tar.gz) | bde5e7d9ee3e79d1e69465a3ddb4bb36819a4f281b5c01a7976816d7c784410812dde133cdf941c47e5434e9520701b9c5e8b94d61dca77c172f87488dfaeb26 -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-386.tar.gz) | 721bb8444c9e0d7a9f8461e3f5428882d76fcb3def6eb11b8e8e08fae7f7383630699248660d69d4f6a774124d6437888666e1fa81298d5b5518bc4a6a6b2c92 -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-amd64.tar.gz) | 71e4edc41afbd65f813e7ecbc22b27c95f248446f005e288d758138dc4cc708735be7218af51bcf15e8b9893a3598c45d6a685f605b46f50af3762b02c32ed76 -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-arm.tar.gz) | bbefc749156f63898973f2f7c7a6f1467481329fb430d641fe659b497e64d679886482d557ebdddb95932b93de8d1e3e365c91d4bf9f110b68bd94b0ba702ded -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-arm64.tar.gz) | 9803190685058b4b64d002c2fbfb313308bcea4734ed53a8c340cfdae4894d8cb13b3e819ae64051bafe0fbf8b6ecab53a6c1dcf661c57640c75b0eb60041113 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-ppc64le.tar.gz) | bcdceea64cba1ae38ea2bab50d8fd77c53f6d673de12566050b0e3c204334610e6c19e4ace763e68b5e48ab9e811521208b852b1741627be30a2b17324fc1daf -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-s390x.tar.gz) | 41e36d00867e90012d5d5adfabfaae8d9f5a9fd32f290811e3c368e11822916b973afaaf43961081197f2cbab234090d97d89774e674aeadc1da61f7a64708a9 -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-windows-386.tar.gz) | c50fec5aec2d0e742f851f25c236cb73e76f8fc73b0908049a10ae736c0205b8fff83eb3d29b1748412edd942da00dd738195d9003f25b577d6af8359d84fb2f -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-windows-amd64.tar.gz) | 
0fd6777c349908b6d627e849ea2d34c048b8de41f7df8a19898623f597e6debd35b7bcbf8e1d43a1be3a9abb45e4810bc498a0963cf780b109e93211659e9c7e +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-darwin-amd64.tar.gz) | e2706efda92d5cf4f8b69503bb2f7703a8754407eff7f199bb77847838070e720e5f572126c14daa4c0c03b59bb1a63c1dfdeb6e936a40eff1d5497e871e3409 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-386.tar.gz) | 007bb23c576356ed0890bdfd25a0f98d552599e0ffec19fb982591183c7c1f216d8a3ffa3abf15216be12ae5c4b91fdcd48a7306a2d26b007b86a6abd553fc61 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-amd64.tar.gz) | 39504b0c610348beba60e8866fff265bad58034f74504951cd894c151a248db718d10f77ebc83f2c38b2d517f8513a46325b38889eefa261ca6dbffeceba50ff +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-arm.tar.gz) | 30bc2c40d0c759365422ad1651a6fb35909be771f463c5b971caf401f9209525d05256ab70c807e88628dd357c2896745eecf13eda0b748464da97d0a5ef2066 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-arm64.tar.gz) | 085cdf574dc8fd33ece667130b8c45830b522a07860e03a2384283b1adea73a9652ef3dfaa566e69ee00aea1a6461608814b3ce7a3f703e4a934304f7ae12f97 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-ppc64le.tar.gz) | b34b845037d83ea7b3e2d80a9ede4f889b71b17b93b1445f0d936a36e98c13ed6ada125630a68d9243a5fcd311ee37cdcc0c05da484da8488ea5060bc529dbfc +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-s390x.tar.gz) | c4758adc7a404b776556efaa79655db2a70777c562145d6ea6887f3335988367a0c2fcd4383e469340f2a768b22e786951de212805ca1cb91104d41c21e0c9ce +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-windows-386.tar.gz) | f51edc79702bbd1d9cb3a672852a405e11b20feeab64c5411a7e85c9af304960663eb6b23ef96e0f8c44a722fecf58cb6d700ea2c42c05b3269d8efd5ad803f2 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-windows-amd64.tar.gz) | 6a3507ce4ac40a0dc7e4720538863fa15f8faf025085a032f34b8fa0f6fa4e8c26849baf649b5b32829b9182e04f82721b13950d31cf218c35be6bf1c05d6abf ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-amd64.tar.gz) | 30d982424ca64bf0923503ae8195b2e2a59497096b2d9e58dfd491cd6639633027acfa9750bc7bccf34e1dc116d29d2f87cbd7ae713db4210ce9ac16182f0576 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-arm.tar.gz) | f08b62be9bc6f0745f820b0083c7a31eedb2ce370a037c768459a59192107b944c8f4345d0bb88fc975f2e7a803ac692c9ac3e16d4a659249d4600e84ff75d9e -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-arm64.tar.gz) | e3472b5b3dfae0a56e5363d52062b1e4a9fc227a05e0cf5ece38233b2c442f427970aab94a52377fb87e583663c120760d154bc1c4ac22dca1f4d0d1ebb96088 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-ppc64le.tar.gz) | 06c254e0a62f755d31bc40093d86c44974f0a60308716cc3214a6b3c249a4d74534d909b82f8a3dd3a3c9720e61465b45d2bb3a327ef85d3caba865750020dfb -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-s390x.tar.gz) | 
2edeb4411c26a0de057a66787091ab1044f71774a464aed898ffee26634a40127181c2edddb38e786b6757cca878fd0c3a885880eec6c3448b93c645770abb12 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-amd64.tar.gz) | 19181d162dfb0b30236e2bf1111000e037eece87c037ca2b24622ca94cb88db86aa4da4ca533522518b209bc9983bbfd6b880a7898e0da96b33f3f6c4690539b +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-arm.tar.gz) | 42a02f9e08a78ad5da6e5fa1ab12bf1e3c967c472fdbdadbd8746586da74dc8093682ba8513ff2a5301393c47ee9021b860e88ada56b13da386ef485708e46ca +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-arm64.tar.gz) | 3c8ba8eb02f70061689bd7fab7813542005efe2edc6cfc6b7aecd03ffedf0b81819ad91d69fff588e83023d595eefbfe636aa55e1856add8733bf42fff3c748f +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-ppc64le.tar.gz) | cd9e6537450411c39a06fd0b5819db3d16b668d403fb3627ec32c0e32dd1c4860e942934578ca0e1d1b8e6f21f450ff81e37e0cd46ff5c5faf7847ab074aefc5 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-s390x.tar.gz) | ada3f65e53bc0e0c0229694dd48c425388089d6d77111a62476d1b08f6ad1d8ab3d60b9ed7d95ac1b42c2c6be8dc0618f40679717160769743c43583d8452362 ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-amd64.tar.gz) | cc1d5b94b86070b5e7746d7aaeaeac3b3a5e5ebbff1ec33885f7eeab270a6177d593cb1975b2e56f4430b7859ad42da76f266629f9313e0f688571691ac448ed -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-arm.tar.gz) | 75e82c7c9122add3b24695b94dcb0723c52420c3956abf47511e37785aa48a1fa8257db090c6601010c4475a325ccfff13eb3352b65e3aa1774f104b09b766b0 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-arm64.tar.gz) | 16ef27c40bf4d678a55fcd3d3f7d09f1597eec2cc58f9950946f0901e52b82287be397ad7f65e8d162d8a9cdb4a34a610b6db8b5d0462be8e27c4b6eb5d6e5e7 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-ppc64le.tar.gz) | 939865f2c4cb6a8934f22a06223e416dec5f768ffc1010314586149470420a1d62aef97527c34d8a636621c9669d6489908ce1caf96f109e8d073cee1c030b50 -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-s390x.tar.gz) | bbfdd844075fb816079af7b73d99bc1a78f41717cdbadb043f6f5872b4dc47bc619f7f95e2680d4b516146db492c630c17424e36879edb45e40c91bc2ae4493c -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-windows-amd64.tar.gz) | a2b3ea40086fd71aed71a4858fd3fc79fd1907bc9ea8048ff3c82ec56477b0a791b724e5a52d79b3b36338c7fbd93dfd3d03b00ccea9042bda0d270fc891e4ec +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-amd64.tar.gz) | ae0fec6aa59e49624b55d9a11c12fdf717ddfe04bdfd4f69965d03004a34e52ee4a3e83f7b61d0c6a86f43b72c99f3decb195b39ae529ef30526d18ec5f58f83 +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-arm.tar.gz) | 9a48c140ab53b7ed8ecec6903988a1a474efc16d2538e5974bc9a12f0c9190be78c4f9e326bf4e982d0b7045a80b99dd0fda7e9b650663be5b89bfd991596746 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-arm64.tar.gz) | 6912adbc9300344bea470d6435f7b387bfce59767078c11728ce59faf47cd3f72b41b9604fcc5cda45e9816fe939fbe2fb33e52a773e6ff2dfa9a615b4df6141 
+[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-ppc64le.tar.gz) | d66dccfe3e6ed6d81567c70703f15375a53992b3a5e2814b98c32e581b861ad95912e03ed2562415d087624c008038bb4a816611fa255442ae752968ea15856b +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-s390x.tar.gz) | ad8c69a28f1fbafa3f1cb54909bfd3fc22b104bed63d7ca2b296208c9d43eb5f2943a0ff267da4c185186cdd9f7f77b315cd7f5f1bf9858c0bf42eceb9ac3c58 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-windows-amd64.tar.gz) | 91d723aa848a9cb028f5bcb41090ca346fb973961521d025c4399164de2c8029b57ca2c4daca560d3c782c05265d2eb0edb0abcce6f23d3efbecf2316a54d650 -## Changelog since v1.20.0-alpha.3 +## Changelog since v1.21.0-alpha.2 ## Urgent Upgrade Notes ### (No, really, you MUST read this before you upgrade) - - Kubeadm: improve the validation of serviceSubnet and podSubnet. - ServiceSubnet has to be limited in size, due to implementation details, and the mask can not allocate more than 20 bits. - PodSubnet validates against the corresponding cluster "--node-cidr-mask-size" of the kube-controller-manager, it fail if the values are not compatible. - kubeadm no longer sets the node-mask automatically on IPv6 deployments, you must check that your IPv6 service subnet mask is compatible with the default node mask /64 or set it accordenly. - Previously, for IPv6, if the podSubnet had a mask lower than /112, kubeadm calculated a node-mask to be multiple of eight and splitting the available bits to maximise the number used for nodes. ([#95723](https://github.com/kubernetes/kubernetes/pull/95723), [@aojea](https://github.com/aojea)) [SIG Cluster Lifecycle] - - Windows hyper-v container featuregate is deprecated in 1.20 and will be removed in 1.21 ([#95505](https://github.com/kubernetes/kubernetes/pull/95505), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] + - Newly provisioned PVs by gce-pd will no longer have the beta FailureDomain label. gce-pd volume plugin will start to have GA topology label instead. ([#98700](https://github.com/kubernetes/kubernetes/pull/98700), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider, Storage and Testing] + - Remove alpha CSIMigrationXXComplete flag and add alpha InTreePluginXXUnregister flag. Deprecate CSIMigrationvSphereComplete flag and it will be removed in 1.22. ([#98243](https://github.com/kubernetes/kubernetes/pull/98243), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Node and Storage] ## Changes by Kind -### Deprecation - -- Support 'controlplane' as a valid EgressSelection type in the EgressSelectorConfiguration API. 'Master' is deprecated and will be removed in v1.22. ([#95235](https://github.com/kubernetes/kubernetes/pull/95235), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] - ### API Change -- Add dual-stack Services (alpha). This is a BREAKING CHANGE to an alpha API. - It changes the dual-stack API wrt Service from a single ipFamily field to 3 - fields: ipFamilyPolicy (SingleStack, PreferDualStack, RequireDualStack), - ipFamilies (a list of families assigned), and clusterIPs (inclusive of - clusterIP). Most users do not need to set anything at all, defaulting will - handle it for them. Services are single-stack unless the user asks for - dual-stack. This is all gated by the "IPv6DualStack" feature gate. 
([#91824](https://github.com/kubernetes/kubernetes/pull/91824), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- Introduces a metric source for HPAs which allows scaling based on container resource usage. ([#90691](https://github.com/kubernetes/kubernetes/pull/90691), [@arjunrn](https://github.com/arjunrn)) [SIG API Machinery, Apps, Autoscaling and CLI] +- Adds support for portRange / EndPort in Network Policy (see the illustrative manifest after this list) ([#97058](https://github.com/kubernetes/kubernetes/pull/97058), [@rikatz](https://github.com/rikatz)) [SIG Apps and Network] +- Fixes using server-side apply with APIService resources ([#98576](https://github.com/kubernetes/kubernetes/pull/98576), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery, Apps and Testing] +- Kubernetes is now built using go1.15.7 ([#98363](https://github.com/kubernetes/kubernetes/pull/98363), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Node, Release and Testing] +- The scheduler extender filter interface can now report unresolvable failed nodes in the new field `FailedAndUnresolvableNodes` of the `ExtenderFilterResult` struct. Nodes in this map will be skipped in the preemption phase. ([#92866](https://github.com/kubernetes/kubernetes/pull/92866), [@cofyc](https://github.com/cofyc)) [SIG Scheduling]
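The endPort entry above adds a new (alpha in this release line) field to NetworkPolicy port rules. A minimal sketch is shown below; the `NetworkPolicyEndPort` feature-gate name and the requirement that the installed network plugin support the field are assumptions drawn from the upstream enhancement rather than from this changelog, so verify against the current API reference before relying on it.

```yaml
# Illustrative sketch only: allow egress to the TCP port range 32000-32768.
# Assumes the alpha NetworkPolicyEndPort feature gate is enabled and that the
# cluster's network plugin implements endPort.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-port-range
spec:
  podSelector:
    matchLabels:
      app: example
  policyTypes:
    - Egress
  egress:
    - ports:
        - protocol: TCP
          port: 32000
          endPort: 32768
```

Note that `endPort` is only meaningful together with a numeric `port`, which marks the start of the range.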
### Feature -- Add a metric for time taken to perform recursive permission change ([#95866](https://github.com/kubernetes/kubernetes/pull/95866), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage] -- Allow cross compilation of kubernetes on different platforms. ([#94403](https://github.com/kubernetes/kubernetes/pull/94403), [@bnrjee](https://github.com/bnrjee)) [SIG Release] -- Command to start network proxy changes from 'KUBE_ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE ./cluster/kube-up.sh' to 'KUBE_ENABLE_KONNECTIVITY_SERVICE=true ./hack/kube-up.sh' ([#92669](https://github.com/kubernetes/kubernetes/pull/92669), [@Jefftree](https://github.com/Jefftree)) [SIG Cloud Provider] -- DefaultPodTopologySpread graduated to Beta. The feature gate is enabled by default. ([#95631](https://github.com/kubernetes/kubernetes/pull/95631), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling and Testing] -- Kubernetes E2E test image manifest lists now contain Windows images. ([#77398](https://github.com/kubernetes/kubernetes/pull/77398), [@claudiubelu](https://github.com/claudiubelu)) [SIG Testing and Windows] -- Support for Windows container images (OS Versions: 1809, 1903, 1909, 2004) was added the pause:3.4 image. ([#91452](https://github.com/kubernetes/kubernetes/pull/91452), [@claudiubelu](https://github.com/claudiubelu)) [SIG Node, Release and Windows] +- A lease can only attach up to 10k objects. ([#98257](https://github.com/kubernetes/kubernetes/pull/98257), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery] +- Add an ignore-errors flag for drain, so that draining a group of nodes can continue past errors on individual nodes instead of breaking off ([#98203](https://github.com/kubernetes/kubernetes/pull/98203), [@yuzhiquan](https://github.com/yuzhiquan)) [SIG CLI] +- Base-images: Update to debian-iptables:buster-v1.4.0 + - Uses iptables 1.8.5 + - base-images: Update to debian-base:buster-v1.3.0 + - cluster/images/etcd: Build etcd:3.4.13-2 image + - Uses debian-base:buster-v1.3.0 ([#98401](https://github.com/kubernetes/kubernetes/pull/98401), [@pacoxu](https://github.com/pacoxu)) [SIG Testing] +- Export NewDebuggingRoundTripper function and DebugLevel options in the k8s.io/client-go/transport package. ([#98324](https://github.com/kubernetes/kubernetes/pull/98324), [@atosatto](https://github.com/atosatto)) [SIG API Machinery] +- Kubectl wait ensures that observedGeneration >= generation if applicable ([#97408](https://github.com/kubernetes/kubernetes/pull/97408), [@KnicKnic](https://github.com/KnicKnic)) [SIG CLI] +- Kubernetes is now built using go1.15.8 ([#98834](https://github.com/kubernetes/kubernetes/pull/98834), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Release and Testing] +- New admission controller "denyserviceexternalips" is available. Clusters which do not *need* the Service "externalIPs" feature should enable this controller to be more secure (a configuration sketch follows after this list). ([#97395](https://github.com/kubernetes/kubernetes/pull/97395), [@thockin](https://github.com/thockin)) [SIG API Machinery] +- Overall, enabling the `PreferNominatedNode` feature will improve scheduling performance in clusters where preemption happens frequently; in theory, however, with `PreferNominatedNode` enabled a pod might not be scheduled to the best candidate node in the cluster. ([#93179](https://github.com/kubernetes/kubernetes/pull/93179), [@chendave](https://github.com/chendave)) [SIG Scheduling and Testing] +- Pause image upgraded to 3.4.1 in kubelet and kubeadm for both Linux and Windows. ([#98205](https://github.com/kubernetes/kubernetes/pull/98205), [@pacoxu](https://github.com/pacoxu)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Node, Testing and Windows] +- The `ServiceAccountIssuerDiscovery` feature has graduated to GA, and is unconditionally enabled. The `ServiceAccountIssuerDiscovery` feature-gate will be removed in 1.22. ([#98553](https://github.com/kubernetes/kubernetes/pull/98553), [@mtaufen](https://github.com/mtaufen)) [SIG API Machinery, Auth and Testing]
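For the "denyserviceexternalips" entry above, a minimal sketch of one way to enable the controller on a kubeadm-managed control plane follows. The registered plugin name `DenyServiceExternalIPs` and the `extraArgs` passthrough are assumptions drawn from the upstream admission-controller documentation for this release line rather than from this diff, so confirm them before use.

```yaml
# Sketch: ask kubeadm to pass --enable-admission-plugins=DenyServiceExternalIPs
# to the kube-apiserver. Plugin name assumed from upstream documentation.
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
  extraArgs:
    enable-admission-plugins: DenyServiceExternalIPs
```

With the plugin enabled, requests that add or modify `externalIPs` on a Service are rejected, which is the hardening the release note recommends for clusters that do not use the feature.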
### Documentation -- Fake dynamic client: document that List does not preserve TypeMeta in UnstructuredList ([#95117](https://github.com/kubernetes/kubernetes/pull/95117), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] - -### Bug or Regression - -- Exposes and sets a default timeout for the SubjectAccessReview client for DelegatingAuthorizationOptions. ([#95725](https://github.com/kubernetes/kubernetes/pull/95725), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider] -- Alter wording to describe pods using a pvc ([#95635](https://github.com/kubernetes/kubernetes/pull/95635), [@RaunakShah](https://github.com/RaunakShah)) [SIG CLI] -- If we set SelectPolicy MinPolicySelect on scaleUp behavior or scaleDown behavior,Horizontal Pod Autoscaler doesn`t automatically scale the number of pods correctly ([#95647](https://github.com/kubernetes/kubernetes/pull/95647), [@JoshuaAndrew](https://github.com/JoshuaAndrew)) [SIG Apps and Autoscaling] -- Ignore apparmor for non-linux operating systems ([#93220](https://github.com/kubernetes/kubernetes/pull/93220), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- Ipvs: ensure selected scheduler kernel modules are loaded ([#93040](https://github.com/kubernetes/kubernetes/pull/93040), [@cmluciano](https://github.com/cmluciano)) [SIG Network] -- Kubeadm: add missing "--experimental-patches" flag to "kubeadm init phase control-plane" ([#95786](https://github.com/kubernetes/kubernetes/pull/95786), [@Sh4d1](https://github.com/Sh4d1)) [SIG Cluster Lifecycle] -- Reorganized iptables rules to fix a performance issue ([#95252](https://github.com/kubernetes/kubernetes/pull/95252), [@tssurya](https://github.com/tssurya)) [SIG Network] -- Unhealthy pods covered by PDBs can be successfully evicted if enough healthy pods are available. ([#94381](https://github.com/kubernetes/kubernetes/pull/94381), [@michaelgugino](https://github.com/michaelgugino)) [SIG Apps] -- Update the PIP when it is not in the Succeeded provisioning state during the LB update.
([#95748](https://github.com/kubernetes/kubernetes/pull/95748), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Update the frontend IP config when the service's `pipName` annotation is changed ([#95813](https://github.com/kubernetes/kubernetes/pull/95813), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] - -### Other (Cleanup or Flake) - -- NO ([#95690](https://github.com/kubernetes/kubernetes/pull/95690), [@nikhita](https://github.com/nikhita)) [SIG Release] - -## Dependencies - -### Added -- github.com/form3tech-oss/jwt-go: [v3.2.2+incompatible](https://github.com/form3tech-oss/jwt-go/tree/v3.2.2) - -### Changed -- github.com/Azure/go-autorest/autorest/adal: [v0.9.0 → v0.9.5](https://github.com/Azure/go-autorest/autorest/adal/compare/v0.9.0...v0.9.5) -- github.com/Azure/go-autorest/autorest/mocks: [v0.4.0 → v0.4.1](https://github.com/Azure/go-autorest/autorest/mocks/compare/v0.4.0...v0.4.1) -- golang.org/x/crypto: 75b2880 → 7f63de1 - -### Removed -_Nothing has changed._ - - - -# v1.20.0-alpha.3 - - -## Downloads for v1.20.0-alpha.3 - -### Source Code - -filename | sha512 hash --------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes.tar.gz) | 542cc9e0cd97732020491456402b6e2b4f54f2714007ee1374a7d363663a1b41e82b50886176a5313aaccfbfd4df2bc611d6b32d19961cdc98b5821b75d6b17c -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-src.tar.gz) | 5e5d725294e552fd1d14fd6716d013222827ac2d4e2d11a7a1fdefb77b3459bbeb69931f38e1597de205dd32a1c9763ab524c2af1551faef4f502ef0890f7fbf - -### Client binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-darwin-amd64.tar.gz) | 60004939727c75d0f06adc4449e16b43303941937c0e9ea9aca7d947e93a5aed5d11e53d1fc94caeb988be66d39acab118d406dc2d6cead61181e1ced6d2be1a -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-386.tar.gz) | 7edba9c4f1bf38fdf1fa5bff2856c05c0e127333ce19b17edf3119dc9b80462c027404a1f58a5eabf1de73a8f2f20aced043dda1fafd893619db1a188cda550c -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-amd64.tar.gz) | db1818aa82d072cb3e32a2a988e66d76ecf7cebc6b8a29845fa2d6ec27f14a36e4b9839b1b7ed8c43d2da9cde00215eb672a7e8ee235d2e3107bc93c22e58d38 -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-arm.tar.gz) | d2922e70d22364b1f5a1e94a0c115f849fe2575b231b1ba268f73a9d86fc0a9fbb78dc713446839a2593acf1341cb5a115992f350870f13c1a472bb107b75af7 -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-arm64.tar.gz) | 2e3ae20e554c7d4fc3a8afdfcafe6bbc81d4c5e9aea036357baac7a3fdc2e8098aa8a8c3dded3951667d57f667ce3fbf37ec5ae5ceb2009a569dc9002d3a92f9 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-ppc64le.tar.gz) | b54a34e572e6a86221577de376e6f7f9fcd82327f7fe94f2fc8d21f35d302db8a0f3d51e60dc89693999f5df37c96d0c3649a29f07f095efcdd59923ae285c95 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-s390x.tar.gz) | 5be1b70dc437d3ba88cb0b89cd1bc555f79896c3f5b5f4fa0fb046a0d09d758b994d622ebe5cef8e65bba938c5ae945b81dc297f9dfa0d98f82ea75f344a3a0d -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-windows-386.tar.gz) | 
88cf3f66168ef3bf9a5d3d2275b7f33799406e8205f2c202997ebec23d449aa4bb48b010356ab1cf52ff7b527b8df7c8b9947a43a82ebe060df83c3d21b7223a -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-windows-amd64.tar.gz) | 87d2d4ea1829da8cfa1a705a03ea26c759a03bd1c4d8b96f2c93264c4d172bb63a91d9ddda65cdc5478b627c30ae8993db5baf8be262c157d83bffcebe85474e - -### Server binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-amd64.tar.gz) | 7af691fc0b13a937797912374e3b3eeb88d5262e4eb7d4ebe92a3b64b3c226cb049aedfd7e39f639f6990444f7bcf2fe58699cf0c29039daebe100d7eebf60de -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-arm.tar.gz) | 557c47870ecf5c2090b2694c8f0c8e3b4ca23df5455a37945bd037bc6fb5b8f417bf737bb66e6336b285112cb52de0345240fdb2f3ce1c4fb335ca7ef1197f99 -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-arm64.tar.gz) | 981de6cf7679d743cdeef1e894314357b68090133814801870504ef30564e32b5675e270db20961e9a731e35241ad9b037bdaf749da87b6c4ce8889eeb1c5855 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-ppc64le.tar.gz) | 506578a21601ccff609ae757a55e68634c15cbfecbf13de972c96b32a155ded29bd71aee069c77f5f721416672c7a7ac0b8274de22bfd28e1ecae306313d96c5 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-s390x.tar.gz) | af0cdcd4a77a7cc8060a076641615730a802f1f02dab084e41926023489efec6102d37681c70ab0dbe7440cd3e72ea0443719a365467985360152b9aae657375 - -### Node binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-amd64.tar.gz) | 2d92c61596296279de1efae23b2b707415565d9d50cd61a7231b8d10325732b059bcb90f3afb36bef2575d203938c265572721e38df408e8792d3949523bd5d9 -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-arm.tar.gz) | c298de9b5ac1b8778729a2d8e2793ff86743033254fbc27014333880b03c519de81691caf03aa418c729297ee8942ce9ec89d11b0e34a80576b9936015dc1519 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-arm64.tar.gz) | daa3c65afda6d7aff206c1494390bbcc205c2c6f8db04c10ca967a690578a01c49d49c6902b85e7158f79fd4d2a87c5d397d56524a75991c9d7db85ac53059a7 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-ppc64le.tar.gz) | 05661908bb73bfcaf9c2eae96e9a6a793db5a7a100bce6df9e057985dd53a7a5248d72e81b6d13496bd38b9326c17cdb2edaf0e982b6437507245fb846e1efc6 -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-s390x.tar.gz) | 845e518e2c4ef0cef2c3b58f0b9ea5b5fe9b8a249717f789607752484c424c26ae854b263b7c0a004a8426feb9aa3683c177a9ed2567e6c3521f4835ea08c24a -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-windows-amd64.tar.gz) | 530e536574ed2c3e5973d3c0f0fdd2b4d48ef681a7a7c02db13e605001669eeb4f4b8a856fc08fc21436658c27b377f5d04dbcb3aae438098abc953b6eaf5712 - -## Changelog since v1.20.0-alpha.2 - -## Changes by Kind - -### API Change - -- New parameter `defaultingType` for `PodTopologySpread` plugin allows to use k8s defined or user provided default constraints ([#95048](https://github.com/kubernetes/kubernetes/pull/95048), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] +- Feat: azure file 
migration goes beta in 1.21. The CSIMigration feature gate moves to Beta (on by default) and CSIMigrationAzureFile to Beta (off by default, since it requires installation of the AzureFile CSI Driver) + The in-tree AzureFile plugin "kubernetes.io/azure-file" is now deprecated and will be removed in 1.23. Users should enable CSIMigration + CSIMigrationAzureFile features and install the AzureFile CSI Driver (https://github.com/kubernetes-sigs/azurefile-csi-driver) to avoid disruption to existing Pod and PVC objects at that time. + Users should start using the AzureFile CSI Driver directly for any new volumes; a feature-gate sketch follows after this section. ([#96293](https://github.com/kubernetes/kubernetes/pull/96293), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -### Feature +### Failing Test -- Added new k8s.io/component-helpers repository providing shared helper code for (core) components. ([#92507](https://github.com/kubernetes/kubernetes/pull/92507), [@ingvagabund](https://github.com/ingvagabund)) [SIG Apps, Node, Release and Scheduling] -- Adds `create ingress` command to `kubectl` ([#78153](https://github.com/kubernetes/kubernetes/pull/78153), [@amimof](https://github.com/amimof)) [SIG CLI and Network] -- Kubectl create now supports creating ingress objects. ([#94327](https://github.com/kubernetes/kubernetes/pull/94327), [@rikatz](https://github.com/rikatz)) [SIG CLI and Network] -- New default scheduling plugins order reduces scheduling and preemption latency when taints and node affinity are used ([#95539](https://github.com/kubernetes/kubernetes/pull/95539), [@soulxu](https://github.com/soulxu)) [SIG Scheduling] -- SCTP support in API objects (Pod, Service, NetworkPolicy) is now GA. - Note that this has no effect on whether SCTP is enabled on nodes at the kernel level, - and note that some cloud platforms and network plugins do not support SCTP traffic. ([#95566](https://github.com/kubernetes/kubernetes/pull/95566), [@danwinship](https://github.com/danwinship)) [SIG Apps and Network] -- Scheduling Framework: expose Run[Pre]ScorePlugins functions to PreemptionHandle which can be used in PostFilter extention point. ([#93534](https://github.com/kubernetes/kubernetes/pull/93534), [@everpeace](https://github.com/everpeace)) [SIG Scheduling and Testing] -- SelectorSpreadPriority maps to PodTopologySpread plugin when DefaultPodTopologySpread feature is enabled ([#95448](https://github.com/kubernetes/kubernetes/pull/95448), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- SetHostnameAsFQDN has been graduated to Beta and therefore it is enabled by default. ([#95267](https://github.com/kubernetes/kubernetes/pull/95267), [@javidiaz](https://github.com/javidiaz)) [SIG Node]
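The azure file note above asks users to enable the CSIMigration and CSIMigrationAzureFile feature gates alongside installing the CSI driver. As a hedged sketch only: the gate names come from the note itself, but the component placement (the gates typically need to be set on the kubelet and the kube-controller-manager) and the configuration mechanism shown are assumptions to verify against the CSI migration documentation.

```yaml
# Sketch: KubeletConfiguration fragment turning on the gates named in the
# azure file release note. The same gates are assumed to be needed on the
# kube-controller-manager as well; confirm in the CSI migration docs.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
featureGates:
  CSIMigration: true
  CSIMigrationAzureFile: true
```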
+- Kubelet: the HostPort implementation in dockershim was not taking the HostIP field into consideration, which meant that the same HostPort could not be used with different IP addresses. + This bug causes the conformance test "HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol" to fail. ([#98755](https://github.com/kubernetes/kubernetes/pull/98755), [@aojea](https://github.com/aojea)) [SIG Cloud Provider, Network and Node] ### Bug or Regression -- An issues preventing volume expand controller to annotate the PVC with `volume.kubernetes.io/storage-resizer` when the PVC StorageClass is already updated to the out-of-tree provisioner is now fixed. ([#94489](https://github.com/kubernetes/kubernetes/pull/94489), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG API Machinery, Apps and Storage] -- Change the mount way from systemd to normal mount except ceph and glusterfs intree-volume. ([#94916](https://github.com/kubernetes/kubernetes/pull/94916), [@smileusd](https://github.com/smileusd)) [SIG Apps, Cloud Provider, Network, Node, Storage and Testing] -- Fix azure disk attach failure for disk size bigger than 4TB ([#95463](https://github.com/kubernetes/kubernetes/pull/95463), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix azure disk data loss issue on Windows when unmount disk ([#95456](https://github.com/kubernetes/kubernetes/pull/95456), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix verb & scope reporting for kube-apiserver metrics (LIST reported instead of GET) ([#95562](https://github.com/kubernetes/kubernetes/pull/95562), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Fix vsphere detach failure for static PVs ([#95447](https://github.com/kubernetes/kubernetes/pull/95447), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fix: smb valid path error ([#95583](https://github.com/kubernetes/kubernetes/pull/95583), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] -- Fixed a bug causing incorrect formatting of `kubectl describe ingress`. ([#94985](https://github.com/kubernetes/kubernetes/pull/94985), [@howardjohn](https://github.com/howardjohn)) [SIG CLI and Network] -- Fixed a bug in client-go where new clients with customized `Dial`, `Proxy`, `GetCert` config may get stale HTTP transports. ([#95427](https://github.com/kubernetes/kubernetes/pull/95427), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] -- Fixes high CPU usage in kubectl drain ([#95260](https://github.com/kubernetes/kubernetes/pull/95260), [@amandahla](https://github.com/amandahla)) [SIG CLI] -- Support the node label `node.kubernetes.io/exclude-from-external-load-balancers` ([#95542](https://github.com/kubernetes/kubernetes/pull/95542), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Fix NPE in ephemeral storage eviction ([#98261](https://github.com/kubernetes/kubernetes/pull/98261), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Fixed a bug where, on nodes whose filter table INPUT chain policy is not ACCEPT, the healthcheck nodeport would not work. + Added iptables rules to allow healthcheck nodeport traffic. ([#97824](https://github.com/kubernetes/kubernetes/pull/97824), [@hanlins](https://github.com/hanlins)) [SIG Network] +- Fixed kube-proxy container image architecture for non-amd64 images.
([#98526](https://github.com/kubernetes/kubernetes/pull/98526), [@saschagrunert](https://github.com/saschagrunert)) [SIG API Machinery, Release and Testing] +- Fixed provisioning of Cinder volumes migrated to CSI when StorageClass with AllowedTopologies was used. ([#98311](https://github.com/kubernetes/kubernetes/pull/98311), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- Fixes a panic in the disruption budget controller for PDB objects with invalid selectors ([#98750](https://github.com/kubernetes/kubernetes/pull/98750), [@mortent](https://github.com/mortent)) [SIG Apps] +- Fixes connection errors when using `--volume-host-cidr-denylist` or `--volume-host-allow-local-loopback` ([#98436](https://github.com/kubernetes/kubernetes/pull/98436), [@liggitt](https://github.com/liggitt)) [SIG Network and Storage] +- If the user specifies an invalid timeout in the request URL, the request will be aborted with an HTTP 400. + - In cases where the client specifies a timeout in the request URL, the overall request deadline is now shortened, since the deadline is set up as soon as the request is received by the apiserver. ([#96901](https://github.com/kubernetes/kubernetes/pull/96901), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Testing] +- Kubeadm: Some text in the `kubeadm upgrade plan` output has changed. If you have scripts or other automation that parses this output, please review these changes and update your scripts to account for the new output. ([#98728](https://github.com/kubernetes/kubernetes/pull/98728), [@stmcginnis](https://github.com/stmcginnis)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug where external credentials in an existing admin.conf prevented the CA certificate from being written to the cluster-info ConfigMap. ([#98882](https://github.com/kubernetes/kubernetes/pull/98882), [@kvaps](https://github.com/kvaps)) [SIG Cluster Lifecycle] +- Kubeadm: fix bad token placeholder text in "config print *-defaults --help" ([#98839](https://github.com/kubernetes/kubernetes/pull/98839), [@Mattias-](https://github.com/Mattias-)) [SIG Cluster Lifecycle] +- Kubeadm: get k8s CI version markers from k8s infra bucket ([#98836](https://github.com/kubernetes/kubernetes/pull/98836), [@hasheddan](https://github.com/hasheddan)) [SIG Cluster Lifecycle and Release] +- Mitigate CVE-2020-8555 for kube-up using GCE by preventing local loopback volume hosts. ([#97934](https://github.com/kubernetes/kubernetes/pull/97934), [@mattcary](https://github.com/mattcary)) [SIG Cloud Provider and Storage] +- Remove CSI topology from migrated in-tree gcepd volume. ([#97823](https://github.com/kubernetes/kubernetes/pull/97823), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider and Storage] +- Sync node status during kubelet node shutdown. + Adds a pod admission handler that rejects new pods while the node is in the process of shutting down. ([#98005](https://github.com/kubernetes/kubernetes/pull/98005), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Truncates the message if it hits the NoteLengthLimit when the scheduler records an event indicating that the pod has failed to schedule.
([#98715](https://github.com/kubernetes/kubernetes/pull/98715), [@carlory](https://github.com/carlory)) [SIG Scheduling] +- We will no longer automatically delete all data when a failure is detected during creation of the volume data file on a CSI volume. Now we will only remove the data file and volume path. ([#96021](https://github.com/kubernetes/kubernetes/pull/96021), [@huffmanca](https://github.com/huffmanca)) [SIG Storage] ### Other (Cleanup or Flake) -- Fix func name NewCreateCreateDeploymentOptions ([#91931](https://github.com/kubernetes/kubernetes/pull/91931), [@lixiaobing1](https://github.com/lixiaobing1)) [SIG CLI] -- Kubeadm: update the default pause image version to 1.4.0 on Windows. With this update the image supports Windows versions 1809 (2019LTS), 1903, 1909, 2004 ([#95419](https://github.com/kubernetes/kubernetes/pull/95419), [@jsturtevant](https://github.com/jsturtevant)) [SIG Cluster Lifecycle and Windows] -- Upgrade snapshot controller to 3.0.0 ([#95412](https://github.com/kubernetes/kubernetes/pull/95412), [@saikat-royc](https://github.com/saikat-royc)) [SIG Cloud Provider] -- Remove the dependency of csi-translation-lib module on apiserver/cloud-provider/controller-manager ([#95543](https://github.com/kubernetes/kubernetes/pull/95543), [@wawa0210](https://github.com/wawa0210)) [SIG Release] -- Scheduler framework interface moved from pkg/scheduler/framework/v1alpha to pkg/scheduler/framework ([#95069](https://github.com/kubernetes/kubernetes/pull/95069), [@farah](https://github.com/farah)) [SIG Scheduling, Storage and Testing] -- UDP and SCTP protocols can left stale connections that need to be cleared to avoid services disruption, but they can cause problems that are hard to debug. - Kubernetes components using a loglevel greater or equal than 4 will log the conntrack operations and its output, to show the entries that were deleted. ([#95694](https://github.com/kubernetes/kubernetes/pull/95694), [@aojea](https://github.com/aojea)) [SIG Network] +- Fix the description of command line flags that can override --config ([#98254](https://github.com/kubernetes/kubernetes/pull/98254), [@changshuchao](https://github.com/changshuchao)) [SIG Scheduling] +- Migrate scheduler/taint_manager.go structured logging ([#98259](https://github.com/kubernetes/kubernetes/pull/98259), [@tanjing2020](https://github.com/tanjing2020)) [SIG Apps] +- Migrate staging/src/k8s.io/apiserver/pkg/admission logs to structured logging ([#98138](https://github.com/kubernetes/kubernetes/pull/98138), [@lala123912](https://github.com/lala123912)) [SIG API Machinery] +- Resolves flakes in the Ingress conformance tests due to conflicts with controllers updating the Ingress object ([#98430](https://github.com/kubernetes/kubernetes/pull/98430), [@liggitt](https://github.com/liggitt)) [SIG Network and Testing] +- The default delegating authorization options now allow unauthenticated access to healthz, readyz, and livez. A system:masters user connecting to an authz delegator will not perform an authz check. 
([#98325](https://github.com/kubernetes/kubernetes/pull/98325), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth, Cloud Provider and Scheduling] +- The e2e suite can be instructed not to wait for pods in kube-system to be ready or for all nodes to be ready by passing `--allowed-not-ready-nodes=-1` when invoking the e2e.test program. This allows callers to run subsets of the e2e suite in scenarios other than perfectly healthy clusters. ([#98781](https://github.com/kubernetes/kubernetes/pull/98781), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing] +- The feature gates `WindowsGMSA` and `WindowsRunAsUserName` that are GA since v1.18 are now removed. ([#96531](https://github.com/kubernetes/kubernetes/pull/96531), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Node and Windows] +- The new `-gce-zones` flag on the `e2e.test` binary instructs tests that check for information about how the cluster interacts with the cloud to limit their queries to the provided zone list. If not specified, the current behavior of asking the cloud provider for all available zones in multi zone clusters is preserved. ([#98787](https://github.com/kubernetes/kubernetes/pull/98787), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Cluster Lifecycle and Testing] ## Dependencies ### Added -_Nothing has changed._ +- github.com/moby/spdystream: [v0.2.0](https://github.com/moby/spdystream/tree/v0.2.0) ### Changed -_Nothing has changed._ +- github.com/NYTimes/gziphandler: [56545f4 → v1.1.1](https://github.com/NYTimes/gziphandler/compare/56545f4...v1.1.1) +- github.com/container-storage-interface/spec: [v1.2.0 → v1.3.0](https://github.com/container-storage-interface/spec/compare/v1.2.0...v1.3.0) +- github.com/go-logr/logr: [v0.2.0 → v0.4.0](https://github.com/go-logr/logr/compare/v0.2.0...v0.4.0) +- github.com/gogo/protobuf: [v1.3.1 → v1.3.2](https://github.com/gogo/protobuf/compare/v1.3.1...v1.3.2) +- github.com/kisielk/errcheck: [v1.2.0 → v1.5.0](https://github.com/kisielk/errcheck/compare/v1.2.0...v1.5.0) +- github.com/yuin/goldmark: [v1.1.27 → v1.2.1](https://github.com/yuin/goldmark/compare/v1.1.27...v1.2.1) +- golang.org/x/sync: cd5d95a → 67f06af +- golang.org/x/tools: c1934b7 → 113979e +- k8s.io/klog/v2: v2.4.0 → v2.5.0 +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.14 → v0.0.15 ### Removed -_Nothing has changed._ +- github.com/docker/spdystream: [449fdfc](https://github.com/docker/spdystream/tree/449fdfc) -# v1.20.0-alpha.2 +# v1.21.0-alpha.2 -## Downloads for v1.20.0-alpha.2 +## Downloads for v1.21.0-alpha.2 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes.tar.gz) | 45089a4d26d56a5d613ecbea64e356869ac738eca3cc71d16b74ea8ae1b4527bcc32f1dc35ff7aa8927e138083c7936603faf063121d965a2f0f8ba28fa128d8 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-src.tar.gz) | 646edd890d6df5858b90aaf68cc6e1b4589b8db09396ae921b5c400f2188234999e6c9633906692add08c6e8b4b09f12b2099132b0a7533443fb2a01cfc2bf81 +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes.tar.gz) | 6836f6c8514253fe0831fd171fc4ed92eb6d9a773491c8dc82b90d171a1b10076bd6bfaea56ec1e199c5f46c273265bdb9f174f0b2d99c5af1de4c99b862329e 
+[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-src.tar.gz) | d137694804741a05ab09e5f9a418448b66aba0146c028eafce61bcd9d7c276521e345ce9223ffbc703e8172041d58dfc56a3242a4df3686f24905a4541fcd306 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-darwin-amd64.tar.gz) | c136273883e24a2a50b5093b9654f01cdfe57b97461d34885af4a68c2c4d108c07583c02b1cdf7f57f82e91306e542ce8f3bddb12fcce72b744458bc4796f8eb -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-386.tar.gz) | 6ec59f1ed30569fa64ddb2d0de32b1ae04cda4ffe13f339050a7c9d7c63d425ee6f6d963dcf82c17281c4474da3eaf32c08117669052872a8c81bdce2c8a5415 -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-amd64.tar.gz) | 7b40a4c087e2ea7f8d055f297fcd39a3f1cb6c866e7a3981a9408c3c3eb5363c648613491aad11bc7d44d5530b20832f8f96f6ceff43deede911fb74aafad35f -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-arm.tar.gz) | cda9955feebea5acb8f2b5b87895d24894bbbbde47041453b1f926ebdf47a258ce0496aa27d06bcbf365b5615ce68a20d659b64410c54227216726e2ee432fca -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-arm64.tar.gz) | f65bd9241c7eb88a4886a285330f732448570aea4ededaebeabcf70d17ea185f51bf8a7218f146ee09fb1adceca7ee71fb3c3683834f2c415163add820fba96e -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-ppc64le.tar.gz) | 1e377599af100a81d027d9199365fb8208d443a8e0a97affff1a79dc18796e14b78cb53d6e245c1c1e8defd0e050e37bf5f2a23c8a3ff45a6d18d03619709bf5 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-s390x.tar.gz) | 1cdee81478246aa7e7b80ae4efc7f070a5b058083ae278f59fad088b75a8052761b0e15ab261a6e667ddafd6a69fb424fc307072ed47941cad89a85af7aee93d -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-windows-386.tar.gz) | d8774167c87b6844c348aa15e92d5033c528d6ab9e95d08a7cb22da68bafd8e46d442cf57a5f6affad62f674c10ae6947d524b94108b5e450ca78f92656d63c0 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-windows-amd64.tar.gz) | f664b47d8daa6036f8154c1dc1f881bfe683bf57c39d9b491de3848c03d051c50c6644d681baf7f9685eae45f9ce62e4c6dfea2853763cfe8256a61bdd59d894 +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-darwin-amd64.tar.gz) | 9478b047a97717953f365c13a098feb7e3cb30a3df22e1b82aa945f2208dcc5cb90afc441ba059a3ae7aafb4ee000ec3a52dc65a8c043a5ac7255a391c875330 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-386.tar.gz) | 44c8dd4b1ddfc256d35786c8abf45b0eb5f0794f5e310d2efc865748adddc50e8bf38aa71295ae8a82884cb65f2e0b9b0737b000f96fd8f2d5c19971d7c4d8e8 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-amd64.tar.gz) | e1291989892769de6b978c17b8612b94da6f3b735a4d895100af622ca9ebb968c75548afea7ab00445869625dd0da3afec979e333afbb445805f5d31c1c13cc7 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-arm.tar.gz) | 3c4bcb8cbe73822d68a2f62553a364e20bec56b638c71d0f58679b4f4b277d809142346f18506914e694f6122a3e0f767eab20b7b1c4dbb79e4c5089981ae0f1 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-arm64.tar.gz) | 
9389974a790268522e187f5ba5237f3ee4684118c7db76bc3d4164de71d8208702747ec333b204c7a78073ab42553cbbce13a1883fab4fec617e093b05fab332 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-ppc64le.tar.gz) | 63399e53a083b5af3816c28ff162c9de6b64c75da4647f0d6bbaf97afdf896823cb1e556f2abac75c6516072293026d3ff9f30676fd75143ac6ca3f4d21f4327 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-s390x.tar.gz) | 50898f197a9d923971ff9046c9f02779b57f7b3cea7da02f3ea9bab8c08d65a9c4a7531a2470fa14783460f52111a52b96ebf916c0a1d8215b4070e4e861c1b0 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-windows-386.tar.gz) | a7743e839e1aa19f5ee20b6ee5000ac8ef9e624ac5be63bb574fad6992e4b9167193ed07e03c9bc524e88bfeed66c95341a38a03bff1b10bc9910345f33019f0 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-windows-amd64.tar.gz) | 5f1d19c230bd3542866d16051808d184e9dd3e2f8c001ed4cee7b5df91f872380c2bf56a3add8c9413ead9d8c369efce2bcab4412174df9b823d3592677bf74e ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-amd64.tar.gz) | d6fcb4600be0beb9de222a8da64c35fe22798a0da82d41401d34d0f0fc7e2817512169524c281423d8f4a007cd77452d966317d5a1b67d2717a05ff346e8aa7d -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-arm.tar.gz) | 022a76cf10801f8afbabb509572479b68fdb4e683526fa0799cdbd9bab4d3f6ecb76d1d63d0eafee93e3edf6c12892d84b9c771ef2325663b95347728fa3d6c0 -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-arm64.tar.gz) | 0679aadd60bbf6f607e5befad74b5267eb2d4c1b55985cc25a97e0f4c5efb7acbb3ede91bfa6a5a5713dae4d7a302f6faaf678fd6b359284c33d9a6aca2a08bb -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-ppc64le.tar.gz) | 9f2cfeed543b515eafb60d9765a3afff4f3d323c0a5c8a0d75e3de25985b2627817bfcbe59a9a61d969e026e2b861adb974a09eae75b58372ed736ceaaed2a82 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-s390x.tar.gz) | 937258704d7b9dcd91f35f2d34ee9dd38c18d9d4e867408c05281bfbbb919ad012c95880bee84d2674761aa44cc617fb2fae1124cf63b689289286d6eac1c407 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-amd64.tar.gz) | ef2cac10febde231aeb6f131e589450c560eeaab8046b49504127a091cddc17bc518c2ad56894a6a033033ab6fc6e121b1cc23691683bc36f45fe6b1dd8e0510 +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-arm.tar.gz) | d11c9730307f08e80b2b8a7c64c3e9a9e43c622002e377dfe3a386f4541e24adc79a199a6f280f40298bb36793194fd44ed45defe8a3ee54a9cb1386bc26e905 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-arm64.tar.gz) | 28f8c32bf98ee1add7edf5d341c3bac1afc0085f90dcbbfb8b27a92087f13e2b53c327c8935ee29bf1dc3160655b32bbe3e29d5741a8124a3848a777e7d42933 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-ppc64le.tar.gz) | 99ae8d44b0de3518c27fa8bbddd2ecf053dfb789fb9d65f8a4ecf4c8331cf63d2f09a41c2bcd5573247d5f66a1b2e51944379df1715017d920d521b98589508a +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-s390x.tar.gz) | 
f8c0e954a2dfc6845614488dadeed069cc7f3f08e33c351d7a77c6ef97867af590932e8576d12998a820a0e4d35d2eee797c764e2810f09ab1e90a5acaeaad33 ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-amd64.tar.gz) | 076165d745d47879de68f4404eaf432920884be48277eb409e84bf2c61759633bf3575f46b0995f1fc693023d76c0921ed22a01432e756d7f8d9e246a243b126 -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-arm.tar.gz) | 1ff2e2e3e43af41118cdfb70c778e15035bbb1aca833ffd2db83c4bcd44f55693e956deb9e65017ebf3c553f2820ad5cd05f5baa33f3d63f3e00ed980ea4dfed -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-arm64.tar.gz) | b232c7359b8c635126899beee76998078eec7a1ef6758d92bcdebe8013b0b1e4d7b33ecbf35e3f82824fe29493400845257e70ed63c1635bfa36c8b3b4969f6f -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-ppc64le.tar.gz) | 51d415a068f554840f4c78d11a4fedebd7cb03c686b0ec864509b24f7a8667ebf54bb0a25debcf2b70f38be1e345e743f520695b11806539a55a3620ce21946f -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-s390x.tar.gz) | b51c082d8af358233a088b632cf2f6c8cfe5421471c27f5dc9ba4839ae6ea75df25d84298f2042770097554c01742bb7686694b331ad9bafc93c86317b867728 -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-windows-amd64.tar.gz) | 91b9d26620a2dde67a0edead0039814efccbdfd54594dda3597aaced6d89140dc92612ed0727bc21d63468efeef77c845e640153b09e39d8b736062e6eee0c76 +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-amd64.tar.gz) | c5456d50bfbe0d75fb150b3662ed7468a0abd3970792c447824f326894382c47bbd3a2cc5a290f691c8c09585ff6fe505ab86b4aff2b7e5ccee11b5e6354ae6c +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-arm.tar.gz) | 335b5cd8672e053302fd94d932fb2fa2e48eeeb1799650b3f93acdfa635e03a8453637569ab710c46885c8317759f4c60aaaf24dca9817d9fa47500fe4a3ca53 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-arm64.tar.gz) | 3ee87dbeed8ace9351ac89bdaf7274dd10b4faec3ceba0825f690ec7a2bb7eb7c634274a1065a0939eec8ff3e43f72385f058f4ec141841550109e775bc5eff9 +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-ppc64le.tar.gz) | 6956f965b8d719b164214ec9195fdb2c776b907fe6d2c524082f00c27872a73475927fd7d2a994045ce78f6ad2aa5aeaf1eb5514df1810d2cfe342fd4e5ce4a1 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-s390x.tar.gz) | 3b643aa905c709c57083c28dd9e8ffd88cb64466cda1499da7fc54176b775003e08b9c7a07b0964064df67c8142f6f1e6c13bfc261bd65fb064049920bfa57d0 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-windows-amd64.tar.gz) | b2e6d6fb0091f2541f9925018c2bdbb0138a95bab06b4c6b38abf4b7144b2575422263b78fb3c6fd09e76d90a25a8d35a6d4720dc169794d42c95aa22ecc6d5f -## Changelog since v1.20.0-alpha.1 +## Changelog since v1.21.0-alpha.1 +## Urgent Upgrade Notes + +### (No, really, you MUST read this before you upgrade) + + - Remove storage metrics `storage_operation_errors_total`, since we already have `storage_operation_status_count`.And add new field `status` for `storage_operation_duration_seconds`, so that we can know about all status storage operation latency. 
([#98332](https://github.com/kubernetes/kubernetes/pull/98332), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage] + ## Changes by Kind ### Deprecation -- Action-required: kubeadm: graduate the "kubeadm alpha certs" command to a parent command "kubeadm certs". The command "kubeadm alpha certs" is deprecated and will be removed in a future release. Please migrate. ([#94938](https://github.com/kubernetes/kubernetes/pull/94938), [@yagonobre](https://github.com/yagonobre)) [SIG Cluster Lifecycle] -- Action-required: kubeadm: remove the deprecated feature --experimental-kustomize from kubeadm commands. The feature was replaced with --experimental-patches in 1.19. To migrate see the --help description for the --experimental-patches flag. ([#94871](https://github.com/kubernetes/kubernetes/pull/94871), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: deprecate self-hosting support. The experimental command "kubeadm alpha self-hosting" is now deprecated and will be removed in a future release. ([#95125](https://github.com/kubernetes/kubernetes/pull/95125), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Removes deprecated scheduler metrics DeprecatedSchedulingDuration, DeprecatedSchedulingAlgorithmPredicateEvaluationSecondsDuration, DeprecatedSchedulingAlgorithmPriorityEvaluationSecondsDuration ([#94884](https://github.com/kubernetes/kubernetes/pull/94884), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling] -- Scheduler alpha metrics binding_duration_seconds and scheduling_algorithm_preemption_evaluation_seconds are deprecated, Both of those metrics are now covered as part of framework_extension_point_duration_seconds, the former as a PostFilter the latter and a Bind plugin. The plan is to remove both in 1.21 ([#95001](https://github.com/kubernetes/kubernetes/pull/95001), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling] +- Remove the TokenRequest and TokenRequestProjection feature gates ([#97148](https://github.com/kubernetes/kubernetes/pull/97148), [@wawa0210](https://github.com/wawa0210)) [SIG Node] +- Removing experimental windows container hyper-v support with Docker ([#97141](https://github.com/kubernetes/kubernetes/pull/97141), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] +- The `export` query parameter (inconsistently supported by API resources and deprecated in v1.14) is fully removed. Requests setting this query parameter will now receive a 400 status response. 
([#98312](https://github.com/kubernetes/kubernetes/pull/98312), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth and Testing] ### API Change -- GPU metrics provided by kubelet are now disabled by default ([#95184](https://github.com/kubernetes/kubernetes/pull/95184), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node] -- New parameter `defaultingType` for `PodTopologySpread` plugin allows to use k8s defined or user provided default constraints ([#95048](https://github.com/kubernetes/kubernetes/pull/95048), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Server Side Apply now treats LabelSelector fields as atomic (meaning the entire selector is managed by a single writer and updated together), since they contain interrelated and inseparable fields that do not merge in intuitive ways. ([#93901](https://github.com/kubernetes/kubernetes/pull/93901), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Storage and Testing] -- Status of v1beta1 CRDs without "preserveUnknownFields:false" will show violation "spec.preserveUnknownFields: Invalid value: true: must be false" ([#93078](https://github.com/kubernetes/kubernetes/pull/93078), [@vareti](https://github.com/vareti)) [SIG API Machinery] +- Enable SPDY pings to keep connections alive, so that `kubectl exec` and `kubectl port-forward` won't be interrupted. ([#97083](https://github.com/kubernetes/kubernetes/pull/97083), [@knight42](https://github.com/knight42)) [SIG API Machinery and CLI] -### Feature +### Documentation -- Added `get-users` and `delete-user` to the `kubectl config` subcommand ([#89840](https://github.com/kubernetes/kubernetes/pull/89840), [@eddiezane](https://github.com/eddiezane)) [SIG CLI] -- Added counter metric "apiserver_request_self" to count API server self-requests with labels for verb, resource, and subresource. ([#94288](https://github.com/kubernetes/kubernetes/pull/94288), [@LogicalShark](https://github.com/LogicalShark)) [SIG API Machinery, Auth, Instrumentation and Scheduling] -- Added new k8s.io/component-helpers repository providing shared helper code for (core) components. ([#92507](https://github.com/kubernetes/kubernetes/pull/92507), [@ingvagabund](https://github.com/ingvagabund)) [SIG Apps, Node, Release and Scheduling] -- Adds `create ingress` command to `kubectl` ([#78153](https://github.com/kubernetes/kubernetes/pull/78153), [@amimof](https://github.com/amimof)) [SIG CLI and Network] -- Allow configuring AWS LoadBalancer health check protocol via service annotations ([#94546](https://github.com/kubernetes/kubernetes/pull/94546), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] -- Azure: Support multiple services sharing one IP address ([#94991](https://github.com/kubernetes/kubernetes/pull/94991), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Ephemeral containers now apply the same API defaults as initContainers and containers ([#94896](https://github.com/kubernetes/kubernetes/pull/94896), [@wawa0210](https://github.com/wawa0210)) [SIG Apps and CLI] -- In dual-stack bare-metal clusters, you can now pass dual-stack IPs to `kubelet --node-ip`. - eg: `kubelet --node-ip 10.1.0.5,fd01::0005`. 
This is not yet supported for non-bare-metal - clusters. - - In dual-stack clusters where nodes have dual-stack addresses, hostNetwork pods - will now get dual-stack PodIPs. ([#95239](https://github.com/kubernetes/kubernetes/pull/95239), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] -- Introduces a new GCE specific cluster creation variable KUBE_PROXY_DISABLE. When set to true, this will skip over the creation of kube-proxy (whether the daemonset or static pod). This can be used to control the lifecycle of kube-proxy separately from the lifecycle of the nodes. ([#91977](https://github.com/kubernetes/kubernetes/pull/91977), [@varunmar](https://github.com/varunmar)) [SIG Cloud Provider] -- Kubeadm: do not throw errors if the current system time is outside of the NotBefore and NotAfter bounds of a loaded certificate. Print warnings instead. ([#94504](https://github.com/kubernetes/kubernetes/pull/94504), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: make the command "kubeadm alpha kubeconfig user" accept a "--config" flag and remove the following flags: - - apiserver-advertise-address / apiserver-bind-port: use either localAPIEndpoint from InitConfiguration or controlPlaneEndpoint from ClusterConfiguration. - - cluster-name: use clusterName from ClusterConfiguration - - cert-dir: use certificatesDir from ClusterConfiguration ([#94879](https://github.com/kubernetes/kubernetes/pull/94879), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Kubectl rollout history sts/sts-name --revision=some-revision will start showing the detailed view of the sts on that specified revision ([#86506](https://github.com/kubernetes/kubernetes/pull/86506), [@dineshba](https://github.com/dineshba)) [SIG CLI] -- Scheduling Framework: expose Run[Pre]ScorePlugins functions to PreemptionHandle which can be used in PostFilter extention point. ([#93534](https://github.com/kubernetes/kubernetes/pull/93534), [@everpeace](https://github.com/everpeace)) [SIG Scheduling and Testing] -- Send gce node startup scripts logs to console and journal ([#95311](https://github.com/kubernetes/kubernetes/pull/95311), [@karan](https://github.com/karan)) [SIG Cloud Provider and Node] -- Support kubectl delete orphan/foreground/background options ([#93384](https://github.com/kubernetes/kubernetes/pull/93384), [@zhouya0](https://github.com/zhouya0)) [SIG CLI and Testing] +- Official support to build kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97935](https://github.com/kubernetes/kubernetes/pull/97935), [@adeniyistephen](https://github.com/adeniyistephen)) [SIG Release and Testing] +- Set the kubelet option `--volume-stats-agg-period` to a negative value to disable volume calculations (see the sketch below). ([#96675](https://github.com/kubernetes/kubernetes/pull/96675), [@pacoxu](https://github.com/pacoxu)) [SIG Node]
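A minimal sketch of the kubelet setting mentioned in the note above, expressed as a configuration-file fragment rather than a command-line flag. The field name `volumeStatsAggPeriod` and the use of a negative duration to disable collection are assumptions drawn from the kubelet configuration reference, so verify against the documentation for your version.

```yaml
# Sketch: disable the kubelet's cached volume stats collection by setting the
# aggregation period to a negative duration (assumed equivalent to passing a
# negative value to --volume-stats-agg-period).
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
volumeStatsAggPeriod: "-1s"
```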
### Bug or Regression -- Change the mount way from systemd to normal mount except ceph and glusterfs intree-volume. ([#94916](https://github.com/kubernetes/kubernetes/pull/94916), [@smileusd](https://github.com/smileusd)) [SIG Apps, Cloud Provider, Network, Node, Storage and Testing] -- Cloud node controller: handle empty providerID from getProviderID ([#95342](https://github.com/kubernetes/kubernetes/pull/95342), [@nicolehanjing](https://github.com/nicolehanjing)) [SIG Cloud Provider] -- Fix a bug where the endpoint slice controller was not mirroring the parent service labels to its corresponding endpoint slices ([#94443](https://github.com/kubernetes/kubernetes/pull/94443), [@aojea](https://github.com/aojea)) [SIG Apps and Network] -- Fix azure disk attach failure for disk size bigger than 4TB ([#95463](https://github.com/kubernetes/kubernetes/pull/95463), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix azure disk data loss issue on Windows when unmount disk ([#95456](https://github.com/kubernetes/kubernetes/pull/95456), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix detach azure disk issue when vm not exist ([#95177](https://github.com/kubernetes/kubernetes/pull/95177), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix network_programming_latency metric reporting for Endpoints/EndpointSlice deletions, where we don't have correct timestamp ([#95363](https://github.com/kubernetes/kubernetes/pull/95363), [@wojtek-t](https://github.com/wojtek-t)) [SIG Network and Scalability] -- Fix scheduler cache snapshot when a Node is deleted before its Pods ([#95130](https://github.com/kubernetes/kubernetes/pull/95130), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Fix vsphere detach failure for static PVs ([#95447](https://github.com/kubernetes/kubernetes/pull/95447), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fixed a bug that prevents the use of ephemeral containers in the presence of a validating admission webhook. ([#94685](https://github.com/kubernetes/kubernetes/pull/94685), [@verb](https://github.com/verb)) [SIG Node and Testing] -- Gracefully delete nodes when their parent scale set went missing ([#95289](https://github.com/kubernetes/kubernetes/pull/95289), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- In dual-stack clusters, kubelet will now set up both IPv4 and IPv6 iptables rules, which may - fix some problems, eg with HostPorts. ([#94474](https://github.com/kubernetes/kubernetes/pull/94474), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] -- Kubeadm: for Docker as the container runtime, make the "kubeadm reset" command stop containers before removing them ([#94586](https://github.com/kubernetes/kubernetes/pull/94586), [@BedivereZero](https://github.com/BedivereZero)) [SIG Cluster Lifecycle] -- Kubeadm: warn but do not error out on missing "ca.key" files for root CA, front-proxy CA and etcd CA, during "kubeadm join --control-plane" if the user has provided all certificates, keys and kubeconfig files which require signing with the given CA keys. ([#94988](https://github.com/kubernetes/kubernetes/pull/94988), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Port mapping allows to map the same `containerPort` to multiple `hostPort` without naming the mapping explicitly.
-- Warn instead of fail when creating Roles and ClusterRoles with custom verbs via kubectl ([#92492](https://github.com/kubernetes/kubernetes/pull/92492), [@eddiezane](https://github.com/eddiezane)) [SIG CLI] +- Clean up ReplicaSets by revision instead of creation timestamp in the deployment controller ([#97407](https://github.com/kubernetes/kubernetes/pull/97407), [@waynepeking348](https://github.com/waynepeking348)) [SIG Apps] +- Ensure that client-go's EventBroadcaster is safe (non-racy) during shutdown. ([#95664](https://github.com/kubernetes/kubernetes/pull/95664), [@DirectXMan12](https://github.com/DirectXMan12)) [SIG API Machinery] +- Fix azure file migration issue ([#97877](https://github.com/kubernetes/kubernetes/pull/97877), [@andyzhangx](https://github.com/andyzhangx)) [SIG Auth, Cloud Provider and Storage] +- Fix the kubelet panicking after receiving the wrong signal ([#98200](https://github.com/kubernetes/kubernetes/pull/98200), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Fix repeatedly acquiring the inhibit lock ([#98088](https://github.com/kubernetes/kubernetes/pull/98088), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Fixed a bug where the kubelet could not start on Btrfs. ([#98042](https://github.com/kubernetes/kubernetes/pull/98042), [@gjkim42](https://github.com/gjkim42)) [SIG Node] +- Fixed an issue with garbage collection failing to clean up namespaced children of an object that was also referenced incorrectly by cluster-scoped children ([#98068](https://github.com/kubernetes/kubernetes/pull/98068), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Apps] +- Fixed the namespace having no effect when exposing a deployment with --dry-run=client. ([#97492](https://github.com/kubernetes/kubernetes/pull/97492), [@masap](https://github.com/masap)) [SIG CLI] +- Fixed a bug where a failed node might not have the NoExecute taint set correctly ([#96876](https://github.com/kubernetes/kubernetes/pull/96876), [@howieyuen](https://github.com/howieyuen)) [SIG Apps and Node] +- Indentation of the `Resource Quota` block in kubectl describe namespaces output is now correct. ([#97946](https://github.com/kubernetes/kubernetes/pull/97946), [@dty1er](https://github.com/dty1er)) [SIG CLI] +- KUBECTL_EXTERNAL_DIFF now accepts the equal sign for additional parameters. ([#98158](https://github.com/kubernetes/kubernetes/pull/98158), [@dougsland](https://github.com/dougsland)) [SIG CLI] +- Kubeadm: fix a bug where "kubeadm join" would not properly handle missing names for existing etcd members. ([#97372](https://github.com/kubernetes/kubernetes/pull/97372), [@ihgann](https://github.com/ihgann)) [SIG Cluster Lifecycle]
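For the KUBECTL_EXTERNAL_DIFF entry above, a minimal sketch; the diff flags and the manifest file name are hypothetical:

```
# The external diff program can now carry its own flags
KUBECTL_EXTERNAL_DIFF="diff -u -N" kubectl diff -f deploy.yaml
```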
+- Kubelet should ignore the cgroup driver check on Windows nodes. ([#97764](https://github.com/kubernetes/kubernetes/pull/97764), [@pacoxu](https://github.com/pacoxu)) [SIG Node and Windows] +- Make podTopologyHints protected by a lock ([#95111](https://github.com/kubernetes/kubernetes/pull/95111), [@choury](https://github.com/choury)) [SIG Node] +- Readjust the kubelet_containers_per_pod_count buckets ([#98169](https://github.com/kubernetes/kubernetes/pull/98169), [@wawa0210](https://github.com/wawa0210)) [SIG Instrumentation and Node] +- Scores from InterPodAffinity have stronger differentiation. ([#98096](https://github.com/kubernetes/kubernetes/pull/98096), [@leileiwan](https://github.com/leileiwan)) [SIG Scheduling] +- Specifying the KUBE_TEST_REPO environment variable when e2e tests are executed will instruct the test infrastructure to load that image from a location within the specified repo, using a predefined pattern. ([#93510](https://github.com/kubernetes/kubernetes/pull/93510), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing] +- Static pods will be deleted gracefully. ([#98103](https://github.com/kubernetes/kubernetes/pull/98103), [@gjkim42](https://github.com/gjkim42)) [SIG Node] +- Use network.Interface.VirtualMachine.ID to get the bound VM + Skip standalone VMs when reconciling the LoadBalancer ([#97635](https://github.com/kubernetes/kubernetes/pull/97635), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] ### Other (Cleanup or Flake) -- Added fine-grained debugging to the intra-pod conformance test to help easily resolve networking issues for nodes that might be unhealthy when running conformance or sonobuoy tests. ([#93837](https://github.com/kubernetes/kubernetes/pull/93837), [@jayunit100](https://github.com/jayunit100)) [SIG Network and Testing] -- AdmissionReview objects sent for the creation of Namespace API objects now populate the `namespace` attribute consistently (previously the `namespace` attribute was empty for Namespace creation via POST requests, and populated for Namespace creation via server-side-apply PATCH requests) ([#95012](https://github.com/kubernetes/kubernetes/pull/95012), [@nodo](https://github.com/nodo)) [SIG API Machinery and Testing] -- Client-go header logging (at verbosity levels >= 9) now masks `Authorization` header contents ([#95316](https://github.com/kubernetes/kubernetes/pull/95316), [@sfowl](https://github.com/sfowl)) [SIG API Machinery] -- Enhance log information of verifyRunAsNonRoot, adding pod and container information ([#94911](https://github.com/kubernetes/kubernetes/pull/94911), [@wawa0210](https://github.com/wawa0210)) [SIG Node] -- Errors from staticcheck: - vendor/k8s.io/client-go/discovery/cached/memory/memcache_test.go:94:2: this value of g is never used (SA4006) ([#95098](https://github.com/kubernetes/kubernetes/pull/95098), [@phunziker](https://github.com/phunziker)) [SIG API Machinery] -- Kubeadm: update the default pause image version to 1.4.0 on Windows.
With this update the image supports Windows versions 1809 (2019LTS), 1903, 1909, 2004 ([#95419](https://github.com/kubernetes/kubernetes/pull/95419), [@jsturtevant](https://github.com/jsturtevant)) [SIG Cluster Lifecycle and Windows] -- Masks ceph RBD adminSecrets in logs when logLevel >= 4 ([#95245](https://github.com/kubernetes/kubernetes/pull/95245), [@sfowl](https://github.com/sfowl)) [SIG Storage] -- Upgrade snapshot controller to 3.0.0 ([#95412](https://github.com/kubernetes/kubernetes/pull/95412), [@saikat-royc](https://github.com/saikat-royc)) [SIG Cloud Provider] -- Remove offensive words from kubectl cluster-info command ([#95202](https://github.com/kubernetes/kubernetes/pull/95202), [@rikatz](https://github.com/rikatz)) [SIG Architecture, CLI and Testing] -- The following new metrics are available. - - network_plugin_operations_total - - network_plugin_operations_errors_total ([#93066](https://github.com/kubernetes/kubernetes/pull/93066), [@AnishShah](https://github.com/AnishShah)) [SIG Instrumentation, Network and Node] -- Vsphere: improve logging message on node cache refresh event ([#95236](https://github.com/kubernetes/kubernetes/pull/95236), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider] -- `kubectl api-resources` now prints the API version (as 'API group/version', same as output of `kubectl api-versions`). The column APIGROUP is now APIVERSION ([#95253](https://github.com/kubernetes/kubernetes/pull/95253), [@sallyom](https://github.com/sallyom)) [SIG CLI] +- Kubeadm: change the default image repository for CI images from 'gcr.io/kubernetes-ci-images' to 'gcr.io/k8s-staging-ci-images' ([#97087](https://github.com/kubernetes/kubernetes/pull/97087), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Migrate generic_scheduler.go and types.go to structured logging. ([#98134](https://github.com/kubernetes/kubernetes/pull/98134), [@tanjing2020](https://github.com/tanjing2020)) [SIG Scheduling] +- Migrate proxy/winuserspace/proxier.go logs to structured logging ([#97941](https://github.com/kubernetes/kubernetes/pull/97941), [@JornShen](https://github.com/JornShen)) [SIG Network] +- Migrate staging/src/k8s.io/apiserver/pkg/audit/policy/reader.go logs to structured logging. ([#98252](https://github.com/kubernetes/kubernetes/pull/98252), [@lala123912](https://github.com/lala123912)) [SIG API Machinery and Auth] +- Migrate staging\src\k8s.io\apiserver\pkg\endpoints logs to structured logging ([#98093](https://github.com/kubernetes/kubernetes/pull/98093), [@lala123912](https://github.com/lala123912)) [SIG API Machinery] +- Node ([#96552](https://github.com/kubernetes/kubernetes/pull/96552), [@pandaamanda](https://github.com/pandaamanda)) [SIG Apps, Cloud Provider, Node and Scheduling] +- The kubectl alpha debug command was scheduled to be removed in v1.21. ([#98111](https://github.com/kubernetes/kubernetes/pull/98111), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI] +- Update cri-tools to [v1.20.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.20.0) ([#97967](https://github.com/kubernetes/kubernetes/pull/97967), [@rajibmitra](https://github.com/rajibmitra)) [SIG Cloud Provider] +- Windows nodes on GCE will take longer to start due to dependencies installed at node creation time. 
([#98284](https://github.com/kubernetes/kubernetes/pull/98284), [@pjh](https://github.com/pjh)) [SIG Cloud Provider] ## Dependencies ### Added -- github.com/jmespath/go-jmespath/internal/testify: [v1.5.1](https://github.com/jmespath/go-jmespath/internal/testify/tree/v1.5.1) +_Nothing has changed._ ### Changed -- github.com/aws/aws-sdk-go: [v1.28.2 → v1.35.5](https://github.com/aws/aws-sdk-go/compare/v1.28.2...v1.35.5) -- github.com/jmespath/go-jmespath: [c2b33e8 → v0.4.0](https://github.com/jmespath/go-jmespath/compare/c2b33e8...v0.4.0) -- k8s.io/kube-openapi: 6aeccd4 → 8b50664 -- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.9 → v0.0.12 -- sigs.k8s.io/structured-merge-diff/v4: v4.0.1 → b3cf1e8 +- github.com/google/cadvisor: [v0.38.6 → v0.38.7](https://github.com/google/cadvisor/compare/v0.38.6...v0.38.7) +- k8s.io/gengo: 83324d8 → b6c5ce2 ### Removed _Nothing has changed._ -# v1.20.0-alpha.1 +# v1.21.0-alpha.1 -## Downloads for v1.20.0-alpha.1 +## Downloads for v1.21.0-alpha.1 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes.tar.gz) | e7daed6502ea07816274f2371f96fe1a446d0d7917df4454b722d9eb3b5ff6163bfbbd5b92dfe7a0c1d07328b8c09c4ae966e482310d6b36de8813aaf87380b5 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-src.tar.gz) | e91213a0919647a1215d4691a63b12d89a3e74055463a8ebd71dc1a4cabf4006b3660881067af0189960c8dab74f4a7faf86f594df69021901213ee5b56550ea +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes.tar.gz) | b2bacd5c3fc9f829e6269b7d2006b0c6e464ff848bb0a2a8f2fe52ad2d7c4438f099bd8be847d8d49ac6e4087f4d74d5c3a967acd798e0b0cb4d7a2bdb122997 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-src.tar.gz) | 518ac5acbcf23902fb1b902b69dbf3e86deca5d8a9b5f57488a15f185176d5a109558f3e4df062366af874eca1bcd61751ee8098b0beb9bcdc025d9a1c9be693 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-darwin-amd64.tar.gz) | 1f3add5f826fa989820d715ca38e8864b66f30b59c1abeacbb4bfb96b4e9c694eac6b3f4c1c81e0ee3451082d44828cb7515315d91ad68116959a5efbdaef1e1 -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-386.tar.gz) | c62acdc8993b0a950d4b0ce0b45473bf96373d501ce61c88adf4007afb15c1d53da8d53b778a7eccac6c1624f7fdda322be9f3a8bc2d80aaad7b4237c39f5eaf -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-amd64.tar.gz) | 1203ababfe00f9bc5be5c059324c17160a96530c1379a152db33564bbe644ccdb94b30eea15a0655bd652efb17895a46c31bbba19d4f5f473c2a0ff62f6e551f -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-arm.tar.gz) | 31860088596e12d739c7aed94556c2d1e217971699b950c8417a3cea1bed4e78c9ff1717b9f3943354b75b4641d4b906cd910890dbf4278287c0d224837d9a7d -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-arm64.tar.gz) | 8d469f37fe20d6e15b5debc13cce4c22e8b7a4f6a4ac787006b96507a85ce761f63b28140d692c54b5f7deb08697f8d5ddb9bbfa8f5ac0d9241fc7de3a3fe3cd -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-ppc64le.tar.gz) | 0d62ee1729cd5884946b6c73701ad3a570fa4d642190ca0fe5c1db0fb0cba9da3ac86a948788d915b9432d28ab8cc499e28aadc64530b7d549ee752a6ed93ec1 
-[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-s390x.tar.gz) | 0fc0420e134ec0b8e0ab2654e1e102cebec47b48179703f1e1b79d51ee0d6da55a4e7304d8773d3cf830341ac2fe3cede1e6b0460fd88f7595534e0730422d5a -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-windows-386.tar.gz) | 3fb53b5260f4888c77c0e4ff602bbcf6bf38c364d2769850afe2b8d8e8b95f7024807c15e2b0d5603e787c46af8ac53492be9e88c530f578b8a389e3bd50c099 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-windows-amd64.tar.gz) | 2f44c93463d6b5244ce0c82f147e7f32ec2233d0e29c64c3c5759e23533aebd12671bf63e986c0861e9736f9b5259bb8d138574a7c8c8efc822e35cd637416c0 +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-darwin-amd64.tar.gz) | eaa7aea84a5ed954df5ec710cbeb6ec88b46465f43cb3d09aabe2f714b84a050a50bf5736089f09dbf1090f2e19b44823d656c917e3c8c877630756c3026f2b6 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-386.tar.gz) | 47f74b8d46ad1779c5b0b5f15aa15d5513a504eeb6f53db4201fbe9ff8956cb986b7c1b0e9d50a99f78e9e2a7f304f3fc1cc2fa239296d9a0dd408eb6069e975 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-amd64.tar.gz) | 1a148e282628b008c8abd03dd12ec177ced17584b5115d92cd33dd251e607097d42e9da8c7089bd947134b900f85eb75a4740b6a5dd580c105455b843559df39 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-arm.tar.gz) | d13d2feb73bd032dc01f7e2955b98d8215a39fe1107d037a73fa1f7d06c3b93ebaa53ed4952d845c64454ef3cca533edb97132d234d50b6fb3bcbd8a8ad990eb +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-arm64.tar.gz) | 8252105a17b09a78e9ad2c024e4e401a69764ac869708a071aaa06f81714c17b9e7c5b2eb8efde33f24d0b59f75c5da607d5e1e72bdf12adfbb8c829205cd1c1 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-ppc64le.tar.gz) | 297a9082df4988389dc4be30eb636dff49f36f5d87047bab44745884e610f46a17ae3a08401e2cab155b7c439f38057bfd8288418215f7dd3bf6a49dbe61ea0e +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-s390x.tar.gz) | 04c06490dd17cd5dccfd92bafa14acf64280ceaea370d9635f23aeb6984d1beae6d0d1d1506edc6f30f927deeb149b989d3e482b47fbe74008b371f629656e79 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-windows-386.tar.gz) | ec6e9e87a7d685f8751d7e58f24f417753cff5554a7229218cb3a08195d461b2e12409344950228e9fbbc92a8a06d35dd86242da6ff1e6652ec1fae0365a88c1 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-windows-amd64.tar.gz) | 51039e6221d3126b5d15e797002ae01d4f0b10789c5d2056532f27ef13f35c5a2e51be27764fda68e8303219963126559023aed9421313bec275c0827fbcaf8a ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-amd64.tar.gz) | ae82d14b1214e4100f0cc2c988308b3e1edd040a65267d0eddb9082409f79644e55387889e3c0904a12c710f91206e9383edf510990bee8c9ea2e297b6472551 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-arm.tar.gz) | 9a2a5828b7d1ddb16cc19d573e99a4af642f84129408e6203eeeb0558e7b8db77f3269593b5770b6a976fe9df4a64240ed27ad05a4bd43719e55fce1db0abf58 
-[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-arm64.tar.gz) | ed700dd226c999354ce05b73927388d36d08474c15333ae689427de15de27c84feb6b23c463afd9dd81993315f31eb8265938cfc7ecf6f750247aa42b9b33fa9 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-ppc64le.tar.gz) | abb7a9d726538be3ccf5057a0c63ff9732b616e213c6ebb81363f0c49f1e168ce8068b870061ad7cba7ba1d49252f94cf00a5f68cec0f38dc8fce4e24edc5ca6 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-s390x.tar.gz) | 3a51888af1bfdd2d5b0101d173ee589c1f39240e4428165f5f85c610344db219625faa42f00a49a83ce943fb079be873b1a114a62003fae2f328f9bf9d1227a4 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-amd64.tar.gz) | 4edf820930c88716263560275e3bd7fadb8dc3700b9f8e1d266562e356e0abeb1a913f536377dab91218e3940b447d6bf1da343b85da25c2256dc4dcde5798dd +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-arm.tar.gz) | b15213e53a8ab4ba512ce6ef9ad42dd197d419c61615cd23de344227fd846c90448d8f3d98e555b63ba5b565afa627cca6b7e3990ebbbba359c96f2391302df1 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-arm64.tar.gz) | 5be29cca9a9358fc68351ee63e99d57dc2ffce6e42fc3345753dbbf7542ff2d770c4852424158540435fa6e097ce3afa9b13affc40c8b3b69fe8406798f8068f +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-ppc64le.tar.gz) | 89fd99ab9ce85db0b94b86709932105efc883cc93959cf7ea9a39e79a4acea23064d7010eeb577450cccabe521c04b7ba47bbec212ed37edeed7cb04bad34518 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-s390x.tar.gz) | 2fbc30862c77d247aa8d96ab9d1a144599505287b0033a3a2d0988958e7bb2f2e8b67f52c1fec74b4ec47d74ba22cd0f6cb5c4228acbaa72b1678d5fece0254d ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-amd64.tar.gz) | d0f28e3c38ca59a7ff1bfecb48a1ce97116520355d9286afdca1200d346c10018f5bbdf890f130a388654635a2e83e908b263ed45f8a88defca52a7c1d0a7984 -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-arm.tar.gz) | ed9d3f13028beb3be39bce980c966f82c4b39dc73beaae38cc075fea5be30b0309e555cb2af8196014f2cc9f0df823354213c314b4d6545ff6e30dd2d00ec90e -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-arm64.tar.gz) | ad5b3268db365dcdded9a9a4bffc90c7df0f844000349accdf2b8fb5f1081e553de9b9e9fb25d5e8a4ef7252d51fa94ef94d36d2ab31d157854e164136f662c2 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-ppc64le.tar.gz) | c4de2524e513996def5eeba7b83f7b406f17eaf89d4d557833a93bd035348c81fa9375dcd5c27cfcc55d73995449fc8ee504be1b3bd7b9f108b0b2f153cb05ae -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-s390x.tar.gz) | 9157b44e3e7bd5478af9f72014e54d1afa5cd19b984b4cd8b348b312c385016bb77f29db47f44aea08b58abf47d8a396b92a2d0e03f2fe8acdd30f4f9466cbdb -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-windows-amd64.tar.gz) | 8b40a43c5e6447379ad2ee8aac06e8028555e1b370a995f6001018a62411abe5fbbca6060b3d1682c5cadc07a27d49edd3204e797af46368800d55f4ca8aa1de 
+[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-amd64.tar.gz) | 95658d321a0a371c0900b401d1469d96915310afbc4e4b9b11f031438bb188513b57d5a60b5316c3b0c18f541cda6f0ac42f59a76495f8abc743a067115da23a +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-arm.tar.gz) | f375acfb42aad6c65b833c270e7e3acfe9cd1d6b2441c33874e77faae263957f7acfe86f1b71f14298118595e4cc6952c7dea0c832f7f2e72428336f13034362 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-arm64.tar.gz) | 43b4baccd58d74e7f48d096ab92f2bbbcdf47e30e7a3d2b56c6cc9f90002cfd4fefaac894f69bd5f9f4dbdb09a4749a77eb76b1b97d91746bd96fe94457879ab +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-ppc64le.tar.gz) | e7962b522c6c7c14b9ee4c1d254d8bdd9846b2b33b0443fc9c4a41be6c40e5e6981798b720f0148f36263d5cc45d5a2bb1dd2f9ab2838e3d002e45b9bddeb7bf +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-s390x.tar.gz) | 49ebc97f01829e65f7de15be00b882513c44782eaadd1b1825a227e3bd3c73cc6aca8345af05b303d8c43aa2cb944a069755b2709effb8cc22eae621d25d4ba5 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-windows-amd64.tar.gz) | 6e0fd7724b09e6befbcb53b33574e97f2db089f2eee4bbf391abb7f043103a5e6e32e3014c0531b88f9a3ca88887bbc68625752c44326f98dd53adb3a6d1bed8 -## Changelog since v1.20.0-alpha.0 +## Changelog since v1.20.0 ## Urgent Upgrade Notes ### (No, really, you MUST read this before you upgrade) - - The Azure blob disk feature (`kind`: `Shared`, `Dedicated`) has been deprecated; you should use `kind`: `Managed` in the `kubernetes.io/azure-disk` storage class. ([#92905](https://github.com/kubernetes/kubernetes/pull/92905), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] - - CVE-2020-8559 (Medium): Privilege escalation from compromised node to cluster. See https://github.com/kubernetes/kubernetes/issues/92914 for more details. - The API Server will no longer proxy non-101 responses for upgrade requests. This could break proxied backends (such as an extension API server) that respond to upgrade requests with a non-101 response code. ([#92941](https://github.com/kubernetes/kubernetes/pull/92941), [@tallclair](https://github.com/tallclair)) [SIG API Machinery] + - Kube-proxy's IPVS proxy mode no longer sets the net.ipv4.conf.all.route_localnet sysctl parameter. Upgrading nodes will have net.ipv4.conf.all.route_localnet set to 1, but new nodes will inherit the system default (usually 0). If you relied on any behavior requiring net.ipv4.conf.all.route_localnet, you must ensure it is enabled, as kube-proxy will no longer set it automatically. This change helps to further mitigate CVE-2020-8558. ([#92938](https://github.com/kubernetes/kubernetes/pull/92938), [@lbernail](https://github.com/lbernail)) [SIG Network and Release]
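For the route_localnet upgrade note above, a minimal sketch of checking and persistently re-enabling the sysctl on a node; the drop-in file name is hypothetical, and this assumes a distro that reads /etc/sysctl.d:

```
# Check the current value on the node
sysctl net.ipv4.conf.all.route_localnet

# Re-enable it persistently if your workloads depend on it
echo 'net.ipv4.conf.all.route_localnet=1' | sudo tee /etc/sysctl.d/99-route-localnet.conf
sudo sysctl --system
```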
## Changes by Kind ### Deprecation -- Kube-apiserver: the componentstatus API is deprecated. This API provided status of etcd, kube-scheduler, and kube-controller-manager components, but only worked when those components were local to the API server, and when kube-scheduler and kube-controller-manager exposed unsecured health endpoints. Instead of this API, etcd health is included in the kube-apiserver health check, and kube-scheduler/kube-controller-manager health checks can be made directly against those components' health endpoints (see the sketch after this list). ([#93570](https://github.com/kubernetes/kubernetes/pull/93570), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Cluster Lifecycle] -- Kubeadm: deprecate the "kubeadm alpha kubelet config enable-dynamic" command. To continue using the feature please refer to the guide for "Dynamic Kubelet Configuration" at k8s.io. ([#92881](https://github.com/kubernetes/kubernetes/pull/92881), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: remove the deprecated "kubeadm alpha kubelet config enable-dynamic" command. To continue using the feature please refer to the guide for "Dynamic Kubelet Configuration" at k8s.io. This change also removes the parent command "kubeadm alpha kubelet" as there are no more sub-commands under it for the time being. ([#94668](https://github.com/kubernetes/kubernetes/pull/94668), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: remove the deprecated --kubelet-config flag for the command "kubeadm upgrade node" ([#94869](https://github.com/kubernetes/kubernetes/pull/94869), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubelet's deprecated endpoint `metrics/resource/v1alpha1` has been removed; please adopt `metrics/resource`. ([#94272](https://github.com/kubernetes/kubernetes/pull/94272), [@RainbowMango](https://github.com/RainbowMango)) [SIG Instrumentation and Node] -- The v1alpha1 PodPreset API and admission plugin have been removed with no built-in replacement. Admission webhooks can be used to modify pods on creation. ([#94090](https://github.com/kubernetes/kubernetes/pull/94090), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Apps, CLI, Cloud Provider, Scalability and Testing] +- Deprecate the `topologyKeys` field in Service. This capability will be replaced with upcoming work around Topology Aware Subsetting and Service Internal Traffic Policy. ([#96736](https://github.com/kubernetes/kubernetes/pull/96736), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] +- Kubeadm: the deprecated command "alpha selfhosting pivot" has now been removed. ([#97627](https://github.com/kubernetes/kubernetes/pull/97627), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] +- Kubeadm: graduate the command `kubeadm alpha kubeconfig user` to `kubeadm kubeconfig user`. The `kubeadm alpha kubeconfig user` command is now deprecated. ([#97583](https://github.com/kubernetes/kubernetes/pull/97583), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] +- Kubeadm: the "kubeadm alpha certs" command has now been removed; please use "kubeadm certs" instead. ([#97706](https://github.com/kubernetes/kubernetes/pull/97706), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] +- Remove the deprecated metrics "scheduling_algorithm_preemption_evaluation_seconds" and "binding_duration_seconds"; use "scheduler_framework_extension_point_duration_seconds" instead. ([#96447](https://github.com/kubernetes/kubernetes/pull/96447), [@chendave](https://github.com/chendave)) [SIG Cluster Lifecycle, Instrumentation, Scheduling and Testing] +- The PodSecurityPolicy API is deprecated in 1.21, and will no longer be served starting in 1.25. ([#97171](https://github.com/kubernetes/kubernetes/pull/97171), [@deads2k](https://github.com/deads2k)) [SIG Auth and CLI]
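For the componentstatus deprecation above, a minimal sketch of querying component health directly; 10259 and 10257 are the current default secure ports for kube-scheduler and kube-controller-manager, and `-k` skips certificate verification in this sketch only:

```
# kube-apiserver health, which includes etcd checks
kubectl get --raw='/readyz?verbose'

# kube-scheduler and kube-controller-manager health endpoints
# (run on the control-plane host)
curl -k https://127.0.0.1:10259/healthz
curl -k https://127.0.0.1:10257/healthz
```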
### API Change -- A new `nofuzz` go build tag now disables gofuzz support. Release binaries enable this. ([#92491](https://github.com/kubernetes/kubernetes/pull/92491), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery] -- A new alpha-level field, `SupportsFsGroup`, has been introduced for CSIDrivers to allow them to specify whether they support volume ownership and permission modifications. The `CSIVolumeSupportFSGroup` feature gate must be enabled to allow this field to be used. ([#92001](https://github.com/kubernetes/kubernetes/pull/92001), [@huffmanca](https://github.com/huffmanca)) [SIG API Machinery, CLI and Storage] -- Added pod version skew strategy for seccomp profile to synchronize the deprecated annotations with the new API Server fields. Please see the corresponding section [in the KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190717-seccomp-ga.md#version-skew-strategy) for more detailed explanations. ([#91408](https://github.com/kubernetes/kubernetes/pull/91408), [@saschagrunert](https://github.com/saschagrunert)) [SIG Apps, Auth, CLI and Node] -- Adds the ability to disable Accelerator/GPU metrics collected by Kubelet ([#91930](https://github.com/kubernetes/kubernetes/pull/91930), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node] -- Custom Endpoints are now mirrored to EndpointSlices by a new EndpointSliceMirroring controller. ([#91637](https://github.com/kubernetes/kubernetes/pull/91637), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, Auth, Cloud Provider, Instrumentation, Network and Testing] -- The external-facing podresources API is now available under k8s.io/kubelet/pkg/apis/ ([#92632](https://github.com/kubernetes/kubernetes/pull/92632), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node and Testing] -- Fix conversions for custom metrics. ([#94481](https://github.com/kubernetes/kubernetes/pull/94481), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation] -- Generic ephemeral volumes, a new alpha feature under the `GenericEphemeralVolume` feature gate, provide a more flexible alternative to `EmptyDir` volumes: as with `EmptyDir`, volumes are created and deleted for each pod automatically by Kubernetes. But because the normal provisioning process is used (`PersistentVolumeClaim`), storage can be provided by third-party storage vendors and all of the usual volume features work. Volumes don't need to be empty; for example, restoring from snapshot is supported.
([#92784](https://github.com/kubernetes/kubernetes/pull/92784), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, CLI, Instrumentation, Node, Scheduling, Storage and Testing] -- Kube-controller-manager: volume plugins can be restricted from contacting local and loopback addresses by setting `--volume-host-allow-local-loopback=false`, or from contacting specific CIDR ranges by setting `--volume-host-cidr-denylist` (for example, `--volume-host-cidr-denylist=127.0.0.1/28,feed::/16`) ([#91785](https://github.com/kubernetes/kubernetes/pull/91785), [@mattcary](https://github.com/mattcary)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing] -- Kubernetes is now built with golang 1.15.0-rc.1. - - The deprecated, legacy behavior of treating the CommonName field on X.509 serving certificates as a host name when no Subject Alternative Names are present is now disabled by default. It can be temporarily re-enabled by adding the value x509ignoreCN=0 to the GODEBUG environment variable. ([#93264](https://github.com/kubernetes/kubernetes/pull/93264), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Release, Scalability, Storage and Testing] -- Migrate scheduler, controller-manager and cloud-controller-manager to use LeaseLock ([#94603](https://github.com/kubernetes/kubernetes/pull/94603), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery, Apps, Cloud Provider and Scheduling] -- Modify DNS-1123 error messages to indicate that RFC 1123 is not followed exactly ([#94182](https://github.com/kubernetes/kubernetes/pull/94182), [@mattfenwick](https://github.com/mattfenwick)) [SIG API Machinery, Apps, Auth, Network and Node] -- The ServiceAccountIssuerDiscovery feature gate is now Beta and enabled by default. ([#91921](https://github.com/kubernetes/kubernetes/pull/91921), [@mtaufen](https://github.com/mtaufen)) [SIG Auth] -- The kube-controller-manager managed signers can now have distinct signing certificates and keys. See the help about `--cluster-signing-[signer-name]-{cert,key}-file`. `--cluster-signing-{cert,key}-file` is still the default. ([#90822](https://github.com/kubernetes/kubernetes/pull/90822), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Apps and Auth] -- When creating a networking.k8s.io/v1 Ingress API object, `spec.tls[*].secretName` values are required to pass validation rules for Secret API object names. ([#93929](https://github.com/kubernetes/kubernetes/pull/93929), [@liggitt](https://github.com/liggitt)) [SIG Network] -- The WinOverlay feature graduated to beta ([#94807](https://github.com/kubernetes/kubernetes/pull/94807), [@ksubrmnn](https://github.com/ksubrmnn)) [SIG Windows] +- Change the APIVersion proto name of BoundObjectRef from aPIVersion to apiVersion. ([#97379](https://github.com/kubernetes/kubernetes/pull/97379), [@kebe7jun](https://github.com/kebe7jun)) [SIG Auth] +- Promote the Immutable Secrets/ConfigMaps feature to Stable. + This allows setting the `Immutable` field in Secret or ConfigMap objects to mark their contents as immutable. ([#97615](https://github.com/kubernetes/kubernetes/pull/97615), [@wojtek-t](https://github.com/wojtek-t)) [SIG Apps, Architecture, Node and Testing]
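For the Immutable Secrets/ConfigMaps graduation above, a minimal sketch; the Secret name and contents are hypothetical:

```
# Once `immutable: true` is set, the Secret's data can no longer be updated;
# it can only be deleted and recreated
kubectl apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: app-credentials
type: Opaque
stringData:
  password: example-only
immutable: true
EOF
```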
### Feature -- ACTION REQUIRED: In CoreDNS v1.7.0, [metrics names have been changed](https://github.com/coredns/coredns/blob/master/notes/coredns-1.7.0.md#metric-changes) which will be backward incompatible with existing reporting formulas that use the old metrics' names. Adjust your formulas to the new names before upgrading. +- Add the flag --lease-max-object-size and the metric etcd_lease_object_counts for kube-apiserver to configure and observe the maximum number of objects attached to a single etcd lease. ([#97480](https://github.com/kubernetes/kubernetes/pull/97480), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery, Instrumentation and Scalability] +- Add the flag --lease-reuse-duration-seconds for kube-apiserver to configure the etcd lease reuse duration. ([#97009](https://github.com/kubernetes/kubernetes/pull/97009), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery and Scalability] +- Adds the ability to pass --strict-transport-security-directives to the kube-apiserver to set the HSTS header appropriately. Be sure you understand the consequences to browsers before setting this field. ([#96502](https://github.com/kubernetes/kubernetes/pull/96502), [@249043822](https://github.com/249043822)) [SIG Auth] +- Kubeadm now includes CoreDNS v1.8.0. ([#96429](https://github.com/kubernetes/kubernetes/pull/96429), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Kubeadm: add support for certificate chain validation. When using kubeadm in external CA mode, this allows an intermediate CA to be used to sign the certificates. The intermediate CA certificate must be appended to each signed certificate for this to work correctly. ([#97266](https://github.com/kubernetes/kubernetes/pull/97266), [@robbiemcmichael](https://github.com/robbiemcmichael)) [SIG Cluster Lifecycle] +- Kubeadm: amend the node kernel validation to treat CGROUP_PIDS, FAIR_GROUP_SCHED as required and CFS_BANDWIDTH, CGROUP_HUGETLB as optional ([#96378](https://github.com/kubernetes/kubernetes/pull/96378), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle and Node] +- The Kubernetes pause image manifest list now contains an image for Windows Server 20H2. ([#97322](https://github.com/kubernetes/kubernetes/pull/97322), [@claudiubelu](https://github.com/claudiubelu)) [SIG Windows] +- The apimachinery util/net function used to detect the bind address, `ResolveBindAddress()`, + takes into consideration global IP addresses on loopback interfaces when: + - the host has default routes + - there are no global IPs on those interfaces, + in order to support more complex network scenarios like BGP Unnumbered (RFC 5549) ([#95790](https://github.com/kubernetes/kubernetes/pull/95790), [@aojea](https://github.com/aojea)) [SIG Network] + +### Bug or Regression + +- ## Changelog - Kubeadm now includes CoreDNS version v1.7.0. Some of the major changes include: - - Fixed a bug that could cause CoreDNS to stop updating service records. - - Fixed a bug in the forward plugin where only the first upstream server is always selected no matter which policy is set. - - Remove already deprecated options `resyncperiod` and `upstream` in the Kubernetes plugin.
- - Includes Prometheus metrics name changes (to bring them in line with standard Prometheus metrics naming convention). They will be backward incompatible with existing reporting formulas that use the old metrics' names. - - The federation plugin (allows for v1 Kubernetes federation) has been removed. - More details are available in https://coredns.io/2020/06/15/coredns-1.7.0-release/ ([#92651](https://github.com/kubernetes/kubernetes/pull/92651), [@rajansandeep](https://github.com/rajansandeep)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation] -- Add metrics for azure service operations (route and loadbalancer). ([#94124](https://github.com/kubernetes/kubernetes/pull/94124), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider and Instrumentation] -- Add network rule support in Azure account creation ([#94239](https://github.com/kubernetes/kubernetes/pull/94239), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Add tags support for Azure File Driver ([#92825](https://github.com/kubernetes/kubernetes/pull/92825), [@ZeroMagic](https://github.com/ZeroMagic)) [SIG Cloud Provider and Storage] -- Added kube-apiserver metrics: apiserver_current_inflight_request_measures and, when API Priority and Fairness is enabled, windowed_request_stats. ([#91177](https://github.com/kubernetes/kubernetes/pull/91177), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery, Instrumentation and Testing] -- Audit events for API requests to deprecated API versions now include a `"k8s.io/deprecated": "true"` audit annotation. If a target removal release is identified, the audit event includes a `"k8s.io/removal-release": "."` audit annotation as well. ([#92842](https://github.com/kubernetes/kubernetes/pull/92842), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Instrumentation] -- Cloud node-controller now uses InstancesV2 ([#91319](https://github.com/kubernetes/kubernetes/pull/91319), [@gongguan](https://github.com/gongguan)) [SIG Apps, Cloud Provider, Scalability and Storage] -- Kubeadm: Add a preflight check that the control-plane node has at least 1700MB of RAM ([#93275](https://github.com/kubernetes/kubernetes/pull/93275), [@xlgao-zju](https://github.com/xlgao-zju)) [SIG Cluster Lifecycle] -- Kubeadm: add the "--cluster-name" flag to the "kubeadm alpha kubeconfig user" command to allow configuring the cluster name in the generated kubeconfig file ([#93992](https://github.com/kubernetes/kubernetes/pull/93992), [@prabhu43](https://github.com/prabhu43)) [SIG Cluster Lifecycle] -- Kubeadm: add the "--kubeconfig" flag to the "kubeadm init phase upload-certs" command to allow users to pass a custom location for a kubeconfig file. ([#94765](https://github.com/kubernetes/kubernetes/pull/94765), [@zhanw15](https://github.com/zhanw15)) [SIG Cluster Lifecycle] -- Kubeadm: deprecate the "--csr-only" and "--csr-dir" flags of the "kubeadm init phase certs" subcommands. Please use "kubeadm alpha certs generate-csr" instead. This new command allows you to generate new private keys and certificate signing requests for all the control-plane components, so that the certificates can be signed by an external CA.
([#92183](https://github.com/kubernetes/kubernetes/pull/92183), [@wallrj](https://github.com/wallrj)) [SIG Cluster Lifecycle] -- Kubeadm: make etcd pod request 100m CPU, 100Mi memory and 100Mi ephemeral_storage by default ([#94479](https://github.com/kubernetes/kubernetes/pull/94479), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Kubemark now supports both real and hollow nodes in a single cluster. ([#93201](https://github.com/kubernetes/kubernetes/pull/93201), [@ellistarn](https://github.com/ellistarn)) [SIG Scalability] -- Kubernetes is now built using go1.15.2 - - build: Update to k/repo-infra@v0.1.1 (supports go1.15.2) - - build: Use go-runner:buster-v2.0.1 (built using go1.15.1) - - bazel: Replace --features with Starlark build settings flag - - hack/lib/util.sh: some bash cleanups - - - switched one spot to use kube::logging - - make kube::util::find-binary return an error when it doesn't find - anything so that hack scripts fail fast instead of with '' binary not - found errors. - - this required deleting some genfeddoc stuff. the binary no longer - exists in k/k repo since we removed federation/, and I don't see it - in https://github.com/kubernetes-sigs/kubefed/ either. I'm assuming - that it's gone for good now. + ### General + - Fix priority expander falling back to a random choice even though there is a higher priority option to choose + - Clone `kubernetes/kubernetes` in `update-vendor.sh` shallowly, instead of fetching all revisions + - Speed up binpacking by reducing the number of PreFilter calls (call once per pod instead of #pods*#nodes times) + - Speed up finding unneeded nodes by 5x+ in very large clusters by reducing the number of PreFilter calls + - Expose `--max-nodes-total` as a metric + - Errors in `IncreaseSize` changed from type `apiError` to `cloudProviderError` + - Make `build-in-docker` and `test-in-docker` work on Linux systems with SELinux enabled + - Fix an error where existing nodes were not considered as destinations while finding place for pods in scale-down simulations + - Remove redundant log lines and reduce severity around parsing kubeEnv + - Don't treat nodes created by virtual kubelet as nodes from non-autoscaled node groups + - Remove redundant logging around calculating node utilization + - Add configurable `--network` and `--rm` flags for docker in `Makefile` + - Subtract DaemonSet pods' requests from node allocatable in the denominator while computing node utilization + - Include taints by condition when determining if a node is unready/still starting + - Fix `update-vendor.sh` to work on OSX and zsh + - Add best-effort eviction for DaemonSet pods while scaling down non-empty nodes + - Add build support for ARM64 - - bazel: output go_binary rule directly from go_binary_conditional_pure - - From: @mikedanese: - Instead of aliasing. Aliases are annoying in a number of ways. This is - specifically bugging me now because they make the action graph harder to - analyze programmatically. By using aliases here, we would need to handle - potentially aliased go_binary targets and dereference to the effective - target. + ### AliCloud + - Add missing daemonsets and replicasets to ALI example cluster role - The comment references an issue with `pure = select(...)` which appears - to be resolved considering this now builds. 
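The priority-expander fix in the General list above refers to the autoscaler's `priority` expander, which reads its configuration from a ConfigMap. A minimal sketch, assuming the autoscaler runs with `--expander=priority`; the node-group name patterns are hypothetical:

```
kubectl apply -f - <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-autoscaler-priority-expander
  namespace: kube-system
data:
  priorities: |-
    10:
      - .*spot.*
    50:
      - .*on-demand.*
EOF
```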
+ ### Apache CloudStack + - Add support for Apache CloudStack - - make kube::util::find-binary not dependent on bazel-out/ structure + ### AWS + - Regenerate list of EC2 instances + - Fix pricing endpoint in AWS China Region - Implement an aspect that outputs go_build_mode metadata for go binaries, - and use that during binary selection. ([#94449](https://github.com/kubernetes/kubernetes/pull/94449), [@justaugustus](https://github.com/justaugustus)) [SIG Architecture, CLI, Cluster Lifecycle, Node, Release and Testing] -- Only update Azure data disks when attach/detach ([#94265](https://github.com/kubernetes/kubernetes/pull/94265), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Promote SupportNodePidsLimit to GA to provide node to pod pid isolation - Promote SupportPodPidsLimit to GA to provide ability to limit pids per pod ([#94140](https://github.com/kubernetes/kubernetes/pull/94140), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node and Testing] -- Rename pod_preemption_metrics to preemption_metrics. ([#93256](https://github.com/kubernetes/kubernetes/pull/93256), [@ahg-g](https://github.com/ahg-g)) [SIG Instrumentation and Scheduling] -- Server-side apply behavior has been regularized in the case where a field is removed from the applied configuration. Removed fields which have no other owners are deleted from the live object, or reset to their default value if they have one. Safe ownership transfers, such as the transfer of a `replicas` field from a user to an HPA without resetting to the default value are documented in [Transferring Ownership](https://kubernetes.io/docs/reference/using-api/api-concepts/#transferring-ownership) ([#92661](https://github.com/kubernetes/kubernetes/pull/92661), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Testing] -- Set CSIMigrationvSphere feature gates to beta. - Users should enable CSIMigration + CSIMigrationvSphere features and install the vSphere CSI Driver (https://github.com/kubernetes-sigs/vsphere-csi-driver) to move workload from the in-tree vSphere plugin "kubernetes.io/vsphere-volume" to vSphere CSI Driver. + ### Azure + - Add optional jitter on initial VMSS VM cache refresh, keep the refreshes spread over time + - Serve from cache for the whole period of ongoing throttling + - Fix unwanted VMSS VMs cache invalidations + - Enforce setting the number of retries if cloud provider backoff is enabled + - Don't update capacity if VMSS provisioning state is updating + - Support allocatable resources overrides via VMSS tags + - Add missing stable labels in template nodes + - Proactively set instance status to deleting on node deletions - Requires: vSphere vCenter/ESXi Version: 7.0u1, HW Version: VM version 15 ([#92816](https://github.com/kubernetes/kubernetes/pull/92816), [@divyenpatel](https://github.com/divyenpatel)) [SIG Cloud Provider and Storage] -- Support [service.beta.kubernetes.io/azure-pip-ip-tags] annotations to allow customers to specify ip-tags to influence public-ip creation in Azure [Tag1=Value1, Tag2=Value2, etc.] ([#94114](https://github.com/kubernetes/kubernetes/pull/94114), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider] -- Support a smooth upgrade from client-side apply to server-side apply without conflicts, as well as support the corresponding downgrade. 
([#90187](https://github.com/kubernetes/kubernetes/pull/90187), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG API Machinery and Testing] -- Trace output in apiserver logs is more organized and comprehensive. Traces are nested, and for all non-long-running request endpoints, the entire filter chain is instrumented (e.g. the authentication check is included). ([#88936](https://github.com/kubernetes/kubernetes/pull/88936), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Scheduling] -- `kubectl alpha debug` now supports debugging nodes by creating a debugging container running in the node's host namespaces. ([#92310](https://github.com/kubernetes/kubernetes/pull/92310), [@verb](https://github.com/verb)) [SIG CLI] ### Documentation -- Kubelet: remove alpha warnings for CNI flags. ([#94508](https://github.com/kubernetes/kubernetes/pull/94508), [@andrewsykim](https://github.com/andrewsykim)) [SIG Network and Node] ### Failing Test -- Kube-proxy iptables min-sync-period defaults to 1 sec. Previously, it was 0. ([#92836](https://github.com/kubernetes/kubernetes/pull/92836), [@aojea](https://github.com/aojea)) [SIG Network] ### Bug or Regression -- A panic in the apiserver caused by the `informer-sync` health checker is now fixed. ([#93600](https://github.com/kubernetes/kubernetes/pull/93600), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG API Machinery] -- Add kubectl wait --ignore-not-found flag ([#90969](https://github.com/kubernetes/kubernetes/pull/90969), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- Added a fix to the StatefulSet controller to wait for PVC deletion before creating pods. ([#93457](https://github.com/kubernetes/kubernetes/pull/93457), [@ymmt2005](https://github.com/ymmt2005)) [SIG Apps] -- Azure ARM client: don't segfault on empty response and http error ([#94078](https://github.com/kubernetes/kubernetes/pull/94078), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- Azure: fix a bug where kube-controller-manager would panic if a wrong Azure VMSS name is configured ([#94306](https://github.com/kubernetes/kubernetes/pull/94306), [@knight42](https://github.com/knight42)) [SIG Cloud Provider] -- Azure: add a per-VMSS cache of VMSS VMs to prevent throttling on clusters having many attached VMSS ([#93107](https://github.com/kubernetes/kubernetes/pull/93107), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- Both the apiserver_request_duration_seconds metrics and the RequestReceivedTimestamp field of an audit event now take into account the time a request spends in the apiserver request filters.
([#94903](https://github.com/kubernetes/kubernetes/pull/94903), [@tkashem](https://github.com/tkashem)) [SIG API Machinery, Auth and Instrumentation] -- Build/lib/release: Explicitly use '--platform' in building server images + ### Cluster API + - Migrate interaction with the API from using internal types to using Unstructured + - Improve tests to work better with constrained resources + - Add support for node autodiscovery + - Add support for `--cloud-config` + - Update group identifier to use for Cluster API annotations + + ### Exoscale + - Add support for Exoscale - When we switched to go-runner for building the apiserver, - controller-manager, and scheduler server components, we no longer - reference the individual architectures in the image names, specifically - in the 'FROM' directive of the server image Dockerfiles. + ### GCE + - Decrease the number of GCE Read Requests made while deleting nodes + - Base pricing of custom instances on their instance family type + - Add pricing information for missing machine types + - Add pricing information for different GPU types + - Ignore the new `topology.gke.io/zone` label when comparing groups + - Add missing stable labels to template nodes - As a result, server images for non-amd64 images copy in the go-runner - amd64 binary instead of the go-runner that matches that architecture. + ### HuaweiCloud + - Add auto scaling group support + - Implement node group by AS + - Implement getting desired instance number of node group + - Implement increasing node group size + - Implement TemplateNodeInfo + - Implement caching instances - This commit explicitly sets the '--platform=linux/${arch}' to ensure - we're pulling the correct go-runner arch from the manifest list. + ### IONOS + - Add support for IONOS - Before: - `FROM ${base_image}` + ### Kubemark + - Skip non-kubemark nodes while computing node infos for node groups. - After: - `FROM --platform=linux/${arch} ${base_image}` ([#94552](https://github.com/kubernetes/kubernetes/pull/94552), [@justaugustus](https://github.com/justaugustus)) [SIG Release] -- CSIDriver object can be deployed during volume attachment. ([#93710](https://github.com/kubernetes/kubernetes/pull/93710), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Node, Storage and Testing] -- CVE-2020-8557 (Medium): Node-local denial of service via container /etc/hosts file. See https://github.com/kubernetes/kubernetes/issues/93032 for more details. ([#92916](https://github.com/kubernetes/kubernetes/pull/92916), [@joelsmith](https://github.com/joelsmith)) [SIG Node] -- Do not add nodes labeled with kubernetes.azure.com/managed=false to backend pool of load balancer. ([#93034](https://github.com/kubernetes/kubernetes/pull/93034), [@matthias50](https://github.com/matthias50)) [SIG Cloud Provider] -- Do not fail sorting empty elements. 
([#94666](https://github.com/kubernetes/kubernetes/pull/94666), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- Do not retry volume expansion if the CSI driver returns a FailedPrecondition error ([#92986](https://github.com/kubernetes/kubernetes/pull/92986), [@gnufied](https://github.com/gnufied)) [SIG Node and Storage] -- Dockershim security: the pod sandbox now always runs with `no-new-privileges` and the `runtime/default` seccomp profile - dockershim seccomp: custom profiles can now have smaller seccomp profiles when set at pod level ([#90948](https://github.com/kubernetes/kubernetes/pull/90948), [@pjbgf](https://github.com/pjbgf)) [SIG Node] -- Dual-stack: make nodeipam compatible with existing single-stack clusters when the dual-stack feature gate becomes enabled by default ([#90439](https://github.com/kubernetes/kubernetes/pull/90439), [@SataQiu](https://github.com/SataQiu)) [SIG API Machinery] -- The endpoint controller requeues a service after an endpoint deletion event occurs, to confirm that deleted endpoints are undesired and to mitigate the effects of an out-of-sync endpoint cache. ([#93030](https://github.com/kubernetes/kubernetes/pull/93030), [@swetharepakula](https://github.com/swetharepakula)) [SIG Apps and Network] -- EndpointSlice controllers now return immediately if they encounter an error creating, updating, or deleting resources. ([#93908](https://github.com/kubernetes/kubernetes/pull/93908), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- The EndpointSliceMirroring controller now copies labels from Endpoints to EndpointSlices. ([#93442](https://github.com/kubernetes/kubernetes/pull/93442), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- The EndpointSliceMirroring controller now mirrors Endpoints that do not have a Service associated with them. ([#94171](https://github.com/kubernetes/kubernetes/pull/94171), [@robscott](https://github.com/robscott)) [SIG Apps, Network and Testing] -- Ensure backoff step is set to 1 for Azure armclient.
([#94180](https://github.com/kubernetes/kubernetes/pull/94180), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Ensure getPrimaryInterfaceID does not panic when network interfaces for Azure VMSS are null ([#94355](https://github.com/kubernetes/kubernetes/pull/94355), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Eviction requests for pods that have a non-zero DeletionTimestamp will always succeed ([#91342](https://github.com/kubernetes/kubernetes/pull/91342), [@michaelgugino](https://github.com/michaelgugino)) [SIG Apps] -- Extended the DSR loadbalancer feature in winkernel kube-proxy to HNS versions 9.3-9.max, 10.2+ ([#93080](https://github.com/kubernetes/kubernetes/pull/93080), [@elweb9858](https://github.com/elweb9858)) [SIG Network] -- Fix HandleCrash order ([#93108](https://github.com/kubernetes/kubernetes/pull/93108), [@lixiaobing1](https://github.com/lixiaobing1)) [SIG API Machinery] -- Fix a concurrent map writes error in kubelet ([#93773](https://github.com/kubernetes/kubernetes/pull/93773), [@knight42](https://github.com/knight42)) [SIG Node] -- Fix a regression where kubeadm bails out with a fatal error when an optional version command line argument is supplied to the "kubeadm upgrade plan" command ([#94421](https://github.com/kubernetes/kubernetes/pull/94421), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle] -- Fix azure file migration panic ([#94853](https://github.com/kubernetes/kubernetes/pull/94853), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix a bug where loadbalancer deletion gets stuck because of a missing resource group (#75198) ([#93962](https://github.com/kubernetes/kubernetes/pull/93962), [@phiphi282](https://github.com/phiphi282)) [SIG Cloud Provider] -- Fix calling AttachDisk on a previously attached EBS volume ([#93567](https://github.com/kubernetes/kubernetes/pull/93567), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider, Storage and Testing] -- Fix detection of image filesystem, disk metrics for devicemapper, and detection of OOM kills on 5.0+ Linux kernels. ([#92919](https://github.com/kubernetes/kubernetes/pull/92919), [@dashpole](https://github.com/dashpole)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] -- Fix the etcd_object_counts metric reported by kube-apiserver ([#94773](https://github.com/kubernetes/kubernetes/pull/94773), [@tkashem](https://github.com/tkashem)) [SIG API Machinery] -- Fix incorrectly reported verbs for kube-apiserver metrics for CRD objects ([#93523](https://github.com/kubernetes/kubernetes/pull/93523), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation] -- Fix instance-not-found issues when an Azure Node is recreated in a short time ([#93316](https://github.com/kubernetes/kubernetes/pull/93316), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fix kube-apiserver /readyz to contain the "informer-sync" check ensuring that internal informers are synced. ([#93670](https://github.com/kubernetes/kubernetes/pull/93670), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Fix kubectl SchemaError on CRDs with schema using x-kubernetes-preserve-unknown-fields on array types.
([#94888](https://github.com/kubernetes/kubernetes/pull/94888), [@sttts](https://github.com/sttts)) [SIG API Machinery]
-- Fix memory leak in EndpointSliceTracker for EndpointSliceMirroring controller. ([#93441](https://github.com/kubernetes/kubernetes/pull/93441), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
-- Fix missing CSI annotations on node during parallel CSINode update. ([#94389](https://github.com/kubernetes/kubernetes/pull/94389), [@pacoxu](https://github.com/pacoxu)) [SIG Storage]
-- Fix the `cloudprovider_azure_api_request_duration_seconds` metric buckets to correctly capture the latency metrics. Previously, the majority of the calls would fall in the "+Inf" bucket. ([#94873](https://github.com/kubernetes/kubernetes/pull/94873), [@marwanad](https://github.com/marwanad)) [SIG Cloud Provider and Instrumentation]
-- Fix: azure disk resize error if source does not exist ([#93011](https://github.com/kubernetes/kubernetes/pull/93011), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
-- Fix: detach azure disk broken on Azure Stack ([#94885](https://github.com/kubernetes/kubernetes/pull/94885), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
-- Fix: determine the correct ip config based on ip family ([#93043](https://github.com/kubernetes/kubernetes/pull/93043), [@aramase](https://github.com/aramase)) [SIG Cloud Provider]
-- Fix: initial delay in mounting azure disk & file ([#93052](https://github.com/kubernetes/kubernetes/pull/93052), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
-- Fix: use sensitiveOptions on Windows mount ([#94126](https://github.com/kubernetes/kubernetes/pull/94126), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
-- Fixed Ceph RBD volume expansion when no ceph.conf exists ([#92027](https://github.com/kubernetes/kubernetes/pull/92027), [@juliantaylor](https://github.com/juliantaylor)) [SIG Storage]
-- Fixed a bug where improper storage and comparison of endpoints led to excessive API traffic from the endpoints controller ([#94112](https://github.com/kubernetes/kubernetes/pull/94112), [@damemi](https://github.com/damemi)) [SIG Apps, Network and Testing]
-- Fixed a bug whereby the allocation of reusable CPUs and devices was not being honored when the TopologyManager was enabled ([#93189](https://github.com/kubernetes/kubernetes/pull/93189), [@klueska](https://github.com/klueska)) [SIG Node]
-- Fixed a panic in kubectl debug when a pod has multiple init containers or ephemeral containers ([#94580](https://github.com/kubernetes/kubernetes/pull/94580), [@kiyoshim55](https://github.com/kiyoshim55)) [SIG CLI]
-- Fixed a regression that sometimes prevented `kubectl port-forward` from working when TCP and UDP services were configured on the same port ([#94728](https://github.com/kubernetes/kubernetes/pull/94728), [@amorenoz](https://github.com/amorenoz)) [SIG CLI]
-- Fixed a bug in the reflector that prevented it from recovering from "Too large resource version" errors with API servers 1.17.0-1.18.5 ([#94316](https://github.com/kubernetes/kubernetes/pull/94316), [@janeczku](https://github.com/janeczku)) [SIG API Machinery]
-- Fixed a bug where `kubectl top pod` output was not sorted when the --sort-by and --containers flags were used together
([#93692](https://github.com/kubernetes/kubernetes/pull/93692), [@brianpursley](https://github.com/brianpursley)) [SIG CLI]
-- Fixed kubelet creating extra sandbox for pods with RestartPolicyOnFailure after all containers succeeded ([#92614](https://github.com/kubernetes/kubernetes/pull/92614), [@tnqn](https://github.com/tnqn)) [SIG Node and Testing]
-- Fixed memory leak in endpointSliceTracker ([#92838](https://github.com/kubernetes/kubernetes/pull/92838), [@tnqn](https://github.com/tnqn)) [SIG Apps and Network]
-- Fixed node data loss in kube-scheduler for clusters with an imbalance in the number of nodes across zones ([#93355](https://github.com/kubernetes/kubernetes/pull/93355), [@maelk](https://github.com/maelk)) [SIG Scheduling]
-- Fixed the EndpointSliceController to correctly create endpoints for IPv6-only pods.
+ ### Magnum
+ - Add Magnum support in the Cluster Autoscaler helm chart
-  Fixed the EndpointController to allow IPv6 headless services, if the IPv6DualStack
-  feature gate is enabled, by specifying `ipFamily: IPv6` on the service (a sketch follows below). (This already
-  worked with the EndpointSliceController.) ([#91399](https://github.com/kubernetes/kubernetes/pull/91399), [@danwinship](https://github.com/danwinship)) [SIG Apps and Network]
-- Fixes a bug that evicted pods after a taint with a limited tolerationSeconds toleration was removed from a node ([#93722](https://github.com/kubernetes/kubernetes/pull/93722), [@liggitt](https://github.com/liggitt)) [SIG Apps and Node]
-- Fixes a bug where EndpointSlices would not be recreated after rapid Service recreation. ([#94730](https://github.com/kubernetes/kubernetes/pull/94730), [@robscott](https://github.com/robscott)) [SIG Apps, Network and Testing]
-- Fixes a race condition in kubelet pod handling ([#94751](https://github.com/kubernetes/kubernetes/pull/94751), [@auxten](https://github.com/auxten)) [SIG Node]
-- Fixes an issue proxying to IPv6 pods without specifying a port ([#94834](https://github.com/kubernetes/kubernetes/pull/94834), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Network]
-- Fixes an issue that can result in namespaced custom resources being orphaned when their namespace is deleted, if the CRD defining the custom resource is removed concurrently with namespaces being deleted, then recreated. ([#93790](https://github.com/kubernetes/kubernetes/pull/93790), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Apps]
-- Ignore the root user check when a Windows pod starts ([#92355](https://github.com/kubernetes/kubernetes/pull/92355), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows]
-- Increased maximum IOPS of AWS EBS io1 volumes to 64,000 (current AWS maximum).
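For the `ipFamily: IPv6` entry above, a minimal sketch of such a headless Service; the name and selector are hypothetical, and the cluster is assumed to have the `IPv6DualStack` feature gate enabled:

```shell
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: my-headless-v6   # hypothetical name
spec:
  clusterIP: None        # headless Service
  ipFamily: IPv6
  selector:
    app: my-app          # hypothetical selector
EOF
```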
([#90014](https://github.com/kubernetes/kubernetes/pull/90014), [@jacobmarble](https://github.com/jacobmarble)) [SIG Cloud Provider and Storage] -- K8s.io/apimachinery: runtime.DefaultUnstructuredConverter.FromUnstructured now handles converting integer fields to typed float values ([#93250](https://github.com/kubernetes/kubernetes/pull/93250), [@liggitt](https://github.com/liggitt)) [SIG API Machinery] -- Kube-aggregator certificates are dynamically loaded on change from disk ([#92791](https://github.com/kubernetes/kubernetes/pull/92791), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery] -- Kube-apiserver: fixed a bug returning inconsistent results from list requests which set a field or label selector and set a paging limit ([#94002](https://github.com/kubernetes/kubernetes/pull/94002), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery] -- Kube-apiserver: jsonpath expressions with consecutive recursive descent operators are no longer evaluated for custom resource printer columns ([#93408](https://github.com/kubernetes/kubernetes/pull/93408), [@joelsmith](https://github.com/joelsmith)) [SIG API Machinery] -- Kube-proxy now trims extra spaces found in loadBalancerSourceRanges to match Service validation. ([#94107](https://github.com/kubernetes/kubernetes/pull/94107), [@robscott](https://github.com/robscott)) [SIG Network] -- Kube-up now includes CoreDNS version v1.7.0. Some of the major changes include: - - Fixed a bug that could cause CoreDNS to stop updating service records. - - Fixed a bug in the forward plugin where only the first upstream server is always selected no matter which policy is set. - - Remove already deprecated options `resyncperiod` and `upstream` in the Kubernetes plugin. - - Includes Prometheus metrics name changes (to bring them in line with standard Prometheus metrics naming convention). They will be backward incompatible with existing reporting formulas that use the old metrics' names. - - The federation plugin (allows for v1 Kubernetes federation) has been removed. 
- More details are available in https://coredns.io/2020/06/15/coredns-1.7.0-release/ ([#92718](https://github.com/kubernetes/kubernetes/pull/92718), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cloud Provider]
-- Kubeadm now makes sure the etcd manifest is regenerated upon upgrade even when no etcd version change takes place ([#94395](https://github.com/kubernetes/kubernetes/pull/94395), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle]
-- Kubeadm: avoid a panic when determining if the running version of CoreDNS is supported during upgrades ([#94299](https://github.com/kubernetes/kubernetes/pull/94299), [@zouyee](https://github.com/zouyee)) [SIG Cluster Lifecycle]
-- Kubeadm: ensure "kubeadm reset" does not unmount the root "/var/lib/kubelet" directory if it is mounted by the user ([#93702](https://github.com/kubernetes/kubernetes/pull/93702), [@thtanaka](https://github.com/thtanaka)) [SIG Cluster Lifecycle]
-- Kubeadm: ensure the etcd data directory is created with 0700 permissions during control-plane init and join ([#94102](https://github.com/kubernetes/kubernetes/pull/94102), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubeadm: fix the bug where kubeadm tried to call 'docker info' even if the CRI socket was for another CRI ([#94555](https://github.com/kubernetes/kubernetes/pull/94555), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle]
-- Kubeadm: make the kubeconfig files for the kube-controller-manager and kube-scheduler use the LocalAPIEndpoint instead of the ControlPlaneEndpoint. This makes kubeadm clusters more resilient to version skew problems during immutable upgrades: https://kubernetes.io/docs/setup/release/version-skew-policy/#kube-controller-manager-kube-scheduler-and-cloud-controller-manager ([#94398](https://github.com/kubernetes/kubernetes/pull/94398), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubeadm: relax the validation of kubeconfig server URLs. Allow the user to define custom kubeconfig server URLs without erroring out during validation of existing kubeconfig files (e.g. when using external CA mode).
([#94816](https://github.com/kubernetes/kubernetes/pull/94816), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: remove duplicate DNS names and IP addresses from generated certificates ([#92753](https://github.com/kubernetes/kubernetes/pull/92753), [@QianChenglong](https://github.com/QianChenglong)) [SIG Cluster Lifecycle] -- Kubelet: assume that swap is disabled when `/proc/swaps` does not exist ([#93931](https://github.com/kubernetes/kubernetes/pull/93931), [@SataQiu](https://github.com/SataQiu)) [SIG Node] -- Kubelet: fix race condition in pluginWatcher ([#93622](https://github.com/kubernetes/kubernetes/pull/93622), [@knight42](https://github.com/knight42)) [SIG Node] -- Kuberuntime security: pod sandbox now always runs with `runtime/default` seccomp profile - kuberuntime seccomp: custom profiles can now have smaller seccomp profiles when set at pod level ([#90949](https://github.com/kubernetes/kubernetes/pull/90949), [@pjbgf](https://github.com/pjbgf)) [SIG Node] -- NONE ([#71269](https://github.com/kubernetes/kubernetes/pull/71269), [@DeliangFan](https://github.com/DeliangFan)) [SIG Node] -- New Azure instance types do now have correct max data disk count information. ([#94340](https://github.com/kubernetes/kubernetes/pull/94340), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Cloud Provider and Storage] -- Pods with invalid Affinity/AntiAffinity LabelSelectors will now fail scheduling when these plugins are enabled ([#93660](https://github.com/kubernetes/kubernetes/pull/93660), [@damemi](https://github.com/damemi)) [SIG Scheduling] -- Require feature flag CustomCPUCFSQuotaPeriod if setting a non-default cpuCFSQuotaPeriod in kubelet config. ([#94687](https://github.com/kubernetes/kubernetes/pull/94687), [@karan](https://github.com/karan)) [SIG Node] -- Reverted devicemanager for Windows node added in 1.19rc1. ([#93263](https://github.com/kubernetes/kubernetes/pull/93263), [@liggitt](https://github.com/liggitt)) [SIG Node and Windows] -- Scheduler bugfix: Scheduler doesn't lose pod information when nodes are quickly recreated. This could happen when nodes are restarted or quickly recreated reusing a nodename. ([#93938](https://github.com/kubernetes/kubernetes/pull/93938), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scalability, Scheduling and Testing] -- The EndpointSlice controller now waits for EndpointSlice and Node caches to be synced before starting. ([#94086](https://github.com/kubernetes/kubernetes/pull/94086), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- The `/debug/api_priority_and_fairness/dump_requests` path at an apiserver will no longer return a phantom line for each exempt priority level. ([#93406](https://github.com/kubernetes/kubernetes/pull/93406), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery] -- The kubelet recognizes the --containerd-namespace flag to configure the namespace used by cadvisor. ([#87054](https://github.com/kubernetes/kubernetes/pull/87054), [@changyaowei](https://github.com/changyaowei)) [SIG Node] -- The terminationGracePeriodSeconds from pod spec is respected for the mirror pod. 
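For the `cpuCFSQuotaPeriod` entry above, a minimal sketch of the relevant KubeletConfiguration fragment; the `10ms` value is only an example, and the fragment would be merged into your real kubelet configuration file:

```shell
cat <<'EOF' > kubelet-config-fragment.yaml
# Non-default quota periods require the CustomCPUCFSQuotaPeriod feature gate
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
featureGates:
  CustomCPUCFSQuotaPeriod: true
cpuCFSQuotaPeriod: "10ms"
EOF
```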
([#92442](https://github.com/kubernetes/kubernetes/pull/92442), [@tedyu](https://github.com/tedyu)) [SIG Node and Testing]
-- Update Calico to v3.15.2 ([#94241](https://github.com/kubernetes/kubernetes/pull/94241), [@lmm](https://github.com/lmm)) [SIG Cloud Provider]
-- Update default etcd server version to 3.4.13 ([#94287](https://github.com/kubernetes/kubernetes/pull/94287), [@jingyih](https://github.com/jingyih)) [SIG API Machinery, Cloud Provider, Cluster Lifecycle and Testing]
-- Updated Cluster Autoscaler to 1.19.0. ([#93577](https://github.com/kubernetes/kubernetes/pull/93577), [@vivekbagade](https://github.com/vivekbagade)) [SIG Autoscaling and Cloud Provider]
-- Use NLB Subnet CIDRs instead of VPC CIDRs in Health Check SG Rules ([#93515](https://github.com/kubernetes/kubernetes/pull/93515), [@t0rr3sp3dr0](https://github.com/t0rr3sp3dr0)) [SIG Cloud Provider]
+ ### Packet
+ - Allow empty nodepools
+ - Add support for multiple nodepools
+ - Add pricing support
+
+ ## Image
+ Image: `k8s.gcr.io/autoscaling/cluster-autoscaler:v1.20.0` ([#97011](https://github.com/kubernetes/kubernetes/pull/97011), [@towca](https://github.com/towca)) [SIG Cloud Provider]
+- AcceleratorStats will be available in the Summary API of kubelet when cri_stats_provider is used. ([#96873](https://github.com/kubernetes/kubernetes/pull/96873), [@ruiwen-zhao](https://github.com/ruiwen-zhao)) [SIG Node]
+- Add a limit on the number of log lines returned when the tail option is set (a sketch follows below) ([#93920](https://github.com/kubernetes/kubernetes/pull/93920), [@zhouya0](https://github.com/zhouya0)) [SIG Node]
+- Avoid a systemd-logind configuration-loading warning ([#97950](https://github.com/kubernetes/kubernetes/pull/97950), [@wzshiming](https://github.com/wzshiming)) [SIG Node]
+- Cloud-controller-manager: routes controller should not depend on --allocate-node-cidrs ([#97029](https://github.com/kubernetes/kubernetes/pull/97029), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider and Testing]
+- Copy annotations with empty values when a deployment rolls back ([#94858](https://github.com/kubernetes/kubernetes/pull/94858), [@waynepeking348](https://github.com/waynepeking348)) [SIG Apps]
+- Detach volumes from vSphere nodes not tracked by the attach-detach controller ([#96689](https://github.com/kubernetes/kubernetes/pull/96689), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage]
+- Fix kubectl label error when local=true is set. ([#97440](https://github.com/kubernetes/kubernetes/pull/97440), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI]
+- Fix Azure file shares not being deleted when their namespace is deleted ([#97417](https://github.com/kubernetes/kubernetes/pull/97417), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
+- Fix CVE-2020-8555 for Gluster client connections.
([#97922](https://github.com/kubernetes/kubernetes/pull/97922), [@liggitt](https://github.com/liggitt)) [SIG Storage]
+- Fix counting error in service/nodeport/loadbalancer quota check ([#97451](https://github.com/kubernetes/kubernetes/pull/97451), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery, Network and Testing]
+- Fix kubectl-convert to import known versions ([#97754](https://github.com/kubernetes/kubernetes/pull/97754), [@wzshiming](https://github.com/wzshiming)) [SIG CLI and Testing]
+- Fix missing cadvisor machine metrics. ([#97006](https://github.com/kubernetes/kubernetes/pull/97006), [@lingsamuel](https://github.com/lingsamuel)) [SIG Node]
+- Fix nil VMSS name when setting service to auto mode ([#97366](https://github.com/kubernetes/kubernetes/pull/97366), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
+- Fix the panic when kubelet registers if a node object already exists with no Status.Capacity or Status.Allocatable ([#95269](https://github.com/kubernetes/kubernetes/pull/95269), [@SataQiu](https://github.com/SataQiu)) [SIG Node]
+- Fix the regression that slowed pod termination. Before this fix, pods could take up to an additional minute to terminate; the fix reverses the change that ensured that CNI resources are cleaned up when the pod is removed from the API server. ([#97980](https://github.com/kubernetes/kubernetes/pull/97980), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Node]
+- Fix to recover CSI volumes from certain dangling attachments ([#96617](https://github.com/kubernetes/kubernetes/pull/96617), [@yuga711](https://github.com/yuga711)) [SIG Apps and Storage]
+- Fix: azure file latency issue for metadata-heavy workloads ([#97082](https://github.com/kubernetes/kubernetes/pull/97082), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
+- Fixed Cinder volume IDs on OpenStack Train ([#96673](https://github.com/kubernetes/kubernetes/pull/96673), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider]
+- Fixed FibreChannel volume plugin corrupting filesystems on detach of multipath volumes. ([#97013](https://github.com/kubernetes/kubernetes/pull/97013), [@jsafrane](https://github.com/jsafrane)) [SIG Storage]
+- Fixed a bug in kubelet that would saturate CPU utilization after containerd was restarted. ([#97174](https://github.com/kubernetes/kubernetes/pull/97174), [@hanlins](https://github.com/hanlins)) [SIG Node]
+- Fixed a bug in CPUManager with a race on container map access ([#97427](https://github.com/kubernetes/kubernetes/pull/97427), [@klueska](https://github.com/klueska)) [SIG Node]
+- Fixed cleanup of block devices when /var/lib/kubelet is a symlink. ([#96889](https://github.com/kubernetes/kubernetes/pull/96889), [@jsafrane](https://github.com/jsafrane)) [SIG Storage]
+- GCE Internal LoadBalancer sync loop will now release the ILB IP address upon sync failure. An error in ILB forwarding rule creation will no longer leak IP addresses.
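As a usage sketch for the log tail entry in the group above (the pod name is hypothetical):

```shell
# Return only the last 50 lines of the container log
kubectl logs example-pod --tail=50
```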
([#97740](https://github.com/kubernetes/kubernetes/pull/97740), [@prameshj](https://github.com/prameshj)) [SIG Cloud Provider and Network]
+- Ignore pod updates with no new images in the alwaysPullImages admission controller ([#96668](https://github.com/kubernetes/kubernetes/pull/96668), [@pacoxu](https://github.com/pacoxu)) [SIG Apps, Auth and Node]
+- Kubeadm now installs version 3.4.13 of etcd when creating a cluster with v1.19 ([#97244](https://github.com/kubernetes/kubernetes/pull/97244), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle]
+- Kubeadm: avoid detection of the container runtime for commands that do not need it ([#97625](https://github.com/kubernetes/kubernetes/pull/97625), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle]
+- Kubeadm: fix a bug in the host memory detection code on 32-bit Linux platforms ([#97403](https://github.com/kubernetes/kubernetes/pull/97403), [@abelbarrera15](https://github.com/abelbarrera15)) [SIG Cluster Lifecycle]
+- Kubeadm: fix a bug where "kubeadm upgrade" commands can fail if CoreDNS v1.8.0 is installed. ([#97919](https://github.com/kubernetes/kubernetes/pull/97919), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
+- Performance regression [#97685](https://github.com/kubernetes/kubernetes/issues/97685) has been fixed. ([#97860](https://github.com/kubernetes/kubernetes/pull/97860), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery]
+- Remove the deprecated --cleanup-ipvs flag of kube-proxy, and make the --cleanup flag always flush IPVS ([#97336](https://github.com/kubernetes/kubernetes/pull/97336), [@maaoBit](https://github.com/maaoBit)) [SIG Network]
+- The current version of the container image publicly exposed an IP serving a /metrics endpoint to the Internet. The new version of the container image serves the /metrics endpoint on a different port. ([#97621](https://github.com/kubernetes/kubernetes/pull/97621), [@vbannai](https://github.com/vbannai)) [SIG Cloud Provider]
+- Use force unmount for NFS volumes if a regular unmount fails after a 1-minute timeout ([#96844](https://github.com/kubernetes/kubernetes/pull/96844), [@gnufied](https://github.com/gnufied)) [SIG Storage]
- Users will see an increase in pod deletion time, and also the guarantee that removal of a pod from the API server means all of its resources have been deleted from the container runtime. ([#92817](https://github.com/kubernetes/kubernetes/pull/92817), [@kmala](https://github.com/kmala)) [SIG Node]
-- Very large patches may now be specified to `kubectl patch` with the `--patch-file` flag instead of including them directly on the command line (a sketch follows below). The `--patch` and `--patch-file` flags are mutually exclusive. ([#93548](https://github.com/kubernetes/kubernetes/pull/93548), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI]
-- When creating a networking.k8s.io/v1 Ingress API object, `spec.rules[*].http` values are now validated consistently when the `host` field contains a wildcard. ([#93954](https://github.com/kubernetes/kubernetes/pull/93954), [@Miciah](https://github.com/Miciah)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Storage and Testing]
+- Using exec auth plugins with kubectl no longer results in warnings about constructing many client instances from the same exec auth config.
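A sketch of the `--patch-file` flag described above; the deployment name and patch file are hypothetical:

```shell
# Write a patch to a file instead of passing it on the command line
cat > /tmp/replica-patch.json <<'EOF'
{"spec": {"replicas": 5}}
EOF
kubectl patch deployment example-deployment --patch-file=/tmp/replica-patch.json
```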
([#97857](https://github.com/kubernetes/kubernetes/pull/97857), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Auth] +- Warning about using a deprecated volume plugin is logged only once. ([#96751](https://github.com/kubernetes/kubernetes/pull/96751), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] ### Other (Cleanup or Flake) -- --cache-dir sets cache directory for both http and discovery, defaults to $HOME/.kube/cache ([#92910](https://github.com/kubernetes/kubernetes/pull/92910), [@soltysh](https://github.com/soltysh)) [SIG API Machinery and CLI] -- Adds a bootstrapping ClusterRole, ClusterRoleBinding and group for /metrics, /livez/*, /readyz/*, & /healthz/- endpoints. ([#93311](https://github.com/kubernetes/kubernetes/pull/93311), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Auth, Cloud Provider and Instrumentation] -- Base-images: Update to debian-iptables:buster-v1.3.0 - - Uses iptables 1.8.5 - - base-images: Update to debian-base:buster-v1.2.0 - - cluster/images/etcd: Build etcd:3.4.13-1 image - - Uses debian-base:buster-v1.2.0 ([#94733](https://github.com/kubernetes/kubernetes/pull/94733), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing] -- Build: Update to debian-base@v2.1.2 and debian-iptables@v12.1.1 ([#93667](https://github.com/kubernetes/kubernetes/pull/93667), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing] -- Build: Update to debian-base@v2.1.3 and debian-iptables@v12.1.2 ([#93916](https://github.com/kubernetes/kubernetes/pull/93916), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing] -- Build: Update to go-runner:buster-v2.0.0 ([#94167](https://github.com/kubernetes/kubernetes/pull/94167), [@justaugustus](https://github.com/justaugustus)) [SIG Release] -- Fix kubelet to properly log when a container is started. Before, sometimes the log said that a container is dead and was restarted when it was started for the first time. This only happened when using pods with initContainers and regular containers. ([#91469](https://github.com/kubernetes/kubernetes/pull/91469), [@rata](https://github.com/rata)) [SIG Node] -- Fix: license issue in blob disk feature ([#92824](https://github.com/kubernetes/kubernetes/pull/92824), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fixes the flooding warning messages about setting volume ownership for configmap/secret volumes ([#92878](https://github.com/kubernetes/kubernetes/pull/92878), [@jvanz](https://github.com/jvanz)) [SIG Instrumentation, Node and Storage] -- Fixes the message about no auth for metrics in scheduler. ([#94035](https://github.com/kubernetes/kubernetes/pull/94035), [@zhouya0](https://github.com/zhouya0)) [SIG Scheduling] -- Kube-up: defaults to limiting critical pods to the kube-system namespace to match behavior prior to 1.17 ([#93121](https://github.com/kubernetes/kubernetes/pull/93121), [@liggitt](https://github.com/liggitt)) [SIG Cloud Provider and Scheduling] -- Kubeadm: Separate argument key/value in log msg ([#94016](https://github.com/kubernetes/kubernetes/pull/94016), [@mrueg](https://github.com/mrueg)) [SIG Cluster Lifecycle] -- Kubeadm: remove support for the "ci/k8s-master" version label. 
This label has been removed in the Kubernetes CI release process and no longer works in kubeadm. You can use the "ci/latest" version label instead. See kubernetes/test-infra#18517 ([#93626](https://github.com/kubernetes/kubernetes/pull/93626), [@vikkyomkar](https://github.com/vikkyomkar)) [SIG Cluster Lifecycle]
-- Kubeadm: remove the CoreDNS check for known image digests when applying the addon ([#94506](https://github.com/kubernetes/kubernetes/pull/94506), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubernetes is now built with go1.15.0 ([#93939](https://github.com/kubernetes/kubernetes/pull/93939), [@justaugustus](https://github.com/justaugustus)) [SIG Release and Testing]
-- Kubernetes is now built with go1.15.0-rc.2 ([#93827](https://github.com/kubernetes/kubernetes/pull/93827), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node, Release and Testing]
-- Lock ExternalPolicyForExternalIP to default; this feature gate will be removed in 1.22. ([#94581](https://github.com/kubernetes/kubernetes/pull/94581), [@knabben](https://github.com/knabben)) [SIG Network]
-- Service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset is removed. All Standard load balancers will always enable TCP resets. ([#94297](https://github.com/kubernetes/kubernetes/pull/94297), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider]
-- Stop propagating SelfLink (deprecated in 1.16) in kube-apiserver ([#94397](https://github.com/kubernetes/kubernetes/pull/94397), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing]
-- Strip unnecessary security contexts on Windows ([#93475](https://github.com/kubernetes/kubernetes/pull/93475), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node, Testing and Windows]
-- To make the code more robust, add a unit test for GetAddressAndDialer ([#93180](https://github.com/kubernetes/kubernetes/pull/93180), [@FreeZhang61](https://github.com/FreeZhang61)) [SIG Node]
-- Update CNI plugins to v0.8.7 ([#94367](https://github.com/kubernetes/kubernetes/pull/94367), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Network, Node, Release and Testing]
-- Update Golang to v1.14.5
-  - Update repo-infra to 0.0.7 (to support go1.14.5 and go1.13.13)
-  - Includes:
-    - bazelbuild/bazel-toolchains@3.3.2
-    - bazelbuild/rules_go@v0.22.7 ([#93088](https://github.com/kubernetes/kubernetes/pull/93088), [@justaugustus](https://github.com/justaugustus)) [SIG Release and Testing]
-- Update Golang to v1.14.6
-  - Update repo-infra to 0.0.8 (to support go1.14.6 and go1.13.14)
-  - Includes:
-    - bazelbuild/bazel-toolchains@3.4.0
-    - bazelbuild/rules_go@v0.22.8 ([#93198](https://github.com/kubernetes/kubernetes/pull/93198), [@justaugustus](https://github.com/justaugustus)) [SIG Release and Testing]
-- Update cri-tools to [v1.19.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.19.0) ([#94307](https://github.com/kubernetes/kubernetes/pull/94307), [@xmudrii](https://github.com/xmudrii)) [SIG Cloud Provider]
-- Update default etcd server version to 3.4.9 ([#92349](https://github.com/kubernetes/kubernetes/pull/92349), [@jingyih](https://github.com/jingyih)) [SIG API Machinery, Cloud Provider,
Cluster Lifecycle and Testing] -- Update etcd client side to v3.4.13 ([#94259](https://github.com/kubernetes/kubernetes/pull/94259), [@jingyih](https://github.com/jingyih)) [SIG API Machinery and Cloud Provider] -- `kubectl get ingress` now prefers the `networking.k8s.io/v1` over `extensions/v1beta1` (deprecated since v1.14). To explicitly request the deprecated version, use `kubectl get ingress.v1beta1.extensions`. ([#94309](https://github.com/kubernetes/kubernetes/pull/94309), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and CLI] +- Bump github.com/Azure/go-autorest/autorest to v0.11.12 ([#97033](https://github.com/kubernetes/kubernetes/pull/97033), [@patrickshan](https://github.com/patrickshan)) [SIG API Machinery, CLI, Cloud Provider and Cluster Lifecycle] +- Delete deprecated mixed protocol annotation ([#97096](https://github.com/kubernetes/kubernetes/pull/97096), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Kube-proxy: Traffic from the cluster directed to ExternalIPs is always sent directly to the Service. ([#96296](https://github.com/kubernetes/kubernetes/pull/96296), [@aojea](https://github.com/aojea)) [SIG Network and Testing] +- Kubeadm: fix a whitespace issue in the output of the "kubeadm join" command shown as the output of "kubeadm init" and "kubeadm token create --print-join-command" ([#97413](https://github.com/kubernetes/kubernetes/pull/97413), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubeadm: improve the error messaging when the user provides an invalid discovery token CA certificate hash. ([#97290](https://github.com/kubernetes/kubernetes/pull/97290), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Migrate log messages in pkg/scheduler/{scheduler.go,factory.go} to structured logging ([#97509](https://github.com/kubernetes/kubernetes/pull/97509), [@aldudko](https://github.com/aldudko)) [SIG Scheduling] +- Migrate proxy/iptables/proxier.go logs to structured logging ([#97678](https://github.com/kubernetes/kubernetes/pull/97678), [@JornShen](https://github.com/JornShen)) [SIG Network] +- Migrate some scheduler log messages to structured logging ([#97349](https://github.com/kubernetes/kubernetes/pull/97349), [@aldudko](https://github.com/aldudko)) [SIG Scheduling] +- NONE ([#97167](https://github.com/kubernetes/kubernetes/pull/97167), [@geegeea](https://github.com/geegeea)) [SIG Node] +- NetworkPolicy validation framework optimizations for rapidly verifying CNI's work correctly across several pods and namespaces ([#91592](https://github.com/kubernetes/kubernetes/pull/91592), [@jayunit100](https://github.com/jayunit100)) [SIG Network, Storage and Testing] +- Official support to build kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97618](https://github.com/kubernetes/kubernetes/pull/97618), [@jherrera123](https://github.com/jherrera123)) [SIG Release and Testing] +- Scheduler plugin validation now provides all errors detected instead of the first one. 
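The versioned resource name from the `kubectl get ingress` entry above can be requested explicitly:

```shell
# Explicitly request the deprecated group/version
kubectl get ingress.v1beta1.extensions

# The unqualified form now prefers networking.k8s.io/v1
kubectl get ingress
```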
([#96745](https://github.com/kubernetes/kubernetes/pull/96745), [@lingsamuel](https://github.com/lingsamuel)) [SIG Node, Scheduling and Testing] +- Storage related e2e testsuite redesign & cleanup ([#96573](https://github.com/kubernetes/kubernetes/pull/96573), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Storage and Testing] +- The OIDC authenticator no longer waits 10 seconds before attempting to fetch the metadata required to verify tokens. ([#97693](https://github.com/kubernetes/kubernetes/pull/97693), [@enj](https://github.com/enj)) [SIG API Machinery and Auth] +- The `AttachVolumeLimit` feature gate that is GA since v1.17 is now removed. ([#96539](https://github.com/kubernetes/kubernetes/pull/96539), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Storage] +- The `CSINodeInfo` feature gate that is GA since v1.17 is unconditionally enabled, and can no longer be specified via the `--feature-gates` argument. ([#96561](https://github.com/kubernetes/kubernetes/pull/96561), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Apps, Auth, Scheduling, Storage and Testing] +- The deprecated feature gates `RotateKubeletClientCertificate`, `AttachVolumeLimit`, `VolumePVCDataSource` and `EvenPodsSpread` are now unconditionally enabled and can no longer be specified in component invocations. ([#97306](https://github.com/kubernetes/kubernetes/pull/97306), [@gavinfish](https://github.com/gavinfish)) [SIG Node, Scheduling and Storage] +- `ServiceNodeExclusion`, `NodeDisruptionExclusion` and `LegacyNodeRoleBehavior`(locked to false) features have been promoted to GA. + To prevent control plane nodes being added to load balancers automatically, upgrade users need to add "node.kubernetes.io/exclude-from-external-load-balancers" label to control plane nodes. 
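A sketch of applying the exclusion label from the entry above; the node name is hypothetical, and the controllers only check for the presence of the label key:

```shell
kubectl label node control-plane-1 node.kubernetes.io/exclude-from-external-load-balancers=true
```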
([#97543](https://github.com/kubernetes/kubernetes/pull/97543), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery, Apps, Cloud Provider and Network] + +### Uncategorized + +- Adding Brazilian Portuguese translation for kubectl ([#61595](https://github.com/kubernetes/kubernetes/pull/61595), [@cpanato](https://github.com/cpanato)) [SIG CLI] ## Dependencies ### Added -- github.com/Azure/go-autorest: [v14.2.0+incompatible](https://github.com/Azure/go-autorest/tree/v14.2.0) -- github.com/fvbommel/sortorder: [v1.0.1](https://github.com/fvbommel/sortorder/tree/v1.0.1) -- github.com/yuin/goldmark: [v1.1.27](https://github.com/yuin/goldmark/tree/v1.1.27) -- sigs.k8s.io/structured-merge-diff/v4: v4.0.1 +_Nothing has changed._ ### Changed -- github.com/Azure/go-autorest/autorest/adal: [v0.8.2 → v0.9.0](https://github.com/Azure/go-autorest/autorest/adal/compare/v0.8.2...v0.9.0) -- github.com/Azure/go-autorest/autorest/date: [v0.2.0 → v0.3.0](https://github.com/Azure/go-autorest/autorest/date/compare/v0.2.0...v0.3.0) -- github.com/Azure/go-autorest/autorest/mocks: [v0.3.0 → v0.4.0](https://github.com/Azure/go-autorest/autorest/mocks/compare/v0.3.0...v0.4.0) -- github.com/Azure/go-autorest/autorest: [v0.9.6 → v0.11.1](https://github.com/Azure/go-autorest/autorest/compare/v0.9.6...v0.11.1) -- github.com/Azure/go-autorest/logger: [v0.1.0 → v0.2.0](https://github.com/Azure/go-autorest/logger/compare/v0.1.0...v0.2.0) -- github.com/Azure/go-autorest/tracing: [v0.5.0 → v0.6.0](https://github.com/Azure/go-autorest/tracing/compare/v0.5.0...v0.6.0) -- github.com/Microsoft/hcsshim: [v0.8.9 → 5eafd15](https://github.com/Microsoft/hcsshim/compare/v0.8.9...5eafd15) -- github.com/cilium/ebpf: [9f1617e → 1c8d4c9](https://github.com/cilium/ebpf/compare/9f1617e...1c8d4c9) -- github.com/containerd/cgroups: [bf292b2 → 0dbf7f0](https://github.com/containerd/cgroups/compare/bf292b2...0dbf7f0) -- github.com/coredns/corefile-migration: [v1.0.8 → v1.0.10](https://github.com/coredns/corefile-migration/compare/v1.0.8...v1.0.10) -- github.com/evanphx/json-patch: [e83c0a1 → v4.9.0+incompatible](https://github.com/evanphx/json-patch/compare/e83c0a1...v4.9.0) -- github.com/google/cadvisor: [8450c56 → v0.37.0](https://github.com/google/cadvisor/compare/8450c56...v0.37.0) -- github.com/json-iterator/go: [v1.1.9 → v1.1.10](https://github.com/json-iterator/go/compare/v1.1.9...v1.1.10) -- github.com/opencontainers/go-digest: [v1.0.0-rc1 → v1.0.0](https://github.com/opencontainers/go-digest/compare/v1.0.0-rc1...v1.0.0) -- github.com/opencontainers/runc: [1b94395 → 819fcc6](https://github.com/opencontainers/runc/compare/1b94395...819fcc6) -- github.com/prometheus/client_golang: [v1.6.0 → v1.7.1](https://github.com/prometheus/client_golang/compare/v1.6.0...v1.7.1) -- github.com/prometheus/common: [v0.9.1 → v0.10.0](https://github.com/prometheus/common/compare/v0.9.1...v0.10.0) -- github.com/prometheus/procfs: [v0.0.11 → v0.1.3](https://github.com/prometheus/procfs/compare/v0.0.11...v0.1.3) -- github.com/rubiojr/go-vhd: [0bfd3b3 → 02e2102](https://github.com/rubiojr/go-vhd/compare/0bfd3b3...02e2102) -- github.com/storageos/go-api: [343b3ef → 
v2.2.0+incompatible](https://github.com/storageos/go-api/compare/343b3ef...v2.2.0) -- github.com/urfave/cli: [v1.22.1 → v1.22.2](https://github.com/urfave/cli/compare/v1.22.1...v1.22.2) -- go.etcd.io/etcd: 54ba958 → dd1b699 -- golang.org/x/crypto: bac4c82 → 75b2880 -- golang.org/x/mod: v0.1.0 → v0.3.0 -- golang.org/x/net: d3edc99 → ab34263 -- golang.org/x/tools: c00d67e → c1934b7 -- k8s.io/kube-openapi: 656914f → 6aeccd4 -- k8s.io/system-validators: v1.1.2 → v1.2.0 -- k8s.io/utils: 6e3d28b → d5654de +- github.com/Azure/go-autorest/autorest: [v0.11.1 → v0.11.12](https://github.com/Azure/go-autorest/autorest/compare/v0.11.1...v0.11.12) +- github.com/coredns/corefile-migration: [v1.0.10 → v1.0.11](https://github.com/coredns/corefile-migration/compare/v1.0.10...v1.0.11) +- github.com/golang/mock: [v1.4.1 → v1.4.4](https://github.com/golang/mock/compare/v1.4.1...v1.4.4) +- github.com/google/cadvisor: [v0.38.5 → v0.38.6](https://github.com/google/cadvisor/compare/v0.38.5...v0.38.6) +- github.com/heketi/heketi: [c2e2a4a → v10.2.0+incompatible](https://github.com/heketi/heketi/compare/c2e2a4a...v10.2.0) +- github.com/miekg/dns: [v1.1.4 → v1.1.35](https://github.com/miekg/dns/compare/v1.1.4...v1.1.35) +- k8s.io/system-validators: v1.2.0 → v1.3.0 ### Removed -- github.com/godbus/dbus: [ade71ed](https://github.com/godbus/dbus/tree/ade71ed) -- github.com/xlab/handysort: [fb3537e](https://github.com/xlab/handysort/tree/fb3537e) -- sigs.k8s.io/structured-merge-diff/v3: v3.0.0 -- vbom.ml/util: db5cfe1 +- rsc.io/quote/v3: v3.1.0 +- rsc.io/sampler: v1.3.0 diff --git a/content/en/docs/tasks/access-application-cluster/access-cluster.md b/content/en/docs/tasks/access-application-cluster/access-cluster.md index 7f74320118a3d..23d4f133a66a3 100644 --- a/content/en/docs/tasks/access-application-cluster/access-cluster.md +++ b/content/en/docs/tasks/access-application-cluster/access-cluster.md @@ -231,7 +231,7 @@ You have several options for connecting to nodes, pods and services from outside - Use a service with type `NodePort` or `LoadBalancer` to make the service reachable outside the cluster. See the [services](/docs/concepts/services-networking/service/) and [kubectl expose](/docs/reference/generated/kubectl/kubectl-commands/#expose) documentation. - - Depending on your cluster environment, this may just expose the service to your corporate network, + - Depending on your cluster environment, this may only expose the service to your corporate network, or it may expose it to the internet. Think about whether the service being exposed is secure. Does it do its own authentication? - Place pods behind services. To access one specific pod from a set of replicas, such as for debugging, @@ -280,10 +280,10 @@ at `https://104.197.5.247/api/v1/namespaces/kube-system/services/elasticsearch-l #### Manually constructing apiserver proxy URLs -As mentioned above, you use the `kubectl cluster-info` command to retrieve the service's proxy URL. To create proxy URLs that include service endpoints, suffixes, and parameters, you simply append to the service's proxy URL: +As mentioned above, you use the `kubectl cluster-info` command to retrieve the service's proxy URL. 
To create proxy URLs that include service endpoints, suffixes, and parameters, you append to the service's proxy URL:
`http://`*`kubernetes_master_address`*`/api/v1/namespaces/`*`namespace_name`*`/services/`*`service_name[:port_name]`*`/proxy`

-If you haven't specified a name for your port, you don't have to specify *port_name* in the URL.
+If you haven't specified a name for your port, you don't have to specify *port_name* in the URL. You can also use the port number in place of the *port_name* for both named and unnamed ports.

By default, the API server proxies to your service using http. To use https, prefix the service name with `https:`:
`http://`*`kubernetes_master_address`*`/api/v1/namespaces/`*`namespace_name`*`/services/`*`https:service_name:[port_name]`*`/proxy`
@@ -291,9 +291,9 @@ By default, the API server proxies to your service using http. To use https, pre
The supported formats for the name segment of the URL are:
* `<service_name>` - proxies to the default or unnamed port using http
-* `<service_name>:<port_name>` - proxies to the specified port using http
+* `<service_name>:<port_name>` - proxies to the specified port name or port number using http
* `https:<service_name>:` - proxies to the default or unnamed port using https (note the trailing colon)
-* `https:<service_name>:<port_name>` - proxies to the specified port using https
+* `https:<service_name>:<port_name>` - proxies to the specified port name or port number using https
##### Examples
@@ -357,7 +357,7 @@ There are several different proxies you may encounter when using Kubernetes:
   - proxies UDP and TCP
   - does not understand HTTP
   - provides load balancing
-  - is just used to reach services
+  - is only used to reach services
1. A Proxy/Load-balancer in front of apiserver(s):
diff --git a/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md b/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md
index 62ddbdcbbc6d6..0a6d352d2c17e 100644
--- a/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md
+++ b/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md
@@ -7,7 +7,7 @@ min-kubernetes-server-version: v1.10
-This page shows how to use `kubectl port-forward` to connect to a Redis
+This page shows how to use `kubectl port-forward` to connect to a MongoDB
 server running in a Kubernetes cluster. This type of connection can be useful
 for database debugging.
@@ -19,25 +19,25 @@ for database debugging.
 * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
-* Install [redis-cli](http://redis.io/topics/rediscli).
+* Install [MongoDB Shell](https://www.mongodb.com/try/download/shell).
-## Creating Redis deployment and service
+## Creating MongoDB deployment and service
-1. Create a Deployment that runs Redis:
+1. Create a Deployment that runs MongoDB:
    ```shell
-   kubectl apply -f https://k8s.io/examples/application/guestbook/redis-master-deployment.yaml
+   kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-deployment.yaml
    ```
    The output of a successful command verifies that the deployment was created:
    ```
-   deployment.apps/redis-master created
+   deployment.apps/mongo created
    ```
    View the pod status to check that it is ready:
@@ -49,8 +49,8 @@ for database debugging.
    The output displays the pod created:
    ```
-   NAME                            READY   STATUS    RESTARTS   AGE
-   redis-master-765d459796-258hz   1/1     Running   0          50s
+   NAME                     READY   STATUS    RESTARTS   AGE
+   mongo-75f59d57f4-4nd6q   1/1     Running   0          2m4s
    ```
    View the Deployment's status:
@@ -62,8 +62,8 @@ for database debugging.
The output displays that the Deployment was created: ``` - NAME READY UP-TO-DATE AVAILABLE AGE - redis-master 1/1 1 1 55s + NAME READY UP-TO-DATE AVAILABLE AGE + mongo 1/1 1 1 2m21s ``` The Deployment automatically manages a ReplicaSet. @@ -76,50 +76,50 @@ for database debugging. The output displays that the ReplicaSet was created: ``` - NAME DESIRED CURRENT READY AGE - redis-master-765d459796 1 1 1 1m + NAME DESIRED CURRENT READY AGE + mongo-75f59d57f4 1 1 1 3m12s ``` -2. Create a Service to expose Redis on the network: +2. Create a Service to expose MongoDB on the network: ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/redis-master-service.yaml + kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-service.yaml ``` The output of a successful command verifies that the Service was created: ``` - service/redis-master created + service/mongo created ``` Check the Service created: ```shell - kubectl get service redis-master + kubectl get service mongo ``` The output displays the service created: ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - redis-master ClusterIP 10.0.0.213 6379/TCP 27s + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + mongo ClusterIP 10.96.41.183 27017/TCP 11s ``` -3. Verify that the Redis server is running in the Pod, and listening on port 6379: +3. Verify that the MongoDB server is running in the Pod, and listening on port 27017: ```shell - # Change redis-master-765d459796-258hz to the name of the Pod - kubectl get pod redis-master-765d459796-258hz --template='{{(index (index .spec.containers 0).ports 0).containerPort}}{{"\n"}}' + # Change mongo-75f59d57f4-4nd6q to the name of the Pod + kubectl get pod mongo-75f59d57f4-4nd6q --template='{{(index (index .spec.containers 0).ports 0).containerPort}}{{"\n"}}' ``` - The output displays the port for Redis in that Pod: + The output displays the port for MongoDB in that Pod: ``` - 6379 + 27017 ``` - (this is the TCP port allocated to Redis on the internet). + (this is the TCP port allocated to MongoDB on the internet). ## Forward a local port to a port on the Pod @@ -127,39 +127,39 @@ for database debugging. ```shell - # Change redis-master-765d459796-258hz to the name of the Pod - kubectl port-forward redis-master-765d459796-258hz 7000:6379 + # Change mongo-75f59d57f4-4nd6q to the name of the Pod + kubectl port-forward mongo-75f59d57f4-4nd6q 28015:27017 ``` which is the same as ```shell - kubectl port-forward pods/redis-master-765d459796-258hz 7000:6379 + kubectl port-forward pods/mongo-75f59d57f4-4nd6q 28015:27017 ``` or ```shell - kubectl port-forward deployment/redis-master 7000:6379 + kubectl port-forward deployment/mongo 28015:27017 ``` or ```shell - kubectl port-forward replicaset/redis-master 7000:6379 + kubectl port-forward replicaset/mongo-75f59d57f4 28015:27017 ``` or ```shell - kubectl port-forward service/redis-master 7000:redis + kubectl port-forward service/mongo 28015:27017 ``` Any of the above commands works. The output is similar to this: ``` - Forwarding from 127.0.0.1:7000 -> 6379 - Forwarding from [::1]:7000 -> 6379 + Forwarding from 127.0.0.1:28015 -> 27017 + Forwarding from [::1]:28015 -> 27017 ``` {{< note >}} @@ -168,22 +168,22 @@ for database debugging. {{< /note >}} -2. Start the Redis command line interface: +2. Start the MongoDB command line interface: ```shell - redis-cli -p 7000 + mongosh --port 28015 ``` -3. At the Redis command line prompt, enter the `ping` command: +3. 
At the MongoDB command line prompt, enter the `ping` command: ``` - ping + db.runCommand( { ping: 1 } ) ``` A successful ping request returns: ``` - PONG + { ok: 1 } ``` ### Optionally let _kubectl_ choose the local port {#let-kubectl-choose-local-port} @@ -193,15 +193,22 @@ the local port and thus relieve you from having to manage local port conflicts, the slightly simpler syntax: ```shell -kubectl port-forward deployment/redis-master :6379 +kubectl port-forward deployment/mongo :27017 +``` + +The output is similar to this: + +``` +Forwarding from 127.0.0.1:63753 -> 27017 +Forwarding from [::1]:63753 -> 27017 ``` The `kubectl` tool finds a local port number that is not in use (avoiding low ports numbers, because these might be used by other applications). The output is similar to: ``` -Forwarding from 127.0.0.1:62162 -> 6379 -Forwarding from [::1]:62162 -> 6379 +Forwarding from 127.0.0.1:63753 -> 27017 +Forwarding from [::1]:63753 -> 27017 ``` @@ -209,8 +216,8 @@ Forwarding from [::1]:62162 -> 6379 ## Discussion -Connections made to local port 7000 are forwarded to port 6379 of the Pod that -is running the Redis server. With this connection in place, you can use your +Connections made to local port 28015 are forwarded to port 27017 of the Pod that +is running the MongoDB server. With this connection in place, you can use your local workstation to debug the database that is running in the Pod. {{< note >}} diff --git a/content/en/docs/tasks/administer-cluster/access-cluster-api.md b/content/en/docs/tasks/administer-cluster/access-cluster-api.md index ffe200b118200..0275cadabff72 100644 --- a/content/en/docs/tasks/administer-cluster/access-cluster-api.md +++ b/content/en/docs/tasks/administer-cluster/access-cluster-api.md @@ -192,7 +192,7 @@ func main() { } ``` -If the application is deployed as a Pod in the cluster, see [Accessing the API from within a Pod](#accessing-the-api-from-within-a-pod). +If the application is deployed as a Pod in the cluster, see [Accessing the API from within a Pod](/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod). #### Python client @@ -215,7 +215,7 @@ for i in ret.items: #### Java client -* To install the [Java Client](https://github.com/kubernetes-client/java), simply execute : +To install the [Java Client](https://github.com/kubernetes-client/java), run: ```shell # Clone java library @@ -352,102 +352,6 @@ exampleWithKubeConfig = do >>= print ``` +## {{% heading "whatsnext" %}} -### Accessing the API from within a Pod - -When accessing the API from within a Pod, locating and authenticating -to the API server are slightly different to the external client case described above. - -The easiest way to use the Kubernetes API from a Pod is to use -one of the official [client libraries](/docs/reference/using-api/client-libraries/). These -libraries can automatically discover the API server and authenticate. - -#### Using Official Client Libraries - -From within a Pod, the recommended ways to connect to the Kubernetes API are: - - - For a Go client, use the official [Go client library](https://github.com/kubernetes/client-go/). - The `rest.InClusterConfig()` function handles API host discovery and authentication automatically. - See [an example here](https://git.k8s.io/client-go/examples/in-cluster-client-configuration/main.go). - - - For a Python client, use the official [Python client library](https://github.com/kubernetes-client/python/). 
- The `config.load_incluster_config()` function handles API host discovery and authentication automatically. - See [an example here](https://github.com/kubernetes-client/python/blob/master/examples/in_cluster_config.py). - - - There are a number of other libraries available, please refer to the [Client Libraries](/docs/reference/using-api/client-libraries/) page. - -In each case, the service account credentials of the Pod are used to communicate -securely with the API server. - -#### Directly accessing the REST API - -While running in a Pod, the Kubernetes apiserver is accessible via a Service named -`kubernetes` in the `default` namespace. Therefore, Pods can use the -`kubernetes.default.svc` hostname to query the API server. Official client libraries -do this automatically. - -The recommended way to authenticate to the API server is with a -[service account](/docs/tasks/configure-pod-container/configure-service-account/) credential. By default, a Pod -is associated with a service account, and a credential (token) for that -service account is placed into the filesystem tree of each container in that Pod, -at `/var/run/secrets/kubernetes.io/serviceaccount/token`. - -If available, a certificate bundle is placed into the filesystem tree of each -container at `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt`, and should be -used to verify the serving certificate of the API server. - -Finally, the default namespace to be used for namespaced API operations is placed in a file -at `/var/run/secrets/kubernetes.io/serviceaccount/namespace` in each container. - -#### Using kubectl proxy - -If you would like to query the API without an official client library, you can run `kubectl proxy` -as the [command](/docs/tasks/inject-data-application/define-command-argument-container/) -of a new sidecar container in the Pod. This way, `kubectl proxy` will authenticate -to the API and expose it on the `localhost` interface of the Pod, so that other containers -in the Pod can use it directly. - -#### Without using a proxy - -It is possible to avoid using the kubectl proxy by passing the authentication token -directly to the API server. The internal certificate secures the connection. 
- -```shell -# Point to the internal API server hostname -APISERVER=https://kubernetes.default.svc - -# Path to ServiceAccount token -SERVICEACCOUNT=/var/run/secrets/kubernetes.io/serviceaccount - -# Read this Pod's namespace -NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) - -# Read the ServiceAccount bearer token -TOKEN=$(cat ${SERVICEACCOUNT}/token) - -# Reference the internal certificate authority (CA) -CACERT=${SERVICEACCOUNT}/ca.crt - -# Explore the API with TOKEN -curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -X GET ${APISERVER}/api -``` - -The output will be similar to this: - -```json -{ - "kind": "APIVersions", - "versions": [ - "v1" - ], - "serverAddressByClientCIDRs": [ - { - "clientCIDR": "0.0.0.0/0", - "serverAddress": "10.0.1.149:443" - } - ] -} -``` - - - +* [Accessing the Kubernetes API from a Pod](/docs/tasks/run-application/access-api-from-pod/) diff --git a/content/en/docs/tasks/administer-cluster/access-cluster-services.md b/content/en/docs/tasks/administer-cluster/access-cluster-services.md index c318a3df35388..927e05b77a467 100644 --- a/content/en/docs/tasks/administer-cluster/access-cluster-services.md +++ b/content/en/docs/tasks/administer-cluster/access-cluster-services.md @@ -31,7 +31,7 @@ You have several options for connecting to nodes, pods and services from outside - Use a service with type `NodePort` or `LoadBalancer` to make the service reachable outside the cluster. See the [services](/docs/concepts/services-networking/service/) and [kubectl expose](/docs/reference/generated/kubectl/kubectl-commands/#expose) documentation. - - Depending on your cluster environment, this may just expose the service to your corporate network, + - Depending on your cluster environment, this may only expose the service to your corporate network, or it may expose it to the internet. Think about whether the service being exposed is secure. Does it do its own authentication? - Place pods behind services. To access one specific pod from a set of replicas, such as for debugging, @@ -83,7 +83,7 @@ See [Access Clusters Using the Kubernetes API](/docs/tasks/administer-cluster/ac #### Manually constructing apiserver proxy URLs -As mentioned above, you use the `kubectl cluster-info` command to retrieve the service's proxy URL. To create proxy URLs that include service endpoints, suffixes, and parameters, you simply append to the service's proxy URL: +As mentioned above, you use the `kubectl cluster-info` command to retrieve the service's proxy URL. To create proxy URLs that include service endpoints, suffixes, and parameters, you append to the service's proxy URL: `http://`*`kubernetes_master_address`*`/api/v1/namespaces/`*`namespace_name`*`/services/`*`[https:]service_name[:port_name]`*`/proxy` If you haven't specified a name for your port, you don't have to specify *port_name* in the URL. diff --git a/content/en/docs/tasks/administer-cluster/certificates.md b/content/en/docs/tasks/administer-cluster/certificates.md new file mode 100644 index 0000000000000..6361b20d16992 --- /dev/null +++ b/content/en/docs/tasks/administer-cluster/certificates.md @@ -0,0 +1,252 @@ +--- +title: Certificates +content_type: task +weight: 20 +--- + + + + +When using client certificate authentication, you can generate certificates +manually through `easyrsa`, `openssl` or `cfssl`. + + + + + + +### easyrsa + +**easyrsa** can manually generate certificates for your cluster. + +1. Download, unpack, and initialize the patched version of easyrsa3. 
+
+        curl -LO https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz
+        tar xzf easy-rsa.tar.gz
+        cd easy-rsa-master/easyrsa3
+        ./easyrsa init-pki
+1. Generate a new certificate authority (CA). `--batch` sets automatic mode;
+   `--req-cn` specifies the Common Name (CN) for the CA's new root certificate.
+
+        ./easyrsa --batch "--req-cn=${MASTER_IP}@`date +%s`" build-ca nopass
+1. Generate a server certificate and key.
+   The argument `--subject-alt-name` sets the possible IPs and DNS names the API server will
+   be accessed with. The `MASTER_CLUSTER_IP` is usually the first IP from the service CIDR
+   that is specified as the `--service-cluster-ip-range` argument for both the API server and
+   the controller manager component. The argument `--days` is used to set the number of days
+   after which the certificate expires.
+   The sample below also assumes that you are using `cluster.local` as the default
+   DNS domain name.
+
+        ./easyrsa --subject-alt-name="IP:${MASTER_IP},"\
+        "IP:${MASTER_CLUSTER_IP},"\
+        "DNS:kubernetes,"\
+        "DNS:kubernetes.default,"\
+        "DNS:kubernetes.default.svc,"\
+        "DNS:kubernetes.default.svc.cluster,"\
+        "DNS:kubernetes.default.svc.cluster.local" \
+        --days=10000 \
+        build-server-full server nopass
+1. Copy `pki/ca.crt`, `pki/issued/server.crt`, and `pki/private/server.key` to your directory.
+1. Fill in and add the following parameters into the API server start parameters:
+
+        --client-ca-file=/yourdirectory/ca.crt
+        --tls-cert-file=/yourdirectory/server.crt
+        --tls-private-key-file=/yourdirectory/server.key
+
+### openssl
+
+**openssl** can manually generate certificates for your cluster.
+
+1. Generate a 2048-bit ca.key:
+
+        openssl genrsa -out ca.key 2048
+1. Using ca.key, generate a ca.crt (use `-days` to set the certificate validity period):
+
+        openssl req -x509 -new -nodes -key ca.key -subj "/CN=${MASTER_IP}" -days 10000 -out ca.crt
+1. Generate a 2048-bit server.key:
+
+        openssl genrsa -out server.key 2048
+1. Create a config file for generating a Certificate Signing Request (CSR).
+   Be sure to substitute the values marked with angle brackets (e.g. `<MASTER_IP>`)
+   with real values before saving this to a file (e.g. `csr.conf`).
+   Note that the value for `MASTER_CLUSTER_IP` is the service cluster IP for the
+   API server as described in the previous subsection.
+   The sample below also assumes that you are using `cluster.local` as the default
+   DNS domain name.
+
+        [ req ]
+        default_bits = 2048
+        prompt = no
+        default_md = sha256
+        req_extensions = req_ext
+        distinguished_name = dn
+
+        [ dn ]
+        C = <country>
+        ST = <state>
+        L = <city>
+        O = <organization>
+        OU = <organization unit>
+        CN = <MASTER_IP>
+
+        [ req_ext ]
+        subjectAltName = @alt_names
+
+        [ alt_names ]
+        DNS.1 = kubernetes
+        DNS.2 = kubernetes.default
+        DNS.3 = kubernetes.default.svc
+        DNS.4 = kubernetes.default.svc.cluster
+        DNS.5 = kubernetes.default.svc.cluster.local
+        IP.1 = <MASTER_IP>
+        IP.2 = <MASTER_CLUSTER_IP>
+
+        [ v3_ext ]
+        authorityKeyIdentifier=keyid,issuer:always
+        basicConstraints=CA:FALSE
+        keyUsage=keyEncipherment,dataEncipherment
+        extendedKeyUsage=serverAuth,clientAuth
+        subjectAltName=@alt_names
+1. Generate the certificate signing request based on the config file:
+
+        openssl req -new -key server.key -out server.csr -config csr.conf
+1. Generate the server certificate using the ca.key, ca.crt and server.csr:
+
+        openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key \
+        -CAcreateserial -out server.crt -days 10000 \
+        -extensions v3_ext -extfile csr.conf
+1. View the certificate:
+
+        openssl x509 -noout -text -in ./server.crt
+
+Finally, add the same parameters into the API server start parameters.
+
+### cfssl
+
+**cfssl** is another tool for certificate generation.
+
+1. Download, unpack and prepare the command line tools as shown below.
+   Note that you may need to adapt the sample commands based on the hardware
+   architecture and cfssl version you are using.
+
+        curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssl_1.5.0_linux_amd64 -o cfssl
+        chmod +x cfssl
+        curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssljson_1.5.0_linux_amd64 -o cfssljson
+        chmod +x cfssljson
+        curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssl-certinfo_1.5.0_linux_amd64 -o cfssl-certinfo
+        chmod +x cfssl-certinfo
+1. Create a directory to hold the artifacts and initialize cfssl:
+
+        mkdir cert
+        cd cert
+        ../cfssl print-defaults config > config.json
+        ../cfssl print-defaults csr > csr.json
+1. Create a JSON config file for generating the CA file, for example, `ca-config.json`:
+
+        {
+          "signing": {
+            "default": {
+              "expiry": "8760h"
+            },
+            "profiles": {
+              "kubernetes": {
+                "usages": [
+                  "signing",
+                  "key encipherment",
+                  "server auth",
+                  "client auth"
+                ],
+                "expiry": "8760h"
+              }
+            }
+          }
+        }
+1. Create a JSON config file for CA certificate signing request (CSR), for example,
+   `ca-csr.json`. Be sure to replace the values marked with angle brackets with
+   real values you want to use.
+
+        {
+          "CN": "kubernetes",
+          "key": {
+            "algo": "rsa",
+            "size": 2048
+          },
+          "names":[{
+            "C": "<country>",
+            "ST": "<state>",
+            "L": "<city>",
+            "O": "<organization>",
+            "OU": "<organization unit>"
+          }]
+        }
+1. Generate CA key (`ca-key.pem`) and certificate (`ca.pem`):
+
+        ../cfssl gencert -initca ca-csr.json | ../cfssljson -bare ca
+1. Create a JSON config file for generating keys and certificates for the API
+   server, for example, `server-csr.json`. Be sure to replace the values in angle brackets with
+   real values you want to use. The `MASTER_CLUSTER_IP` is the service cluster
+   IP for the API server as described in the previous subsection.
+   The sample below also assumes that you are using `cluster.local` as the default
+   DNS domain name.
+
+        {
+          "CN": "kubernetes",
+          "hosts": [
+            "127.0.0.1",
+            "<MASTER_IP>",
+            "<MASTER_CLUSTER_IP>",
+            "kubernetes",
+            "kubernetes.default",
+            "kubernetes.default.svc",
+            "kubernetes.default.svc.cluster",
+            "kubernetes.default.svc.cluster.local"
+          ],
+          "key": {
+            "algo": "rsa",
+            "size": 2048
+          },
+          "names": [{
+            "C": "<country>",
+            "ST": "<state>",
+            "L": "<city>",
+            "O": "<organization>",
+            "OU": "<organization unit>"
+          }]
+        }
+1. Generate the key and certificate for the API server, which are by default
+   saved into files `server-key.pem` and `server.pem` respectively:
+
+        ../cfssl gencert -ca=ca.pem -ca-key=ca-key.pem \
+        --config=ca-config.json -profile=kubernetes \
+        server-csr.json | ../cfssljson -bare server
+
+
+## Distributing Self-Signed CA Certificate
+
+A client node may refuse to recognize a self-signed CA certificate as valid.
+For a non-production deployment, or for a deployment that runs behind a company
+firewall, you can distribute a self-signed CA certificate to all clients and
+refresh the local list of valid certificates.
+
+On each client, perform the following operations:
+
+```bash
+sudo cp ca.crt /usr/local/share/ca-certificates/kubernetes.crt
+sudo update-ca-certificates
+```
+
+```
+Updating certificates in /etc/ssl/certs...
+1 added, 0 removed; done.
+Running hooks in /etc/ca-certificates/update.d....
+done.
+```
+
+## Certificates API
+
+You can use the `certificates.k8s.io` API to provision
+x509 certificates to use for authentication as documented
+[here](/docs/tasks/tls/managing-tls-in-a-cluster).
+
+
diff --git a/content/en/docs/tasks/administer-cluster/change-default-storage-class.md b/content/en/docs/tasks/administer-cluster/change-default-storage-class.md
index 9c08a2a4ad04d..a365fd4ffccac 100644
--- a/content/en/docs/tasks/administer-cluster/change-default-storage-class.md
+++ b/content/en/docs/tasks/administer-cluster/change-default-storage-class.md
@@ -32,7 +32,7 @@ for example, it might provision storage that is too expensive. If this is the ca
 you can either change the default StorageClass or disable it completely to avoid
 dynamic provisioning of storage.
 
-Simply deleting the default StorageClass may not work, as it may be re-created
+Deleting the default StorageClass may not work, as it may be re-created
 automatically by the addon manager running in your cluster. Please consult the docs for your installation
 for details about addon manager and how to disable individual addons.
@@ -70,7 +70,7 @@ for details about addon manager and how to disable individual addons.
 1. Mark a StorageClass as default:
 
-    Similarly to the previous step, you need to add/set the annotation
+    Similar to the previous step, you need to add/set the annotation
     `storageclass.kubernetes.io/is-default-class=true`.
 
     ```bash
diff --git a/content/en/docs/tasks/administer-cluster/cluster-upgrade.md b/content/en/docs/tasks/administer-cluster/cluster-upgrade.md
index 1e2cc422e4e9c..6e9dc302c445c 100644
--- a/content/en/docs/tasks/administer-cluster/cluster-upgrade.md
+++ b/content/en/docs/tasks/administer-cluster/cluster-upgrade.md
@@ -34,7 +34,7 @@ If your cluster was deployed using the `kubeadm` tool, refer to
 for detailed information on how to upgrade the cluster.
 
 Once you have upgraded the cluster, remember to
-[install the latest version of `kubectl`](/docs/tasks/tools/install-kubectl/).
+[install the latest version of `kubectl`](/docs/tasks/tools/).
 
 ### Manual deployments
@@ -52,7 +52,7 @@ You should manually update the control plane following this sequence:
 - cloud controller manager, if you use one
 
 At this point you should
-[install the latest version of `kubectl`](/docs/tasks/tools/install-kubectl/).
+[install the latest version of `kubectl`](/docs/tasks/tools/).
 
 For each node in your cluster, [drain](/docs/tasks/administer-cluster/safely-drain-node/)
 that node and then either replace it with a new node that uses the {{< skew latestVersion >}}
diff --git a/content/en/docs/tasks/administer-cluster/configure-upgrade-etcd.md b/content/en/docs/tasks/administer-cluster/configure-upgrade-etcd.md
index 16fce652cc691..ffa688e1b76d0 100644
--- a/content/en/docs/tasks/administer-cluster/configure-upgrade-etcd.md
+++ b/content/en/docs/tasks/administer-cluster/configure-upgrade-etcd.md
@@ -10,35 +10,40 @@ content_type: task
 {{< glossary_definition term_id="etcd" length="all" prepend="etcd is a ">}}
 
-
-
-
 ## {{% heading "prerequisites" %}}
 
-
 {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
 
-
-
 ## Prerequisites
 
 * Run etcd as a cluster of odd members.
 
-* etcd is a leader-based distributed system. Ensure that the leader periodically send heartbeats on time to all followers to keep the cluster stable.
+* etcd is a leader-based distributed system. Ensure that the leader
+  periodically sends heartbeats on time to all followers to keep the cluster
+  stable.
 
* Ensure that no resource starvation occurs.
-  Performance and stability of the cluster is sensitive to network and disk IO. Any resource starvation can lead to heartbeat timeout, causing instability of the cluster. An unstable etcd indicates that no leader is elected. Under such circumstances, a cluster cannot make any changes to its current state, which implies no new pods can be scheduled.
+  Performance and stability of the cluster are sensitive to network and disk
+  I/O. Any resource starvation can lead to heartbeat timeout, causing instability
+  of the cluster. An unstable etcd indicates that no leader is elected. Under
+  such circumstances, a cluster cannot make any changes to its current state,
+  which implies no new pods can be scheduled.
 
-* Keeping stable etcd clusters is critical to the stability of Kubernetes clusters. Therefore, run etcd clusters on dedicated machines or isolated environments for [guaranteed resource requirements](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/hardware.md#hardware-recommendations).
+* Keeping etcd clusters stable is critical to the stability of Kubernetes
+  clusters. Therefore, run etcd clusters on dedicated machines or isolated
+  environments for [guaranteed resource requirements](https://etcd.io/docs/current/op-guide/hardware/).
 
 * The minimum recommended version of etcd to run in production is `3.2.10+`.
 
 ## Resource requirements
 
-Operating etcd with limited resources is suitable only for testing purposes. For deploying in production, advanced hardware configuration is required. Before deploying etcd in production, see [resource requirement reference documentation](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/hardware.md#example-hardware-configurations).
+Operating etcd with limited resources is suitable only for testing purposes.
+For deploying in production, advanced hardware configuration is required.
+Before deploying etcd in production, see
+[resource requirement reference](https://etcd.io/docs/current/op-guide/hardware/#example-hardware-configurations).
 
 ## Starting etcd clusters
 
@@ -50,33 +55,43 @@ Use a single-node etcd cluster only for testing purpose.
 
 1. Run the following:
 
-    ```sh
-    ./etcd --listen-client-urls=http://$PRIVATE_IP:2379 --advertise-client-urls=http://$PRIVATE_IP:2379
-    ```
+    ```sh
+    etcd --listen-client-urls=http://$PRIVATE_IP:2379 \
+      --advertise-client-urls=http://$PRIVATE_IP:2379
+    ```
 
-2. Start Kubernetes API server with the flag `--etcd-servers=$PRIVATE_IP:2379`.
+2. Start the Kubernetes API server with the flag
+   `--etcd-servers=$PRIVATE_IP:2379`.
 
-    Replace `PRIVATE_IP` with your etcd client IP.
+    Make sure `PRIVATE_IP` is set to your etcd client IP.
 
 ### Multi-node etcd cluster
 
-For durability and high availability, run etcd as a multi-node cluster in production and back it up periodically. A five-member cluster is recommended in production. For more information, see [FAQ Documentation](https://github.com/coreos/etcd/blob/master/Documentation/faq.md#what-is-failure-tolerance).
+For durability and high availability, run etcd as a multi-node cluster in
+production and back it up periodically. A five-member cluster is recommended
+in production. For more information, see
+[FAQ documentation](https://etcd.io/docs/current/faq/#what-is-failure-tolerance).
 
-Configure an etcd cluster either by static member information or by dynamic discovery. For more information on clustering, see [etcd Clustering Documentation](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/clustering.md).
+Configure an etcd cluster either by static member information or by dynamic
+discovery. For more information on clustering, see
+[etcd clustering documentation](https://etcd.io/docs/current/op-guide/clustering/).
 
-For an example, consider a five-member etcd cluster running with the following client URLs: `http://$IP1:2379`, `http://$IP2:2379`, `http://$IP3:2379`, `http://$IP4:2379`, and `http://$IP5:2379`. To start a Kubernetes API server:
+As an example, consider a five-member etcd cluster running with the following
+client URLs: `http://$IP1:2379`, `http://$IP2:2379`, `http://$IP3:2379`,
+`http://$IP4:2379`, and `http://$IP5:2379`. To start a Kubernetes API server:
 
 1. Run the following:
 
-    ```sh
-    ./etcd --listen-client-urls=http://$IP1:2379, http://$IP2:2379, http://$IP3:2379, http://$IP4:2379, http://$IP5:2379 --advertise-client-urls=http://$IP1:2379, http://$IP2:2379, http://$IP3:2379, http://$IP4:2379, http://$IP5:2379
-    ```
+    ```shell
+    etcd --listen-client-urls=http://$IP1:2379,http://$IP2:2379,http://$IP3:2379,http://$IP4:2379,http://$IP5:2379 --advertise-client-urls=http://$IP1:2379,http://$IP2:2379,http://$IP3:2379,http://$IP4:2379,http://$IP5:2379
+    ```
 
-2. Start Kubernetes API servers with the flag `--etcd-servers=$IP1:2379, $IP2:2379, $IP3:2379, $IP4:2379, $IP5:2379`.
+2. Start the Kubernetes API servers with the flag
+   `--etcd-servers=$IP1:2379,$IP2:2379,$IP3:2379,$IP4:2379,$IP5:2379`.
 
-    Replace `IP` with your client IP addresses.
+    Make sure the `IP` variables are set to your client IP addresses.
 
-### Multi-node etcd cluster with load balancer
+### Multi-node etcd cluster with load balancer
 
 To run a load balancing etcd cluster:
@@ -87,92 +102,169 @@ To run a load balancing etcd cluster:
 
 ## Securing etcd clusters
 
-Access to etcd is equivalent to root permission in the cluster so ideally only the API server should have access to it. Considering the sensitivity of the data, it is recommended to grant permission to only those nodes that require access to etcd clusters.
-
-To secure etcd, either set up firewall rules or use the security features provided by etcd. etcd security features depend on x509 Public Key Infrastructure (PKI). To begin, establish secure communication channels by generating a key and certificate pair. For example, use key pairs `peer.key` and `peer.cert` for securing communication between etcd members, and `client.key` and `client.cert` for securing communication between etcd and its clients. See the [example scripts](https://github.com/coreos/etcd/tree/master/hack/tls-setup) provided by the etcd project to generate key pairs and CA files for client authentication.
+Access to etcd is equivalent to root permission in the cluster, so ideally only
+the API server should have access to it. Considering the sensitivity of the
+data, it is recommended to grant permission to only those nodes that require
+access to etcd clusters.
+
+To secure etcd, either set up firewall rules or use the security features
+provided by etcd. etcd security features depend on x509 Public Key
+Infrastructure (PKI). To begin, establish secure communication channels by
+generating a key and certificate pair. For example, use key pairs `peer.key`
+and `peer.cert` for securing communication between etcd members, and
+`client.key` and `client.cert` for securing communication between etcd and its
+clients. See the [example scripts](https://github.com/coreos/etcd/tree/master/hack/tls-setup)
+provided by the etcd project to generate key pairs and CA files for client
+authentication.
 
 ### Securing communication
 
-To configure etcd with secure peer communication, specify flags `--peer-key-file=peer.key` and `--peer-cert-file=peer.cert`, and use https as URL schema.
+To configure etcd with secure peer communication, specify flags
+`--peer-key-file=peer.key` and `--peer-cert-file=peer.cert`, and use HTTPS as
+the URL scheme.
 
-Similarly, to configure etcd with secure client communication, specify flags `--key-file=k8sclient.key` and `--cert-file=k8sclient.cert`, and use https as URL schema.
+Similarly, to configure etcd with secure client communication, specify flags
+`--key-file=k8sclient.key` and `--cert-file=k8sclient.cert`, and use HTTPS as
+the URL scheme. Here is an example of a client command that uses secure
+communication:
+
+```shell
+ETCDCTL_API=3 etcdctl --endpoints 10.2.0.9:2379 \
+  --cert=/etc/kubernetes/pki/etcd/server.crt \
+  --key=/etc/kubernetes/pki/etcd/server.key \
+  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
+  member list
+```
 
 ### Limiting access of etcd clusters
 
-After configuring secure communication, restrict the access of etcd cluster to only the Kubernetes API server. Use TLS authentication to do so.
+After configuring secure communication, restrict the access of the etcd cluster
+to only the Kubernetes API servers. Use TLS authentication to do so.
 
-For example, consider key pairs `k8sclient.key` and `k8sclient.cert` that are trusted by the CA `etcd.ca`. When etcd is configured with `--client-cert-auth` along with TLS, it verifies the certificates from clients by using system CAs or the CA passed in by `--trusted-ca-file` flag. Specifying flags `--client-cert-auth=true` and `--trusted-ca-file=etcd.ca` will restrict the access to clients with the certificate `k8sclient.cert`.
+For example, consider key pairs `k8sclient.key` and `k8sclient.cert` that are
+trusted by the CA `etcd.ca`. When etcd is configured with `--client-cert-auth`
+along with TLS, it verifies the certificates from clients by using system CAs
+or the CA passed in by `--trusted-ca-file` flag. Specifying flags
+`--client-cert-auth=true` and `--trusted-ca-file=etcd.ca` will restrict the
+access to clients with the certificate `k8sclient.cert`.
 
-Once etcd is configured correctly, only clients with valid certificates can access it. To give Kubernetes API server the access, configure it with the flags `--etcd-certfile=k8sclient.cert`,`--etcd-keyfile=k8sclient.key` and `--etcd-cafile=ca.cert`.
+Once etcd is configured correctly, only clients with valid certificates can
+access it. To give the Kubernetes API servers access, configure them with the
+flags `--etcd-certfile=k8sclient.cert`, `--etcd-keyfile=k8sclient.key` and
+`--etcd-cafile=ca.cert`.
 
 {{< note >}}
-etcd authentication is not currently supported by Kubernetes. For more information, see the related issue [Support Basic Auth for Etcd v2](https://github.com/kubernetes/kubernetes/issues/23398).
+etcd authentication is not currently supported by Kubernetes. For more
+information, see the related issue
+[Support Basic Auth for Etcd v2](https://github.com/kubernetes/kubernetes/issues/23398).
 {{< /note >}}
 
 ## Replacing a failed etcd member
 
-etcd cluster achieves high availability by tolerating minor member failures. However, to improve the overall health of the cluster, replace failed members immediately.
When multiple members fail, replace them one by one. Replacing a failed member involves two steps: removing the failed member and adding a new member.
+An etcd cluster achieves high availability by tolerating minor member failures.
+However, to improve the overall health of the cluster, replace failed members
+immediately. When multiple members fail, replace them one by one. Replacing a
+failed member involves two steps: removing the failed member and adding a new
+member.
 
-Though etcd keeps unique member IDs internally, it is recommended to use a unique name for each member to avoid human errors. For example, consider a three-member etcd cluster. Let the URLs be, member1=http://10.0.0.1, member2=http://10.0.0.2, and member3=http://10.0.0.3. When member1 fails, replace it with member4=http://10.0.0.4.
+Though etcd keeps unique member IDs internally, it is recommended to use a
+unique name for each member to avoid human errors. For example, consider a
+three-member etcd cluster. Let the URLs be `member1=http://10.0.0.1`,
+`member2=http://10.0.0.2`, and `member3=http://10.0.0.3`. When `member1` fails,
+replace it with `member4=http://10.0.0.4`.
 
-1. Get the member ID of the failed member1:
+1. Get the member ID of the failed `member1`:
 
-    `etcdctl --endpoints=http://10.0.0.2,http://10.0.0.3 member list`
+   ```shell
+   etcdctl --endpoints=http://10.0.0.2,http://10.0.0.3 member list
+   ```
 
-    The following message is displayed:
+   The following message is displayed:
 
-    8211f1d0f64f3269, started, member1, http://10.0.0.1:2380, http://10.0.0.1:2379
-    91bc3c398fb3c146, started, member2, http://10.0.0.2:2380, http://10.0.0.2:2379
-    fd422379fda50e48, started, member3, http://10.0.0.3:2380, http://10.0.0.3:2379
+   ```console
+   8211f1d0f64f3269, started, member1, http://10.0.0.1:2380, http://10.0.0.1:2379
+   91bc3c398fb3c146, started, member2, http://10.0.0.2:2380, http://10.0.0.2:2379
+   fd422379fda50e48, started, member3, http://10.0.0.3:2380, http://10.0.0.3:2379
+   ```
 
 2. Remove the failed member:
 
-    `etcdctl member remove 8211f1d0f64f3269`
+   ```shell
+   etcdctl member remove 8211f1d0f64f3269
+   ```
 
-    The following message is displayed:
+   The following message is displayed:
 
-    Removed member 8211f1d0f64f3269 from cluster
+   ```console
+   Removed member 8211f1d0f64f3269 from cluster
+   ```
 
 3. Add the new member:
 
-    `./etcdctl member add member4 --peer-urls=http://10.0.0.4:2380`
+   ```shell
+   etcdctl member add member4 --peer-urls=http://10.0.0.4:2380
+   ```
 
-    The following message is displayed:
+   The following message is displayed:
 
-    Member 2be1eb8f84b7f63e added to cluster ef37ad9dc622a7c4
+   ```console
+   Member 2be1eb8f84b7f63e added to cluster ef37ad9dc622a7c4
+   ```
 
 4. Start the newly added member on a machine with the IP `10.0.0.4`:
 
-    export ETCD_NAME="member4"
-    export ETCD_INITIAL_CLUSTER="member2=http://10.0.0.2:2380,member3=http://10.0.0.3:2380,member4=http://10.0.0.4:2380"
-    export ETCD_INITIAL_CLUSTER_STATE=existing
-    etcd [flags]
+   ```shell
+   export ETCD_NAME="member4"
+   export ETCD_INITIAL_CLUSTER="member2=http://10.0.0.2:2380,member3=http://10.0.0.3:2380,member4=http://10.0.0.4:2380"
+   export ETCD_INITIAL_CLUSTER_STATE=existing
+   etcd [flags]
+   ```
 
 5. Do either of the following:
 
-    1. Update its `--etcd-servers` flag to make Kubernetes aware of the configuration changes, then restart the Kubernetes API server.
-    2. Update the load balancer configuration if a load balancer is used in the deployment.
+   1.
Update the `--etcd-servers` flag for the Kubernetes API servers to make + Kubernetes aware of the configuration changes, then restart the + Kubernetes API servers. + 2. Update the load balancer configuration if a load balancer is used in the + deployment. -For more information on cluster reconfiguration, see [etcd Reconfiguration Documentation](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/runtime-configuration.md#remove-a-member). +For more information on cluster reconfiguration, see +[etcd reconfiguration documentation](https://etcd.io/docs/current/op-guide/runtime-configuration/#remove-a-member). ## Backing up an etcd cluster -All Kubernetes objects are stored on etcd. Periodically backing up the etcd cluster data is important to recover Kubernetes clusters under disaster scenarios, such as losing all master nodes. The snapshot file contains all the Kubernetes states and critical information. In order to keep the sensitive Kubernetes data safe, encrypt the snapshot files. +All Kubernetes objects are stored on etcd. Periodically backing up the etcd +cluster data is important to recover Kubernetes clusters under disaster +scenarios, such as losing all control plane nodes. The snapshot file contains +all the Kubernetes states and critical information. In order to keep the +sensitive Kubernetes data safe, encrypt the snapshot files. -Backing up an etcd cluster can be accomplished in two ways: etcd built-in snapshot and volume snapshot. +Backing up an etcd cluster can be accomplished in two ways: etcd built-in +snapshot and volume snapshot. ### Built-in snapshot -etcd supports built-in snapshot, so backing up an etcd cluster is easy. A snapshot may either be taken from a live member with the `etcdctl snapshot save` command or by copying the `member/snap/db` file from an etcd [data directory](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/configuration.md#--data-dir) that is not currently used by an etcd process. Taking the snapshot will normally not affect the performance of the member. +etcd supports built-in snapshot. A snapshot may either be taken from a live +member with the `etcdctl snapshot save` command or by copying the +`member/snap/db` file from an etcd +[data directory](https://etcd.io/docs/current/op-guide/configuration/#--data-dir) +that is not currently used by an etcd process. Taking the snapshot will +not affect the performance of the member. -Below is an example for taking a snapshot of the keyspace served by `$ENDPOINT` to the file `snapshotdb`: +Below is an example for taking a snapshot of the keyspace served by +`$ENDPOINT` to the file `snapshotdb`: -```sh +```shell ETCDCTL_API=3 etcdctl --endpoints $ENDPOINT snapshot save snapshotdb -# exit 0 +``` -# verify the snapshot +Verify the snapshot: + +```shell ETCDCTL_API=3 etcdctl --write-out=table snapshot status snapshotdb +``` + +```console +----------+----------+------------+------------+ | HASH | REVISION | TOTAL KEYS | TOTAL SIZE | +----------+----------+------------+------------+ @@ -182,74 +274,86 @@ ETCDCTL_API=3 etcdctl --write-out=table snapshot status snapshotdb ### Volume snapshot -If etcd is running on a storage volume that supports backup, such as Amazon Elastic Block Store, back up etcd data by taking a snapshot of the storage volume. - -## Scaling up etcd clusters +If etcd is running on a storage volume that supports backup, such as Amazon +Elastic Block Store, back up etcd data by taking a snapshot of the storage +volume. 
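+
+As an illustrative sketch only (not part of Kubernetes or etcd tooling), if the
+etcd data directory lives on an Amazon EBS volume, a backup could be taken with
+the AWS CLI; the volume ID below is a placeholder:
+
+```shell
+# Create a point-in-time snapshot of the EBS volume that backs the etcd data
+# directory. Replace the volume ID with the ID of your etcd data volume.
+aws ec2 create-snapshot --volume-id vol-0123456789abcdef0 \
+  --description "etcd data volume backup"
+```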
-Scaling up etcd clusters increases availability by trading off performance. Scaling does not increase cluster performance nor capability. A general rule is not to scale up or down etcd clusters. Do not configure any auto scaling groups for etcd clusters. It is highly recommended to always run a static five-member etcd cluster for production Kubernetes clusters at any officially supported scale.
+### Snapshot using etcdctl options
 
-A reasonable scaling is to upgrade a three-member cluster to a five-member one, when more reliability is desired. See [etcd Reconfiguration Documentation](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/runtime-configuration.md#remove-a-member) for information on how to add members into an existing cluster.
+We can also take the snapshot using the various options given by etcdctl. For example, running
 
-## Restoring an etcd cluster
+```shell
+ETCDCTL_API=3 etcdctl -h
+```
 
-etcd supports restoring from snapshots that are taken from an etcd process of the [major.minor](http://semver.org/) version. Restoring a version from a different patch version of etcd also is supported. A restore operation is employed to recover the data of a failed cluster.
+lists the options available from etcdctl. You can take a snapshot by specifying
+the endpoint, certificates, and so on, as shown below:
 
-Before starting the restore operation, a snapshot file must be present. It can either be a snapshot file from a previous backup operation, or from a remaining [data directory](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/configuration.md#--data-dir). For more information and examples on restoring a cluster from a snapshot file, see [etcd disaster recovery documentation](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/recovery.md#restoring-a-cluster).
-
-If the access URLs of the restored cluster is changed from the previous cluster, the Kubernetes API server must be reconfigured accordingly. In this case, restart Kubernetes API server with the flag `--etcd-servers=$NEW_ETCD_CLUSTER` instead of the flag `--etcd-servers=$OLD_ETCD_CLUSTER`. Replace `$NEW_ETCD_CLUSTER` and `$OLD_ETCD_CLUSTER` with the respective IP addresses. If a load balancer is used in front of an etcd cluster, you might need to update the load balancer instead.
-
-If the majority of etcd members have permanently failed, the etcd cluster is considered failed. In this scenario, Kubernetes cannot make any changes to its current state. Although the scheduled pods might continue to run, no new pods can be scheduled. In such cases, recover the etcd cluster and potentially reconfigure Kubernetes API server to fix the issue.
-
-{{< note >}}
-If any API servers are running in your cluster, you should not attempt to restore instances of etcd.
-Instead, follow these steps to restore etcd:
-
-- stop *all* kube-apiserver instances
-- restore state in all etcd instances
-- restart all kube-apiserver instances
-
-We also recommend restarting any components (e.g. kube-scheduler, kube-controller-manager, kubelet) to ensure that they don't
-rely on some stale data. Note that in practice, the restore takes a bit of time.
-During the restoration, critical components will lose leader lock and restart themselves.
-{{< /note >}}
-
-## Upgrading and rolling back etcd clusters
+```shell
+ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \
+  --cacert=<trusted-ca-file> --cert=<cert-file> --key=<key-file> \
+  snapshot save <backup-file-location>
+```
+where `trusted-ca-file`, `cert-file` and `key-file` can be obtained from the description of the etcd Pod.
 
-As of Kubernetes v1.13.0, etcd2 is no longer supported as a storage backend for
-new or existing Kubernetes clusters. The timeline for Kubernetes support for
-etcd2 and etcd3 is as follows:
+## Scaling up etcd clusters
 
-- Kubernetes v1.0: etcd2 only
-- Kubernetes v1.5.1: etcd3 support added, new clusters still default to etcd2
-- Kubernetes v1.6.0: new clusters created with `kube-up.sh` default to etcd3,
-  and `kube-apiserver` defaults to etcd3
-- Kubernetes v1.9.0: deprecation of etcd2 storage backend announced
-- Kubernetes v1.13.0: etcd2 storage backend removed, `kube-apiserver` will
-  refuse to start with `--storage-backend=etcd2`, with the
-  message `etcd2 is no longer a supported storage backend`
+Scaling up etcd clusters increases availability by trading off performance.
+Scaling does not increase cluster performance nor capability. A general rule
+is not to scale up or down etcd clusters. Do not configure any auto scaling
+groups for etcd clusters. It is highly recommended to always run a static
+five-member etcd cluster for production Kubernetes clusters at any officially
+supported scale.
 
-Before upgrading a v1.12.x kube-apiserver using `--storage-backend=etcd2` to
-v1.13.x, etcd v2 data must be migrated to the v3 storage backend and
-kube-apiserver invocations must be changed to use `--storage-backend=etcd3`.
+A reasonable scaling is to upgrade a three-member cluster to a five-member
+one, when more reliability is desired. See
+[etcd reconfiguration documentation](https://etcd.io/docs/current/op-guide/runtime-configuration/#remove-a-member)
+for information on how to add members into an existing cluster.
 
-The process for migrating from etcd2 to etcd3 is highly dependent on how the
-etcd cluster was deployed and configured, as well as how the Kubernetes
-cluster was deployed and configured. We recommend that you consult your cluster
-provider's documentation to see if there is a predefined solution.
+## Restoring an etcd cluster
 
-If your cluster was created via `kube-up.sh` and is still using etcd2 as its
-storage backend, please consult the [Kubernetes v1.12 etcd cluster upgrade docs](https://v1-12.docs.kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#upgrading-and-rolling-back-etcd-clusters)
+etcd supports restoring from snapshots that are taken from an etcd process of
+the [major.minor](http://semver.org/) version. Restoring a version from a
+different patch version of etcd also is supported. A restore operation is
+employed to recover the data of a failed cluster.
 
-## Known issue: etcd client balancer with secure endpoints
+Before starting the restore operation, a snapshot file must be present. It can
+either be a snapshot file from a previous backup operation, or from a remaining
+[data directory](https://etcd.io/docs/current/op-guide/configuration/#--data-dir).
+Here is an example:
 
-The etcd v3 client, released in etcd v3.3.13 or earlier, has a [critical bug](https://github.com/kubernetes/kubernetes/issues/72102) which affects the kube-apiserver and HA deployments. The etcd client balancer failover does not properly work against secure endpoints. As a result, etcd servers may fail or disconnect briefly from the kube-apiserver.
This affects kube-apiserver HA deployments.
+```shell
+ETCDCTL_API=3 etcdctl --endpoints 10.2.0.9:2379 snapshot restore snapshotdb
+```
 
-The fix was made in [etcd v3.4](https://github.com/etcd-io/etcd/pull/10911) (and backported to v3.3.14 or later): the new client now creates its own credential bundle to correctly set authority target in dial function.
+For more information and examples on restoring a cluster from a snapshot file, see
+[etcd disaster recovery documentation](https://etcd.io/docs/current/op-guide/recovery/#restoring-a-cluster).
 
-Because the fix requires gRPC dependency upgrade (to v1.23.0), downstream Kubernetes [did not backport etcd upgrades](https://github.com/kubernetes/kubernetes/issues/72102#issuecomment-526645978). Which means the [etcd fix in kube-apiserver](https://github.com/etcd-io/etcd/pull/10911/commits/db61ee106ca9363ba3f188ecf27d1a8843da33ab) is only available from Kubernetes 1.16.
+If the access URLs of the restored cluster are changed from the previous
+cluster, the Kubernetes API server must be reconfigured accordingly. In this
+case, restart Kubernetes API servers with the flag
+`--etcd-servers=$NEW_ETCD_CLUSTER` instead of the flag
+`--etcd-servers=$OLD_ETCD_CLUSTER`. Replace `$NEW_ETCD_CLUSTER` and
+`$OLD_ETCD_CLUSTER` with the respective IP addresses. If a load balancer is
+used in front of an etcd cluster, you might need to update the load balancer
+instead.
 
-To urgently fix this bug for Kubernetes 1.15 or earlier, build a custom kube-apiserver. You can make local changes to [`vendor/google.golang.org/grpc/credentials/credentials.go`](https://github.com/kubernetes/kubernetes/blob/7b85be021cd2943167cd3d6b7020f44735d9d90b/vendor/google.golang.org/grpc/credentials/credentials.go#L135) with [etcd@db61ee106](https://github.com/etcd-io/etcd/pull/10911/commits/db61ee106ca9363ba3f188ecf27d1a8843da33ab).
+If the majority of etcd members have permanently failed, the etcd cluster is
+considered failed. In this scenario, Kubernetes cannot make any changes to its
+current state. Although the scheduled pods might continue to run, no new pods
+can be scheduled. In such cases, recover the etcd cluster and potentially
+reconfigure Kubernetes API servers to fix the issue.
 
-See ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available"](https://github.com/kubernetes/kubernetes/issues/72102).
+{{< note >}}
+If any API servers are running in your cluster, you should not attempt to
+restore instances of etcd. Instead, follow these steps to restore etcd:
+- stop *all* API server instances
+- restore state in all etcd instances
+- restart all API server instances
+We also recommend restarting any components (e.g. `kube-scheduler`,
+`kube-controller-manager`, `kubelet`) to ensure that they don't rely on some
+stale data. Note that in practice, the restore takes a bit of time. During the
+restoration, critical components will lose their leader lock and restart themselves.
+{{< /note >}}
diff --git a/content/en/docs/tasks/administer-cluster/controller-manager-leader-migration.md b/content/en/docs/tasks/administer-cluster/controller-manager-leader-migration.md
new file mode 100644
index 0000000000000..8b141f9da0fbd
--- /dev/null
+++ b/content/en/docs/tasks/administer-cluster/controller-manager-leader-migration.md
@@ -0,0 +1,118 @@
+---
+reviewers:
+- jpbetz
+- cheftako
+title: "Migrate Replicated Control Plane To Use Cloud Controller Manager"
+linkTitle: "Migrate Replicated Control Plane To Use Cloud Controller Manager"
+content_type: task
+---
+
+
+
+{{< feature-state state="alpha" for_k8s_version="v1.21" >}}
+
+{{< glossary_definition term_id="cloud-controller-manager" length="all" prepend="The cloud-controller-manager is">}}
+
+## Background
+
+As part of the [cloud provider extraction effort](https://kubernetes.io/blog/2019/04/17/the-future-of-cloud-providers-in-kubernetes/), all cloud specific controllers must be moved out of the `kube-controller-manager`. All existing clusters that run cloud controllers in the `kube-controller-manager` must migrate to instead run the controllers in a cloud provider specific `cloud-controller-manager`.
+
+Leader Migration provides a mechanism in which HA clusters can safely migrate "cloud specific" controllers between the `kube-controller-manager` and the `cloud-controller-manager` via a shared resource lock between the two components while upgrading the replicated control plane. For a single-node control plane, or if unavailability of controller managers can be tolerated during the upgrade, Leader Migration is not needed and this guide can be ignored.
+
+Leader Migration is an alpha feature that is disabled by default. It can be enabled by setting the feature gate `ControllerManagerLeaderMigration` plus the `--enable-leader-migration` flag on `kube-controller-manager` or `cloud-controller-manager`. Leader Migration only applies during the upgrade and can be safely disabled or left enabled after the upgrade is complete.
+
+This guide walks you through the manual process of upgrading the control plane from `kube-controller-manager` with a built-in cloud provider to running both `kube-controller-manager` and `cloud-controller-manager`. If you use a tool to administer the cluster, please refer to the documentation of the tool and the cloud provider for more details.
+
+## {{% heading "prerequisites" %}}
+
+It is assumed that the control plane is running Kubernetes version N and is to be upgraded to version N + 1. Although it is possible to migrate within the same version, ideally the migration should be performed as part of an upgrade so that changes of configuration can be aligned to releases. The exact versions of N and N + 1 depend on each cloud provider. For example, if a cloud provider builds a `cloud-controller-manager` to work with Kubernetes 1.22, then N can be 1.21 and N + 1 can be 1.22.
+
+The control plane nodes should run `kube-controller-manager` with Leader Election enabled through `--leader-elect=true`. As of version N, an in-tree cloud provider must be set with the `--cloud-provider` flag and `cloud-controller-manager` should not yet be deployed.
+
+The out-of-tree cloud provider must have built a `cloud-controller-manager` with a Leader Migration implementation. If the cloud provider imports `k8s.io/cloud-provider` and `k8s.io/controller-manager` of version v0.21.0 or later, Leader Migration will be available.
+
+This guide assumes that kubelet of each control plane node starts `kube-controller-manager` and `cloud-controller-manager` as static pods defined by their manifests. If the components run in a different setting, please adjust the steps accordingly.
+
+For authorization, this guide assumes that the cluster uses RBAC. If another authorization mode grants permissions to `kube-controller-manager` and `cloud-controller-manager` components, please grant the needed access in a way that matches the mode.
+
+
+
+### Grant access to Migration Lease
+
+The default permissions of the controller manager allow only access to their main Lease. In order for the migration to work, access to another Lease is required.
+
+You can grant `kube-controller-manager` full access to the leases API by modifying the `system::leader-locking-kube-controller-manager` role. This task guide assumes that the name of the migration lease is `cloud-provider-extraction-migration`.
+
+`kubectl patch -n kube-system role 'system::leader-locking-kube-controller-manager' -p '{"rules": [ {"apiGroups":[ "coordination.k8s.io"], "resources": ["leases"], "resourceNames": ["cloud-provider-extraction-migration"], "verbs": ["create", "list", "get", "update"] } ]}' --type=merge`
+
+Do the same to the `system::leader-locking-cloud-controller-manager` role.
+
+`kubectl patch -n kube-system role 'system::leader-locking-cloud-controller-manager' -p '{"rules": [ {"apiGroups":[ "coordination.k8s.io"], "resources": ["leases"], "resourceNames": ["cloud-provider-extraction-migration"], "verbs": ["create", "list", "get", "update"] } ]}' --type=merge`
+
+### Initial Leader Migration configuration
+
+Leader Migration requires a configuration file representing the state of controller-to-manager assignment. At this moment, with an in-tree cloud provider, `kube-controller-manager` runs `route`, `service`, and `cloud-node-lifecycle`. The following example configuration shows the assignment.
+
+```yaml
+kind: LeaderMigrationConfiguration
+apiVersion: controllermanager.config.k8s.io/v1alpha1
+leaderName: cloud-provider-extraction-migration
+resourceLock: leases
+controllerLeaders:
+  - name: route
+    component: kube-controller-manager
+  - name: service
+    component: kube-controller-manager
+  - name: cloud-node-lifecycle
+    component: kube-controller-manager
+```
+
+On each control plane node, save the content to `/etc/leadermigration.conf`, and update the manifest of `kube-controller-manager` so that the file is mounted inside the container at the same location. Also, update the same manifest to add the following arguments:
+
+- `--feature-gates=ControllerManagerLeaderMigration=true` to enable Leader Migration, which is an alpha feature
+- `--enable-leader-migration` to enable Leader Migration on the controller manager
+- `--leader-migration-config=/etc/leadermigration.conf` to set the configuration file
+
+Restart `kube-controller-manager` on each node. At this point, `kube-controller-manager` has leader migration enabled and is ready for the migration.
+
+### Deploy Cloud Controller Manager
+
+In version N + 1, the desired state of controller-to-manager assignment can be represented by a new configuration file, shown as follows. Please note that the `component` field of each entry in `controllerLeaders` changes from `kube-controller-manager` to `cloud-controller-manager`.
+
+```yaml
+kind: LeaderMigrationConfiguration
+apiVersion: controllermanager.config.k8s.io/v1alpha1
+leaderName: cloud-provider-extraction-migration
+resourceLock: leases
+controllerLeaders:
+  - name: route
+    component: cloud-controller-manager
+  - name: service
+    component: cloud-controller-manager
+  - name: cloud-node-lifecycle
+    component: cloud-controller-manager
+```
+
+When creating control plane nodes of version N + 1, the content should be deployed to `/etc/leadermigration.conf`. The manifest of `cloud-controller-manager` should be updated to mount the configuration file in the same manner as `kube-controller-manager` of version N. Similarly, add `--feature-gates=ControllerManagerLeaderMigration=true`, `--enable-leader-migration`, and `--leader-migration-config=/etc/leadermigration.conf` to the arguments of `cloud-controller-manager`.
+
+Create a new control plane node of version N + 1 with the updated `cloud-controller-manager` manifest, and with the `--cloud-provider` flag unset for `kube-controller-manager`. `kube-controller-manager` of version N + 1 MUST NOT have Leader Migration enabled because, with an external cloud provider, it does not run the migrated controllers anymore and thus it is not involved in the migration.
+
+Please refer to [Cloud Controller Manager Administration](/docs/tasks/administer-cluster/running-cloud-controller/) for more detail on how to deploy `cloud-controller-manager`.
+
+### Upgrade Control Plane
+
+The control plane now contains nodes of both version N and N + 1. The nodes of version N run `kube-controller-manager` only, and those of version N + 1 run both `kube-controller-manager` and `cloud-controller-manager`. The migrated controllers, as specified in the configuration, are running under either `kube-controller-manager` of version N or `cloud-controller-manager` of version N + 1 depending on which controller manager holds the migration lease. No controller will ever be running under both controller managers at any time.
+
+In a rolling manner, create a new control plane node of version N + 1 and bring down one of version N until the control plane contains only nodes of version N + 1.
+If a rollback from version N + 1 to N is required, add nodes of version N with Leader Migration enabled for `kube-controller-manager` back to the control plane, replacing one of version N + 1 each time until there are only nodes of version N.
+
+### (Optional) Disable Leader Migration {#disable-leader-migration}
+
+Now that the control plane has been upgraded to run both `kube-controller-manager` and `cloud-controller-manager` of version N + 1, Leader Migration has finished its job and can be safely disabled to save one Lease resource. It is safe to re-enable Leader Migration for the rollback in the future.
+
+In a rolling manner, update the manifest of `cloud-controller-manager` to unset both the `--enable-leader-migration` and `--leader-migration-config=` flags, remove the mount of `/etc/leadermigration.conf`, and finally remove `/etc/leadermigration.conf`. To re-enable Leader Migration, recreate the configuration file and add its mount and the flags that enable Leader Migration back to `cloud-controller-manager`.
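+
+At any point during or after the migration, you can check which controller
+manager currently holds the migration lease by inspecting the Lease object
+(a sketch; the lease name matches the `leaderName` used throughout this guide):
+
+```shell
+# Print the holder identity of the migration lease.
+kubectl -n kube-system get lease cloud-provider-extraction-migration \
+  -o jsonpath='{.spec.holderIdentity}'
+```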
+ +## {{% heading "whatsnext" %}} + +- Read the [Controller Manager Leader Migration](https://github.com/kubernetes/enhancements/tree/master/keps/sig-cloud-provider/2436-controller-manager-leader-migration) enhancement proposal diff --git a/content/en/docs/tasks/administer-cluster/coredns.md b/content/en/docs/tasks/administer-cluster/coredns.md index 32d4f7d7ecfe9..54163058c8698 100644 --- a/content/en/docs/tasks/administer-cluster/coredns.md +++ b/content/en/docs/tasks/administer-cluster/coredns.md @@ -36,7 +36,7 @@ For manual deployment or replacement of kube-dns, see the documentation at the In Kubernetes version 1.10 and later, you can also move to CoreDNS when you use `kubeadm` to upgrade a cluster that is using `kube-dns`. In this case, `kubeadm` will generate the CoreDNS configuration -("Corefile") based upon the `kube-dns` ConfigMap, preserving configurations for federation, +("Corefile") based upon the `kube-dns` ConfigMap, preserving configurations for stub domains, and upstream name server. If you are moving from kube-dns to CoreDNS, make sure to set the `CoreDNS` feature gate to `true` @@ -46,8 +46,7 @@ kubeadm upgrade apply v1.11.0 --feature-gates=CoreDNS=true ``` In Kubernetes version 1.13 and later the `CoreDNS` feature gate is removed and CoreDNS -is used by default. Follow the guide outlined [here](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase#cmd-phase-addon) if you want -your upgraded cluster to use kube-dns. +is used by default. In versions prior to 1.11 the Corefile will be **overwritten** by the one created during upgrade. **You should save your existing ConfigMap if you have customized it.** You may re-apply your @@ -56,26 +55,7 @@ customizations after the new ConfigMap is up and running. If you are running CoreDNS in Kubernetes version 1.11 and later, during upgrade, your existing Corefile will be retained. - -### Installing kube-dns instead of CoreDNS with kubeadm - -{{< note >}} -In Kubernetes 1.11, CoreDNS has graduated to General Availability (GA) -and is installed by default. -{{< /note >}} - -{{< warning >}} -In Kubernetes 1.18, kube-dns usage with kubeadm has been deprecated and will be removed in a future version. -{{< /warning >}} - -To install kube-dns on versions prior to 1.13, set the `CoreDNS` feature gate -value to `false`: - -``` -kubeadm init --feature-gates=CoreDNS=false -``` - -For versions 1.13 and later, follow the guide outlined [here](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase#cmd-phase-addon). +In Kubernetes version 1.21, support for `kube-dns` is removed from kubeadm. ## Upgrading CoreDNS diff --git a/content/en/docs/tasks/administer-cluster/declare-network-policy.md b/content/en/docs/tasks/administer-cluster/declare-network-policy.md index fed4a77f9de69..7acbaa9e7d50b 100644 --- a/content/en/docs/tasks/administer-cluster/declare-network-policy.md +++ b/content/en/docs/tasks/administer-cluster/declare-network-policy.md @@ -18,6 +18,7 @@ This document helps you get started using the Kubernetes [NetworkPolicy API](/do Make sure you've configured a network provider with network policy support. 
There are a number of network providers that support NetworkPolicy, including: +* [Antrea](/docs/tasks/administer-cluster/network-policy-provider/antrea-network-policy/) * [Calico](/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy/) * [Cilium](/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy/) * [Kube-router](/docs/tasks/administer-cluster/network-policy-provider/kube-router-network-policy/) diff --git a/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md b/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md index 06de85b305c6d..308b066651200 100644 --- a/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md +++ b/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md @@ -34,7 +34,7 @@ As of Kubernetes v1.12, CoreDNS is the recommended DNS Server, replacing kube-dn originally used kube-dns, you may still have `kube-dns` deployed rather than CoreDNS. {{< note >}} -Both the CoreDNS and kube-dns Service are named `kube-dns` in the `metadata.name` field. +The CoreDNS Service is named `kube-dns` in the `metadata.name` field. This is so that there is greater interoperability with workloads that relied on the legacy `kube-dns` Service name to resolve addresses internal to the cluster. Using a Service named `kube-dns` abstracts away the implementation detail of which DNS provider is running behind that common name. {{< /note >}} @@ -179,17 +179,14 @@ During translation, all FQDN nameservers will be omitted from the CoreDNS config CoreDNS supports the features of kube-dns and more. A ConfigMap created for kube-dns to support `StubDomains`and `upstreamNameservers` translates to the `forward` plugin in CoreDNS. -Similarly, the `Federations` plugin in kube-dns translates to the `federation` plugin in CoreDNS. ### Example -This example ConfigMap for kube-dns specifies federations, stubdomains and upstreamnameservers: +This example ConfigMap for kube-dns specifies stubdomains and upstreamnameservers: ```yaml apiVersion: v1 data: - federations: | - {"foo" : "foo.feddomain.com"} stubDomains: | {"abc.com" : ["1.2.3.4"], "my.cluster.local" : ["2.3.4.5"]} upstreamNameservers: | @@ -199,13 +196,6 @@ kind: ConfigMap The equivalent configuration in CoreDNS creates a Corefile: -* For federations: -``` -federation cluster.local { - foo foo.feddomain.com -} -``` - * For stubDomains: ```yaml abc.com:53 { diff --git a/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md b/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md index 8680abad43e6e..faed4e1cb1c87 100644 --- a/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md +++ b/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md @@ -25,6 +25,12 @@ kube-dns. {{< codenew file="admin/dns/dnsutils.yaml" >}} +{{< note >}} +This example creates a pod in the `default` namespace. DNS name resolution for +services depends on the namespace of the pod. For more information, review +[DNS for Services and Pods](/docs/concepts/services-networking/dns-pod-service/#what-things-get-dns-names). +{{< /note >}} + Use that manifest to create a Pod: ```shell @@ -247,6 +253,27 @@ linux/amd64, go1.10.3, 2e322f6 172.17.0.18:41675 - [07/Sep/2018:15:29:11 +0000] 59925 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd,ra 106 0.000066649s ``` +### Are you in the right namespace for the service? + +DNS queries that don't specify a namespace are limited to the pod's +namespace. 
+
+If the namespaces of the pod and service differ, the DNS query must include
+the namespace of the service.
+
+This query is limited to the pod's namespace:
+```shell
+kubectl exec -i -t dnsutils -- nslookup <service-name>
+```
+
+This query specifies the namespace:
+```shell
+kubectl exec -i -t dnsutils -- nslookup <service-name>.<namespace>
+```
+
+To learn more about name resolution, see
+[DNS for Services and Pods](/docs/concepts/services-networking/dns-pod-service/#what-things-get-dns-names).
+
 ## Known issues
 
 Some Linux distributions (e.g. Ubuntu) use a local DNS resolver by default (systemd-resolved).
diff --git a/content/en/docs/tasks/administer-cluster/enabling-endpointslices.md b/content/en/docs/tasks/administer-cluster/enabling-endpointslices.md
deleted file mode 100644
index d984652de8ec0..0000000000000
--- a/content/en/docs/tasks/administer-cluster/enabling-endpointslices.md
+++ /dev/null
@@ -1,89 +0,0 @@
----
-reviewers:
-- bowei
-- freehan
-title: Enabling EndpointSlices
-content_type: task
----
-
-
-This page provides an overview of enabling EndpointSlices in Kubernetes.
-
-
-
-## {{% heading "prerequisites" %}}
-
-  {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
-
-
-
-
-## Introduction
-
-EndpointSlices provide a scalable and extensible alternative to Endpoints in
-Kubernetes. They build on top of the base of functionality provided by Endpoints
-and extend that in a scalable way. When Services have a large number (>100) of
-network endpoints, they will be split into multiple smaller EndpointSlice
-resources instead of a single large Endpoints resource.
-
-## Enabling EndpointSlices
-
-{{< feature-state for_k8s_version="v1.17" state="beta" >}}
-
-{{< note >}}
-The EndpointSlice resource was designed to address shortcomings in a earlier
-resource: Endpoints. Some Kubernetes components and third-party applications
-continue to use and rely on Endpoints. Whilst that remains the case,
-EndpointSlices should be seen as an addition to Endpoints in a cluster, not as
-an outright replacement.
-{{< /note >}}
-
-EndpointSlice functionality in Kubernetes is made up of several different
-components, most are enabled by default:
-* _The EndpointSlice API_: EndpointSlices are part of the
-  `discovery.k8s.io/v1beta1` API. This is beta and enabled by default since
-  Kubernetes 1.17. All components listed below are dependent on this API being
-  enabled.
-* _The EndpointSlice Controller_: This {{< glossary_tooltip text="controller"
-  term_id="controller" >}} maintains EndpointSlices for Services and the Pods
-  they reference. This is controlled by the `EndpointSlice` feature gate. It has
-  been enabled by default since Kubernetes 1.18.
-* _The EndpointSliceMirroring Controller_: This {{< glossary_tooltip
-  text="controller" term_id="controller" >}} mirrors custom Endpoints to
-  EndpointSlices. This is controlled by the `EndpointSlice` feature gate. It has
-  been enabled by default since Kubernetes 1.19.
-* _Kube-Proxy_: When {{< glossary_tooltip text="kube-proxy" term_id="kube-proxy">}}
-  is configured to use EndpointSlices, it can support higher numbers of Service
-  endpoints. This is controlled by the `EndpointSliceProxying` feature gate on
-  Linux and `WindowsEndpointSliceProxying` on Windows. It has been enabled by
-  default on Linux since Kubernetes 1.19. It is not enabled by default for
-  Windows nodes. To configure kube-proxy to use EndpointSlices on Windows, you
-  can enable the `WindowsEndpointSliceProxying` [feature
-  gate](/docs/reference/command-line-tools-reference/feature-gates/) on
-  kube-proxy.
- - -## API fields - -Some fields in the EndpointSlice API are feature-gated. - -- The `EndpointSliceNodeName` feature gate controls access to the `nodeName` - field. This is an alpha feature that is disabled by default. -- The `EndpointSliceTerminating` feature gate controls access to the `serving` - and `terminating` condition fields. This is an alpha feature that is disabled - by default. - -## Using EndpointSlices - -With EndpointSlices fully enabled in your cluster, you should see corresponding -EndpointSlice resources for each Endpoints resource. In addition to supporting -existing Endpoints functionality, EndpointSlices will allow for greater -scalability and extensibility of network endpoints in your cluster. - -## {{% heading "whatsnext" %}} - - -* Read about [EndpointSlices](/docs/concepts/services-networking/endpoint-slices/) -* Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) - - diff --git a/content/en/docs/tasks/administer-cluster/enabling-service-topology.md b/content/en/docs/tasks/administer-cluster/enabling-service-topology.md index 998bb8b2e5739..8fd39a7e509dd 100644 --- a/content/en/docs/tasks/administer-cluster/enabling-service-topology.md +++ b/content/en/docs/tasks/administer-cluster/enabling-service-topology.md @@ -5,52 +5,51 @@ reviewers: - imroc title: Enabling Service Topology content_type: task +min-kubernetes-server-version: 1.17 --- -This page provides an overview of enabling Service Topology in Kubernetes. +{{< feature-state for_k8s_version="v1.21" state="deprecated" >}} +This feature, specifically the alpha `topologyKeys` field, is deprecated since +Kubernetes v1.21. +[Topology Aware Hints](/docs/concepts/services-networking/topology-aware-hints/), +introduced in Kubernetes v1.21, provide similar functionality. - -## {{% heading "prerequisites" %}} - - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} - - - - -## Introduction - -_Service Topology_ enables a service to route traffic based upon the Node +_Service Topology_ enables a {{< glossary_tooltip term_id="service">}} to route traffic based upon the Node topology of the cluster. For example, a service can specify that traffic be preferentially routed to endpoints that are on the same Node as the client, or in the same availability zone. 
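
For illustration, a Service using the deprecated `topologyKeys` field might look
like the following sketch; the Service name, selector, and port are placeholder
values:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service        # placeholder name
spec:
  selector:
    app: my-app           # placeholder selector
  ports:
    - port: 80
  # Preference order: same node first, then same zone, then any endpoint.
  topologyKeys:
    - "kubernetes.io/hostname"
    - "topology.kubernetes.io/zone"
    - "*"
```

The keys are evaluated in order, and the `"*"` wildcard, if used, must be the
last entry.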
-## Prerequisites
+## {{% heading "prerequisites" %}}
+
+   {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
 
 The following prerequisites are needed in order to enable topology aware service routing:
 
-  * Kubernetes 1.17 or later
-  * {{< glossary_tooltip text="Kube-proxy" term_id="kube-proxy" >}} running in iptables mode or IPVS mode
-  * Enable [Endpoint Slices](/docs/concepts/services-networking/endpoint-slices/)
+  * Kubernetes v1.17 or later
+  * Configure {{< glossary_tooltip text="kube-proxy" term_id="kube-proxy" >}} to run in iptables mode or IPVS mode
+
+
+
 
 ## Enable Service Topology
 
-{{< feature-state for_k8s_version="v1.17" state="alpha" >}}
+{{< feature-state for_k8s_version="v1.21" state="deprecated" >}}
 
-To enable service topology, enable the `ServiceTopology` and `EndpointSlice` feature gate for all Kubernetes components:
+To enable service topology, enable the `ServiceTopology`
+[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) for all Kubernetes components:
 
 ```
---feature-gates="ServiceTopology=true,EndpointSlice=true"
+--feature-gates="ServiceTopology=true"
```
 
-
 ## {{% heading "whatsnext" %}}
 
-
-* Read about the [Service Topology](/docs/concepts/services-networking/service-topology) concept
-* Read about [Endpoint Slices](/docs/concepts/services-networking/endpoint-slices)
+* Read about [Topology Aware Hints](/docs/concepts/services-networking/topology-aware-hints/), the replacement for the `topologyKeys` field.
+* Read about [EndpointSlices](/docs/concepts/services-networking/endpoint-slices/)
+* Read about the [Service Topology](/docs/concepts/services-networking/service-topology/) concept
 * Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/)
diff --git a/content/en/docs/tasks/administer-cluster/enabling-topology-aware-hints.md b/content/en/docs/tasks/administer-cluster/enabling-topology-aware-hints.md
new file mode 100644
index 0000000000000..dadc653f4e196
--- /dev/null
+++ b/content/en/docs/tasks/administer-cluster/enabling-topology-aware-hints.md
@@ -0,0 +1,40 @@
+---
+reviewers:
+- robscott
+title: Enabling Topology Aware Hints
+content_type: task
+min-kubernetes-server-version: 1.21
+---
+
+
+{{< feature-state for_k8s_version="v1.21" state="alpha" >}}
+
+_Topology Aware Hints_ enable topology aware routing with topology hints
+included in {{< glossary_tooltip text="EndpointSlices" term_id="endpoint-slice" >}}.
+This approach tries to keep traffic close to where it originated from;
+you might do this to reduce costs, or to improve network performance.
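
As an illustration, once the feature gate described below is enabled, a Service
can opt in to topology aware hints with the `service.kubernetes.io/topology-aware-hints`
annotation; the Service name and selector here are placeholder values:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service        # placeholder name
  annotations:
    # ask the control plane to populate topology hints in EndpointSlices
    service.kubernetes.io/topology-aware-hints: auto
spec:
  selector:
    app: my-app           # placeholder selector
  ports:
    - port: 80
```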
+
+## {{% heading "prerequisites" %}}
+
+ {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
+
+The following prerequisites are needed in order to enable topology aware hints:
+
+* Configure the {{< glossary_tooltip text="kube-proxy" term_id="kube-proxy" >}} to run in
+  iptables mode or IPVS mode
+* Ensure that you have not disabled EndpointSlices
+
+## Enable Topology Aware Hints
+
+To enable service topology hints, enable the `TopologyAwareHints` [feature
+gate](/docs/reference/command-line-tools-reference/feature-gates/) for the
+kube-apiserver, kube-controller-manager, and kube-proxy:
+
+```
+--feature-gates="TopologyAwareHints=true"
+```
+
+## {{% heading "whatsnext" %}}
+
+* Read about [Topology Aware Hints](/docs/concepts/services-networking/topology-aware-hints) for Services
+* Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/)
diff --git a/content/en/docs/tasks/administer-cluster/extended-resource-node.md b/content/en/docs/tasks/administer-cluster/extended-resource-node.md
index a95a325d5d774..797993f116f67 100644
--- a/content/en/docs/tasks/administer-cluster/extended-resource-node.md
+++ b/content/en/docs/tasks/administer-cluster/extended-resource-node.md
@@ -54,7 +54,7 @@ Host: k8s-master:8080
```
 
 Note that Kubernetes does not need to know what a dongle is or what a dongle is for.
-The preceding PATCH request just tells Kubernetes that your Node has four things that
+The preceding PATCH request tells Kubernetes that your Node has four things that
 you call dongles.
 
 Start a proxy, so that you can easily send requests to the Kubernetes API server:
diff --git a/content/en/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md b/content/en/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md
index 0d5b6d4ebe93a..a9aaaacd46adc 100644
--- a/content/en/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md
+++ b/content/en/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md
@@ -9,24 +9,17 @@ content_type: concept
 
 
 
-In addition to Kubernetes core components like api-server, scheduler, controller-manager running on a master machine
-there are a number of add-ons which, for various reasons, must run on a regular cluster node (rather than the Kubernetes master).
+Kubernetes core components such as the API server, scheduler, and controller-manager run on a control plane node. However, add-ons must run on a regular cluster node.
 Some of these add-ons are critical to a fully functional cluster, such as metrics-server, DNS, and UI.
 A cluster may stop working properly if a critical add-on is evicted (either manually or as a side effect of another operation like upgrade)
 and becomes pending (for example when the cluster is highly utilized and either there are other pending pods that schedule into the space
 vacated by the evicted critical add-on pod or the amount of resources available on the node changed for some other reason).
 
 Note that marking a pod as critical is not meant to prevent evictions entirely; it only prevents the pod from becoming permanently unavailable.
-For static pods, this means it can't be evicted, but for non-static pods, it just means they will always be rescheduled.
-
-
-
+A static pod marked as critical can't be evicted. However, non-static pods marked as critical are always rescheduled.
- ### Marking pod as critical To mark a Pod as critical, set priorityClassName for that Pod to `system-cluster-critical` or `system-node-critical`. `system-node-critical` is the highest available priority, even higher than `system-cluster-critical`. - - diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md b/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md index 3897d6566ea7d..aad5f139091c8 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md @@ -1,7 +1,9 @@ --- reviewers: -- michmike -- patricklang +- jayunit100 +- jsturtevant +- marosset +- perithompson title: Adding Windows nodes min-kubernetes-server-version: 1.17 content_type: tutorial @@ -72,7 +74,7 @@ Once you have a Linux-based Kubernetes control-plane node you are ready to choos "Network": "10.244.0.0/16", "Backend": { "Type": "vxlan", - "VNI" : 4096, + "VNI": 4096, "Port": 4789 } } diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver.md b/content/en/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver.md new file mode 100644 index 0000000000000..9ed45bd07f1db --- /dev/null +++ b/content/en/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver.md @@ -0,0 +1,127 @@ +--- +title: Configuring a cgroup driver +content_type: task +weight: 10 +--- + + + +This page explains how to configure the kubelet cgroup driver to match the container +runtime cgroup driver for kubeadm clusters. + +## {{% heading "prerequisites" %}} + +You should be familiar with the Kubernetes +[container runtime requirements](/docs/setup/production-environment/container-runtimes). + + + +## Configuring the container runtime cgroup driver + +The [Container runtimes](/docs/setup/production-environment/container-runtimes) page +explains that the `systemd` driver is recommended for kubeadm based setups instead +of the `cgroupfs` driver, because kubeadm manages the kubelet as a systemd service. + +The page also provides details on how to setup a number of different container runtimes with the +`systemd` driver by default. + +## Configuring the kubelet cgroup driver + +kubeadm allows you to pass a `KubeletConfiguration` structure during `kubeadm init`. +This `KubeletConfiguration` can include the `cgroupDriver` field which controls the cgroup +driver of the kubelet. + +{{< note >}} + +{{< feature-state for_k8s_version="v1.21" state="stable" >}} + +If the user is not setting the `cgroupDriver` field under `KubeletConfiguration`, +`kubeadm init` will default it to `systemd`. +{{< /note >}} + +A minimal example of configuring the field explicitly: + +```yaml +# kubeadm-config.yaml +kind: ClusterConfiguration +apiVersion: kubeadm.k8s.io/v1beta2 +kubernetesVersion: v1.21.0 +--- +kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1 +cgroupDriver: systemd +``` + +Such a configuration file can then be passed to the kubeadm command: + +```shell +kubeadm init --config kubeadm-config.yaml +``` + +{{< note >}} +Kubeadm uses the same `KubeletConfiguration` for all nodes in the cluster. +The `KubeletConfiguration` is stored in a [ConfigMap](/docs/concepts/configuration/configmap) +object under the `kube-system` namespace. + +Executing the sub commands `init`, `join` and `upgrade` would result in kubeadm +writing the `KubeletConfiguration` as a file under `/var/lib/kubelet/config.yaml` +and passing it to the local node kubelet. 
+{{< /note >}}
+
+## Using the `cgroupfs` driver
+
+As this guide explains, using the `cgroupfs` driver with kubeadm is not recommended.
+
+To continue using `cgroupfs` and to prevent `kubeadm upgrade` from modifying the
+`KubeletConfiguration` cgroup driver on existing setups, you must be explicit
+about its value. This applies to a case where you do not wish future versions
+of kubeadm to apply the `systemd` driver by default.
+
+See the below section on "Modify the kubelet ConfigMap" for details on
+how to be explicit about the value.
+
+If you wish to configure a container runtime to use the `cgroupfs` driver,
+you must refer to the documentation of the container runtime of your choice.
+
+## Migrating to the `systemd` driver
+
+To change the cgroup driver of an existing kubeadm cluster to `systemd` in-place,
+a similar procedure to a kubelet upgrade is required. This must include both
+steps outlined below.
+
+{{< note >}}
+Alternatively, it is possible to replace the old nodes in the cluster with new ones
+that use the `systemd` driver. This requires executing only the first step below
+before joining the new nodes and ensuring the workloads can safely move to the new
+nodes before deleting the old nodes.
+{{< /note >}}
+
+### Modify the kubelet ConfigMap
+
+- Find the kubelet ConfigMap name using `kubectl get cm -n kube-system | grep kubelet-config`.
+- Call `kubectl edit cm kubelet-config-x.yy -n kube-system` (replace `x.yy` with
+the Kubernetes version).
+- Either modify the existing `cgroupDriver` value or add a new field that looks like this:
+
+  ```yaml
+  cgroupDriver: systemd
+  ```
+  This field must be present under the `kubelet:` section of the ConfigMap.
+
+### Update the cgroup driver on all nodes
+
+For each node in the cluster:
+
+- [Drain the node](/docs/tasks/administer-cluster/safely-drain-node) using `kubectl drain <node-name> --ignore-daemonsets`
+- Stop the kubelet using `systemctl stop kubelet`
+- Stop the container runtime
+- Modify the container runtime cgroup driver to `systemd`
+- Set `cgroupDriver: systemd` in `/var/lib/kubelet/config.yaml`
+- Start the container runtime
+- Start the kubelet using `systemctl start kubelet`
+- [Uncordon the node](/docs/tasks/administer-cluster/safely-drain-node) using `kubectl uncordon <node-name>`
+
+Execute these steps on nodes one at a time to ensure workloads
+have sufficient time to schedule on different nodes.
+
+Once the process is complete, ensure that all nodes and workloads are healthy.
diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md
index d9d8a5929ea9c..62e66d1a8f5ec 100644
--- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md
+++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md
@@ -35,7 +35,7 @@ and kubeadm will use this CA for signing the rest of the certificates.
 
 ## External CA mode {#external-ca-mode}
 
-It is also possible to provide just the `ca.crt` file and not the
+It is also possible to provide only the `ca.crt` file and not the
 `ca.key` file (this is only available for the root CA file, not other cert pairs).
 If all other certificates and kubeconfig files are in place, kubeadm recognizes
 this condition and activates the "External CA" mode. kubeadm will proceed without the
@@ -170,36 +170,7 @@ controllerManager:
 
 ### Create certificate signing requests (CSR)
 
-You can create the certificate signing requests for the Kubernetes certificates API with `kubeadm certs renew --use-api`.
- -If you set up an external signer such as [cert-manager](https://github.com/jetstack/cert-manager), certificate signing requests (CSRs) are automatically approved. -Otherwise, you must manually approve certificates with the [`kubectl certificate`](/docs/setup/best-practices/certificates/) command. -The following kubeadm command outputs the name of the certificate to approve, then blocks and waits for approval to occur: - -```shell -sudo kubeadm certs renew apiserver --use-api & -``` -The output is similar to this: -``` -[1] 2890 -[certs] certificate request "kubeadm-cert-kube-apiserver-ld526" created -``` - -### Approve certificate signing requests (CSR) - -If you set up an external signer, certificate signing requests (CSRs) are automatically approved. - -Otherwise, you must manually approve certificates with the [`kubectl certificate`](/docs/setup/best-practices/certificates/) command. e.g. - -```shell -kubectl certificate approve kubeadm-cert-kube-apiserver-ld526 -``` -The output is similar to this: -```shell -certificatesigningrequest.certificates.k8s.io/kubeadm-cert-kube-apiserver-ld526 approved -``` - -You can view a list of pending certificates with `kubectl get csr`. +See [Create CertificateSigningRequest](/docs/reference/access-authn-authz/certificate-signing-requests/#create-certificatesigningrequest) for creating CSRs with the Kubernetes API. ## Renew certificates with external CA @@ -236,3 +207,71 @@ After a certificate is signed using your preferred method, the certificate and t Kubeadm does not support rotation or replacement of CA certificates out of the box. For more information about manual rotation or replacement of CA, see [manual rotation of CA certificates](/docs/tasks/tls/manual-rotation-of-ca-certificates/). + +## Enabling signed kubelet serving certificates {#kubelet-serving-certs} + +By default the kubelet serving certificate deployed by kubeadm is self-signed. +This means a connection from external services like the +[metrics-server](https://github.com/kubernetes-sigs/metrics-server) to a +kubelet cannot be secured with TLS. + +To configure the kubelets in a new kubeadm cluster to obtain properly signed serving +certificates you must pass the following minimal configuration to `kubeadm init`: + +```yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +serverTLSBootstrap: true +``` + +If you have already created the cluster you must adapt it by doing the following: + - Find and edit the `kubelet-config-{{< skew latestVersion >}}` ConfigMap in the `kube-system` namespace. +In that ConfigMap, the `config` key has a +[KubeletConfiguration](/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) +document as its value. Edit the KubeletConfiguration document to set `serverTLSBootstrap: true`. +- On each node, add the `serverTLSBootstrap: true` field in `/var/lib/kubelet/config.yaml` +and restart the kubelet with `systemctl restart kubelet` + +The field `serverTLSBootstrap: true` will enable the bootstrap of kubelet serving +certificates by requesting them from the `certificates.k8s.io` API. One known limitation +is that the CSRs (Certificate Signing Requests) for these certificates cannot be automatically +approved by the default signer in the kube-controller-manager - +[`kubernetes.io/kubelet-serving`](https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers). 
+This will require action from the user or a third party controller.
+
+These CSRs can be viewed using:
+
+```shell
+kubectl get csr
+NAME        AGE     SIGNERNAME                        REQUESTOR                      CONDITION
+csr-9wvgt   112s    kubernetes.io/kubelet-serving     system:node:worker-1           Pending
+csr-lz97v   1m58s   kubernetes.io/kubelet-serving     system:node:control-plane-1    Pending
+```
+
+To approve them you can do the following:
+```shell
+kubectl certificate approve <csr-name>
+```
+
+By default, these serving certificates will expire after one year. Kubeadm sets the
+`KubeletConfiguration` field `rotateCertificates` to `true`, which means that close
+to expiration a new set of CSRs for the serving certificates will be created and must
+be approved to complete the rotation. To understand more, see
+[Certificate Rotation](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#certificate-rotation).
+
+If you are looking for a solution for automatic approval of these CSRs, it is recommended
+that you contact your cloud provider and ask if they have a CSR signer that verifies
+the node identity with an out-of-band mechanism.
+
+{{% thirdparty-content %}}
+
+Third party custom controllers can be used:
+- [kubelet-rubber-stamp](https://github.com/kontena/kubelet-rubber-stamp)
+
+Such a controller is not a secure mechanism unless it verifies not only the CommonName
+in the CSR but also the requested IPs and domain names. This would prevent
+a malicious actor with access to a kubelet client certificate from creating
+CSRs requesting serving certificates for any IP or domain name.
diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md
index 5af9d27b82488..da07ed672e14d 100644
--- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md
+++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md
@@ -37,7 +37,7 @@ The upgrade workflow at high level is the following:
 
 ### Additional information
 
-- [Draining nodes](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) before kubelet MINOR version
+- [Draining nodes](/docs/tasks/administer-cluster/safely-drain-node/) before kubelet MINOR version
   upgrades is required. In the case of control plane nodes, they could be running CoreDNS Pods or other critical workloads.
 - All containers are restarted after upgrade, because the container spec hash value is changed.
@@ -328,7 +328,7 @@ and post-upgrade manifest file for a certain component, a backup file for it will
 - Makes sure the control plane images are available or available to pull to the machine.
 - Generates replacements and/or uses user supplied overwrites if component configs require version upgrades.
 - Upgrades the control plane components or rollbacks if any of them fails to come up.
-- Applies the new `kube-dns` and `kube-proxy` manifests and makes sure that all necessary RBAC rules are created.
+- Applies the new `CoreDNS` and `kube-proxy` manifests and makes sure that all necessary RBAC rules are created.
 - Creates new certificate and key files of the API server and backs up old files if they're about to expire in 180 days.
`kubeadm upgrade node` does the following on additional control plane nodes:
diff --git a/content/en/docs/tasks/administer-cluster/kubelet-config-file.md b/content/en/docs/tasks/administer-cluster/kubelet-config-file.md
index 54cd8373705b2..b49c84220a9c6 100644
--- a/content/en/docs/tasks/administer-cluster/kubelet-config-file.md
+++ b/content/en/docs/tasks/administer-cluster/kubelet-config-file.md
@@ -7,31 +7,21 @@ content_type: task
 ---
 
 
-{{< feature-state for_k8s_version="v1.10" state="beta" >}}
 A subset of the Kubelet's configuration parameters may be set via an on-disk
 config file, as a substitute for command-line flags.
-This functionality is considered beta in v1.10.
 
 Providing parameters via a config file is the recommended approach because
 it simplifies node deployment and configuration management.
 
-
-## {{% heading "prerequisites" %}}
-
-
-- A v1.10 or higher Kubelet binary must be installed for beta functionality.
-
-
-
 ## Create the config file
 
 The subset of the Kubelet's configuration that can be configured via a file
-is defined by the `KubeletConfiguration` struct
-[here (v1beta1)](https://github.com/kubernetes/kubernetes/blob/{{< param "docsbranch" >}}/staging/src/k8s.io/kubelet/config/v1beta1/types.go).
+is defined by the
+[`KubeletConfiguration`](/docs/reference/config-api/kubelet-config.v1beta1/)
+struct.
 
 The configuration file must be a JSON or YAML representation of the parameters
 in this struct. Make sure the Kubelet has read permissions on the file.
@@ -53,6 +43,11 @@ For a trick to generate a configuration file from a live node, see
 
 ## Start a Kubelet process configured via the config file
 
+{{< note >}}
+If you use kubeadm to initialize your cluster, provide the kubelet configuration while creating your cluster with `kubeadm init`.
+See [configuring kubelet using kubeadm](/docs/setup/production-environment/tools/kubeadm/kubelet-integration/) for details.
+{{< /note >}}
+
 Start the Kubelet with the `--config` flag set to the path of the Kubelet's
 config file. The Kubelet will then load its config from this file.
 
@@ -68,8 +63,6 @@ If `--config` is provided and the values are not specified via the command
 line, defaults for the `KubeletConfiguration`
 version apply.
 In the above example, this version is `kubelet.config.k8s.io/v1beta1`.
 
-
-
 ## Relationship to Dynamic Kubelet Config
 
@@ -78,6 +71,9 @@ If you are using the [Dynamic Kubelet Configuration](/docs/tasks/administer-clus
 feature, the combination of configuration provided via `--config` and any flags
 which override these values is considered the default "last known good" configuration
 by the automatic rollback mechanism.
 
+## {{% heading "whatsnext" %}}
 
-
+- Learn more about kubelet configuration by checking the
+  [`KubeletConfiguration`](/docs/reference/config-api/kubelet-config.v1beta1/)
+  reference.
diff --git a/content/en/docs/tasks/administer-cluster/limit-storage-consumption.md b/content/en/docs/tasks/administer-cluster/limit-storage-consumption.md
index 89f130a010244..c982a9cb7cc40 100644
--- a/content/en/docs/tasks/administer-cluster/limit-storage-consumption.md
+++ b/content/en/docs/tasks/administer-cluster/limit-storage-consumption.md
@@ -5,7 +5,7 @@ content_type: task
 
 
-This example demonstrates an easy way to limit the amount of storage consumed in a namespace.
+This example demonstrates how to limit the amount of storage consumed in a namespace.
The following resources are used in the demonstration: [ResourceQuota](/docs/concepts/policy/resource-quotas/),
 [LimitRange](/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/),
diff --git a/content/en/docs/tasks/administer-cluster/memory-manager.md b/content/en/docs/tasks/administer-cluster/memory-manager.md
new file mode 100644
index 0000000000000..60f2ded206325
--- /dev/null
+++ b/content/en/docs/tasks/administer-cluster/memory-manager.md
@@ -0,0 +1,380 @@
+---
+title: Memory Manager
+
+reviewers:
+- klueska
+- derekwaynecarr
+
+content_type: task
+min-kubernetes-server-version: v1.21
+---
+
+
+
+{{< feature-state state="alpha" for_k8s_version="v1.21" >}}
+
+The Kubernetes *Memory Manager* enables guaranteed memory (and hugepages) allocation for pods in the `Guaranteed` {{< glossary_tooltip text="QoS class" term_id="qos-class" >}}.
+
+The Memory Manager employs a hint-generation protocol to yield the most suitable NUMA affinity for a pod. The Memory Manager feeds the central manager (*Topology Manager*) with these affinity hints. Based on both the hints and Topology Manager policy, the pod is rejected or admitted to the node.
+
+Moreover, the Memory Manager ensures that the memory which a pod requests is allocated from a minimum number of NUMA nodes.
+
+The Memory Manager is only pertinent to Linux-based hosts.
+
+## {{% heading "prerequisites" %}}
+
+{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
+
+To align memory resources with other requested resources in a Pod Spec:
+- the CPU Manager should be enabled and proper CPU Manager policy should be configured on a Node. See [control CPU Management Policies](/docs/tasks/administer-cluster/cpu-management-policies/);
+- the Topology Manager should be enabled and proper Topology Manager policy should be configured on a Node. See [control Topology Management Policies](/docs/tasks/administer-cluster/topology-manager/).
+
+Support for the Memory Manager requires the `MemoryManager` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to be enabled.
+
+That is, the `kubelet` must be started with the following flag:
+
+`--feature-gates=MemoryManager=true`
+
+## How the Memory Manager Operates
+
+The Memory Manager currently offers the guaranteed memory (and hugepages) allocation for Pods in Guaranteed QoS class. To immediately put the Memory Manager into operation, follow the guidelines in the section [Memory Manager configuration](#memory-manager-configuration), and subsequently, prepare and deploy a `Guaranteed` pod as illustrated in the section [Placing a Pod in the Guaranteed QoS class](#placing-a-pod-in-the-guaranteed-qos-class).
+
+The Memory Manager is a Hint Provider, and it provides topology hints for the Topology Manager which then aligns the requested resources according to these topology hints. It also enforces `cgroups` (i.e. `cpuset.mems`) for pods. The complete flow diagram concerning pod admission and deployment process is illustrated in [Memory Manager KEP: Design Overview][4] and below:
+
+![Memory Manager in the pod admission and deployment process](/images/docs/memory-manager-diagram.svg)
+
+During this process, the Memory Manager updates its internal counters stored in [Node Map and Memory Maps][2] to manage guaranteed memory allocation.
+
+The Memory Manager updates the Node Map during the startup and runtime as follows.
+
+### Startup
+
+This occurs once a node administrator employs `--reserved-memory` (section [Reserved memory flag](#reserved-memory-flag)). In this case, the Node Map becomes updated to reflect this reservation as illustrated in [Memory Manager KEP: Memory Maps at start-up (with examples)][5].
+
+The administrator must provide the `--reserved-memory` flag when the `static` policy is configured.
+
+### Runtime
+
+Reference [Memory Manager KEP: Memory Maps at runtime (with examples)][6] illustrates how a successful pod deployment affects the Node Map, and it also relates to how potential Out-of-Memory (OOM) situations are handled further by Kubernetes or the operating system.
+
+An important topic in the context of Memory Manager operation is the management of NUMA groups. Each time a pod's memory request exceeds the capacity of a single NUMA node, the Memory Manager attempts to create a group that comprises several NUMA nodes and features extended memory capacity. The problem has been solved as elaborated in [Memory Manager KEP: How to enable the guaranteed memory allocation over many NUMA nodes?][3]. Also, reference [Memory Manager KEP: Simulation - how the Memory Manager works? (by examples)][1] illustrates how the management of groups occurs.
+
+## Memory Manager configuration
+
+Other Managers should be first pre-configured (section [Pre-configuration](#pre-configuration)). Next, the Memory Manager feature should be enabled (section [Enable the Memory Manager feature](#enable-the-memory-manager-feature)) and be run with the `static` policy (section [static policy](#policy-static)). Optionally, some amount of memory can be reserved for system or kubelet processes to increase node stability (section [Reserved memory flag](#reserved-memory-flag)).
+
+### Policies
+
+Memory Manager supports two policies. You can select a policy via the `kubelet` flag `--memory-manager-policy`.
+
+Two policies can be selected:
+
+* `none` (default)
+* `static`
+
+#### none policy {#policy-none}
+
+This is the default policy and does not affect the memory allocation in any way.
+It acts the same as if the Memory Manager is not present at all.
+
+The `none` policy returns a default topology hint. This special hint denotes that the Hint Provider (the Memory Manager in this case) has no preference for NUMA affinity with any resource.
+
+#### static policy {#policy-static}
+
+In the case of the `Guaranteed` pod, the `static` Memory Manager policy returns topology hints relating to the set of NUMA nodes where the memory can be guaranteed, and reserves the memory through updating the internal [NodeMap][2] object.
+
+In the case of the `BestEffort` or `Burstable` pod, the `static` Memory Manager policy sends back the default topology hint as there is no request for the guaranteed memory, and does not reserve the memory in the internal [NodeMap][2] object.
+
+### Reserved memory flag
+
+The [Node Allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/) mechanism is commonly used by node administrators to reserve Kubernetes node system resources for the kubelet or operating system processes in order to enhance the node stability. A dedicated set of flags can be used for this purpose to set the total amount of reserved memory for a node. This pre-configured value is subsequently utilized to calculate the real amount of node's "allocatable" memory available to pods.
+
+The Kubernetes scheduler incorporates "allocatable" to optimize the pod scheduling process. The foregoing flags include `--kube-reserved`, `--system-reserved` and `--eviction-threshold`.
The sum of their values will account for the total amount of reserved memory.
+
+
+A new `--reserved-memory` flag was added to the Memory Manager to allow for this total reserved memory to be split (by a node administrator) and accordingly reserved across many NUMA nodes.
+
+The flag specifies a comma-separated list of memory reservations per NUMA node.
+This parameter is only useful in the context of the Memory Manager feature.
+The Memory Manager will not use this reserved memory for the allocation of container workloads.
+
+For example, if you have a NUMA node "NUMA0" with `10Gi` of memory available, and the `--reserved-memory` was specified to reserve `1Gi` of memory at "NUMA0", the Memory Manager assumes that only `9Gi` is available for containers.
+
+You can omit this parameter; however, you should be aware that the quantity of reserved memory from all NUMA nodes should be equal to the quantity of memory specified by the [Node Allocatable feature](/docs/tasks/administer-cluster/reserve-compute-resources/). If at least one node allocatable parameter is non-zero, you will need to specify `--reserved-memory` for at least one NUMA node. In fact, the `eviction-hard` threshold value is equal to `100Mi` by default, so if the `static` policy is used, `--reserved-memory` is obligatory.
+
+Also, avoid the following configurations:
+1. duplicates, i.e. the same NUMA node or memory type, but with a different value;
+2. setting a zero limit for any of the memory types;
+3. NUMA node IDs that do not exist in the machine hardware;
+4. memory type names different than `memory` or `hugepages-<size>` (hugepages of particular `<size>` should also exist).
+
+Syntax:
+
+`--reserved-memory N:memory-type1=value1,memory-type2=value2,...`
+* `N` (integer) - NUMA node index, e.g. `0`
+* `memory-type` (string) - represents memory type:
+  * `memory` - conventional memory
+  * `hugepages-2Mi` or `hugepages-1Gi` - hugepages
+* `value` (string) - the quantity of reserved memory, e.g. `1Gi`
+
+Example usage:
+
+`--reserved-memory 0:memory=1Gi,hugepages-1Gi=2Gi`
+
+or
+
+`--reserved-memory 0:memory=1Gi --reserved-memory 1:memory=2Gi`
+
+When you specify values for the `--reserved-memory` flag, you must comply with the settings that you previously provided via the Node Allocatable feature flags. That is, the following rule must be obeyed for each memory type:
+
+`sum(reserved-memory(i)) = kube-reserved + system-reserved + eviction-threshold`,
+
+where `i` is an index of a NUMA node.
+
+If you do not follow the formula above, the Memory Manager will show an error on startup.
+
+In other words, the example above illustrates that for the conventional memory (`type=memory`), we reserve `3Gi` in total, i.e.:
+
+`sum(reserved-memory(i)) = reserved-memory(0) + reserved-memory(1) = 1Gi + 2Gi = 3Gi`
+
+An example of kubelet command-line arguments relevant to the node Allocatable configuration:
+* `--kube-reserved=cpu=500m,memory=50Mi`
+* `--system-reserved=cpu=123m,memory=333Mi`
+* `--eviction-hard=memory.available<500Mi`
+
+{{< note >}}
+The default hard eviction threshold is 100MiB, and **not** zero. Remember to increase the quantity of memory that you reserve by setting `--reserved-memory` by that hard eviction threshold. Otherwise, the kubelet will not start the Memory Manager and will display an error.
+{{< /note >}}
+
+Here is an example of a correct configuration:
+
+```shell
+--feature-gates=MemoryManager=true
+--kube-reserved=cpu=4,memory=4Gi
+--system-reserved=cpu=1,memory=1Gi
+--memory-manager-policy=static
+--reserved-memory 0:memory=3Gi --reserved-memory 1:memory=2148Mi
+```
+
+Let us validate the configuration above:
+1. `kube-reserved + system-reserved + eviction-hard(default) = reserved-memory(0) + reserved-memory(1)`
+2. `4GiB + 1GiB + 100MiB = 3GiB + 2148MiB`
+3. `5120MiB + 100MiB = 3072MiB + 2148MiB`
+4. `5220MiB = 5220MiB` (which is correct)
+
+## Placing a Pod in the Guaranteed QoS class
+
+If the selected policy is anything other than `none`, the Memory Manager identifies pods that are in the `Guaranteed` QoS class. The Memory Manager provides specific topology hints to the Topology Manager for each `Guaranteed` pod. For pods in a QoS class other than `Guaranteed`, the Memory Manager provides default topology hints to the Topology Manager.
+
+The following excerpts from pod manifests assign a pod to the `Guaranteed` QoS class.
+
+A pod with integer CPU(s) runs in the `Guaranteed` QoS class when `requests` are equal to `limits`:
+
+```yaml
+spec:
+  containers:
+  - name: nginx
+    image: nginx
+    resources:
+      limits:
+        memory: "200Mi"
+        cpu: "2"
+        example.com/device: "1"
+      requests:
+        memory: "200Mi"
+        cpu: "2"
+        example.com/device: "1"
+```
+
+Also, a pod sharing CPU(s) runs in the `Guaranteed` QoS class when `requests` are equal to `limits`.
+
+```yaml
+spec:
+  containers:
+  - name: nginx
+    image: nginx
+    resources:
+      limits:
+        memory: "200Mi"
+        cpu: "300m"
+        example.com/device: "1"
+      requests:
+        memory: "200Mi"
+        cpu: "300m"
+        example.com/device: "1"
+```
+
+Notice that both CPU and memory requests must be specified for a Pod to be placed in the `Guaranteed` QoS class.
+
+## Troubleshooting
+
+The following means can be used to troubleshoot the reason why a pod could not be deployed or was rejected at a node:
+- pod status - indicates topology affinity errors
+- system logs - include valuable information for debugging, e.g., about generated hints
+- state file - the dump of internal state of the Memory Manager (includes [Node Map and Memory Maps][2])
+
+### Pod status (TopologyAffinityError) {#TopologyAffinityError}
+
+This error typically occurs in the following situations:
+* a node does not have enough resources available to satisfy the pod's request
+* the pod's request is rejected due to particular Topology Manager policy constraints
+
+The error appears in the status of a pod:
+```shell
+# kubectl get pods
+NAME         READY   STATUS                  RESTARTS   AGE
+guaranteed   0/1     TopologyAffinityError   0          113s
+```
+
+Use `kubectl describe pod <podname>` or `kubectl get events` to obtain a detailed error message:
+```shell
+Warning  TopologyAffinityError  10m   kubelet, dell8  Resources cannot be allocated with Topology locality
+```
+
+### System logs
+
+Search system logs with respect to a particular pod.
+
+The set of hints that the Memory Manager generated for the pod can be found in the logs.
+Also, the set of hints generated by the CPU Manager should be present in the logs.
+
+Topology Manager merges these hints to calculate a single best hint.
+The best hint should also be present in the logs.
+
+The best hint indicates where to allocate all the resources. Topology Manager tests this hint against its current policy, and based on the verdict, it either admits the pod to the node or rejects it.
+
+Also, search the logs for occurrences associated with the Memory Manager, e.g.
to find out information about `cgroups` and `cpuset.mems` updates. + +### Examine the memory manager state on a node + +Let us first deploy a sample `Guaranteed` pod whose specification is as follows: +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: guaranteed +spec: + containers: + - name: guaranteed + image: consumer + imagePullPolicy: Never + resources: + limits: + cpu: "2" + memory: 150Gi + requests: + cpu: "2" + memory: 150Gi + command: ["sleep","infinity"] +``` + +Next, let us log into the node where it was deployed and examine the state file in `/var/lib/kubelet/memory_manager_state`: +```json +{ + "policyName":"static", + "machineState":{ + "0":{ + "numberOfAssignments":1, + "memoryMap":{ + "hugepages-1Gi":{ + "total":0, + "systemReserved":0, + "allocatable":0, + "reserved":0, + "free":0 + }, + "memory":{ + "total":134987354112, + "systemReserved":3221225472, + "allocatable":131766128640, + "reserved":131766128640, + "free":0 + } + }, + "nodes":[ + 0, + 1 + ] + }, + "1":{ + "numberOfAssignments":1, + "memoryMap":{ + "hugepages-1Gi":{ + "total":0, + "systemReserved":0, + "allocatable":0, + "reserved":0, + "free":0 + }, + "memory":{ + "total":135286722560, + "systemReserved":2252341248, + "allocatable":133034381312, + "reserved":29295144960, + "free":103739236352 + } + }, + "nodes":[ + 0, + 1 + ] + } + }, + "entries":{ + "fa9bdd38-6df9-4cf9-aa67-8c4814da37a8":{ + "guaranteed":[ + { + "numaAffinity":[ + 0, + 1 + ], + "type":"memory", + "size":161061273600 + } + ] + } + }, + "checksum":4142013182 +} +``` + +It can be deduced from the state file that the pod was pinned to both NUMA nodes, i.e.: + +```json +"numaAffinity":[ + 0, + 1 +], +``` + +Pinned term means that pod's memory consumption is constrained (through `cgroups` configuration) to these NUMA nodes. + +This automatically implies that Memory Manager instantiated a new group that comprises these two NUMA nodes, i.e. `0` and `1` indexed NUMA nodes. + +Notice that the management of groups is handled in a relatively complex manner, and further elaboration is provided in Memory Manager KEP in [this][1] and [this][3] sections. + +In order to analyse memory resources available in a group, the corresponding entries from NUMA nodes belonging to the group must be added up. + +For example, the total amount of free "conventional" memory in the group can be computed by adding up the free memory available at every NUMA node in the group, i.e., in the `"memory"` section of NUMA node `0` (`"free":0`) and NUMA node `1` (`"free":103739236352`). So, the total amount of free "conventional" memory in this group is equal to `0 + 103739236352` bytes. + +The line `"systemReserved":3221225472` indicates that the administrator of this node reserved `3221225472` bytes (i.e. `3Gi`) to serve kubelet and system processes at NUMA node `0`, by using `--reserved-memory` flag. + +## {{% heading "whatsnext" %}} + +- [Memory Manager KEP: Design Overview][4] + +- [Memory Manager KEP: Memory Maps at start-up (with examples)][5] + +- [Memory Manager KEP: Memory Maps at runtime (with examples)][6] + +- [Memory Manager KEP: Simulation - how the Memory Manager works? 
(by examples)][1] + +- [Memory Manager KEP: The Concept of Node Map and Memory Maps][2] + +- [Memory Manager KEP: How to enable the guaranteed memory allocation over many NUMA nodes?][3] + +[1]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/1769-memory-manager#simulation---how-the-memory-manager-works-by-examples +[2]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/1769-memory-manager#the-concept-of-node-map-and-memory-maps +[3]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/1769-memory-manager#how-to-enable-the-guaranteed-memory-allocation-over-many-numa-nodes +[4]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/1769-memory-manager#design-overview +[5]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/1769-memory-manager#memory-maps-at-start-up-with-examples +[6]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/1769-memory-manager#memory-maps-at-runtime-with-examples diff --git a/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/check-if-dockershim-deprecation-affects-you.md b/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/check-if-dockershim-deprecation-affects-you.md index 766db38485062..fe9fd8b0c4708 100644 --- a/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/check-if-dockershim-deprecation-affects-you.md +++ b/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/check-if-dockershim-deprecation-affects-you.md @@ -50,7 +50,7 @@ and scheduling of Pods; on each node, the {{< glossary_tooltip text="kubelet" te uses the container runtime interface as an abstraction so that you can use any compatible container runtime. -In its earliest releases, Kubernetes offered compatibility with just one container runtime: Docker. +In its earliest releases, Kubernetes offered compatibility with one container runtime: Docker. Later in the Kubernetes project's history, cluster operators wanted to adopt additional container runtimes. The CRI was designed to allow this kind of flexibility - and the kubelet began supporting CRI. However, because Docker existed before the CRI specification was invented, the Kubernetes project created an @@ -75,7 +75,7 @@ or execute something inside container using `docker exec`. If you're running workloads via Kubernetes, the best way to stop a container is through the Kubernetes API rather than directly through the container runtime (this advice applies -for all container runtimes, not just Docker). +for all container runtimes, not only Docker). {{< /note >}} diff --git a/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md b/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md index 1d1461ade780b..5d99875527aba 100644 --- a/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md +++ b/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md @@ -232,7 +232,7 @@ Apply the manifest to create a Deployment ```shell kubectl apply -f https://k8s.io/examples/admin/snowflake-deployment.yaml ``` -We have just created a deployment whose replica size is 2 that is running the pod called `snowflake` with a basic container that just serves the hostname. +We have created a deployment whose replica size is 2 that is running the pod called `snowflake` with a basic container that serves the hostname. 
```shell
kubectl get deployment
diff --git a/content/en/docs/tasks/administer-cluster/namespaces.md b/content/en/docs/tasks/administer-cluster/namespaces.md
index 08b2868806f48..2934e1c0f78d9 100644
--- a/content/en/docs/tasks/administer-cluster/namespaces.md
+++ b/content/en/docs/tasks/administer-cluster/namespaces.md
@@ -196,7 +196,7 @@ This delete is asynchronous, so for a time you will see the namespace in the `Te
 
   ```shell
   kubectl create deployment snowflake --image=k8s.gcr.io/serve_hostname -n=development --replicas=2
   ```
-  We have just created a deployment whose replica size is 2 that is running the pod called `snowflake` with a basic container that just serves the hostname.
+  We have created a deployment whose replica size is 2 that is running the pod called `snowflake` with a basic container that serves the hostname.
 
   ```shell
   kubectl get deployment -n=development
@@ -302,7 +302,7 @@ Use cases include:
 
 When you create a [Service](/docs/concepts/services-networking/service/), it creates
 a corresponding [DNS entry](/docs/concepts/services-networking/dns-pod-service/).
 This entry is of the form `<service-name>.<namespace-name>.svc.cluster.local`, which means
-that if a container just uses `<service-name>` it will resolve to the service which
+that if a container uses `<service-name>` it will resolve to the service which
 is local to a namespace. This is useful for using the same configuration across
 multiple namespaces such as Development, Staging and Production. If you want to reach
 across namespaces, you need to use the fully qualified domain name (FQDN).
diff --git a/content/en/docs/tasks/administer-cluster/network-policy-provider/antrea-network-policy.md b/content/en/docs/tasks/administer-cluster/network-policy-provider/antrea-network-policy.md
new file mode 100644
index 0000000000000..36da1839e9950
--- /dev/null
+++ b/content/en/docs/tasks/administer-cluster/network-policy-provider/antrea-network-policy.md
@@ -0,0 +1,24 @@
+---
+title: Use Antrea for NetworkPolicy
+content_type: task
+weight: 10
+---
+
+
+This page shows how to install and use the Antrea CNI plugin on Kubernetes.
+For background on Project Antrea, read the [Introduction to Antrea](https://antrea.io/docs/).
+
+## {{% heading "prerequisites" %}}
+
+You need to have a Kubernetes cluster. Follow the
+[kubeadm getting started guide](/docs/reference/setup-tools/kubeadm/) to bootstrap one.
+
+
+
+## Deploying Antrea with kubeadm
+
+Follow the [Getting Started](https://github.com/vmware-tanzu/antrea/blob/main/docs/getting-started.md) guide to deploy Antrea for kubeadm.
+
+## {{% heading "whatsnext" %}}
+
+Once your cluster is running, you can follow the [Declare Network Policy](/docs/tasks/administer-cluster/declare-network-policy/) guide to try out Kubernetes NetworkPolicy.
diff --git a/content/en/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md b/content/en/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md
index 9efdccfb6e242..40733c4c96810 100644
--- a/content/en/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md
+++ b/content/en/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md
@@ -20,7 +20,7 @@ Decide whether you want to deploy a [cloud](#creating-a-calico-cluster-with-goog
 
 **Prerequisite**: [gcloud](https://cloud.google.com/sdk/docs/quickstarts).
 
-1. To launch a GKE cluster with Calico, just include the `--enable-network-policy` flag.
+1. To launch a GKE cluster with Calico, include the `--enable-network-policy` flag.
**Syntax** ```shell diff --git a/content/en/docs/tasks/administer-cluster/out-of-resource.md b/content/en/docs/tasks/administer-cluster/out-of-resource.md index c45a773c4564c..f750dd2585397 100644 --- a/content/en/docs/tasks/administer-cluster/out-of-resource.md +++ b/content/en/docs/tasks/administer-cluster/out-of-resource.md @@ -117,9 +117,10 @@ The `kubelet` has the following default hard eviction threshold: * `memory.available<100Mi` * `nodefs.available<10%` -* `nodefs.inodesFree<5%` * `imagefs.available<15%` +On a Linux node, the default value also includes `nodefs.inodesFree<5%`. + ### Eviction Monitoring Interval The `kubelet` evaluates eviction thresholds per its configured housekeeping interval. @@ -140,6 +141,7 @@ The following node conditions are defined that correspond to the specified evict |-------------------|---------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------| | `MemoryPressure` | `memory.available` | Available memory on the node has satisfied an eviction threshold | | `DiskPressure` | `nodefs.available`, `nodefs.inodesFree`, `imagefs.available`, or `imagefs.inodesFree` | Available disk space and inodes on either the node's root filesystem or image filesystem has satisfied an eviction threshold | +| `PIDPressure` | `pid.available` | Available processes identifiers on the (Linux) node has fallen below an eviction threshold | | The `kubelet` continues to report node status updates at the frequency specified by `--node-status-update-frequency` which defaults to `10s`. diff --git a/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md b/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md index 7f56e4ec85500..4ec3b428d0b9e 100644 --- a/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md +++ b/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md @@ -22,8 +22,8 @@ but this is unsafe for some parameters. Before deciding to change a parameter dynamically, you need a strong understanding of how that change will affect your cluster's behavior. Always carefully test configuration changes on a small set of nodes before rolling them out cluster-wide. Advice on configuring specific -fields is available in the inline `KubeletConfiguration` -[type documentation](https://github.com/kubernetes/kubernetes/blob/release-1.11/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go). +fields is available in the inline +[`KubeletConfiguration`](/docs/reference/config-api/kubelet-config.v1beta1/). {{< /warning >}} @@ -55,7 +55,7 @@ For each node that you're reconfiguring, you must set the kubelet The basic workflow for configuring a kubelet in a live cluster is as follows: 1. Write a YAML or JSON configuration file containing the -kubelet's configuration. + kubelet's configuration. 2. Wrap this file in a ConfigMap and save it to the Kubernetes control plane. 3. Update the kubelet's corresponding Node object to use this ConfigMap. @@ -135,24 +135,24 @@ To follow the tasks as written, you need to have `jq` installed. You can adapt the steps if you prefer to extract the `kubeletconfig` subobject manually. {{< /note >}} -1. Choose a Node to reconfigure. In this example, the name of this Node is - referred to as `NODE_NAME`. -2. Start the kubectl proxy in the background using the following command: +1. Choose a Node to reconfigure. 
In this example, the name of this Node is + referred to as `NODE_NAME`. +2. Start the kubectl proxy in the background using the following command: - ```bash - kubectl proxy --port=8001 & - ``` -3. Run the following command to download and unpack the configuration from the - `configz` endpoint. The command is long, so be careful when copying and - pasting. **If you use zsh**, note that common zsh configurations add backslashes - to escape the opening and closing curly braces around the variable name in the URL. - For example: `${NODE_NAME}` will be rewritten as `$\{NODE_NAME\}` during the paste. - You must remove the backslashes before running the command, or the command will fail. + ```shell + kubectl proxy --port=8001 & + ``` +3. Run the following command to download and unpack the configuration from the + `configz` endpoint. The command is long, so be careful when copying and + pasting. **If you use zsh**, note that common zsh configurations add backslashes + to escape the opening and closing curly braces around the variable name in the URL. + For example: `${NODE_NAME}` will be rewritten as `$\{NODE_NAME\}` during the paste. + You must remove the backslashes before running the command, or the command will fail. - ```bash - NODE_NAME="the-name-of-the-node-you-are-reconfiguring"; curl -sSL "http://localhost:8001/api/v1/nodes/${NODE_NAME}/proxy/configz" | jq '.kubeletconfig|.kind="KubeletConfiguration"|.apiVersion="kubelet.config.k8s.io/v1beta1"' > kubelet_configz_${NODE_NAME} - ``` + ```bash + NODE_NAME="the-name-of-the-node-you-are-reconfiguring"; curl -sSL "http://localhost:8001/api/v1/nodes/${NODE_NAME}/proxy/configz" | jq '.kubeletconfig|.kind="KubeletConfiguration"|.apiVersion="kubelet.config.k8s.io/v1beta1"' > kubelet_configz_${NODE_NAME} + ``` {{< note >}} You need to manually add the `kind` and `apiVersion` to the downloaded @@ -312,8 +312,6 @@ empty, since all config sources have been reset to `nil`, which indicates that the local default config is `assigned`, `active`, and `lastKnownGood`, and no error is reported. - - ## `kubectl patch` example @@ -356,7 +354,7 @@ metadata and checkpoints. The structure of the kubelet's checkpointing directory | - ... ``` -## Understanding Node.Status.Config.Error messages {#understanding-node-config-status-errors} +## Understanding `Node.Status.Config.Error` messages {#understanding-node-config-status-errors} The following table describes error messages that can occur when using Dynamic Kubelet Config. You can search for the identical text @@ -378,6 +376,10 @@ internal failure, see Kubelet log for details | The kubelet encountered some int ## {{% heading "whatsnext" %}} - - For more information on configuring the kubelet via a configuration file, see +- For more information on configuring the kubelet via a configuration file, see [Set kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file). - See the reference documentation for [`NodeConfigSource`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#nodeconfigsource-v1-core) +- Learn more about kubelet configuration by checking the + [`KubeletConfiguration`](/docs/reference/config-api/kubelet-config.v1beta1/) + reference. 
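
For orientation, a minimal kubelet configuration file of the kind that the
references above describe might look like the following sketch; the
`evictionHard` value is only an illustrative override, not a recommendation:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Fields omitted here keep their built-in defaults.
evictionHard:
  memory.available: "200Mi"
```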
+ diff --git a/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md b/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md index b6249f50efa1a..a5661263f23e0 100644 --- a/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md +++ b/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md @@ -90,7 +90,7 @@ In addition to `cpu`, `memory`, and `ephemeral-storage`, `pid` may be specified to reserve the specified number of process IDs for kubernetes system daemons. -To optionally enforce `kube-reserved` on system daemons, specify the parent +To optionally enforce `kube-reserved` on kubernetes system daemons, specify the parent control group for kube daemons as the value for `--kube-reserved-cgroup` kubelet flag. diff --git a/content/en/docs/tasks/administer-cluster/safely-drain-node.md b/content/en/docs/tasks/administer-cluster/safely-drain-node.md index db31cceb8b2b3..0fc0a97ffcd1b 100644 --- a/content/en/docs/tasks/administer-cluster/safely-drain-node.md +++ b/content/en/docs/tasks/administer-cluster/safely-drain-node.md @@ -128,8 +128,8 @@ curl -v -H 'Content-type: application/json' https://your-cluster-api-endpoint.ex The API can respond in one of three ways: -- If the eviction is granted, then the Pod is deleted just as if you had sent - a `DELETE` request to the Pod's URL and you get back `200 OK`. +- If the eviction is granted, then the Pod is deleted as if you sent + a `DELETE` request to the Pod's URL and received back `200 OK`. - If the current state of affairs wouldn't allow an eviction by the rules set forth in the budget, you get back `429 Too Many Requests`. This is typically used for generic rate limiting of *any* requests, but here we mean diff --git a/content/en/docs/tasks/administer-cluster/sysctl-cluster.md b/content/en/docs/tasks/administer-cluster/sysctl-cluster.md index 60af560e306cb..f81623982ff5d 100644 --- a/content/en/docs/tasks/administer-cluster/sysctl-cluster.md +++ b/content/en/docs/tasks/administer-cluster/sysctl-cluster.md @@ -6,19 +6,20 @@ content_type: task --- -{{< feature-state for_k8s_version="v1.12" state="beta" >}} + +{{< feature-state for_k8s_version="v1.21" state="stable" >}} This document describes how to configure and use kernel parameters within a Kubernetes cluster using the {{< glossary_tooltip term_id="sysctl" >}} interface. - - ## {{% heading "prerequisites" %}} -{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} +{{< include "task-tutorial-prereqs.md" >}} +For some steps, you also need to be able to reconfigure the command line +options for the kubelets running on your cluster. @@ -166,6 +167,8 @@ to schedule those pods onto the right nodes. ## PodSecurityPolicy +{{< feature-state for_k8s_version="v1.21" state="deprecated" >}} + You can further control which sysctls can be set in pods by specifying lists of sysctls or sysctl patterns in the `forbiddenSysctls` and/or `allowedUnsafeSysctls` fields of the PodSecurityPolicy. 
A sysctl pattern ends diff --git a/content/en/docs/tasks/administer-cluster/topology-manager.md b/content/en/docs/tasks/administer-cluster/topology-manager.md index 7d3f017940279..e8d2e7c19d6c9 100644 --- a/content/en/docs/tasks/administer-cluster/topology-manager.md +++ b/content/en/docs/tasks/administer-cluster/topology-manager.md @@ -69,6 +69,10 @@ Details on the various `scopes` and `policies` available today can be found belo To align CPU resources with other requested resources in a Pod Spec, the CPU Manager should be enabled and proper CPU Manager policy should be configured on a Node. See [control CPU Management Policies](/docs/tasks/administer-cluster/cpu-management-policies/). {{< /note >}} +{{< note >}} +To align memory (and hugepages) resources with other requested resources in a Pod Spec, the Memory Manager should be enabled and proper Memory Manager policy should be configured on a Node. Examine [Memory Manager](/docs/tasks/administer-cluster/memory-manager/) documentation. +{{< /note >}} + ### Topology Manager Scopes The Topology Manager can deal with the alignment of resources in a couple of distinct scopes: @@ -263,8 +267,4 @@ Using this information the Topology Manager calculates the optimal hint for the ### Known Limitations 1. The maximum number of NUMA nodes that Topology Manager allows is 8. With more than 8 NUMA nodes there will be a state explosion when trying to enumerate the possible NUMA affinities and generating their hints. -2. The scheduler is not topology-aware, so it is possible to be scheduled on a node and then fail on the node due to the Topology Manager. - -3. The Device Manager and the CPU Manager are the only components to adopt the Topology Manager's HintProvider interface. This means that NUMA alignment can only be achieved for resources managed by the CPU Manager and the Device Manager. Memory or Hugepages are not considered by the Topology Manager for NUMA alignment. - - +2. The scheduler is not topology-aware, so it is possible to be scheduled on a node and then fail on the node due to the Topology Manager. \ No newline at end of file diff --git a/content/en/docs/tasks/configmap-secret/managing-secret-using-config-file.md b/content/en/docs/tasks/configmap-secret/managing-secret-using-config-file.md index 8ed9730415b55..b405d57baf0ce 100644 --- a/content/en/docs/tasks/configmap-secret/managing-secret-using-config-file.md +++ b/content/en/docs/tasks/configmap-secret/managing-secret-using-config-file.md @@ -184,10 +184,10 @@ Where `YWRtaW5pc3RyYXRvcg==` decodes to `administrator`. ## Clean Up -To delete the Secret you have just created: +To delete the Secret you have created: ```shell -kubectl delete secret db-user-pass +kubectl delete secret mysecret ``` ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/tasks/configmap-secret/managing-secret-using-kubectl.md b/content/en/docs/tasks/configmap-secret/managing-secret-using-kubectl.md index 1e6d88ede481e..293915736eca4 100644 --- a/content/en/docs/tasks/configmap-secret/managing-secret-using-kubectl.md +++ b/content/en/docs/tasks/configmap-secret/managing-secret-using-kubectl.md @@ -115,8 +115,7 @@ accidentally to an onlooker, or from being stored in a terminal log. 
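If you would rather keep the credentials off the command line entirely, one alternative sketch is to read them from local files (the file names here are illustrative; the resulting data keys become `username` and `password`):

```shell
# Create the same Secret from files instead of --from-literal flags,
# so the values never appear in shell history or terminal logs.
kubectl create secret generic db-user-pass \
  --from-file=username=./username.txt \
  --from-file=password=./password.txt
```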
## Decoding the Secret {#decoding-secret} -To view the contents of the Secret we just created, you can run the following -command: +To view the contents of the Secret you created, run the following command: ```shell kubectl get secret db-user-pass -o jsonpath='{.data}' @@ -125,10 +124,10 @@ kubectl get secret db-user-pass -o jsonpath='{.data}' The output is similar to: ```json -{"password.txt":"MWYyZDFlMmU2N2Rm","username.txt":"YWRtaW4="} +{"password":"MWYyZDFlMmU2N2Rm","username":"YWRtaW4="} ``` -Now you can decode the `password.txt` data: +Now you can decode the `password` data: ```shell echo 'MWYyZDFlMmU2N2Rm' | base64 --decode @@ -142,7 +141,7 @@ The output is similar to: ## Clean Up -To delete the Secret you have just created: +To delete the Secret you have created: ```shell kubectl delete secret db-user-pass diff --git a/content/en/docs/tasks/configmap-secret/managing-secret-using-kustomize.md b/content/en/docs/tasks/configmap-secret/managing-secret-using-kustomize.md index d7b1f48a4a540..fb257a602683c 100644 --- a/content/en/docs/tasks/configmap-secret/managing-secret-using-kustomize.md +++ b/content/en/docs/tasks/configmap-secret/managing-secret-using-kustomize.md @@ -92,7 +92,7 @@ kubectl describe secrets/db-user-pass-96mffmfh4k The output is similar to: ``` -Name: db-user-pass +Name: db-user-pass-96mffmfh4k Namespace: default Labels: Annotations: @@ -113,7 +113,7 @@ To check the actual content of the encoded data, please refer to ## Clean Up -To delete the Secret you have just created: +To delete the Secret you have created: ```shell kubectl delete secret db-user-pass-96mffmfh4k diff --git a/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md b/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md index 243072eff292b..21b02cc000cf6 100644 --- a/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md +++ b/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md @@ -112,7 +112,7 @@ kubectl top pod cpu-demo --namespace=cpu-example ``` This example output shows that the Pod is using 974 milliCPU, which is -just a bit less than the limit of 1 CPU specified in the Pod configuration. +slightly less than the limit of 1 CPU specified in the Pod configuration. ``` NAME CPU(cores) MEMORY(bytes) diff --git a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md index 45d56531f245c..77a9ac76473e9 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md +++ b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md @@ -30,7 +30,7 @@ getting killed by the kubelet before they are up and running. ## {{% heading "prerequisites" %}} -{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} +{{< include "task-tutorial-prereqs.md" >}} @@ -204,7 +204,7 @@ seconds. In addition to the readiness probe, this configuration includes a liveness probe. The kubelet will run the first liveness probe 15 seconds after the container -starts. Just like the readiness probe, this will attempt to connect to the +starts. Similar to the readiness probe, this will attempt to connect to the `goproxy` container on port 8080. If the liveness probe fails, the container will be restarted. @@ -293,6 +293,10 @@ Services. Readiness probes runs on the container during its whole lifecycle. 
{{< /note >}} +{{< caution >}} +Liveness probes *do not* wait for readiness probes to succeed. If you want to wait before executing a liveness probe you should use `initialDelaySeconds` or a `startupProbe`. +{{< /caution >}} + Readiness probes are configured similarly to liveness probes. The only difference is that you use the `readinessProbe` field instead of the `livenessProbe` field. @@ -423,7 +427,48 @@ For a TCP probe, the kubelet makes the probe connection at the node, not in the means that you can not use a service name in the `host` parameter since the kubelet is unable to resolve it. +### Probe-level `terminationGracePeriodSeconds` + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +Prior to release 1.21, the pod-level `terminationGracePeriodSeconds` was used +for terminating a container that failed its liveness or startup probe. This +coupling was unintended and may have resulted in failed containers taking an +unusually long time to restart when a pod-level `terminationGracePeriodSeconds` +was set. + +In 1.21, when the feature flag `ProbeTerminationGracePeriod` is enabled, users +can specify a probe-level `terminationGracePeriodSeconds` as part of the probe +specification. When the feature flag is enabled, and both a pod- and +probe-level `terminationGracePeriodSeconds` are set, the kubelet will use the +probe-level value. + +For example, + +```yaml +spec: + terminationGracePeriodSeconds: 3600 # pod-level + containers: + - name: test + image: ... + + ports: + - name: liveness-port + containerPort: 8080 + hostPort: 8080 + + livenessProbe: + httpGet: + path: /healthz + port: liveness-port + failureThreshold: 1 + periodSeconds: 60 + # Override pod-level terminationGracePeriodSeconds + terminationGracePeriodSeconds: 60 +``` + +Probe-level `terminationGracePeriodSeconds` cannot be set for readiness probes. +It will be rejected by the API server. ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md b/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md index 2824cce64261e..40987152e8ab3 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md +++ b/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md @@ -201,6 +201,9 @@ allow.textmode=true how.nice.to.look=fairlyNice ``` +When `kubectl` creates a ConfigMap from inputs that are not ASCII or UTF-8, the tool puts these into the `binaryData` field of the ConfigMap, and not in `data`. Both text and binary data sources can be combined in one ConfigMap. +If you want to view the `binaryData` keys (and their values) in a ConfigMap, you can run `kubectl get configmap <configmap-name> -o jsonpath='{.binaryData}'`. + Use the option `--from-env-file` to create a ConfigMap from an env-file, for example: ```shell @@ -687,4 +690,3 @@ data: * Follow a real world example of [Configuring Redis using a ConfigMap](/docs/tutorials/configuration/configure-redis-using-configmap/). - diff --git a/content/en/docs/tasks/configure-pod-container/configure-service-account.md b/content/en/docs/tasks/configure-pod-container/configure-service-account.md index ca3d0b2966f50..23a76f37521af 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-service-account.md +++ b/content/en/docs/tasks/configure-pod-container/configure-service-account.md @@ -23,16 +23,10 @@ authenticated by the apiserver as a particular User Account (currently this is usually `admin`, unless your cluster administrator has customized your cluster).
Processes in containers inside pods can also contact the apiserver. When they do, they are authenticated as a particular Service Account (for example, `default`). - - - ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} - - ## Use the Default Service Account to access the API server. @@ -129,7 +123,7 @@ then you will see that a token has automatically been created and is referenced You may use authorization plugins to [set permissions on service accounts](/docs/reference/access-authn-authz/rbac/#service-account-permissions). -To use a non-default service account, simply set the `spec.serviceAccountName` +To use a non-default service account, set the `spec.serviceAccountName` field of a pod to the name of the service account you wish to use. The service account has to exist at the time the pod is created, or it will be rejected. @@ -329,11 +323,10 @@ The application is responsible for reloading the token when it rotates. Periodic ## Service Account Issuer Discovery -{{< feature-state for_k8s_version="v1.20" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} -The Service Account Issuer Discovery feature is enabled by enabling the -`ServiceAccountIssuerDiscovery` [feature gate](/docs/reference/command-line-tools-reference/feature-gates) -and then enabling the Service Account Token Projection feature as described +The Service Account Issuer Discovery feature is enabled when the Service Account +Token Projection feature is enabled, as described [above](#service-account-token-volume-projection). {{< note >}} @@ -355,8 +348,8 @@ Configuration document at `/.well-known/openid-configuration` and the associated JSON Web Key Set (JWKS) at `/openid/v1/jwks`. The OpenID Provider Configuration is sometimes referred to as the _discovery document_. -When enabled, the cluster is also configured with a default RBAC ClusterRole -called `system:service-account-issuer-discovery`. No role bindings are provided +Clusters include a default RBAC ClusterRole called +`system:service-account-issuer-discovery`. No role bindings are provided by default. Administrators may, for example, choose whether to bind the role to `system:authenticated` or `system:unauthenticated` depending on their security requirements and which external systems they intend to federate with. diff --git a/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md b/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md index ce0b5b3656a4c..697a4c6e0e888 100644 --- a/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md +++ b/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md @@ -9,18 +9,13 @@ weight: 100 This page shows how to create a Pod that uses a Secret to pull an image from a private Docker registry or repository. - - ## {{% heading "prerequisites" %}} - * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * To do this exercise, you need a [Docker ID](https://docs.docker.com/docker-id/) and password. - - ## Log in to Docker @@ -106,7 +101,8 @@ kubectl create secret docker-registry regcred --docker-server=<your-registry-server> -* `<your-registry-server>` is your Private Docker Registry FQDN. (https://index.docker.io/v1/ for DockerHub) +* `<your-registry-server>` is your Private Docker Registry FQDN. + Use `https://index.docker.io/v2/` for DockerHub. +* `<your-name>` is your Docker username. * `<your-pword>` is your Docker password. * `<your-email>` is your Docker email.
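For reference, the `my-private-reg-pod.yaml` manifest used further below is not shown in this change; a minimal sketch of a Pod that pulls through the `regcred` Secret might look like the following (the image name is illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: private-reg
spec:
  containers:
  - name: private-reg-container
    image: your.private.registry.example.com/janedoe/jdoe-private:v1
  imagePullSecrets:
  # Tell the kubelet to use the regcred Secret when pulling this image
  - name: regcred
```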
@@ -122,7 +118,7 @@ those secrets might also be visible to other users on your PC during the time th ## Inspecting the Secret `regcred` -To understand the contents of the `regcred` Secret you just created, start by viewing the Secret in YAML format: +To understand the contents of the `regcred` Secret you created, start by viewing the Secret in YAML format: ```shell kubectl get secret regcred --output=yaml @@ -192,7 +188,8 @@ your.private.registry.example.com/janedoe/jdoe-private:v1 ``` To pull the image from the private registry, Kubernetes needs credentials. -The `imagePullSecrets` field in the configuration file specifies that Kubernetes should get the credentials from a Secret named `regcred`. +The `imagePullSecrets` field in the configuration file specifies that +Kubernetes should get the credentials from a Secret named `regcred`. Create a Pod that uses your Secret, and verify that the Pod is running: @@ -201,11 +198,8 @@ kubectl apply -f my-private-reg-pod.yaml kubectl get pod private-reg ``` - - ## {{% heading "whatsnext" %}} - * Learn more about [Secrets](/docs/concepts/configuration/secret/). * Learn more about [using a private registry](/docs/concepts/containers/images/#using-a-private-registry). * Learn more about [adding image pull secrets to a service account](/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account). @@ -213,5 +207,3 @@ kubectl get pod private-reg * See [Secret](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#secret-v1-core). * See the `imagePullSecrets` field of [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core). - - diff --git a/content/en/docs/tasks/configure-pod-container/static-pod.md b/content/en/docs/tasks/configure-pod-container/static-pod.md index cf31d822d6ce0..9126243462dcc 100644 --- a/content/en/docs/tasks/configure-pod-container/static-pod.md +++ b/content/en/docs/tasks/configure-pod-container/static-pod.md @@ -22,6 +22,7 @@ The kubelet automatically tries to create a {{< glossary_tooltip text="mirror Po on the Kubernetes API server for each static Pod. This means that the Pods running on a node are visible on the API server, but cannot be controlled from there. +The Pod names will be suffixed with the node hostname with a leading hyphen. {{< note >}} If you are running clustered Kubernetes and are using static @@ -30,21 +31,14 @@ Pods to run a Pod on every node, you should probably be using a instead. {{< /note >}} - - ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} This page assumes you're using {{< glossary_tooltip term_id="docker" >}} to run Pods, and that your nodes are running the Fedora operating system. Instructions for other distributions or Kubernetes installations may vary. - - - - ## Create a static pod {#static-pod-creation} @@ -53,7 +47,9 @@ You can configure a static Pod with either a [file system hosted configuration f ### Filesystem-hosted static Pod manifest {#configuration-files} -Manifests are standard Pod definitions in JSON or YAML format in a specific directory. Use the `staticPodPath: <the directory>` field in the [kubelet configuration file](/docs/tasks/administer-cluster/kubelet-config-file), which periodically scans the directory and creates/deletes static Pods as YAML/JSON files appear/disappear there. +Manifests are standard Pod definitions in JSON or YAML format in a specific directory.
Use the `staticPodPath: <the directory>` field in the +[kubelet configuration file](/docs/reference/config-api/kubelet-config.v1beta1/), +which periodically scans the directory and creates/deletes static Pods as YAML/JSON files appear/disappear there. Note that the kubelet will ignore files starting with dots when scanning the specified directory. For example, this is how to start a simple web server as a static Pod: @@ -89,17 +85,18 @@ For example, this is how to start a simple web server as a static Pod: 3. Configure your kubelet on the node to use this directory by running it with `--pod-manifest-path=/etc/kubelet.d/` argument. On Fedora edit `/etc/kubernetes/kubelet` to include this line: - ``` - KUBELET_ARGS="--cluster-dns=10.254.0.10 --cluster-domain=kube.local --pod-manifest-path=/etc/kubelet.d/" - ``` - or add the `staticPodPath: <the directory>` field in the [kubelet configuration file](/docs/tasks/administer-cluster/kubelet-config-file). + ``` + KUBELET_ARGS="--cluster-dns=10.254.0.10 --cluster-domain=kube.local --pod-manifest-path=/etc/kubelet.d/" + ``` + or add the `staticPodPath: <the directory>` field in the + [kubelet configuration file](/docs/reference/config-api/kubelet-config.v1beta1/). 4. Restart the kubelet. On Fedora, you would run: - ```shell - # Run this command on the node where the kubelet is running - systemctl restart kubelet - ``` + ```shell + # Run this command on the node where the kubelet is running + systemctl restart kubelet + ``` ### Web-hosted static pod manifest {#pods-created-via-http} @@ -237,4 +234,3 @@ CONTAINER ID IMAGE COMMAND CREATED ... e7a62e3427f1 nginx:latest "nginx -g 'daemon of 27 seconds ago ``` - diff --git a/content/en/docs/tasks/configure-pod-container/translate-compose-kubernetes.md b/content/en/docs/tasks/configure-pod-container/translate-compose-kubernetes.md index 4fadbb3f42ddb..384b709720ee0 100644 --- a/content/en/docs/tasks/configure-pod-container/translate-compose-kubernetes.md +++ b/content/en/docs/tasks/configure-pod-container/translate-compose-kubernetes.md @@ -12,16 +12,10 @@ What's Kompose? It's a conversion tool for all things compose (namely Docker Com More information can be found on the Kompose website at [http://kompose.io](http://kompose.io). - - - ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} - - ## Install Kompose @@ -35,13 +29,13 @@ Kompose is released via GitHub on a three-week cycle, you can see all current re ```sh # Linux -curl -L https://github.com/kubernetes/kompose/releases/download/v1.21.0/kompose-linux-amd64 -o kompose +curl -L https://github.com/kubernetes/kompose/releases/download/v1.22.0/kompose-linux-amd64 -o kompose # macOS -curl -L https://github.com/kubernetes/kompose/releases/download/v1.21.0/kompose-darwin-amd64 -o kompose +curl -L https://github.com/kubernetes/kompose/releases/download/v1.22.0/kompose-darwin-amd64 -o kompose # Windows -curl -L https://github.com/kubernetes/kompose/releases/download/v1.21.0/kompose-windows-amd64.exe -o kompose.exe +curl -L https://github.com/kubernetes/kompose/releases/download/v1.22.0/kompose-windows-amd64.exe -o kompose.exe chmod +x kompose sudo mv ./kompose /usr/local/bin/kompose @@ -49,7 +43,6 @@ sudo mv ./kompose /usr/local/bin/kompose Alternatively, you can download the [tarball](https://github.com/kubernetes/kompose/releases).
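Whichever install method you use in this tab, you can confirm the binary is reachable on your `PATH` (assuming the install succeeded):

```sh
# Print the installed kompose version as a quick smoke test
kompose version
```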
- {{% /tab %}} {{% tab name="Build from source" %}} @@ -74,7 +67,7 @@ sudo yum -y install kompose {{% /tab %}} {{% tab name="Fedora package" %}} -Kompose is in Fedora 24, 25 and 26 repositories. You can install it just like any other package. +Kompose is in Fedora 24, 25 and 26 repositories. You can install it like any other package. ```bash sudo dnf -y install kompose @@ -87,121 +80,127 @@ On macOS you can install latest release via [Homebrew](https://brew.sh): ```bash brew install kompose - ``` + {{% /tab %}} {{< /tabs >}} ## Use Kompose -In just a few steps, we'll take you from Docker Compose to Kubernetes. All +In a few steps, we'll take you from Docker Compose to Kubernetes. All you need is an existing `docker-compose.yml` file. -1. Go to the directory containing your `docker-compose.yml` file. If you don't - have one, test using this one. - - ```yaml - version: "2" - - services: - - redis-master: - image: k8s.gcr.io/redis:e2e - ports: - - "6379" - - redis-slave: - image: gcr.io/google_samples/gb-redisslave:v3 - ports: - - "6379" - environment: - - GET_HOSTS_FROM=dns - - frontend: - image: gcr.io/google-samples/gb-frontend:v4 - ports: - - "80:80" - environment: - - GET_HOSTS_FROM=dns - labels: - kompose.service.type: LoadBalancer - ``` - -2. Run the `kompose up` command to deploy to Kubernetes directly, or skip to - the next step instead to generate a file to use with `kubectl`. - - ```bash - $ kompose up - We are going to create Kubernetes Deployments, Services and PersistentVolumeClaims for your Dockerized application. - If you need different kind of resources, use the 'kompose convert' and 'kubectl apply -f' commands instead. - - INFO Successfully created Service: redis - INFO Successfully created Service: web - INFO Successfully created Deployment: redis - INFO Successfully created Deployment: web - - Your application has been deployed to Kubernetes. You can run 'kubectl get deployment,svc,pods,pvc' for details. - ``` - -3. To convert the `docker-compose.yml` file to files that you can use with - `kubectl`, run `kompose convert` and then `kubectl apply -f `. - - ```bash - $ kompose convert - INFO Kubernetes file "frontend-service.yaml" created - INFO Kubernetes file "redis-master-service.yaml" created - INFO Kubernetes file "redis-slave-service.yaml" created - INFO Kubernetes file "frontend-deployment.yaml" created - INFO Kubernetes file "redis-master-deployment.yaml" created - INFO Kubernetes file "redis-slave-deployment.yaml" created - ``` - - ```bash - $ kubectl apply -f frontend-service.yaml,redis-master-service.yaml,redis-slave-service.yaml,frontend-deployment.yaml,redis-master-deployment.yaml,redis-slave-deployment.yaml - service/frontend created - service/redis-master created - service/redis-slave created - deployment.apps/frontend created - deployment.apps/redis-master created - deployment.apps/redis-slave created - ``` - - Your deployments are running in Kubernetes. - -4. Access your application. - - If you're already using `minikube` for your development process: - - ```bash - $ minikube service frontend - ``` - - Otherwise, let's look up what IP your service is using! - - ```sh - $ kubectl describe svc frontend - Name: frontend - Namespace: default - Labels: service=frontend - Selector: service=frontend - Type: LoadBalancer - IP: 10.0.0.183 - LoadBalancer Ingress: 192.0.2.89 - Port: 80 80/TCP - NodePort: 80 31144/TCP - Endpoints: 172.17.0.4:80 - Session Affinity: None - No events. 
- - ``` - - If you're using a cloud provider, your IP will be listed next to `LoadBalancer Ingress`. - - ```sh - $ curl http://192.0.2.89 - ``` - - +1. Go to the directory containing your `docker-compose.yml` file. If you don't have one, test using this one. + + ```yaml + version: "2" + + services: + + redis-master: + image: k8s.gcr.io/redis:e2e + ports: + - "6379" + + redis-slave: + image: gcr.io/google_samples/gb-redisslave:v3 + ports: + - "6379" + environment: + - GET_HOSTS_FROM=dns + + frontend: + image: gcr.io/google-samples/gb-frontend:v4 + ports: + - "80:80" + environment: + - GET_HOSTS_FROM=dns + labels: + kompose.service.type: LoadBalancer + ``` + +2. To convert the `docker-compose.yml` file to files that you can use with + `kubectl`, run `kompose convert` and then `kubectl apply -f <output file>`. + + ```bash + kompose convert + ``` + + The output is similar to: + + ```none + INFO Kubernetes file "frontend-service.yaml" created + INFO Kubernetes file "redis-master-service.yaml" created + INFO Kubernetes file "redis-slave-service.yaml" created + INFO Kubernetes file "frontend-deployment.yaml" created + INFO Kubernetes file "redis-master-deployment.yaml" created + INFO Kubernetes file "redis-slave-deployment.yaml" created + ``` + + ```bash + kubectl apply -f frontend-service.yaml,redis-master-service.yaml,redis-slave-service.yaml,frontend-deployment.yaml,redis-master-deployment.yaml,redis-slave-deployment.yaml + ``` + + The output is similar to: + + ```none + service/frontend created + service/redis-master created + service/redis-slave created + deployment.apps/frontend created + deployment.apps/redis-master created + deployment.apps/redis-slave created + ``` + + Your deployments are running in Kubernetes. + +3. Access your application. + + If you're already using `minikube` for your development process: + + ```bash + minikube service frontend + ``` + + Otherwise, let's look up what IP your service is using! + + ```sh + kubectl describe svc frontend + ``` + + ```none + Name: frontend + Namespace: default + Labels: service=frontend + Selector: service=frontend + Type: LoadBalancer + IP: 10.0.0.183 + LoadBalancer Ingress: 192.0.2.89 + Port: 80 80/TCP + NodePort: 80 31144/TCP + Endpoints: 172.17.0.4:80 + Session Affinity: None + No events. + ``` + + If you're using a cloud provider, your IP will be listed next to `LoadBalancer Ingress`. + + ```sh + curl http://192.0.2.89 + ``` @@ -221,15 +220,17 @@ you need is an existing `docker-compose.yml` file. Kompose has support for two providers: OpenShift and Kubernetes. You can choose a targeted provider using global option `--provider`. If no provider is specified, Kubernetes is set by default. - ## `kompose convert` Kompose supports conversion of V1, V2, and V3 Docker Compose files into Kubernetes and OpenShift objects.
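Depending on the kompose release you have installed, you may also be able to collect the generated manifests into a single file with the `-o` flag; treat this as a sketch and check `kompose convert --help` for the exact flags your build supports:

```sh
# Write all generated resources into one file instead of one file per object
kompose convert -o k8s-manifests.yaml
```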
-### Kubernetes +### Kubernetes `kompose convert` example -```sh -$ kompose --file docker-voting.yml convert +```shell +kompose --file docker-voting.yml convert +``` + +```none WARN Unsupported key networks - ignoring WARN Unsupported key build - ignoring INFO Kubernetes file "worker-svc.yaml" created @@ -242,16 +243,24 @@ INFO Kubernetes file "result-deployment.yaml" created INFO Kubernetes file "vote-deployment.yaml" created INFO Kubernetes file "worker-deployment.yaml" created INFO Kubernetes file "db-deployment.yaml" created +``` -$ ls +```shell +ls +``` + +```none db-deployment.yaml docker-compose.yml docker-gitlab.yml redis-deployment.yaml result-deployment.yaml vote-deployment.yaml worker-deployment.yaml db-svc.yaml docker-voting.yml redis-svc.yaml result-svc.yaml vote-svc.yaml worker-svc.yaml ``` You can also provide multiple docker-compose files at the same time: -```sh -$ kompose -f docker-compose.yml -f docker-guestbook.yml convert +```shell +kompose -f docker-compose.yml -f docker-guestbook.yml convert +``` + +```none INFO Kubernetes file "frontend-service.yaml" created INFO Kubernetes file "mlbparks-service.yaml" created INFO Kubernetes file "mongodb-service.yaml" created @@ -263,8 +272,13 @@ INFO Kubernetes file "mongodb-deployment.yaml" created INFO Kubernetes file "mongodb-claim0-persistentvolumeclaim.yaml" created INFO Kubernetes file "redis-master-deployment.yaml" created INFO Kubernetes file "redis-slave-deployment.yaml" created +``` -$ ls +```shell +ls +``` + +```none mlbparks-deployment.yaml mongodb-service.yaml redis-slave-service.jsonmlbparks-service.yaml frontend-deployment.yaml mongodb-claim0-persistentvolumeclaim.yaml redis-master-service.yaml frontend-service.yaml mongodb-deployment.yaml redis-slave-deployment.yaml @@ -273,10 +287,13 @@ redis-master-deployment.yaml When multiple docker-compose files are provided the configuration is merged. Any configuration that is common will be over ridden by subsequent file. -### OpenShift +### OpenShift `kompose convert` example ```sh -$ kompose --provider openshift --file docker-voting.yml convert +kompose --provider openshift --file docker-voting.yml convert +``` + +```none WARN [worker] Service cannot be created because of missing port. INFO OpenShift file "vote-service.yaml" created INFO OpenShift file "db-service.yaml" created @@ -297,7 +314,10 @@ INFO OpenShift file "result-imagestream.yaml" created It also supports creating buildconfig for build directive in a service. By default, it uses the remote repo for the current git branch as the source repo, and the current branch as the source branch for the build. You can specify a different source repo and branch using ``--build-repo`` and ``--build-branch`` options respectively. ```sh -$ kompose --provider openshift --file buildconfig/docker-compose.yml convert +kompose --provider openshift --file buildconfig/docker-compose.yml convert +``` + +```none WARN [foo] Service cannot be created because of missing port. INFO OpenShift Buildconfig using git@github.com:rtnpro/kompose.git::master as source. INFO OpenShift file "foo-deploymentconfig.yaml" created @@ -313,23 +333,31 @@ If you are manually pushing the OpenShift artifacts using ``oc create -f``, you Kompose supports a straightforward way to deploy your "composed" application to Kubernetes or OpenShift via `kompose up`. 
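Because `kompose up` deploys to whatever cluster your client is currently configured for, it can be worth confirming the active context before running it; a minimal check:

```shell
# Show which cluster context kompose/kubectl will talk to
kubectl config current-context
```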
+### Kubernetes `kompose up` example -### Kubernetes -```sh -$ kompose --file ./examples/docker-guestbook.yml up +```shell +kompose --file ./examples/docker-guestbook.yml up +``` + +```none We are going to create Kubernetes deployments and services for your Dockerized application. If you need different kind of resources, use the 'kompose convert' and 'kubectl apply -f' commands instead. -INFO Successfully created service: redis-master -INFO Successfully created service: redis-slave -INFO Successfully created service: frontend +INFO Successfully created service: redis-master +INFO Successfully created service: redis-slave +INFO Successfully created service: frontend INFO Successfully created deployment: redis-master INFO Successfully created deployment: redis-slave -INFO Successfully created deployment: frontend +INFO Successfully created deployment: frontend Your application has been deployed to Kubernetes. You can run 'kubectl get deployment,svc,pods' for details. +``` + +```shell +kubectl get deployment,svc,pods +``` -$ kubectl get deployment,svc,pods +```none NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE deployment.extensions/frontend 1 1 1 1 4m deployment.extensions/redis-master 1 1 1 1 4m @@ -347,14 +375,19 @@ pod/redis-master-1432129712-63jn8 1/1 Running 0 4m pod/redis-slave-2504961300-nve7b 1/1 Running 0 4m ``` -**Note**: +{{< note >}} - You must have a running Kubernetes cluster with a pre-configured kubectl context. - Only deployments and services are generated and deployed to Kubernetes. If you need different kind of resources, use the `kompose convert` and `kubectl apply -f` commands instead. +{{< /note >}} -### OpenShift -```sh -$ kompose --file ./examples/docker-guestbook.yml --provider openshift up +### OpenShift `kompose up` example + +```shell +kompose --file ./examples/docker-guestbook.yml --provider openshift up +``` + +```none We are going to create OpenShift DeploymentConfigs and Services for your Dockerized application. If you need different kind of resources, use the 'kompose convert' and 'oc create -f' commands instead. @@ -369,8 +402,13 @@ INFO Successfully created deployment: redis-master INFO Successfully created ImageStream: redis-master Your application has been deployed to OpenShift. You can run 'oc get dc,svc,is' for details. +``` -$ oc get dc,svc,is +```shell +oc get dc,svc,is +``` + +```none NAME REVISION DESIRED CURRENT TRIGGERED BY dc/frontend 0 1 0 config,image(frontend:v4) dc/redis-master 0 1 0 config,image(redis-master:e2e) @@ -385,16 +423,16 @@ is/redis-master 172.30.12.200:5000/fff/redis-master is/redis-slave 172.30.12.200:5000/fff/redis-slave v1 ``` -**Note**: - -- You must have a running OpenShift cluster with a pre-configured `oc` context (`oc login`) +{{< note >}} +You must have a running OpenShift cluster with a pre-configured `oc` context (`oc login`). +{{< /note >}} ## `kompose down` -Once you have deployed "composed" application to Kubernetes, `$ kompose down` will help you to take the application out by deleting its deployments and services. If you need to remove other resources, use the 'kubectl' command. +Once you have deployed "composed" application to Kubernetes, `kompose down` will help you to take the application out by deleting its deployments and services. If you need to remove other resources, use the 'kubectl' command. 
-```sh -$ kompose --file docker-guestbook.yml down +```shell +kompose --file docker-guestbook.yml down INFO Successfully deleted service: redis-master INFO Successfully deleted deployment: redis-master INFO Successfully deleted service: redis-slave @@ -403,16 +441,16 @@ INFO Successfully deleted service: frontend INFO Successfully deleted deployment: frontend ``` -**Note**: - -- You must have a running Kubernetes cluster with a pre-configured kubectl context. +{{< note >}} +You must have a running Kubernetes cluster with a pre-configured `kubectl` context. +{{< /note >}} ## Build and Push Docker Images Kompose supports both building and pushing Docker images. When using the `build` key within your Docker Compose file, your image will: - - Automatically be built with Docker using the `image` key specified within your file - - Be pushed to the correct Docker repository using local credentials (located at `.docker/config`) +- Automatically be built with Docker using the `image` key specified within your file +- Be pushed to the correct Docker repository using local credentials (located at `.docker/config`) Using an [example Docker Compose file](https://raw.githubusercontent.com/kubernetes/kompose/master/examples/buildconfig/docker-compose.yml): @@ -428,7 +466,7 @@ services: Using `kompose up` with a `build` key: ```none -$ kompose up +kompose up INFO Build key detected. Attempting to build and push image 'docker.io/foo/bar' INFO Building image 'docker.io/foo/bar' from directory 'build' INFO Image 'docker.io/foo/bar' from directory 'build' built successfully @@ -448,10 +486,10 @@ In order to disable the functionality, or choose to use BuildConfig generation ( ```sh # Disable building/pushing Docker images -$ kompose up --build none +kompose up --build none # Generate Build Config artifacts for OpenShift -$ kompose up --provider openshift --build build-config +kompose up --provider openshift --build build-config ``` ## Alternative Conversions @@ -459,45 +497,54 @@ $ kompose up --provider openshift --build build-config The default `kompose` transformation will generate Kubernetes [Deployments](/docs/concepts/workloads/controllers/deployment/) and [Services](/docs/concepts/services-networking/service/), in yaml format. You have alternative option to generate json with `-j`. Also, you can alternatively generate [Replication Controllers](/docs/concepts/workloads/controllers/replicationcontroller/) objects, [Daemon Sets](/docs/concepts/workloads/controllers/daemonset/), or [Helm](https://github.com/helm/helm) charts. ```sh -$ kompose convert -j +kompose convert -j INFO Kubernetes file "redis-svc.json" created INFO Kubernetes file "web-svc.json" created INFO Kubernetes file "redis-deployment.json" created INFO Kubernetes file "web-deployment.json" created ``` + The `*-deployment.json` files contain the Deployment objects. ```sh -$ kompose convert --replication-controller +kompose convert --replication-controller INFO Kubernetes file "redis-svc.yaml" created INFO Kubernetes file "web-svc.yaml" created INFO Kubernetes file "redis-replicationcontroller.yaml" created INFO Kubernetes file "web-replicationcontroller.yaml" created ``` -The `*-replicationcontroller.yaml` files contain the Replication Controller objects. If you want to specify replicas (default is 1), use `--replicas` flag: `$ kompose convert --replication-controller --replicas 3` +The `*-replicationcontroller.yaml` files contain the Replication Controller objects. 
If you want to specify replicas (default is 1), use `--replicas` flag: `kompose convert --replication-controller --replicas 3` -```sh -$ kompose convert --daemon-set +```shell +kompose convert --daemon-set INFO Kubernetes file "redis-svc.yaml" created INFO Kubernetes file "web-svc.yaml" created INFO Kubernetes file "redis-daemonset.yaml" created INFO Kubernetes file "web-daemonset.yaml" created ``` -The `*-daemonset.yaml` files contain the Daemon Set objects +The `*-daemonset.yaml` files contain the DaemonSet objects -If you want to generate a Chart to be used with [Helm](https://github.com/kubernetes/helm) simply do: +If you want to generate a Chart to be used with [Helm](https://github.com/kubernetes/helm) run: -```sh -$ kompose convert -c +```shell +kompose convert -c +``` + +```none INFO Kubernetes file "web-svc.yaml" created INFO Kubernetes file "redis-svc.yaml" created INFO Kubernetes file "web-deployment.yaml" created INFO Kubernetes file "redis-deployment.yaml" created chart created in "./docker-compose/" +``` -$ tree docker-compose/ +```shell +tree docker-compose/ +``` + +```none docker-compose ├── Chart.yaml ├── README.md @@ -578,7 +625,7 @@ If you want to create normal pods without controllers you can use `restart` cons | `no` | Pod | `Never` | {{< note >}} -The controller object could be `deployment` or `replicationcontroller`, etc. +The controller object could be `deployment` or `replicationcontroller`. {{< /note >}} For example, the `pival` service will become pod down here. This container calculated value of `pi`. @@ -593,7 +640,7 @@ services: restart: "on-failure" ``` -### Warning about Deployment Config's +### Warning about Deployment Configurations If the Docker Compose file has a volume specified for a service, the Deployment (Kubernetes) or DeploymentConfig (OpenShift) strategy is changed to "Recreate" instead of "RollingUpdate" (default). This is done to avoid multiple instances of a service from accessing a volume at the same time. @@ -606,5 +653,3 @@ Please note that changing service name might break some `docker-compose` files. Kompose supports Docker Compose versions: 1, 2 and 3. We have limited support on versions 2.1 and 3.2 due to their experimental nature. A full list on compatibility between all three versions is listed in our [conversion document](https://github.com/kubernetes/kompose/blob/master/docs/conversion.md) including a list of all incompatible Docker Compose keys. - - diff --git a/content/en/docs/tasks/debug-application-cluster/audit.md b/content/en/docs/tasks/debug-application-cluster/audit.md index ab4c0c75f70dd..c44caf66b59e1 100644 --- a/content/en/docs/tasks/debug-application-cluster/audit.md +++ b/content/en/docs/tasks/debug-application-cluster/audit.md @@ -46,7 +46,9 @@ Each request can be recorded with an associated _stage_. The defined stages are: - `Panic` - Events generated when a panic occurred. {{< note >}} -Audit events are different from the +The configuration of an +[Audit Event configuration](/docs/reference/config-api/apiserver-audit.v1/#audit-k8s-io-v1-Event) +is different from the [Event](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#event-v1-core) API object. {{< /note >}} @@ -59,7 +61,7 @@ Memory consumption depends on the audit logging configuration. Audit policy defines rules about what events should be recorded and what data they should include. 
The audit policy object structure is defined in the -[`audit.k8s.io` API group](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/types.go). +[`audit.k8s.io` API group](/docs/reference/config-api/apiserver-audit.v1/#audit-k8s-io-v1-Policy). When an event is processed, it's compared against the list of rules in order. The first matching rule sets the _audit level_ of the event. The defined audit levels are: @@ -95,6 +97,9 @@ If you're crafting your own audit profile, you can use the audit profile for Goo [configure-helper.sh](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh) script, which generates an audit policy file. You can see most of the audit policy file by looking directly at the script. +You can also refer to the [`Policy` configuration reference](/docs/reference/config-api/apiserver-audit.v1/#audit-k8s-io-v1-Policy) +for details about the fields defined. + ## Audit backends Audit backends persist audit events to an external storage. @@ -104,9 +109,7 @@ Out of the box, the kube-apiserver provides two backends: - Webhook backend, which sends events to an external HTTP API In all cases, audit events follow a structure defined by the Kubernetes API in the -`audit.k8s.io` API group. For Kubernetes {{< param "fullversion" >}}, that -API is at version -[`v1`](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/types.go). +[`audit.k8s.io` API group](/docs/reference/config-api/apiserver-audit.v1/#audit-k8s-io-v1-Event). {{< note >}} In case of patches, request body is a JSON array with patch operations, not a JSON object @@ -174,8 +177,6 @@ and finally configure the `hostPath`: ``` - - ### Webhook backend The webhook audit backend sends audit events to a remote web API, which is assumed to @@ -250,3 +251,7 @@ By default truncate is disabled in both `webhook` and `log`, a cluster administr ## {{% heading "whatsnext" %}} * Learn about [Mutating webhook auditing annotations](/docs/reference/access-authn-authz/extensible-admission-controllers/#mutating-webhook-auditing-annotations). +* Learn more about [`Event`](/docs/reference/config-api/apiserver-audit.v1/#audit-k8s-io-v1-Event) + and the [`Policy`](/docs/reference/config-api/apiserver-audit.v1/#audit-k8s-io-v1-Policy) + resource types by reading the Audit configuration reference. + diff --git a/content/en/docs/tasks/debug-application-cluster/debug-application-introspection.md b/content/en/docs/tasks/debug-application-cluster/debug-application-introspection.md index 730b9fb00cda4..03ba9d2c025d7 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-application-introspection.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-application-introspection.md @@ -177,7 +177,7 @@ kubectl describe pod nginx-deployment-1370807587-fz9sd Here you can see the event generated by the scheduler saying that the Pod failed to schedule for reason `FailedScheduling` (and possibly others). The message tells us that there were not enough resources for the Pod on any of the nodes. -To correct this situation, you can use `kubectl scale` to update your Deployment to specify four or fewer replicas. (Or you could just leave the one Pod pending, which is harmless.) +To correct this situation, you can use `kubectl scale` to update your Deployment to specify four or fewer replicas. 
(Or you could leave the one Pod pending, which is harmless.) Events such as the ones you saw at the end of `kubectl describe pod` are persisted in etcd and provide high-level information on what is happening in the cluster. To list all events you can use diff --git a/content/en/docs/tasks/debug-application-cluster/debug-application.md b/content/en/docs/tasks/debug-application-cluster/debug-application.md index f665accb18f3b..477c9b8248135 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-application.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-application.md @@ -140,24 +140,11 @@ kubectl get pods --selector=name=nginx,type=frontend ``` to list pods that match this selector. Verify that the list matches the Pods that you expect to provide your Service. - -If the list of pods matches expectations, but your endpoints are still empty, it's possible that you don't -have the right ports exposed. If your service has a `containerPort` specified, but the Pods that are -selected don't have that port listed, then they won't be added to the endpoints list. - Verify that the pod's `containerPort` matches up with the Service's `targetPort` #### Network traffic is not forwarded -If you can connect to the service, but the connection is immediately dropped, and there are endpoints -in the endpoints list, it's likely that the proxy can't contact your pods. - -There are three things to -check: - - * Are your pods working correctly? Look for restart count, and [debug pods](#debugging-pods). - * Can you connect to your pods directly? Get the IP address for the Pod, and try to connect directly to that IP. - * Is your application serving on the port that you configured? Kubernetes doesn't do port remapping, so if your application serves on 8080, the `containerPort` field needs to be 8080. +Please see [debugging service](/docs/tasks/debug-application-cluster/debug-service/) for more information. ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/tasks/debug-application-cluster/debug-cluster.md b/content/en/docs/tasks/debug-application-cluster/debug-cluster.md index fdde133345f09..391efe4376adf 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-cluster.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-cluster.md @@ -102,7 +102,7 @@ This is an incomplete list of things that could go wrong, and how to adjust your - Action: Use IaaS providers reliable storage (e.g. GCE PD or AWS EBS volume) for VMs with apiserver+etcd - Mitigates: Apiserver backing storage lost -- Action: Use [high-availability](/docs/admin/high-availability) configuration +- Action: Use [high-availability](/docs/setup/production-environment/tools/kubeadm/high-availability/) configuration - Mitigates: Control plane node shutdown or control plane components (scheduler, API server, controller-manager) crashing - Will tolerate one or more simultaneous node or component failures - Mitigates: API server backing storage (i.e., etcd's data directory) lost diff --git a/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md b/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md index 8a972e13651f7..c99182b854db0 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md @@ -57,7 +57,7 @@ case you can try several things: will never be scheduled. 
You can check node capacities with the `kubectl get nodes -o ` - command. Here are some example command lines that extract just the necessary + command. Here are some example command lines that extract the necessary information: ```shell diff --git a/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md b/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md index 54e474429c395..59a83e87c7107 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md @@ -99,7 +99,7 @@ kubectl run ephemeral-demo --image=k8s.gcr.io/pause:3.1 --restart=Never ``` The examples in this section use the `pause` container image because it does not -contain userland debugging utilities, but this method works with all container +contain debugging utilities, but this method works with all container images. If you attempt to use `kubectl exec` to create a shell you will see an error diff --git a/content/en/docs/tasks/debug-application-cluster/debug-service.md b/content/en/docs/tasks/debug-application-cluster/debug-service.md index 4ee9d6f490af3..3b3b1c60819a2 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-service.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-service.md @@ -18,10 +18,10 @@ you to figure out what's going wrong. ## Running commands in a Pod For many steps here you will want to see what a Pod running in the cluster -sees. The simplest way to do this is to run an interactive alpine Pod: +sees. The simplest way to do this is to run an interactive busybox Pod: ```none -kubectl run -it --rm --restart=Never alpine --image=alpine sh +kubectl run -it --rm --restart=Never busybox --image=gcr.io/google-containers/busybox sh ``` {{< note >}} @@ -111,7 +111,7 @@ kubectl get pods -l app=hostnames \ 10.244.0.7 ``` -The example container used for this walk-through simply serves its own hostname +The example container used for this walk-through serves its own hostname via HTTP on port 9376, but if you are debugging your own app, you'll want to use whatever port number your Pods are listening on. @@ -178,7 +178,7 @@ kubectl expose deployment hostnames --port=80 --target-port=9376 service/hostnames exposed ``` -And read it back, just to be sure: +And read it back: ```shell kubectl get svc hostnames @@ -421,14 +421,13 @@ Earlier you saw that the Pods were running. You can re-check that: kubectl get pods -l app=hostnames ``` ```none -NAME READY STATUS RESTARTS AGE +NAME READY STATUS RESTARTS AGE hostnames-632524106-bbpiw 1/1 Running 0 1h hostnames-632524106-ly40y 1/1 Running 0 1h hostnames-632524106-tlaok 1/1 Running 0 1h ``` -The `-l app=hostnames` argument is a label selector - just like our Service -has. +The `-l app=hostnames` argument is a label selector configured on the Service. The "AGE" column says that these Pods are about an hour old, which implies that they are running fine and not crashing. @@ -607,7 +606,7 @@ iptables-save | grep hostnames -A KUBE-PORTALS-HOST -d 10.0.1.175/32 -p tcp -m comment --comment "default/hostnames:default" -m tcp --dport 80 -j DNAT --to-destination 10.240.115.247:48577 ``` -There should be 2 rules for each port of your Service (just one in this +There should be 2 rules for each port of your Service (only one in this example) - a "KUBE-PORTALS-CONTAINER" and a "KUBE-PORTALS-HOST". 
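If you are unsure which proxy mode your kube-proxy is actually running in, one way to check (assuming you can run commands on the node, and that kube-proxy serves its status on the default port 10249):

```shell
# Ask kube-proxy which mode it is using; typical answers are
# "iptables", "ipvs" or "userspace".
curl localhost:10249/proxyMode
```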
Almost nobody should be using the "userspace" mode any more, so you won't spend diff --git a/content/en/docs/tasks/debug-application-cluster/events-stackdriver.md b/content/en/docs/tasks/debug-application-cluster/events-stackdriver.md deleted file mode 100644 index 859c163307eb0..0000000000000 --- a/content/en/docs/tasks/debug-application-cluster/events-stackdriver.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -reviewers: -- piosz -- x13n -content_type: concept -title: Events in Stackdriver ---- - - - -Kubernetes events are objects that provide insight into what is happening -inside a cluster, such as what decisions were made by scheduler or why some -pods were evicted from the node. You can read more about using events -for debugging your application in the [Application Introspection and Debugging -](/docs/tasks/debug-application-cluster/debug-application-introspection/) -section. - -Since events are API objects, they are stored in the apiserver on master. To -avoid filling up master's disk, a retention policy is enforced: events are -removed one hour after the last occurrence. To provide longer history -and aggregation capabilities, a third party solution should be installed -to capture events. - -This article describes a solution that exports Kubernetes events to -Stackdriver Logging, where they can be processed and analyzed. - -{{< note >}} -It is not guaranteed that all events happening in a cluster will be -exported to Stackdriver. One possible scenario when events will not be -exported is when event exporter is not running (e.g. during restart or -upgrade). In most cases it's fine to use events for purposes like setting up -[metrics](https://cloud.google.com/logging/docs/logs-based-metrics/) and [alerts](https://cloud.google.com/logging/docs/logs-based-metrics/charts-and-alerts), but you should be aware -of the potential inaccuracy. -{{< /note >}} - - - - - - - -## Deployment - -### Google Kubernetes Engine - -In Google Kubernetes Engine, if cloud logging is enabled, event exporter -is deployed by default to the clusters with master running version 1.7 and -higher. To prevent disturbing your workloads, event exporter does not have -resources set and is in the best effort QOS class, which means that it will -be the first to be killed in the case of resource starvation. If you want -your events to be exported, make sure you have enough resources to facilitate -the event exporter pod. This may vary depending on the workload, but on -average, approximately 100Mb RAM and 100m CPU is needed. - -### Deploying to the Existing Cluster - -Deploy event exporter to your cluster using the following command: - -```shell -kubectl apply -f https://k8s.io/examples/debug/event-exporter.yaml -``` - -Since event exporter accesses the Kubernetes API, it requires permissions to -do so. The following deployment is configured to work with RBAC -authorization. It sets up a service account and a cluster role binding -to allow event exporter to read events. To make sure that event exporter -pod will not be evicted from the node, you can additionally set up resource -requests. As mentioned earlier, 100Mb RAM and 100m CPU should be enough. - -{{< codenew file="debug/event-exporter.yaml" >}} - -## User Guide - -Events are exported to the `GKE Cluster` resource in Stackdriver Logging. 
-You can find them by selecting an appropriate option from a drop-down menu -of available resources: - -Events location in the Stackdriver Logging interface - -You can filter based on the event object fields using Stackdriver Logging -[filtering mechanism](https://cloud.google.com/logging/docs/view/advanced_filters). -For example, the following query will show events from the scheduler -about pods from deployment `nginx-deployment`: - -``` -resource.type="gke_cluster" -jsonPayload.kind="Event" -jsonPayload.source.component="default-scheduler" -jsonPayload.involvedObject.name:"nginx-deployment" -``` - -{{< figure src="/images/docs/stackdriver-event-exporter-filter.png" alt="Filtered events in the Stackdriver Logging interface" width="500" >}} - - diff --git a/content/en/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md b/content/en/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md deleted file mode 100644 index 17e2ac45a26de..0000000000000 --- a/content/en/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -reviewers: -- piosz -- x13n -content_type: concept -title: Logging Using Elasticsearch and Kibana ---- - - - -On the Google Compute Engine (GCE) platform, the default logging support targets -[Stackdriver Logging](https://cloud.google.com/logging/), which is described in detail -in the [Logging With Stackdriver Logging](/docs/tasks/debug-application-cluster/logging-stackdriver). - -This article describes how to set up a cluster to ingest logs into -[Elasticsearch](https://www.elastic.co/products/elasticsearch) and view -them using [Kibana](https://www.elastic.co/products/kibana), as an alternative to -Stackdriver Logging when running on GCE. - -{{< note >}} -You cannot automatically deploy Elasticsearch and Kibana in the Kubernetes cluster hosted on Google Kubernetes Engine. You have to deploy them manually. -{{< /note >}} - - - - - -To use Elasticsearch and Kibana for cluster logging, you should set the -following environment variable as shown below when creating your cluster with -kube-up.sh: - -```shell -KUBE_LOGGING_DESTINATION=elasticsearch -``` - -You should also ensure that `KUBE_ENABLE_NODE_LOGGING=true` (which is the default for the GCE platform). - -Now, when you create a cluster, a message will indicate that the Fluentd log -collection daemons that run on each node will target Elasticsearch: - -```shell -cluster/kube-up.sh -``` -``` -... -Project: kubernetes-satnam -Zone: us-central1-b -... calling kube-up -Project: kubernetes-satnam -Zone: us-central1-b -+++ Staging server tars to Google Storage: gs://kubernetes-staging-e6d0e81793/devel -+++ kubernetes-server-linux-amd64.tar.gz uploaded (sha1 = 6987c098277871b6d69623141276924ab687f89d) -+++ kubernetes-salt.tar.gz uploaded (sha1 = bdfc83ed6b60fa9e3bff9004b542cfc643464cd0) -Looking for already existing resources -Starting master and configuring firewalls -Created [https://www.googleapis.com/compute/v1/projects/kubernetes-satnam/zones/us-central1-b/disks/kubernetes-master-pd]. -NAME ZONE SIZE_GB TYPE STATUS -kubernetes-master-pd us-central1-b 20 pd-ssd READY -Created [https://www.googleapis.com/compute/v1/projects/kubernetes-satnam/regions/us-central1/addresses/kubernetes-master-ip]. -+++ Logging using Fluentd to elasticsearch -``` - -The per-node Fluentd pods, the Elasticsearch pods, and the Kibana pods should -all be running in the kube-system namespace soon after the cluster comes to -life. 
- -```shell -kubectl get pods --namespace=kube-system -``` -``` -NAME READY STATUS RESTARTS AGE -elasticsearch-logging-v1-78nog 1/1 Running 0 2h -elasticsearch-logging-v1-nj2nb 1/1 Running 0 2h -fluentd-elasticsearch-kubernetes-node-5oq0 1/1 Running 0 2h -fluentd-elasticsearch-kubernetes-node-6896 1/1 Running 0 2h -fluentd-elasticsearch-kubernetes-node-l1ds 1/1 Running 0 2h -fluentd-elasticsearch-kubernetes-node-lz9j 1/1 Running 0 2h -kibana-logging-v1-bhpo8 1/1 Running 0 2h -kube-dns-v3-7r1l9 3/3 Running 0 2h -monitoring-heapster-v4-yl332 1/1 Running 1 2h -monitoring-influx-grafana-v1-o79xf 2/2 Running 0 2h -``` - -The `fluentd-elasticsearch` pods gather logs from each node and send them to -the `elasticsearch-logging` pods, which are part of a -[service](/docs/concepts/services-networking/service/) named `elasticsearch-logging`. These -Elasticsearch pods store the logs and expose them via a REST API. -The `kibana-logging` pod provides a web UI for reading the logs stored in -Elasticsearch, and is part of a service named `kibana-logging`. - -The Elasticsearch and Kibana services are both in the `kube-system` namespace -and are not directly exposed via a publicly reachable IP address. To reach them, -follow the instructions for -[Accessing services running in a cluster](/docs/tasks/access-application-cluster/access-cluster/#accessing-services-running-on-the-cluster). - -If you try accessing the `elasticsearch-logging` service in your browser, you'll -see a status page that looks something like this: - -![Elasticsearch Status](/images/docs/es-browser.png) - -You can now type Elasticsearch queries directly into the browser, if you'd -like. See [Elasticsearch's documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-uri-request.html) -for more details on how to do so. - -Alternatively, you can view your cluster's logs using Kibana (again using the -[instructions for accessing a service running in the cluster](/docs/tasks/access-application-cluster/access-cluster/#accessing-services-running-on-the-cluster)). -The first time you visit the Kibana URL you will be presented with a page that -asks you to configure your view of the ingested logs. Select the option for -timeseries values and select `@timestamp`. On the following page select the -`Discover` tab and then you should be able to see the ingested logs. -You can set the refresh interval to 5 seconds to have the logs -regularly refreshed. - -Here is a typical view of ingested logs from the Kibana viewer: - -![Kibana logs](/images/docs/kibana-logs.png) - - - -## {{% heading "whatsnext" %}} - - -Kibana opens up all sorts of powerful options for exploring your logs! For some -ideas on how to dig into it, check out [Kibana's documentation](https://www.elastic.co/guide/en/kibana/current/discover.html). - - diff --git a/content/en/docs/tasks/debug-application-cluster/logging-stackdriver.md b/content/en/docs/tasks/debug-application-cluster/logging-stackdriver.md index 1703bbbe428d7..29ace662f6065 100644 --- a/content/en/docs/tasks/debug-application-cluster/logging-stackdriver.md +++ b/content/en/docs/tasks/debug-application-cluster/logging-stackdriver.md @@ -294,9 +294,9 @@ a running cluster in the [Deploying section](#deploying). ### Changing `DaemonSet` parameters -When you have the Stackdriver Logging `DaemonSet` in your cluster, you can just modify the -`template` field in its spec, daemonset controller will update the pods for you. 
For example, -let's assume you've just installed the Stackdriver Logging as described above. Now you want to +When you have the Stackdriver Logging `DaemonSet` in your cluster, you can modify the +`template` field in its spec. The DaemonSet controller manages the pods for you. +For example, assume you've installed the Stackdriver Logging as described above. Now you want to change the memory limit to give fluentd more memory to safely process more logs. Get the spec of `DaemonSet` running in your cluster: diff --git a/content/en/docs/tasks/extend-kubectl/kubectl-plugins.md b/content/en/docs/tasks/extend-kubectl/kubectl-plugins.md index bec423043d7ce..28fd615b459c8 100644 --- a/content/en/docs/tasks/extend-kubectl/kubectl-plugins.md +++ b/content/en/docs/tasks/extend-kubectl/kubectl-plugins.md @@ -12,20 +12,15 @@ content_type: task This guide demonstrates how to install and write extensions for [kubectl](/docs/reference/kubectl/kubectl/). By thinking of core `kubectl` commands as essential building blocks for interacting with a Kubernetes cluster, a cluster administrator can think of plugins as a means of utilizing these building blocks to create more complex behavior. Plugins extend `kubectl` with new sub-commands, allowing for new and custom features not included in the main distribution of `kubectl`. - - ## {{% heading "prerequisites" %}} - You need to have a working `kubectl` binary installed. - - ## Installing kubectl plugins -A plugin is nothing more than a standalone executable file, whose name begins with `kubectl-`. To install a plugin, simply move its executable file to anywhere on your `PATH`. +A plugin is a standalone executable file, whose name begins with `kubectl-`. To install a plugin, move its executable file to anywhere on your `PATH`. You can also discover and install kubectl plugins available in the open source using [Krew](https://krew.dev/). Krew is a plugin manager maintained by @@ -60,9 +55,9 @@ You can write a plugin in any programming language or script that allows you to There is no plugin installation or pre-loading required. Plugin executables receive the inherited environment from the `kubectl` binary. -A plugin determines which command path it wishes to implement based on its name. For -example, a plugin wanting to provide a new command `kubectl foo`, would simply be named -`kubectl-foo`, and live somewhere in your `PATH`. +A plugin determines which command path it wishes to implement based on its name. +For example, a plugin named `kubectl-foo` provides a command `kubectl foo`. You must +install the plugin executable somewhere in your `PATH`. 
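+
+As a rough sketch of how executable names map to command paths (these `foo` plugins are hypothetical examples, not tools that ship with `kubectl`):
+
+```shell
+# Executable found on PATH    Command it implements
+# kubectl-foo              -> kubectl foo
+# kubectl-foo-bar          -> kubectl foo bar
+# kubectl-foo_bar          -> kubectl foo-bar (an underscore in the filename
+#                             can be invoked with a dash or an underscore)
+```
+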
### Example plugin @@ -88,32 +83,34 @@ echo "I am a plugin named kubectl-foo" ### Using a plugin -To use the above plugin, simply make it executable: +To use a plugin, make the plugin executable: -``` +```shell sudo chmod +x ./kubectl-foo ``` and place it anywhere in your `PATH`: -``` +```shell sudo mv ./kubectl-foo /usr/local/bin ``` You may now invoke your plugin as a `kubectl` command: -``` +```shell kubectl foo ``` + ``` I am a plugin named kubectl-foo ``` All args and flags are passed as-is to the executable: -``` +```shell kubectl foo version ``` + ``` 1.0.0 ``` @@ -124,6 +121,7 @@ All environment variables are also passed as-is to the executable: export KUBECONFIG=~/.kube/config kubectl foo config ``` + ``` /home//.kube/config ``` @@ -131,6 +129,7 @@ kubectl foo config ```shell KUBECONFIG=/etc/kube/config kubectl foo config ``` + ``` /etc/kube/config ``` @@ -376,16 +375,11 @@ set up a build environment (if it needs compiling), and deploy the plugin. If you also make compiled packages available, or use Krew, that will make installs easier. - - ## {{% heading "whatsnext" %}} - * Check the Sample CLI Plugin repository for a [detailed example](https://github.com/kubernetes/sample-cli-plugin) of a plugin written in Go. In case of any questions, feel free to reach out to the [SIG CLI team](https://github.com/kubernetes/community/tree/master/sig-cli). * Read about [Krew](https://krew.dev/), a package manager for kubectl plugins. - - diff --git a/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md b/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md index b0e272afa0c83..7ad7072fd74fc 100644 --- a/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md +++ b/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md @@ -12,7 +12,7 @@ weight: 20 Kubernetes ships with a default scheduler that is described [here](/docs/reference/command-line-tools-reference/kube-scheduler/). If the default scheduler does not suit your needs you can implement your own scheduler. -Not just that, you can even run multiple schedulers simultaneously alongside the default +Moreover, you can even run multiple schedulers simultaneously alongside the default scheduler and instruct Kubernetes what scheduler to use for each of your pods. Let's learn how to run multiple schedulers in Kubernetes with an example. @@ -30,7 +30,7 @@ in the Kubernetes source directory for a canonical example. ## Package the scheduler Package your scheduler binary into a container image. For the purposes of this example, -let's just use the default scheduler (kube-scheduler) as our second scheduler as well. +you can use the default scheduler (kube-scheduler) as your second scheduler. Clone the [Kubernetes source code from GitHub](https://github.com/kubernetes/kubernetes) and build the source. @@ -61,9 +61,9 @@ gcloud docker -- push gcr.io/my-gcp-project/my-kube-scheduler:1.0 ## Define a Kubernetes Deployment for the scheduler -Now that we have our scheduler in a container image, we can just create a pod -config for it and run it in our Kubernetes cluster. But instead of creating a pod -directly in the cluster, let's use a [Deployment](/docs/concepts/workloads/controllers/deployment/) +Now that you have your scheduler in a container image, create a pod +configuration for it and run it in your Kubernetes cluster. 
But instead of creating a pod +directly in the cluster, you can use a [Deployment](/docs/concepts/workloads/controllers/deployment/) for this example. A [Deployment](/docs/concepts/workloads/controllers/deployment/) manages a [Replica Set](/docs/concepts/workloads/controllers/replicaset/) which in turn manages the pods, thereby making the scheduler resilient to failures. Here is the deployment @@ -83,7 +83,7 @@ detailed description of other command line arguments. ## Run the second scheduler in the cluster -In order to run your scheduler in a Kubernetes cluster, just create the deployment +In order to run your scheduler in a Kubernetes cluster, create the deployment specified in the config above in a Kubernetes cluster: ```shell @@ -132,9 +132,9 @@ kubectl edit clusterrole system:kube-scheduler ## Specify schedulers for pods -Now that our second scheduler is running, let's create some pods, and direct them -to be scheduled by either the default scheduler or the one we just deployed. -In order to schedule a given pod using a specific scheduler, we specify the name of the +Now that your second scheduler is running, create some pods, and direct them +to be scheduled by either the default scheduler or the one you deployed. +In order to schedule a given pod using a specific scheduler, specify the name of the scheduler in that pod spec. Let's look at three examples. - Pod spec without any scheduler name @@ -196,10 +196,13 @@ while the other two pods get scheduled. Once we submit the scheduler deployment and our new scheduler starts running, the `annotation-second-scheduler` pod gets scheduled as well. -Alternatively, one could just look at the "Scheduled" entries in the event logs to +Alternatively, you can look at the "Scheduled" entries in the event logs to verify that the pods were scheduled by the desired schedulers. ```shell kubectl get events ``` +You can also use a [custom scheduler configuration](/docs/reference/scheduling/config/#multiple-profiles) +or a custom container image for the cluster's main scheduler by modifying its static pod manifest +on the relevant control plane nodes. diff --git a/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md index 71ca43d53048f..1e21306e5f2be 100644 --- a/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md +++ b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md @@ -72,7 +72,7 @@ after upgrading the objects to a new stored version. Removing an old version: 1. Ensure all clients are fully migrated to the new version. The kube-apiserver - logs can reviewed to help identify any clients that are still accessing via + logs can be reviewed to help identify any clients that are still accessing via the old version. 1. Set `served` to `false` for the old version in the `spec.versions` list. If any clients are still unexpectedly using the old version they may begin reporting @@ -404,7 +404,7 @@ how to [authenticate API servers](/docs/reference/access-authn-authz/extensible- A conversion webhook must not mutate anything inside of `metadata` of the converted object other than `labels` and `annotations`. Attempted changes to `name`, `UID` and `namespace` are rejected and fail the request -which caused the conversion. All other changes are just ignored. +which caused the conversion. All other changes are ignored. 
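+
+To make these rules concrete, here is a minimal sketch of a conversion response that respects them. All field values are illustrative, and the message is shown as YAML for readability even though conversion webhooks exchange JSON:
+
+```yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: ConversionReview
+response:
+  # uid must echo request.uid from the ConversionReview that was received
+  uid: "705ab4f5-6393-11e8-b7cc-42010a800002"
+  result:
+    status: Success
+  convertedObjects:
+  - apiVersion: example.com/v2
+    kind: CronTab
+    metadata:
+      # name, namespace and uid are copied through unchanged;
+      # only labels and annotations may differ from the request
+      name: my-crontab
+      namespace: default
+      uid: "e55c6b64-8f43-11e8-9b30-42010a800002"
+    spec:
+      host: example.com
+```
+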
### Deploy the conversion webhook service @@ -583,14 +583,13 @@ and can optionally include a custom CA bundle to use to verify the TLS connectio The `host` should not refer to a service running in the cluster; use a service reference by specifying the `service` field instead. The host might be resolved via external DNS in some apiservers -(i.e., `kube-apiserver` cannot resolve in-cluster DNS as that would +(i.e., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this -webhook. Such installs are likely to be non-portable, i.e., not easy -to turn up in a new cluster. +webhook. Such installations are likely to be non-portable or not readily run in a new cluster. The scheme must be "https"; the URL must begin with "https://". diff --git a/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md index de9ab8181c4e9..3230b7b73aeb4 100644 --- a/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md +++ b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md @@ -520,7 +520,7 @@ CustomResourceDefinition and migrating your objects from one version to another. ### Finalizers *Finalizers* allow controllers to implement asynchronous pre-delete hooks. -Custom objects support finalizers just like built-in objects. +Custom objects support finalizers similar to built-in objects. You can add a finalizer to a custom object like this: @@ -1129,8 +1129,6 @@ resources that have the scale subresource enabled. ### Categories -{{< feature-state state="beta" for_k8s_version="v1.10" >}} - Categories is a list of grouped resources the custom resource belongs to (eg. `all`). You can use `kubectl get ` to list the resources belonging to the category. diff --git a/content/en/docs/tasks/extend-kubernetes/setup-extension-api-server.md b/content/en/docs/tasks/extend-kubernetes/setup-extension-api-server.md index 626ddcab5c751..64c41d9094a04 100644 --- a/content/en/docs/tasks/extend-kubernetes/setup-extension-api-server.md +++ b/content/en/docs/tasks/extend-kubernetes/setup-extension-api-server.md @@ -41,7 +41,7 @@ Alternatively, you can use an existing 3rd party solution, such as [apiserver-bu 1. Make sure that your extension-apiserver loads those certs from that volume and that they are used in the HTTPS handshake. 1. Create a Kubernetes service account in your namespace. 1. Create a Kubernetes cluster role for the operations you want to allow on your resources. -1. Create a Kubernetes cluster role binding from the service account in your namespace to the cluster role you just created. +1. Create a Kubernetes cluster role binding from the service account in your namespace to the cluster role you created. 1. Create a Kubernetes cluster role binding from the service account in your namespace to the `system:auth-delegator` cluster role to delegate auth decisions to the Kubernetes core API server. 1. Create a Kubernetes role binding from the service account in your namespace to the `extension-apiserver-authentication-reader` role. This allows your extension api-server to access the `extension-apiserver-authentication` configmap. 1. Create a Kubernetes apiservice. 
The CA cert above should be base64 encoded, stripped of new lines and used as the spec.caBundle in the apiservice. This should not be namespaced. If using the [kube-aggregator API](https://github.com/kubernetes/kube-aggregator/), only pass in the PEM encoded CA bundle because the base 64 encoding is done for you.
diff --git a/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md b/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md
index 9cefdca03d6f6..02677d6204abf 100644
--- a/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md
+++ b/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md
@@ -70,8 +70,9 @@ override any environment variables specified in the container image.
 {{< /note >}}
 
 {{< note >}}
-The environment variables can reference each other, and cycles are possible,
-pay attention to the order before using
+Environment variables may reference each other; however, ordering is important.
+Variables making use of others defined in the same context must come later in
+the list. Similarly, avoid circular references.
 {{< /note >}}
 
 ## Using environment variables inside of your config
diff --git a/content/en/docs/tasks/job/automated-tasks-with-cron-jobs.md b/content/en/docs/tasks/job/automated-tasks-with-cron-jobs.md
index 693e730a093ff..26c7c911345ac 100644
--- a/content/en/docs/tasks/job/automated-tasks-with-cron-jobs.md
+++ b/content/en/docs/tasks/job/automated-tasks-with-cron-jobs.md
@@ -1,6 +1,6 @@
 ---
 title: Running Automated Tasks with a CronJob
-min-kubernetes-server-version: v1.8
+min-kubernetes-server-version: v1.21
 reviewers:
 - chenopis
 content_type: task
@@ -9,6 +9,10 @@ weight: 10
 
+CronJobs were promoted to general availability in Kubernetes v1.21. If you are using an older version of
+Kubernetes, please refer to the documentation for the version of Kubernetes that you are using,
+so that you see accurate information. Older Kubernetes versions do not support the `batch/v1` CronJob API.
+
 You can use a {{< glossary_tooltip text="CronJob" term_id="cronjob" >}} to run {{< glossary_tooltip text="Jobs" term_id="job" >}} on a time-based schedule.
 These automated jobs run like [Cron](https://en.wikipedia.org/wiki/Cron) tasks on a Linux or UNIX system.
@@ -170,13 +174,12 @@ After the deadline, the cron job does not start the job.
 Jobs that do not meet their deadline in this way count as failed jobs.
 If this field is not specified, the jobs have no deadline.
 
-The CronJob controller counts how many missed schedules happen for a cron job. If there are more than 100 missed schedules, the cron job is no longer scheduled. When `.spec.startingDeadlineSeconds` is not set, the CronJob controller counts missed schedules from `status.lastScheduleTime` until now.
-
-For example, one cron job is supposed to run every minute, the `status.lastScheduleTime` of the cronjob is 5:00am, but now it's 7:00am. That means 120 schedules were missed, so the cron job is no longer scheduled.
-
-If the `.spec.startingDeadlineSeconds` field is set (not null), the CronJob controller counts how many missed jobs occurred from the value of `.spec.startingDeadlineSeconds` until now.
+If the `.spec.startingDeadlineSeconds` field is set (not null), the CronJob
+controller measures the time between when a job is expected to be created and
+now. If the difference is higher than that limit, it skips this execution.
-For example, if it is set to `200`, it counts how many missed schedules occurred in the last 200 seconds. In that case, if there were more than 100 missed schedules in the last 200 seconds, the cron job is no longer scheduled. +For example, if it is set to `200`, it allows a job to be created for up to 200 +seconds after the actual schedule. ### Concurrency Policy diff --git a/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md b/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md index 1bbb49a256cd3..2db5d3ecc3ea0 100644 --- a/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md +++ b/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md @@ -2,7 +2,7 @@ title: Coarse Parallel Processing Using a Work Queue min-kubernetes-server-version: v1.8 content_type: task -weight: 30 +weight: 20 --- @@ -19,7 +19,7 @@ Here is an overview of the steps in this example: 1. **Start a message queue service.** In this example, we use RabbitMQ, but you could use another one. In practice you would set up a message queue service once and reuse it for many jobs. 1. **Create a queue, and fill it with messages.** Each message represents one task to be done. In - this example, a message is just an integer that we will do a lengthy computation on. + this example, a message is an integer that we will do a lengthy computation on. 1. **Start a Job that works on tasks from the queue**. The Job starts several pods. Each pod takes one task from the message queue, processes it, and repeats until the end of the queue is reached. @@ -35,7 +35,7 @@ non-parallel, use of [Job](/docs/concepts/workloads/controllers/job/). ## Starting a message queue service -This example uses RabbitMQ, but it should be easy to adapt to another AMQP-type message service. +This example uses RabbitMQ, however, you can adapt the example to use another AMQP-type message service. In practice you could set up a message queue service once in a cluster and reuse it for many jobs, as well as for long-running services. @@ -141,13 +141,12 @@ root@temp-loe07:/# ``` In the last command, the `amqp-consume` tool takes one message (`-c 1`) -from the queue, and passes that message to the standard input of an arbitrary command. In this case, the program `cat` is just printing -out what it gets on the standard input, and the echo is just to add a carriage +from the queue, and passes that message to the standard input of an arbitrary command. In this case, the program `cat` prints out the characters read from standard input, and the echo adds a carriage return so the example is readable. ## Filling the Queue with tasks -Now let's fill the queue with some "tasks". In our example, our tasks are just strings to be +Now let's fill the queue with some "tasks". In our example, our tasks are strings to be printed. In a practice, the content of the messages might be: diff --git a/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md b/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md index 7f3c30121edec..c5d1d0fa303e3 100644 --- a/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md +++ b/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md @@ -2,7 +2,7 @@ title: Fine Parallel Processing Using a Work Queue content_type: task min-kubernetes-server-version: v1.8 -weight: 40 +weight: 30 --- @@ -21,7 +21,7 @@ Here is an overview of the steps in this example: detect when a finite-length work queue is empty. 
In practice you would set up a store such as Redis once and reuse it for the work queues of many jobs, and other things. 1. **Create a queue, and fill it with messages.** Each message represents one task to be done. In - this example, a message is just an integer that we will do a lengthy computation on. + this example, a message is an integer that we will do a lengthy computation on. 1. **Start a Job that works on tasks from the queue**. The Job starts several pods. Each pod takes one task from the message queue, processes it, and repeats until the end of the queue is reached. @@ -55,7 +55,7 @@ You could also download the following files directly: ## Filling the Queue with tasks -Now let's fill the queue with some "tasks". In our example, our tasks are just strings to be +Now let's fill the queue with some "tasks". In our example, our tasks are strings to be printed. Start a temporary interactive pod for running the Redis CLI. diff --git a/content/en/docs/tasks/job/indexed-parallel-processing-static.md b/content/en/docs/tasks/job/indexed-parallel-processing-static.md new file mode 100644 index 0000000000000..b5492eed6ea4c --- /dev/null +++ b/content/en/docs/tasks/job/indexed-parallel-processing-static.md @@ -0,0 +1,190 @@ +--- +title: Indexed Job for Parallel Processing with Static Work Assignment +content_type: task +min-kubernetes-server-version: v1.21 +weight: 30 +--- + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + + + + +In this example, you will run a Kubernetes Job that uses multiple parallel +worker processes. +Each worker is a different container running in its own Pod. The Pods have an +_index number_ that the control plane sets automatically, which allows each Pod +to identify which part of the overall task to work on. + +The pod index is available in the {{< glossary_tooltip text="annotation" term_id="annotation" >}} +`batch.kubernetes.io/job-completion-index` as a string representing its +decimal value. In order for the containerized task process to obtain this index, +you can publish the value of the annotation using the [downward API](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/#the-downward-api) +mechanism. +For convenience, the control plane automatically sets the downward API to +expose the index in the `JOB_COMPLETION_INDEX` environment variable. + +Here is an overview of the steps in this example: + +1. **Define a Job manifest using indexed completion**. + The downward API allows you to pass the pod index annotation as an + environment variable or file to the container. +2. **Start an `Indexed` Job based on that manifest**. + +## {{% heading "prerequisites" %}} + +You should already be familiar with the basic, +non-parallel, use of [Job](/docs/concepts/workloads/controllers/job/). + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + +To be able to create Indexed Jobs, make sure to enable the `IndexedJob` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +on the [API server](/docs/reference/command-line-tools-reference/kube-apiserver/) +and the [controller manager](/docs/reference/command-line-tools-reference/kube-controller-manager/). + + + +## Choose an approach + +To access the work item from the worker program, you have a few options: + +1. Read the `JOB_COMPLETION_INDEX` environment variable. The Job + {{< glossary_tooltip text="controller" term_id="controller" >}} + automatically links this variable to the annotation containing the completion + index. +1. 
Read a file that contains the completion index. +1. Assuming that you can't modify the program, you can wrap it with a script + that reads the index using any of the methods above and converts it into + something that the program can use as input. + +For this example, imagine that you chose option 3 and you want to run the +[rev](https://man7.org/linux/man-pages/man1/rev.1.html) utility. This +program accepts a file as an argument and prints its content reversed. + +```shell +rev data.txt +``` + +You'll use the `rev` tool from the +[`busybox`](https://hub.docker.com/_/busybox) container image. + +As this is only an example, each Pod only does a tiny piece of work (reversing a short +string). In a real workload you might, for example, create a Job that represents + the +task of producing 60 seconds of video based on scene data. +Each work item in the video rendering Job would be to render a particular +frame of that video clip. Indexed completion would mean that each Pod in +the Job knows which frame to render and publish, by counting frames from +the start of the clip. + +## Define an Indexed Job + +Here is a sample Job manifest that uses `Indexed` completion mode: + +{{< codenew language="yaml" file="application/job/indexed-job.yaml" >}} + +In the example above, you use the builtin `JOB_COMPLETION_INDEX` environment +variable set by the Job controller for all containers. An [init container](/docs/concepts/workloads/pods/init-containers/) +maps the index to a static value and writes it to a file that is shared with the +container running the worker through an [emptyDir volume](/docs/concepts/storage/volumes/#emptydir). +Optionally, you can [define your own environment variable through the downward +API](/docs/tasks/inject-data-application/environment-variable-expose-pod-information/) +to publish the index to containers. You can also choose to load a list of values +from a [ConfigMap as an environment variable or file](/docs/tasks/configure-pod-container/configure-pod-configmap/). + +Alternatively, you can directly [use the downward API to pass the annotation +value as a volume file](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/#store-pod-fields), +like shown in the following example: + +{{< codenew language="yaml" file="application/job/indexed-job-vol.yaml" >}} + +## Running the Job + +Now run the Job: + +```shell +# This uses the first approach (relying on $JOB_COMPLETION_INDEX) +kubectl apply -f https://kubernetes.io/examples/application/job/indexed-job.yaml +``` + +When you create this Job, the control plane creates a series of Pods, one for each index you specified. The value of `.spec.parallelism` determines how many can run at once whereas `.spec.completions` determines how many Pods the Job creates in total. + +Because `.spec.parallelism` is less than `.spec.completions`, the control plane waits for some of the first Pods to complete before starting more of them. 
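+
+If you want a smaller starting point than the example manifests above, a deliberately minimal `Indexed` Job could look like this sketch (the name, image, and command are illustrative):
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: indexed-job-sketch
+spec:
+  completions: 5          # five work items, with indexes 0 through 4
+  parallelism: 3          # at most three Pods run at the same time
+  completionMode: Indexed
+  template:
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: worker
+        image: docker.io/library/busybox
+        # The Job controller exposes the completion index to the container
+        # through the JOB_COMPLETION_INDEX environment variable.
+        command: ["sh", "-c", "echo My work item is $JOB_COMPLETION_INDEX"]
+```
+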
+ +Once you have created the Job, wait a moment then check on progress: + +```shell +kubectl describe jobs/indexed-job +``` + +The output is similar to: + +``` +Name: indexed-job +Namespace: default +Selector: controller-uid=bf865e04-0b67-483b-9a90-74cfc4c3e756 +Labels: controller-uid=bf865e04-0b67-483b-9a90-74cfc4c3e756 + job-name=indexed-job +Annotations: +Parallelism: 3 +Completions: 5 +Start Time: Thu, 11 Mar 2021 15:47:34 +0000 +Pods Statuses: 2 Running / 3 Succeeded / 0 Failed +Completed Indexes: 0-2 +Pod Template: + Labels: controller-uid=bf865e04-0b67-483b-9a90-74cfc4c3e756 + job-name=indexed-job + Init Containers: + input: + Image: docker.io/library/bash + Port: + Host Port: + Command: + bash + -c + items=(foo bar baz qux xyz) + echo ${items[$JOB_COMPLETION_INDEX]} > /input/data.txt + + Environment: + Mounts: + /input from input (rw) + Containers: + worker: + Image: docker.io/library/busybox + Port: + Host Port: + Command: + rev + /input/data.txt + Environment: + Mounts: + /input from input (rw) + Volumes: + input: + Type: EmptyDir (a temporary directory that shares a pod's lifetime) + Medium: + SizeLimit: +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 4s job-controller Created pod: indexed-job-njkjj + Normal SuccessfulCreate 4s job-controller Created pod: indexed-job-9kd4h + Normal SuccessfulCreate 4s job-controller Created pod: indexed-job-qjwsz + Normal SuccessfulCreate 1s job-controller Created pod: indexed-job-fdhq5 + Normal SuccessfulCreate 1s job-controller Created pod: indexed-job-ncslj +``` + +In this example, you run the Job with custom values for each index. You can +inspect the output of one of the pods: + +```shell +kubectl logs indexed-job-fdhq5 # Change this to match the name of a Pod from that Job +``` + + +The output is similar to: + +``` +xuq +``` \ No newline at end of file diff --git a/content/en/docs/tasks/job/parallel-processing-expansion.md b/content/en/docs/tasks/job/parallel-processing-expansion.md index e92fa9f5bb657..fdd309cafbd23 100644 --- a/content/en/docs/tasks/job/parallel-processing-expansion.md +++ b/content/en/docs/tasks/job/parallel-processing-expansion.md @@ -2,7 +2,7 @@ title: Parallel Processing using Expansions content_type: task min-kubernetes-server-version: v1.8 -weight: 20 +weight: 50 --- @@ -12,7 +12,7 @@ based on a common template. You can use this approach to process batches of work parallel. For this example there are only three items: _apple_, _banana_, and _cherry_. -The sample Jobs process each item simply by printing a string then pausing. +The sample Jobs process each item by printing a string then pausing. See [using Jobs in real workloads](#using-jobs-in-real-workloads) to learn about how this pattern fits more realistic use cases. diff --git a/content/en/docs/tasks/manage-daemon/rollback-daemon-set.md b/content/en/docs/tasks/manage-daemon/rollback-daemon-set.md index 05e8060cc9df0..704b01cc9a3ad 100644 --- a/content/en/docs/tasks/manage-daemon/rollback-daemon-set.md +++ b/content/en/docs/tasks/manage-daemon/rollback-daemon-set.md @@ -25,7 +25,7 @@ You should already know how to [perform a rolling update on a ### Step 1: Find the DaemonSet revision you want to roll back to -You can skip this step if you just want to roll back to the last revision. +You can skip this step if you only want to roll back to the last revision. 
List all revisions of a DaemonSet: diff --git a/content/en/docs/tasks/manage-daemon/update-daemon-set.md b/content/en/docs/tasks/manage-daemon/update-daemon-set.md index f9e35cb0f55c9..2f3001da0f1a3 100644 --- a/content/en/docs/tasks/manage-daemon/update-daemon-set.md +++ b/content/en/docs/tasks/manage-daemon/update-daemon-set.md @@ -111,7 +111,7 @@ kubectl edit ds/fluentd-elasticsearch -n kube-system ##### Updating only the container image -If you just need to update the container image in the DaemonSet template, i.e. +If you only need to update the container image in the DaemonSet template, i.e. `.spec.template.spec.containers[*].image`, use `kubectl set image`: ```shell @@ -167,7 +167,7 @@ If the recent DaemonSet template update is broken, for example, the container is crash looping, or the container image doesn't exist (often due to a typo), DaemonSet rollout won't progress. -To fix this, just update the DaemonSet template again. New rollout won't be +To fix this, update the DaemonSet template again. New rollout won't be blocked by previous unhealthy rollouts. #### Clock skew diff --git a/content/en/docs/tasks/manage-gpus/scheduling-gpus.md b/content/en/docs/tasks/manage-gpus/scheduling-gpus.md index 4f8fc434f9cca..997005e9ced01 100644 --- a/content/en/docs/tasks/manage-gpus/scheduling-gpus.md +++ b/content/en/docs/tasks/manage-gpus/scheduling-gpus.md @@ -37,7 +37,7 @@ When the above conditions are true, Kubernetes will expose `amd.com/gpu` or `nvidia.com/gpu` as a schedulable resource. You can consume these GPUs from your containers by requesting -`.com/gpu` just like you request `cpu` or `memory`. +`.com/gpu` the same way you request `cpu` or `memory`. However, there are some limitations in how you specify the resource requirements when using GPUs: diff --git a/content/en/docs/tasks/manage-kubernetes-objects/declarative-config.md b/content/en/docs/tasks/manage-kubernetes-objects/declarative-config.md index 75c9d56a83d91..643b57cc3b2f0 100644 --- a/content/en/docs/tasks/manage-kubernetes-objects/declarative-config.md +++ b/content/en/docs/tasks/manage-kubernetes-objects/declarative-config.md @@ -16,7 +16,7 @@ preview of what changes `apply` will make. ## {{% heading "prerequisites" %}} -Install [`kubectl`](/docs/tasks/tools/install-kubectl/). +Install [`kubectl`](/docs/tasks/tools/). {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} diff --git a/content/en/docs/tasks/manage-kubernetes-objects/imperative-command.md b/content/en/docs/tasks/manage-kubernetes-objects/imperative-command.md index a51b5664baa7c..8e0670a89f14c 100644 --- a/content/en/docs/tasks/manage-kubernetes-objects/imperative-command.md +++ b/content/en/docs/tasks/manage-kubernetes-objects/imperative-command.md @@ -12,7 +12,7 @@ explains how those commands are organized and how to use them to manage live obj ## {{% heading "prerequisites" %}} -Install [`kubectl`](/docs/tasks/tools/install-kubectl/). +Install [`kubectl`](/docs/tasks/tools/). 
 {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
diff --git a/content/en/docs/tasks/manage-kubernetes-objects/imperative-config.md b/content/en/docs/tasks/manage-kubernetes-objects/imperative-config.md
index 2b97ed271cb33..87cc423da7fdc 100644
--- a/content/en/docs/tasks/manage-kubernetes-objects/imperative-config.md
+++ b/content/en/docs/tasks/manage-kubernetes-objects/imperative-config.md
@@ -13,7 +13,7 @@ This document explains how to define and manage objects using configuration file
 
 ## {{% heading "prerequisites" %}}
 
-Install [`kubectl`](/docs/tasks/tools/install-kubectl/).
+Install [`kubectl`](/docs/tasks/tools/).
 
 {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
 
diff --git a/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md b/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md
index c1722f694a47c..a41bab673686e 100644
--- a/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md
+++ b/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md
@@ -8,7 +8,7 @@ weight: 20
 
 [Kustomize](https://github.com/kubernetes-sigs/kustomize) is a standalone tool
 to customize Kubernetes objects
-through a [kustomization file](https://kubernetes-sigs.github.io/kustomize/api-reference/glossary/#kustomization).
+through a [kustomization file](https://kubectl.docs.kubernetes.io/references/kustomize/glossary/#kustomization).
 
 Since 1.14, Kubectl also supports the management of Kubernetes objects using a kustomization file.
 
@@ -29,7 +29,7 @@ kubectl apply -k
 
 ## {{% heading "prerequisites" %}}
 
-Install [`kubectl`](/docs/tasks/tools/install-kubectl/).
+Install [`kubectl`](/docs/tasks/tools/).
 
 {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
 
@@ -114,6 +114,98 @@ metadata:
   name: example-configmap-2-g2hdhfc6tk
 ```
+
+To use a generated ConfigMap in a Deployment, reference it by the name of the configMapGenerator. Kustomize will automatically replace this name with the generated name.
+
+This is an example deployment that uses a generated ConfigMap:
+
+```shell
+# Create an application.properties file
+cat <<EOF >application.properties
+FOO=Bar
+EOF
+
+cat <<EOF >deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+  labels:
+    app: my-app
+spec:
+  selector:
+    matchLabels:
+      app: my-app
+  template:
+    metadata:
+      labels:
+        app: my-app
+    spec:
+      containers:
+      - name: app
+        image: my-app
+        volumeMounts:
+        - name: config
+          mountPath: /config
+      volumes:
+      - name: config
+        configMap:
+          name: example-configmap-1
+EOF
+
+cat <<EOF >./kustomization.yaml
+resources:
+- deployment.yaml
+configMapGenerator:
+- name: example-configmap-1
+  files:
+  - application.properties
+EOF
+```
+
+Generate the ConfigMap and Deployment:
+
+```shell
+kubectl kustomize ./
+```
+
+The generated Deployment will refer to the generated ConfigMap by name:
+
+```yaml
+apiVersion: v1
+data:
+  application.properties: |
+    FOO=Bar
+kind: ConfigMap
+metadata:
+  name: example-configmap-1-g4hk9g2ff8
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: my-app
+  name: my-app
+spec:
+  selector:
+    matchLabels:
+      app: my-app
+  template:
+    metadata:
+      labels:
+        app: my-app
+    spec:
+      containers:
+      - image: my-app
+        name: app
+        volumeMounts:
+        - mountPath: /config
+          name: config
+      volumes:
+      - configMap:
+          name: example-configmap-1-g4hk9g2ff8
+        name: config
+```
+
 #### secretGenerator
 
 You can generate Secrets from files or literal key-value pairs. To generate a Secret from a file, add an entry to the `files` list in `secretGenerator`.
 Here is an example of generating a Secret with a data item from a file:
@@ -170,6 +262,53 @@ metadata:
 type: Opaque
 ```
+
+Like ConfigMaps, generated Secrets can be used in Deployments by referring to the name of the secretGenerator:
+
+```shell
+# Create a password.txt file
+cat <<EOF >./password.txt
+username=admin
+password=secret
+EOF
+
+cat <<EOF >deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+  labels:
+    app: my-app
+spec:
+  selector:
+    matchLabels:
+      app: my-app
+  template:
+    metadata:
+      labels:
+        app: my-app
+    spec:
+      containers:
+      - name: app
+        image: my-app
+        volumeMounts:
+        - name: password
+          mountPath: /secrets
+      volumes:
+      - name: password
+        secret:
+          secretName: example-secret-1
+EOF
+
+cat <<EOF >./kustomization.yaml
+resources:
+- deployment.yaml
+secretGenerator:
+- name: example-secret-1
+  files:
+  - password.txt
+EOF
+```
+
 #### generatorOptions
 
 The generated ConfigMaps and Secrets have a content hash suffix appended. This ensures that a new ConfigMap or Secret is generated when the contents are changed. To disable the behavior of appending a suffix, one can use `generatorOptions`. Besides that, it is also possible to specify cross-cutting options for generated ConfigMaps and Secrets.
 
@@ -815,14 +954,14 @@ deployment.apps "dev-my-nginx" deleted
 | commonLabels | map[string]string | labels to add to all resources and selectors |
 | commonAnnotations | map[string]string | annotations to add to all resources |
 | resources | []string | each entry in this list must resolve to an existing resource configuration file |
-| configmapGenerator | [][ConfigMapArgs](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/kustomization.go#L99) | Each entry in this list generates a ConfigMap |
-| secretGenerator | [][SecretArgs](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/kustomization.go#L106) | Each entry in this list generates a Secret |
-| generatorOptions | [GeneratorOptions](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/kustomization.go#L109) | Modify behaviors of all ConfigMap and Secret generator |
+| configMapGenerator | [][ConfigMapArgs](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/configmapargs.go#L7) | Each entry in this list generates a ConfigMap |
+| secretGenerator | [][SecretArgs](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/secretargs.go#L7) | Each entry in this list generates a Secret |
+| generatorOptions | [GeneratorOptions](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/generatoroptions.go#L7) | Modify behaviors of all ConfigMap and Secret generators |
 | bases | []string | Each entry in this list should resolve to a directory containing a kustomization.yaml file |
 | patchesStrategicMerge | []string | Each entry in this list should resolve a strategic merge patch of a Kubernetes object |
-| patchesJson6902 | [][Json6902](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/patchjson6902.go#L8) | Each entry in this list should resolve to a Kubernetes object and a Json Patch |
-| vars | [][Var](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/var.go#L31) | Each entry is to capture text from one resource's field |
-| images | [][Image](https://github.com/kubernetes-sigs/kustomize/tree/master/api/types/image.go#L23) | Each entry is to modify the name, tags and/or digest for one
image without creating patches | +| patchesJson6902 | [][Patch](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/patch.go#L10) | Each entry in this list should resolve to a Kubernetes object and a Json Patch | +| vars | [][Var](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/var.go#L19) | Each entry is to capture text from one resource's field | +| images | [][Image](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/image.go#L8) | Each entry is to modify the name, tags and/or digest for one image without creating patches | | configurations | []string | Each entry in this list should resolve to a file containing [Kustomize transformer configurations](https://github.com/kubernetes-sigs/kustomize/tree/master/examples/transformerconfigs) | | crds | []string | Each entry in this list should resolve to an OpenAPI definition file for Kubernetes types | diff --git a/content/en/docs/tasks/network/validate-dual-stack.md b/content/en/docs/tasks/network/validate-dual-stack.md index 5e11cb3057866..bc90dea4ead0f 100644 --- a/content/en/docs/tasks/network/validate-dual-stack.md +++ b/content/en/docs/tasks/network/validate-dual-stack.md @@ -40,9 +40,10 @@ a00:100::/24 ``` There should be one IPv4 block and one IPv6 block allocated. -Validate that the node has an IPv4 and IPv6 interface detected (replace node name with a valid node from the cluster. In this example the node name is k8s-linuxpool1-34450317-0): +Validate that the node has an IPv4 and IPv6 interface detected. Replace node name with a valid node from the cluster. In this example the node name is `k8s-linuxpool1-34450317-0`: + ```shell -kubectl get nodes k8s-linuxpool1-34450317-0 -o go-template --template='{{range .status.addresses}}{{printf "%s: %s \n" .type .address}}{{end}}' +kubectl get nodes k8s-linuxpool1-34450317-0 -o go-template --template='{{range .status.addresses}}{{printf "%s: %s\n" .type .address}}{{end}}' ``` ``` Hostname: k8s-linuxpool1-34450317-0 @@ -52,9 +53,10 @@ InternalIP: 2001:1234:5678:9abc::5 ### Validate Pod addressing -Validate that a Pod has an IPv4 and IPv6 address assigned. (replace the Pod name with a valid Pod in your cluster. In this example the Pod name is pod01) +Validate that a Pod has an IPv4 and IPv6 address assigned. Replace the Pod name with a valid Pod in your cluster. In this example the Pod name is `pod01`: + ```shell -kubectl get pods pod01 -o go-template --template='{{range .status.podIPs}}{{printf "%s \n" .ip}}{{end}}' +kubectl get pods pod01 -o go-template --template='{{range .status.podIPs}}{{printf "%s\n" .ip}}{{end}}' ``` ``` 10.244.1.4 @@ -72,6 +74,7 @@ You can also validate Pod IPs using the Downward API via the `status.podIPs` fie ``` The following command prints the value of the `MY_POD_IPS` environment variable from within a container. The value is a comma separated list that corresponds to the Pod's IPv4 and IPv6 addresses. + ```shell kubectl exec -it pod01 -- set | grep MY_POD_IPS ``` diff --git a/content/en/docs/tasks/run-application/access-api-from-pod.md b/content/en/docs/tasks/run-application/access-api-from-pod.md new file mode 100644 index 0000000000000..9eb2521f7f43d --- /dev/null +++ b/content/en/docs/tasks/run-application/access-api-from-pod.md @@ -0,0 +1,111 @@ +--- +title: Accessing the Kubernetes API from a Pod +content_type: task +weight: 120 +--- + + + +This guide demonstrates how to access the Kubernetes API from within a pod. 
+ +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} + + + +## Accessing the API from within a Pod + +When accessing the API from within a Pod, locating and authenticating +to the API server are slightly different to the external client case. + +The easiest way to use the Kubernetes API from a Pod is to use +one of the official [client libraries](/docs/reference/using-api/client-libraries/). These +libraries can automatically discover the API server and authenticate. + +### Using Official Client Libraries + +From within a Pod, the recommended ways to connect to the Kubernetes API are: + + - For a Go client, use the official [Go client library](https://github.com/kubernetes/client-go/). + The `rest.InClusterConfig()` function handles API host discovery and authentication automatically. + See [an example here](https://git.k8s.io/client-go/examples/in-cluster-client-configuration/main.go). + + - For a Python client, use the official [Python client library](https://github.com/kubernetes-client/python/). + The `config.load_incluster_config()` function handles API host discovery and authentication automatically. + See [an example here](https://github.com/kubernetes-client/python/blob/master/examples/in_cluster_config.py). + + - There are a number of other libraries available, please refer to the [Client Libraries](/docs/reference/using-api/client-libraries/) page. + +In each case, the service account credentials of the Pod are used to communicate +securely with the API server. + +### Directly accessing the REST API + +While running in a Pod, the Kubernetes apiserver is accessible via a Service named +`kubernetes` in the `default` namespace. Therefore, Pods can use the +`kubernetes.default.svc` hostname to query the API server. Official client libraries +do this automatically. + +The recommended way to authenticate to the API server is with a +[service account](/docs/tasks/configure-pod-container/configure-service-account/) credential. By default, a Pod +is associated with a service account, and a credential (token) for that +service account is placed into the filesystem tree of each container in that Pod, +at `/var/run/secrets/kubernetes.io/serviceaccount/token`. + +If available, a certificate bundle is placed into the filesystem tree of each +container at `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt`, and should be +used to verify the serving certificate of the API server. + +Finally, the default namespace to be used for namespaced API operations is placed in a file +at `/var/run/secrets/kubernetes.io/serviceaccount/namespace` in each container. + +### Using kubectl proxy + +If you would like to query the API without an official client library, you can run `kubectl proxy` +as the [command](/docs/tasks/inject-data-application/define-command-argument-container/) +of a new sidecar container in the Pod. This way, `kubectl proxy` will authenticate +to the API and expose it on the `localhost` interface of the Pod, so that other containers +in the Pod can use it directly. + +### Without using a proxy + +It is possible to avoid using the kubectl proxy by passing the authentication token +directly to the API server. The internal certificate secures the connection. 
+ +```shell +# Point to the internal API server hostname +APISERVER=https://kubernetes.default.svc + +# Path to ServiceAccount token +SERVICEACCOUNT=/var/run/secrets/kubernetes.io/serviceaccount + +# Read this Pod's namespace +NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) + +# Read the ServiceAccount bearer token +TOKEN=$(cat ${SERVICEACCOUNT}/token) + +# Reference the internal certificate authority (CA) +CACERT=${SERVICEACCOUNT}/ca.crt + +# Explore the API with TOKEN +curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -X GET ${APISERVER}/api +``` + +The output will be similar to this: + +```json +{ + "kind": "APIVersions", + "versions": [ + "v1" + ], + "serverAddressByClientCIDRs": [ + { + "clientCIDR": "0.0.0.0/0", + "serverAddress": "10.0.1.149:443" + } + ] +} +``` diff --git a/content/en/docs/tasks/run-application/configure-pdb.md b/content/en/docs/tasks/run-application/configure-pdb.md index 3823cac4ee934..3110c2f807dff 100644 --- a/content/en/docs/tasks/run-application/configure-pdb.md +++ b/content/en/docs/tasks/run-application/configure-pdb.md @@ -2,11 +2,12 @@ title: Specifying a Disruption Budget for your Application content_type: task weight: 110 +min-kubernetes-server-version: v1.21 --- -{{< feature-state for_k8s_version="v1.5" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} This page shows how to limit the number of concurrent disruptions that your application experiences, allowing for higher availability @@ -17,6 +18,8 @@ nodes. ## {{% heading "prerequisites" %}} +{{< version-check >}} + * You are the owner of an application running on a Kubernetes cluster that requires high availability. * You should know how to deploy [Replicated Stateless Applications](/docs/tasks/run-application/run-stateless-application-deployment/) @@ -112,9 +115,9 @@ of the number of pods from that set that can be unavailable after the eviction. It can be either an absolute number or a percentage. {{< note >}} -For versions 1.8 and earlier: When creating a `PodDisruptionBudget` -object using the `kubectl` command line tool, the `minAvailable` field has a -default value of 1 if neither `minAvailable` nor `maxUnavailable` is specified. +The behavior for an empty selector differs between the policy/v1beta1 and policy/v1 APIs for +PodDisruptionBudgets. For policy/v1beta1 an empty selector matches zero pods, while +for policy/v1 an empty selector matches every pod in the namespace. {{< /note >}} You can specify only one of `maxUnavailable` and `minAvailable` in a single `PodDisruptionBudget`. 
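+
+As a concrete illustration of the note above, this sketch (the name is illustrative) shows a `policy/v1` PodDisruptionBudget with an empty selector, which therefore covers every Pod in its namespace:
+
+```yaml
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: pdb-sketch
+spec:
+  minAvailable: 1
+  # Under policy/v1 an empty selector matches all Pods in the namespace;
+  # under policy/v1beta1 it would match none.
+  selector: {}
+```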
@@ -160,7 +163,7 @@ Example PDB Using minAvailable: {{< codenew file="policy/zookeeper-pod-disruption-budget-minavailable.yaml" >}} -Example PDB Using maxUnavailable (Kubernetes 1.7 or higher): +Example PDB Using maxUnavailable: {{< codenew file="policy/zookeeper-pod-disruption-budget-maxunavailable.yaml" >}} @@ -206,7 +209,7 @@ You can get more information about the status of a PDB with this command: kubectl get poddisruptionbudgets zk-pdb -o yaml ``` ```yaml -apiVersion: policy/v1beta1 +apiVersion: policy/v1 kind: PodDisruptionBudget metadata: annotations: diff --git a/content/en/docs/tasks/run-application/delete-stateful-set.md b/content/en/docs/tasks/run-application/delete-stateful-set.md index 57e54e679722a..94b3c583ebb12 100644 --- a/content/en/docs/tasks/run-application/delete-stateful-set.md +++ b/content/en/docs/tasks/run-application/delete-stateful-set.md @@ -43,8 +43,8 @@ You may need to delete the associated headless service separately after the Stat kubectl delete service ``` -Deleting a StatefulSet through kubectl will scale it down to 0, thereby deleting all pods that are a part of it. -If you want to delete just the StatefulSet and not the pods, use `--cascade=false`. +When deleting a StatefulSet through `kubectl`, the StatefulSet scales down to 0. All Pods that are part of this workload are also deleted. If you want to delete only the StatefulSet and not the Pods, use `--cascade=false`. +For example: ```shell kubectl delete -f --cascade=false @@ -66,7 +66,7 @@ Use caution when deleting a PVC, as it may lead to data loss. ### Complete deletion of a StatefulSet -To simply delete everything in a StatefulSet, including the associated pods, you can run a series of commands similar to the following: +To delete everything in a StatefulSet, including the associated pods, you can run a series of commands similar to the following: ```shell grace=$(kubectl get pods --template '{{.spec.terminationGracePeriodSeconds}}') diff --git a/content/en/docs/tasks/run-application/force-delete-stateful-set-pod.md b/content/en/docs/tasks/run-application/force-delete-stateful-set-pod.md index cda469f217d46..0001f4c9f4ba5 100644 --- a/content/en/docs/tasks/run-application/force-delete-stateful-set-pod.md +++ b/content/en/docs/tasks/run-application/force-delete-stateful-set-pod.md @@ -44,9 +44,9 @@ for StatefulSet Pods. Graceful deletion is safe and will ensure that the Pod [shuts down gracefully](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination) before the kubelet deletes the name from the apiserver. -Kubernetes (versions 1.5 or newer) will not delete Pods just because a Node is unreachable. +A Pod is not deleted automatically when a node is unreachable. The Pods running on an unreachable Node enter the 'Terminating' or 'Unknown' state after a -[timeout](/docs/concepts/architecture/nodes/#node-condition). +[timeout](/docs/concepts/architecture/nodes/#condition). Pods may also enter these states when the user attempts graceful deletion of a Pod on an unreachable Node. The only ways in which a Pod in such a state can be removed from the apiserver are as follows: diff --git a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index 49009e1268807..84ae1addd2dd0 100644 --- a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -382,7 +382,7 @@ with *external metrics*. 
Using external metrics requires knowledge of your monitoring system; the setup is similar to that required when using custom metrics. External metrics allow you to autoscale your cluster -based on any metric available in your monitoring system. Just provide a `metric` block with a +based on any metric available in your monitoring system. Provide a `metric` block with a `name` and `selector`, as above, and use the `External` metric type instead of `Object`. If multiple time series are matched by the `metricSelector`, the sum of their values is used by the HorizontalPodAutoscaler. diff --git a/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md b/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md index ff51598f79abb..5e940274230ae 100644 --- a/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md +++ b/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md @@ -23,9 +23,7 @@ Pod Autoscaling does not apply to objects that can't be scaled, for example, Dae The Horizontal Pod Autoscaler is implemented as a Kubernetes API resource and a controller. The resource determines the behavior of the controller. -The controller periodically adjusts the number of replicas in a replication controller or deployment -to match the observed average CPU utilization to the target specified by user. - +The controller periodically adjusts the number of replicas in a replication controller or deployment to match the observed metrics such as average CPU utilisation, average memory utilisation or any other custom metric to the target specified by the user. @@ -162,7 +160,7 @@ can be fetched, scaling is skipped. This means that the HPA is still capable of scaling up if one or more metrics give a `desiredReplicas` greater than the current value. -Finally, just before HPA scales the target, the scale recommendation is recorded. The +Finally, right before HPA scales the target, the scale recommendation is recorded. The controller considers all recommendations within a configurable window choosing the highest recommendation from within that window. This value can be configured using the `--horizontal-pod-autoscaler-downscale-stabilization` flag, which defaults to 5 minutes. This means that scaledowns will occur gradually, smoothing out the impact of rapidly @@ -191,7 +189,7 @@ We can create a new autoscaler using `kubectl create` command. We can list autoscalers by `kubectl get hpa` and get detailed description by `kubectl describe hpa`. Finally, we can delete an autoscaler using `kubectl delete hpa`. -In addition, there is a special `kubectl autoscale` command for easy creation of a Horizontal Pod Autoscaler. +In addition, there is a special `kubectl autoscale` command for creating a HorizontalPodAutoscaler object. For instance, executing `kubectl autoscale rs foo --min=2 --max=5 --cpu-percent=80` will create an autoscaler for replication set *foo*, with target CPU utilization set to `80%` and the number of replicas between 2 and 5. @@ -221,9 +219,9 @@ the global HPA settings exposed as flags for the `kube-controller-manager` compo Starting from v1.12, a new algorithmic update removes the need for the upscale delay. -- `--horizontal-pod-autoscaler-downscale-stabilization`: The value for this option is a - duration that specifies how long the autoscaler has to wait before another - downscale operation can be performed after the current one has completed. 
+- `--horizontal-pod-autoscaler-downscale-stabilization`: Specifies the duration of the
+  downscale stabilization time window. The Horizontal Pod Autoscaler remembers
+  the historical recommended sizes and only acts on the largest size within this time window.
The default value is 5 minutes (`5m0s`).
{{< note >}}
@@ -356,7 +354,7 @@ and [the walkthrough for using external metrics](/docs/tasks/run-application/hor
## Support for configurable scaling behavior
Starting from
-[v1.18](https://github.com/kubernetes/enhancements/blob/master/keps/sig-autoscaling/20190307-configurable-scale-velocity-for-hpa.md)
+[v1.18](https://github.com/kubernetes/enhancements/blob/master/keps/sig-autoscaling/853-configurable-hpa-scale-velocity/README.md)
the `v2beta2` API allows scaling behavior to be configured through the HPA
`behavior` field. Behaviors are specified separately for scaling up and down in
`scaleUp` or `scaleDown` section under the `behavior` field. A stabilization
@@ -383,7 +381,12 @@ behavior:
    periodSeconds: 60
```
-When the number of pods is more than 40 the second policy will be used for scaling down.
+`periodSeconds` indicates the length of time in the past for which the policy must hold true.
+The first policy _(Pods)_ allows at most 4 replicas to be scaled down in one minute. The second policy
+_(Percent)_ allows at most 10% of the current replicas to be scaled down in one minute.
+
+Since, by default, the policy that allows the highest amount of change is selected, the second policy will
+only be used when the number of pod replicas is more than 40. With 40 or fewer replicas, the first policy will be applied.
For instance, if there are 80 replicas and the target has to be
scaled down to 10 replicas then during the first step 8 replicas will be reduced.
In the next iteration when the number of replicas is 72, 10% of the pods is 7.2 but
the number is rounded up to 8. On each loop of
@@ -391,10 +394,6 @@ the autoscaler controller the number of pods to be changed is re-calculated based
of current replicas. When the number of replicas falls below 40 the first policy
_(Pods)_ is applied and 4 replicas will be reduced at a time.
-`periodSeconds` indicates the length of time in the past for which the policy must hold true.
-The first policy allows at most 4 replicas to be scaled down in one minute. The second policy
-allows at most 10% of the current replicas to be scaled down in one minute.
-
The policy selection can be changed by specifying the `selectPolicy` field for a scaling
direction. Setting the value to `Min` selects the policy which allows the
smallest change in the replica count. Setting the value to `Disabled` completely disables
@@ -441,7 +440,7 @@ behavior:
    periodSeconds: 15
  selectPolicy: Max
```
-For scaling down the stabilization window is _300_ seconds(or the value of the
+For scaling down the stabilization window is _300_ seconds (or the value of the
`--horizontal-pod-autoscaler-downscale-stabilization` flag if provided). There is only a single policy
for scaling down which allows 100% of the currently running replicas to be removed which
means the scaling target can be scaled down to the minimum allowed replicas.
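To make the policy discussion above concrete, the following is a minimal sketch of a complete HorizontalPodAutoscaler that combines both scale-down policy types with `selectPolicy: Min`. The Deployment name `web`, the replica bounds, and the CPU target are illustrative values, not taken from this page:

```shell
# A sketch only: apply an HPA whose scaleDown section uses the two policy
# types discussed above. With selectPolicy: Min, the controller picks the
# policy that removes the fewest Pods in each step.
kubectl apply -f - <<EOF
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: web            # illustrative name
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: web          # illustrative target
  minReplicas: 1
  maxReplicas: 100
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 60
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 300
      selectPolicy: Min
      policies:
      - type: Pods
        value: 4
        periodSeconds: 60
      - type: Percent
        value: 10
        periodSeconds: 60
EOF
```

With `selectPolicy: Min`, the selection is the mirror image of the default `Max` behavior described above: above 40 replicas the `Pods` policy (4 at a time) is the smaller change and is chosen; at 40 or fewer replicas the `Percent` policy is chosen.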
diff --git a/content/en/docs/tasks/run-application/run-replicated-stateful-application.md b/content/en/docs/tasks/run-application/run-replicated-stateful-application.md index f1738ff53eea9..22f929c06ffa6 100644 --- a/content/en/docs/tasks/run-application/run-replicated-stateful-application.md +++ b/content/en/docs/tasks/run-application/run-replicated-stateful-application.md @@ -39,6 +39,7 @@ on general patterns for running stateful applications in Kubernetes. [ConfigMaps](/docs/tasks/configure-pod-container/configure-pod-configmap/). * Some familiarity with MySQL helps, but this tutorial aims to present general patterns that should be useful for other systems. +* You are using the default namespace or another namespace that does not contain any conflicting objects. @@ -171,10 +172,10 @@ properties. The script in the `init-mysql` container also applies either `primary.cnf` or `replica.cnf` from the ConfigMap by copying the contents into `conf.d`. Because the example topology consists of a single primary MySQL server and any number of -replicas, the script simply assigns ordinal `0` to be the primary server, and everyone +replicas, the script assigns ordinal `0` to be the primary server, and everyone else to be replicas. Combined with the StatefulSet controller's -[deployment order guarantee](/docs/concepts/workloads/controllers/statefulset/#deployment-and-scaling-guarantees/), +[deployment order guarantee](/docs/concepts/workloads/controllers/statefulset/#deployment-and-scaling-guarantees), this ensures the primary MySQL server is Ready before creating replicas, so they can begin replicating. @@ -534,10 +535,9 @@ kubectl delete pvc data-mysql-4 * Learn more about [debugging a StatefulSet](/docs/tasks/debug-application-cluster/debug-stateful-set/). * Learn more about [deleting a StatefulSet](/docs/tasks/run-application/delete-stateful-set/). * Learn more about [force deleting StatefulSet Pods](/docs/tasks/run-application/force-delete-stateful-set-pod/). -* Look in the [Helm Charts repository](https://github.com/kubernetes/charts) +* Look in the [Helm Charts repository](https://artifacthub.io/) for other stateful application examples. - diff --git a/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md b/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md index 4c43948a215c8..bdc3b0c524a4f 100644 --- a/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md +++ b/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md @@ -65,6 +65,8 @@ for a secure solution. kubectl describe deployment mysql + The output is similar to this: + Name: mysql Namespace: default CreationTimestamp: Tue, 01 Nov 2016 11:18:45 -0700 @@ -105,6 +107,8 @@ for a secure solution. kubectl get pods -l app=mysql + The output is similar to this: + NAME READY STATUS RESTARTS AGE mysql-63082529-2z3ki 1/1 Running 0 3m @@ -112,6 +116,8 @@ for a secure solution. 
kubectl describe pvc mysql-pv-claim + The output is similar to this: + Name: mysql-pv-claim Namespace: default StorageClass: diff --git a/content/en/docs/tasks/run-application/run-stateless-application-deployment.md b/content/en/docs/tasks/run-application/run-stateless-application-deployment.md index df604facf8b7a..62bd984ddc7e3 100644 --- a/content/en/docs/tasks/run-application/run-stateless-application-deployment.md +++ b/content/en/docs/tasks/run-application/run-stateless-application-deployment.md @@ -51,7 +51,6 @@ a Deployment that runs the nginx:1.14.2 Docker image: The output is similar to this: - user@computer:~/website$ kubectl describe deployment nginx-deployment Name: nginx-deployment Namespace: default CreationTimestamp: Tue, 30 Aug 2016 18:11:37 -0700 diff --git a/content/en/docs/tasks/service-catalog/install-service-catalog-using-helm.md b/content/en/docs/tasks/service-catalog/install-service-catalog-using-helm.md index d558a271ad3c7..b01f380a1adfc 100644 --- a/content/en/docs/tasks/service-catalog/install-service-catalog-using-helm.md +++ b/content/en/docs/tasks/service-catalog/install-service-catalog-using-helm.md @@ -19,7 +19,7 @@ Up to date information on this process can be found at the * You must have a Kubernetes cluster with cluster DNS enabled. * If you are using a cloud-based Kubernetes cluster or {{< glossary_tooltip text="Minikube" term_id="minikube" >}}, you may already have cluster DNS enabled. * If you are using `hack/local-up-cluster.sh`, ensure that the `KUBE_ENABLE_CLUSTER_DNS` environment variable is set, then run the install script. -* [Install and setup kubectl](/docs/tasks/tools/install-kubectl/) v1.7 or higher. Make sure it is configured to connect to the Kubernetes cluster. +* [Install and setup kubectl](/docs/tasks/tools/) v1.7 or higher. Make sure it is configured to connect to the Kubernetes cluster. * Install [Helm](https://helm.sh/) v2.7.0 or newer. * Follow the [Helm install instructions](https://helm.sh/docs/intro/install/). * If you already have an appropriate version of Helm installed, execute `helm init` to install Tiller, the server-side component of Helm. @@ -33,7 +33,7 @@ Up to date information on this process can be found at the Once Helm is installed, add the *service-catalog* Helm repository to your local machine by executing the following command: ```shell -helm repo add svc-cat https://svc-catalog-charts.storage.googleapis.com +helm repo add svc-cat https://kubernetes-sigs.github.io/service-catalog ``` Check to make sure that it installed successfully by executing the following command: diff --git a/content/en/docs/tasks/service-catalog/install-service-catalog-using-sc.md b/content/en/docs/tasks/service-catalog/install-service-catalog-using-sc.md index 52a55457a256f..a724d5b17bf0d 100644 --- a/content/en/docs/tasks/service-catalog/install-service-catalog-using-sc.md +++ b/content/en/docs/tasks/service-catalog/install-service-catalog-using-sc.md @@ -12,10 +12,7 @@ You can use the GCP [Service Catalog Installer](https://github.com/GoogleCloudPl tool to easily install or uninstall Service Catalog on your Kubernetes cluster, linking it to Google Cloud projects. -Service Catalog itself can work with any kind of managed service, not just Google Cloud. - - - +Service Catalog can work with any kind of managed service, not only Google Cloud. 
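As an aside on the Helm-based installation above: the hunk ends before showing the verification command it refers to. A hedged sketch of checking that the `svc-cat` repository was added; the exact subcommand depends on your Helm major version:

```shell
# Verify the repository is known to Helm (a sketch; pick the line matching
# your Helm major version).
helm search repo service-catalog   # Helm 3
helm search service-catalog        # Helm 2
```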
## {{% heading "prerequisites" %}} @@ -23,7 +20,7 @@ Service Catalog itself can work with any kind of managed service, not just Googl * Install [Go 1.6+](https://golang.org/dl/) and set the `GOPATH`. * Install the [cfssl](https://github.com/cloudflare/cfssl) tool needed for generating SSL artifacts. * Service Catalog requires Kubernetes version 1.7+. -* [Install and setup kubectl](/docs/tasks/tools/install-kubectl/) so that it is configured to connect to a Kubernetes v1.7+ cluster. +* [Install and setup kubectl](/docs/tasks/tools/) so that it is configured to connect to a Kubernetes v1.7+ cluster. * The kubectl user must be bound to the *cluster-admin* role for it to install Service Catalog. To ensure that this is true, run the following command: kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user= diff --git a/content/en/docs/tasks/tls/certificate-rotation.md b/content/en/docs/tasks/tls/certificate-rotation.md index ea3602fbb0f8d..5dd9b8571493e 100644 --- a/content/en/docs/tasks/tls/certificate-rotation.md +++ b/content/en/docs/tasks/tls/certificate-rotation.md @@ -69,8 +69,9 @@ write that to disk, in the location specified by `--cert-dir`. Then the kubelet will use the new certificate to connect to the Kubernetes API. As the expiration of the signed certificate approaches, the kubelet will -automatically issue a new certificate signing request, using the Kubernetes -API. Again, the controller manager will automatically approve the certificate +automatically issue a new certificate signing request, using the Kubernetes API. +This can happen at any point between 30% and 10% of the time remaining on the +certificate. Again, the controller manager will automatically approve the certificate request and attach a signed certificate to the certificate signing request. The kubelet will retrieve the new signed certificate from the Kubernetes API and write that to disk. Then it will update the connections it has to the diff --git a/content/en/docs/tasks/tls/manual-rotation-of-ca-certificates.md b/content/en/docs/tasks/tls/manual-rotation-of-ca-certificates.md index 3147ac3a18296..e56322cbeccc8 100644 --- a/content/en/docs/tasks/tls/manual-rotation-of-ca-certificates.md +++ b/content/en/docs/tasks/tls/manual-rotation-of-ca-certificates.md @@ -51,12 +51,12 @@ Configurations with a single API server will experience unavailability while the If any pods are started before new CA is used by API servers, they will get this update and trust both old and new CAs. ```shell - base64_encoded_ca="$(base64 )" + base64_encoded_ca="$(base64 -w0 )" for namespace in $(kubectl get ns --no-headers | awk '{print $1}'); do for token in $(kubectl get secrets --namespace "$namespace" --field-selector type=kubernetes.io/service-account-token -o name); do kubectl get $token --namespace "$namespace" -o yaml | \ - /bin/sed "s/\(ca.crt:\).*/\1 ${base64_encoded_ca}" | \ + /bin/sed "s/\(ca.crt:\).*/\1 ${base64_encoded_ca}/" | \ kubectl apply -f - done done @@ -105,8 +105,8 @@ Configurations with a single API server will experience unavailability while the * Make sure control plane components logs no TLS errors. {{< note >}} - To generate certificates and private keys for your cluster using the `openssl` command line tool, see [Certificates (`openssl`)](/docs/concepts/cluster-administration/certificates/#openssl). - You can also use [`cfssl`](/docs/concepts/cluster-administration/certificates/#cfssl). 
+ To generate certificates and private keys for your cluster using the `openssl` command line tool, see [Certificates (`openssl`)](/docs/tasks/administer-cluster/certificates/#openssl). + You can also use [`cfssl`](/docs/tasks/administer-cluster/certificates/#cfssl). {{< /note >}} 1. Annotate any Daemonsets and Deployments to trigger pod replacement in a safer rolling fashion. @@ -132,10 +132,10 @@ Configurations with a single API server will experience unavailability while the 1. If your cluster is using bootstrap tokens to join nodes, update the ConfigMap `cluster-info` in the `kube-public` namespace with new CA. ```shell - base64_encoded_ca="$(base64 /etc/kubernetes/pki/ca.crt)" + base64_encoded_ca="$(base64 -w0 /etc/kubernetes/pki/ca.crt)" kubectl get cm/cluster-info --namespace kube-public -o yaml | \ - /bin/sed "s/\(certificate-authority-data:\).*/\1 ${base64_encoded_ca}" | \ + /bin/sed "s/\(certificate-authority-data:\).*/\1 ${base64_encoded_ca}/" | \ kubectl apply -f - ``` diff --git a/content/en/docs/tasks/tools/_index.md b/content/en/docs/tasks/tools/_index.md index 7bbb2161ec84d..5f1b517141a1d 100755 --- a/content/en/docs/tasks/tools/_index.md +++ b/content/en/docs/tasks/tools/_index.md @@ -7,18 +7,19 @@ no_list: true ## kubectl -The Kubernetes command-line tool, `kubectl`, allows you to run commands against -Kubernetes clusters. You can use `kubectl` to deploy applications, inspect and -manage cluster resources, and view logs. - -See [Install and Set Up `kubectl`](/docs/tasks/tools/install-kubectl/) for -information about how to download and install `kubectl` and set it up for -accessing your cluster. + +The Kubernetes command-line tool, [kubectl](/docs/reference/kubectl/kubectl/), allows +you to run commands against Kubernetes clusters. +You can use kubectl to deploy applications, inspect and manage cluster resources, +and view logs. For more information including a complete list of kubectl operations, see the +[`kubectl` reference documentation](/docs/reference/kubectl/). -View kubectl Install and Set Up Guide +kubectl is installable on a variety of Linux platforms, macOS and Windows. +Find your preferred operating system below. -You can also read the -[`kubectl` reference documentation](/docs/reference/kubectl/). +- [Install kubectl on Linux](/docs/tasks/tools/install-kubectl-linux) +- [Install kubectl on macOS](/docs/tasks/tools/install-kubectl-macos) +- [Install kubectl on Windows](/docs/tasks/tools/install-kubectl-windows) ## kind diff --git a/content/en/docs/tasks/tools/included/_index.md b/content/en/docs/tasks/tools/included/_index.md new file mode 100644 index 0000000000000..2da0437b8235a --- /dev/null +++ b/content/en/docs/tasks/tools/included/_index.md @@ -0,0 +1,6 @@ +--- +title: "Tools Included" +description: "Snippets to be included in the main kubectl-installs-*.md pages." +headless: true +toc_hide: true +--- \ No newline at end of file diff --git a/content/en/docs/tasks/tools/included/install-kubectl-gcloud.md b/content/en/docs/tasks/tools/included/install-kubectl-gcloud.md new file mode 100644 index 0000000000000..dcf8572618380 --- /dev/null +++ b/content/en/docs/tasks/tools/included/install-kubectl-gcloud.md @@ -0,0 +1,21 @@ +--- +title: "gcloud kubectl install" +description: "How to install kubectl with gcloud snippet for inclusion in each OS-specific tab." +headless: true +--- + +You can install kubectl as part of the Google Cloud SDK. + +1. Install the [Google Cloud SDK](https://cloud.google.com/sdk/). + +1. 
Run the `kubectl` installation command: + + ```shell + gcloud components install kubectl + ``` + +1. Test to ensure the version you installed is up-to-date: + + ```shell + kubectl version --client + ``` \ No newline at end of file diff --git a/content/en/docs/tasks/tools/included/kubectl-whats-next.md b/content/en/docs/tasks/tools/included/kubectl-whats-next.md new file mode 100644 index 0000000000000..4b0da49bbcd97 --- /dev/null +++ b/content/en/docs/tasks/tools/included/kubectl-whats-next.md @@ -0,0 +1,12 @@ +--- +title: "What's next?" +description: "What's next after installing kubectl." +headless: true +--- + +* [Install Minikube](https://minikube.sigs.k8s.io/docs/start/) +* See the [getting started guides](/docs/setup/) for more about creating clusters. +* [Learn how to launch and expose your application.](/docs/tasks/access-application-cluster/service-access-application-cluster/) +* If you need access to a cluster you didn't create, see the + [Sharing Cluster Access document](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). +* Read the [kubectl reference docs](/docs/reference/kubectl/kubectl/) diff --git a/content/en/docs/tasks/tools/included/optional-kubectl-configs-bash-linux.md b/content/en/docs/tasks/tools/included/optional-kubectl-configs-bash-linux.md new file mode 100644 index 0000000000000..949f1922c4ecc --- /dev/null +++ b/content/en/docs/tasks/tools/included/optional-kubectl-configs-bash-linux.md @@ -0,0 +1,54 @@ +--- +title: "bash auto-completion on Linux" +description: "Some optional configuration for bash auto-completion on Linux." +headless: true +--- + +### Introduction + +The kubectl completion script for Bash can be generated with the command `kubectl completion bash`. Sourcing the completion script in your shell enables kubectl autocompletion. + +However, the completion script depends on [**bash-completion**](https://github.com/scop/bash-completion), which means that you have to install this software first (you can test if you have bash-completion already installed by running `type _init_completion`). + +### Install bash-completion + +bash-completion is provided by many package managers (see [here](https://github.com/scop/bash-completion#installation)). You can install it with `apt-get install bash-completion` or `yum install bash-completion`, etc. + +The above commands create `/usr/share/bash-completion/bash_completion`, which is the main script of bash-completion. Depending on your package manager, you have to manually source this file in your `~/.bashrc` file. + +To find out, reload your shell and run `type _init_completion`. If the command succeeds, you're already set, otherwise add the following to your `~/.bashrc` file: + +```bash +source /usr/share/bash-completion/bash_completion +``` + +Reload your shell and verify that bash-completion is correctly installed by typing `type _init_completion`. + +### Enable kubectl autocompletion + +You now need to ensure that the kubectl completion script gets sourced in all your shell sessions. 
There are two ways in which you can do this: + +- Source the completion script in your `~/.bashrc` file: + + ```bash + echo 'source <(kubectl completion bash)' >>~/.bashrc + ``` + +- Add the completion script to the `/etc/bash_completion.d` directory: + + ```bash + kubectl completion bash >/etc/bash_completion.d/kubectl + ``` + +If you have an alias for kubectl, you can extend shell completion to work with that alias: + +```bash +echo 'alias k=kubectl' >>~/.bashrc +echo 'complete -F __start_kubectl k' >>~/.bashrc +``` + +{{< note >}} +bash-completion sources all completion scripts in `/etc/bash_completion.d`. +{{< /note >}} + +Both approaches are equivalent. After reloading your shell, kubectl autocompletion should be working. diff --git a/content/en/docs/tasks/tools/included/optional-kubectl-configs-bash-mac.md b/content/en/docs/tasks/tools/included/optional-kubectl-configs-bash-mac.md new file mode 100644 index 0000000000000..98545406497ba --- /dev/null +++ b/content/en/docs/tasks/tools/included/optional-kubectl-configs-bash-mac.md @@ -0,0 +1,89 @@ +--- +title: "bash auto-completion on macOS" +description: "Some optional configuration for bash auto-completion on macOS." +headless: true +--- + +### Introduction + +The kubectl completion script for Bash can be generated with `kubectl completion bash`. Sourcing this script in your shell enables kubectl completion. + +However, the kubectl completion script depends on [**bash-completion**](https://github.com/scop/bash-completion) which you thus have to previously install. + +{{< warning>}} +There are two versions of bash-completion, v1 and v2. V1 is for Bash 3.2 (which is the default on macOS), and v2 is for Bash 4.1+. The kubectl completion script **doesn't work** correctly with bash-completion v1 and Bash 3.2. It requires **bash-completion v2** and **Bash 4.1+**. Thus, to be able to correctly use kubectl completion on macOS, you have to install and use Bash 4.1+ ([*instructions*](https://itnext.io/upgrading-bash-on-macos-7138bd1066ba)). The following instructions assume that you use Bash 4.1+ (that is, any Bash version of 4.1 or newer). +{{< /warning >}} + +### Upgrade Bash + +The instructions here assume you use Bash 4.1+. You can check your Bash's version by running: + +```bash +echo $BASH_VERSION +``` + +If it is too old, you can install/upgrade it using Homebrew: + +```bash +brew install bash +``` + +Reload your shell and verify that the desired version is being used: + +```bash +echo $BASH_VERSION $SHELL +``` + +Homebrew usually installs it at `/usr/local/bin/bash`. + +### Install bash-completion + +{{< note >}} +As mentioned, these instructions assume you use Bash 4.1+, which means you will install bash-completion v2 (in contrast to Bash 3.2 and bash-completion v1, in which case kubectl completion won't work). +{{< /note >}} + +You can test if you have bash-completion v2 already installed with `type _init_completion`. If not, you can install it with Homebrew: + +```bash +brew install bash-completion@2 +``` + +As stated in the output of this command, add the following to your `~/.bash_profile` file: + +```bash +export BASH_COMPLETION_COMPAT_DIR="/usr/local/etc/bash_completion.d" +[[ -r "/usr/local/etc/profile.d/bash_completion.sh" ]] && . "/usr/local/etc/profile.d/bash_completion.sh" +``` + +Reload your shell and verify that bash-completion v2 is correctly installed with `type _init_completion`. 
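As a quick sketch of that verification step, run interactively:

```shell
# Start a fresh shell so ~/.bash_profile is re-read, then confirm that
# bash-completion v2 provided the _init_completion function.
exec bash
type _init_completion   # should print a function definition, not an error
```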
+ +### Enable kubectl autocompletion + +You now have to ensure that the kubectl completion script gets sourced in all your shell sessions. There are multiple ways to achieve this: + +- Source the completion script in your `~/.bash_profile` file: + + ```bash + echo 'source <(kubectl completion bash)' >>~/.bash_profile + ``` + +- Add the completion script to the `/usr/local/etc/bash_completion.d` directory: + + ```bash + kubectl completion bash >/usr/local/etc/bash_completion.d/kubectl + ``` + +- If you have an alias for kubectl, you can extend shell completion to work with that alias: + + ```bash + echo 'alias k=kubectl' >>~/.bash_profile + echo 'complete -F __start_kubectl k' >>~/.bash_profile + ``` + +- If you installed kubectl with Homebrew (as explained [here](/docs/tasks/tools/install-kubectl-macos/#install-with-homebrew-on-macos)), then the kubectl completion script should already be in `/usr/local/etc/bash_completion.d/kubectl`. In that case, you don't need to do anything. + + {{< note >}} + The Homebrew installation of bash-completion v2 sources all the files in the `BASH_COMPLETION_COMPAT_DIR` directory, that's why the latter two methods work. + {{< /note >}} + +In any case, after reloading your shell, kubectl completion should be working. diff --git a/content/en/docs/tasks/tools/included/optional-kubectl-configs-zsh.md b/content/en/docs/tasks/tools/included/optional-kubectl-configs-zsh.md new file mode 100644 index 0000000000000..95c8c0aa7134e --- /dev/null +++ b/content/en/docs/tasks/tools/included/optional-kubectl-configs-zsh.md @@ -0,0 +1,29 @@ +--- +title: "zsh auto-completion" +description: "Some optional configuration for zsh auto-completion." +headless: true +--- + +The kubectl completion script for Zsh can be generated with the command `kubectl completion zsh`. Sourcing the completion script in your shell enables kubectl autocompletion. + +To do so in all your shell sessions, add the following to your `~/.zshrc` file: + +```zsh +source <(kubectl completion zsh) +``` + +If you have an alias for kubectl, you can extend shell completion to work with that alias: + +```zsh +echo 'alias k=kubectl' >>~/.zshrc +echo 'complete -F __start_kubectl k' >>~/.zshrc +``` + +After reloading your shell, kubectl autocompletion should be working. + +If you get an error like `complete:13: command not found: compdef`, then add the following to the beginning of your `~/.zshrc` file: + +```zsh +autoload -Uz compinit +compinit +``` \ No newline at end of file diff --git a/content/en/docs/tasks/tools/included/verify-kubectl.md b/content/en/docs/tasks/tools/included/verify-kubectl.md new file mode 100644 index 0000000000000..fbd92e4cb6795 --- /dev/null +++ b/content/en/docs/tasks/tools/included/verify-kubectl.md @@ -0,0 +1,34 @@ +--- +title: "verify kubectl install" +description: "How to verify kubectl." +headless: true +--- + +In order for kubectl to find and access a Kubernetes cluster, it needs a +[kubeconfig file](/docs/concepts/configuration/organize-cluster-access-kubeconfig/), +which is created automatically when you create a cluster using +[kube-up.sh](https://github.com/kubernetes/kubernetes/blob/master/cluster/kube-up.sh) +or successfully deploy a Minikube cluster. +By default, kubectl configuration is located at `~/.kube/config`. + +Check that kubectl is properly configured by getting the cluster state: + +```shell +kubectl cluster-info +``` + +If you see a URL response, kubectl is correctly configured to access your cluster. 
+
+If you see a message similar to the following, kubectl is not configured correctly or is not able to connect to a Kubernetes cluster.
+
+```
+The connection to the server was refused - did you specify the right host or port?
+```
+
+For example, if you intend to run a Kubernetes cluster on your laptop (locally), you need a tool like Minikube installed first; then re-run the commands stated above.
+
+If `kubectl cluster-info` returns the URL response but you can't access your cluster, check whether it is configured properly by using:
+
+```shell
+kubectl cluster-info dump
+```
\ No newline at end of file
diff --git a/content/en/docs/tasks/tools/install-kubectl-linux.md b/content/en/docs/tasks/tools/install-kubectl-linux.md
new file mode 100644
index 0000000000000..d64ef99b13f65
--- /dev/null
+++ b/content/en/docs/tasks/tools/install-kubectl-linux.md
@@ -0,0 +1,194 @@
+---
+reviewers:
+- mikedanese
+title: Install and Set Up kubectl on Linux
+content_type: task
+weight: 10
+card:
+  name: tasks
+  weight: 20
+  title: Install kubectl on Linux
+---

## {{% heading "prerequisites" %}}

You must use a kubectl version that is within one minor version difference of your cluster. For example, a v{{< skew latestVersion >}} client can communicate with v{{< skew prevMinorVersion >}}, v{{< skew latestVersion >}}, and v{{< skew nextMinorVersion >}} control planes.
Using the latest version of kubectl helps avoid unforeseen issues.

## Install kubectl on Linux

The following methods exist for installing kubectl on Linux:

- [Install kubectl binary with curl on Linux](#install-kubectl-binary-with-curl-on-linux)
- [Install using native package management](#install-using-native-package-management)
- [Install using other package management](#install-using-other-package-management)
- [Install on Linux as part of the Google Cloud SDK](#install-on-linux-as-part-of-the-google-cloud-sdk)

### Install kubectl binary with curl on Linux

1. Download the latest release with the command:

   ```bash
   curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
   ```

   {{< note >}}
To download a specific version, replace the `$(curl -L -s https://dl.k8s.io/release/stable.txt)` portion of the command with the specific version.

For example, to download version {{< param "fullversion" >}} on Linux, type:

   ```bash
   curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/linux/amd64/kubectl
   ```
   {{< /note >}}

1. Validate the binary (optional)

   Download the kubectl checksum file:

   ```bash
   curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
   ```

   Validate the kubectl binary against the checksum file:

   ```bash
   echo "$(<kubectl.sha256)  kubectl" | sha256sum --check
   ```

   If valid, the output is:

   ```console
   kubectl: OK
   ```

   If the check fails, `sha256sum` exits with nonzero status and prints output similar to:

   ```console
   kubectl: FAILED
   sha256sum: WARNING: 1 computed checksum did NOT match
   ```

   {{< note >}}
   Download the same version of the binary and checksum.
   {{< /note >}}

1. Install kubectl

   ```bash
   sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
   ```

   {{< note >}}
   If you do not have root access on the target system, you can still install kubectl to the `~/.local/bin` directory:

   ```bash
   mkdir -p ~/.local/bin
   mv ./kubectl ~/.local/bin/kubectl
   # and then add ~/.local/bin to $PATH
   ```

   {{< /note >}}

1. Test to ensure the version you installed is up-to-date:

   ```bash
   kubectl version --client
   ```

### Install using native package management

{{< tabs name="kubectl_install" >}}
{{% tab name="Debian-based distributions" %}}

1. 
Update the `apt` package index and install packages needed to use the Kubernetes `apt` repository:

   ```shell
   sudo apt-get update
   sudo apt-get install -y apt-transport-https ca-certificates curl
   ```

2. Download the Google Cloud public signing key:

   ```shell
   sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
   ```

3. Add the Kubernetes `apt` repository:

   ```shell
   echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
   ```

4. Update `apt` package index with the new repository and install kubectl:

   ```shell
   sudo apt-get update
   sudo apt-get install -y kubectl
   ```

{{% /tab %}}

{{< tab name="Red Hat-based distributions" codelang="bash" >}}
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubectl
{{< /tab >}}
{{< /tabs >}}

### Install using other package management

{{< tabs name="other_kubectl_install" >}}
{{% tab name="Snap" %}}
If you are on Ubuntu or another Linux distribution that supports the [snap](https://snapcraft.io/docs/core/install) package manager, kubectl is available as a [snap](https://snapcraft.io/) application.

```shell
snap install kubectl --classic
kubectl version --client
```

{{% /tab %}}

{{% tab name="Homebrew" %}}
If you are on Linux and using the [Homebrew](https://docs.brew.sh/Homebrew-on-Linux) package manager, kubectl is available for [installation](https://docs.brew.sh/Homebrew-on-Linux#install).

```shell
brew install kubectl
kubectl version --client
```

{{% /tab %}}

{{< /tabs >}}

### Install on Linux as part of the Google Cloud SDK

{{< include "included/install-kubectl-gcloud.md" >}}

## Verify kubectl configuration

{{< include "included/verify-kubectl.md" >}}

## Optional kubectl configurations

### Enable shell autocompletion

kubectl provides autocompletion support for Bash and Zsh, which can save you a lot of typing.

Below are the procedures to set up autocompletion for Bash and Zsh.

{{< tabs name="kubectl_autocompletion" >}}
{{< tab name="Bash" include="included/optional-kubectl-configs-bash-linux.md" />}}
{{< tab name="Zsh" include="included/optional-kubectl-configs-zsh.md" />}}
{{< /tabs >}}

## {{% heading "whatsnext" %}}

{{< include "included/kubectl-whats-next.md" >}}
diff --git a/content/en/docs/tasks/tools/install-kubectl-macos.md b/content/en/docs/tasks/tools/install-kubectl-macos.md
new file mode 100644
index 0000000000000..b748a38c6f526
--- /dev/null
+++ b/content/en/docs/tasks/tools/install-kubectl-macos.md
@@ -0,0 +1,175 @@
+---
+reviewers:
+- mikedanese
+title: Install and Set Up kubectl on macOS
+content_type: task
+weight: 10
+card:
+  name: tasks
+  weight: 20
+  title: Install kubectl on macOS
+---

## {{% heading "prerequisites" %}}

You must use a kubectl version that is within one minor version difference of your cluster. For example, a v{{< skew latestVersion >}} client can communicate with v{{< skew prevMinorVersion >}}, v{{< skew latestVersion >}}, and v{{< skew nextMinorVersion >}} control planes.
Using the latest version of kubectl helps avoid unforeseen issues.
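For example, one way to compare your client version against the cluster is sketched below; it assumes kubectl is already installed and can reach a cluster:

```shell
# Prints the client version and, if a cluster is reachable, the server
# version; check that the two differ by at most one minor version.
kubectl version --short
```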
+
+## Install kubectl on macOS
+
+The following methods exist for installing kubectl on macOS:
+
+- [Install kubectl binary with curl on macOS](#install-kubectl-binary-with-curl-on-macos)
+- [Install with Homebrew on macOS](#install-with-homebrew-on-macos)
+- [Install with Macports on macOS](#install-with-macports-on-macos)
+- [Install on macOS as part of the Google Cloud SDK](#install-on-macos-as-part-of-the-google-cloud-sdk)

### Install kubectl binary with curl on macOS

1. Download the latest release:

   {{< tabs name="download_binary_macos" >}}
   {{< tab name="Intel" codelang="bash" >}}
   curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl"
   {{< /tab >}}
   {{< tab name="Apple Silicon" codelang="bash" >}}
   curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl"
   {{< /tab >}}
   {{< /tabs >}}

   {{< note >}}
   To download a specific version, replace the `$(curl -L -s https://dl.k8s.io/release/stable.txt)` portion of the command with the specific version.

   For example, to download version {{< param "fullversion" >}} on Intel macOS, type:

   ```bash
   curl -LO "https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/darwin/amd64/kubectl"
   ```

   And for macOS on Apple Silicon, type:

   ```bash
   curl -LO "https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/darwin/arm64/kubectl"
   ```

   {{< /note >}}

1. Validate the binary (optional)

   Download the kubectl checksum file:

   {{< tabs name="download_checksum_macos" >}}
   {{< tab name="Intel" codelang="bash" >}}
   curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl.sha256"
   {{< /tab >}}
   {{< tab name="Apple Silicon" codelang="bash" >}}
   curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl.sha256"
   {{< /tab >}}
   {{< /tabs >}}

   Validate the kubectl binary against the checksum file:

   ```bash
   echo "$(<kubectl.sha256)  kubectl" | shasum -a 256 --check
   ```

   If valid, the output is:

   ```console
   kubectl: OK
   ```

   If the check fails, `shasum` exits with nonzero status and prints output similar to:

   ```console
   kubectl: FAILED
   shasum: WARNING: 1 computed checksum did NOT match
   ```

   {{< note >}}
   Download the same version of the binary and checksum.
   {{< /note >}}

1. Make the kubectl binary executable.

   ```bash
   chmod +x ./kubectl
   ```

1. Move the kubectl binary to a file location on your system `PATH`.

   ```bash
   sudo mv ./kubectl /usr/local/bin/kubectl
   sudo chown root: /usr/local/bin/kubectl
   ```

1. Test to ensure the version you installed is up-to-date:

   ```bash
   kubectl version --client
   ```

### Install with Homebrew on macOS

If you are on macOS and using the [Homebrew](https://brew.sh/) package manager, you can install kubectl with Homebrew.

1. Run the installation command:

   ```bash
   brew install kubectl
   ```

   or

   ```bash
   brew install kubernetes-cli
   ```

1. Test to ensure the version you installed is up-to-date:

   ```bash
   kubectl version --client
   ```

### Install with Macports on macOS

If you are on macOS and using the [Macports](https://macports.org/) package manager, you can install kubectl with Macports.

1. Run the installation command:

   ```bash
   sudo port selfupdate
   sudo port install kubectl
   ```

1. 
Test to ensure the version you installed is up-to-date: + + ```bash + kubectl version --client + ``` + + +### Install on macOS as part of the Google Cloud SDK + +{{< include "included/install-kubectl-gcloud.md" >}} + +## Verify kubectl configuration + +{{< include "included/verify-kubectl.md" >}} + +## Optional kubectl configurations + +### Enable shell autocompletion + +kubectl provides autocompletion support for Bash and Zsh, which can save you a lot of typing. + +Below are the procedures to set up autocompletion for Bash and Zsh. + +{{< tabs name="kubectl_autocompletion" >}} +{{< tab name="Bash" include="included/optional-kubectl-configs-bash-mac.md" />}} +{{< tab name="Zsh" include="included/optional-kubectl-configs-zsh.md" />}} +{{< /tabs >}} + +## {{% heading "whatsnext" %}} + +{{< include "included/kubectl-whats-next.md" >}} diff --git a/content/en/docs/tasks/tools/install-kubectl-windows.md b/content/en/docs/tasks/tools/install-kubectl-windows.md new file mode 100644 index 0000000000000..11f79b6d94709 --- /dev/null +++ b/content/en/docs/tasks/tools/install-kubectl-windows.md @@ -0,0 +1,150 @@ +--- +reviewers: +- mikedanese +title: Install and Set Up kubectl on Windows +content_type: task +weight: 10 +card: + name: tasks + weight: 20 + title: Install kubectl on Windows +--- + +## {{% heading "prerequisites" %}} + +You must use a kubectl version that is within one minor version difference of your cluster. For example, a v{{< skew latestVersion >}} client can communicate with v{{< skew prevMinorVersion >}}, v{{< skew latestVersion >}}, and v{{< skew nextMinorVersion >}} control planes. +Using the latest version of kubectl helps avoid unforeseen issues. + +## Install kubectl on Windows + +The following methods exist for installing kubectl on Windows: + +- [Install kubectl binary with curl on Windows](#install-kubectl-binary-with-curl-on-windows) +- [Install on Windows using Chocolatey or Scoop](#install-on-windows-using-chocolatey-or-scoop) +- [Install on Windows as part of the Google Cloud SDK](#install-on-windows-as-part-of-the-google-cloud-sdk) + + +### Install kubectl binary with curl on Windows + +1. Download the [latest release {{< param "fullversion" >}}](https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe). + + Or if you have `curl` installed, use this command: + + ```powershell + curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe + ``` + + {{< note >}} + To find out the latest stable version (for example, for scripting), take a look at [https://dl.k8s.io/release/stable.txt](https://dl.k8s.io/release/stable.txt). + {{< /note >}} + +1. Validate the binary (optional) + + Download the kubectl checksum file: + + ```powershell + curl -LO https://dl.k8s.io/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe.sha256 + ``` + + Validate the kubectl binary against the checksum file: + + - Using Command Prompt to manually compare `CertUtil`'s output to the checksum file downloaded: + + ```cmd + CertUtil -hashfile kubectl.exe SHA256 + type kubectl.exe.sha256 + ``` + + - Using PowerShell to automate the verification using the `-eq` operator to get a `True` or `False` result: + + ```powershell + $($(CertUtil -hashfile .\kubectl.exe SHA256)[1] -replace " ", "") -eq $(type .\kubectl.exe.sha256) + ``` + +1. Add the binary in to your `PATH`. + +1. 
Test to ensure the version of `kubectl` is the same as downloaded: + + ```cmd + kubectl version --client + ``` + +{{< note >}} +[Docker Desktop for Windows](https://docs.docker.com/docker-for-windows/#kubernetes) adds its own version of `kubectl` to `PATH`. +If you have installed Docker Desktop before, you may need to place your `PATH` entry before the one added by the Docker Desktop installer or remove the Docker Desktop's `kubectl`. +{{< /note >}} + +### Install on Windows using Chocolatey or Scoop + +1. To install kubectl on Windows you can use either [Chocolatey](https://chocolatey.org) package manager or [Scoop](https://scoop.sh) command-line installer. + + {{< tabs name="kubectl_win_install" >}} + {{% tab name="choco" %}} + ```powershell + choco install kubernetes-cli + ``` + {{% /tab %}} + {{% tab name="scoop" %}} + ```powershell + scoop install kubectl + ``` + {{% /tab %}} + {{< /tabs >}} + + +1. Test to ensure the version you installed is up-to-date: + + ```powershell + kubectl version --client + ``` + +1. Navigate to your home directory: + + ```powershell + # If you're using cmd.exe, run: cd %USERPROFILE% + cd ~ + ``` + +1. Create the `.kube` directory: + + ```powershell + mkdir .kube + ``` + +1. Change to the `.kube` directory you just created: + + ```powershell + cd .kube + ``` + +1. Configure kubectl to use a remote Kubernetes cluster: + + ```powershell + New-Item config -type file + ``` + +{{< note >}} +Edit the config file with a text editor of your choice, such as Notepad. +{{< /note >}} + +### Install on Windows as part of the Google Cloud SDK + +{{< include "included/install-kubectl-gcloud.md" >}} + +## Verify kubectl configuration + +{{< include "included/verify-kubectl.md" >}} + +## Optional kubectl configurations + +### Enable shell autocompletion + +kubectl provides autocompletion support for Bash and Zsh, which can save you a lot of typing. + +Below are the procedures to set up autocompletion for Zsh, if you are running that on Windows. + +{{< include "included/optional-kubectl-configs-zsh.md" >}} + +## {{% heading "whatsnext" %}} + +{{< include "included/kubectl-whats-next.md" >}} \ No newline at end of file diff --git a/content/en/docs/tasks/tools/install-kubectl.md b/content/en/docs/tasks/tools/install-kubectl.md deleted file mode 100644 index 54f1e7e1239f8..0000000000000 --- a/content/en/docs/tasks/tools/install-kubectl.md +++ /dev/null @@ -1,634 +0,0 @@ ---- -reviewers: -- mikedanese -title: Install and Set Up kubectl -content_type: task -weight: 10 -card: - name: tasks - weight: 20 - title: Install kubectl ---- - - -The Kubernetes command-line tool, [kubectl](/docs/reference/kubectl/kubectl/), allows -you to run commands against Kubernetes clusters. -You can use kubectl to deploy applications, inspect and manage cluster resources, -and view logs. For a complete list of kubectl operations, see -[Overview of kubectl](/docs/reference/kubectl/overview/). - - -## {{% heading "prerequisites" %}} - -You must use a kubectl version that is within one minor version difference of your cluster. -For example, a v1.2 client should work with v1.1, v1.2, and v1.3 master. -Using the latest version of kubectl helps avoid unforeseen issues. - - - -## Install kubectl on Linux - -### Install kubectl binary with curl on Linux - -1. 
Download the latest release with the command: - - ```bash - curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" - ``` - - {{< note >}} -To download a specific version, replace the `$(curl -L -s https://dl.k8s.io/release/stable.txt)` portion of the command with the specific version. - -For example, to download version {{< param "fullversion" >}} on Linux, type: - - ```bash - curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/linux/amd64/kubectl - ``` - {{< /note >}} - -1. Validate the binary (optional) - - Download the kubectl checksum file: - - ```bash - curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256" - ``` - - Validate the kubectl binary against the checksum file: - - ```bash - echo "$(}} - Download the same version of the binary and checksum. - {{< /note >}} - -1. Install kubectl - - ```bash - sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl - ``` - - {{< note >}} - If you do not have root access on the target system, you can still install kubectl to the `~/.local/bin` directory: - - ```bash - mkdir -p ~/.local/bin/kubectl - mv ./kubectl ~/.local/bin/kubectl - # and then add ~/.local/bin/kubectl to $PATH - ``` - - {{< /note >}} - -1. Test to ensure the version you installed is up-to-date: - - ```bash - kubectl version --client - ``` - -### Install using native package management - -{{< tabs name="kubectl_install" >}} -{{< tab name="Ubuntu, Debian or HypriotOS" codelang="bash" >}} -sudo apt-get update && sudo apt-get install -y apt-transport-https gnupg2 curl -curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - -echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list -sudo apt-get update -sudo apt-get install -y kubectl -{{< /tab >}} - -{{< tab name="CentOS, RHEL or Fedora" codelang="bash" >}}cat < /etc/yum.repos.d/kubernetes.repo -[kubernetes] -name=Kubernetes -baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 -enabled=1 -gpgcheck=1 -repo_gpgcheck=1 -gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg -EOF -yum install -y kubectl -{{< /tab >}} -{{< /tabs >}} - -### Install using other package management - -{{< tabs name="other_kubectl_install" >}} -{{% tab name="Snap" %}} -If you are on Ubuntu or another Linux distribution that support [snap](https://snapcraft.io/docs/core/install) package manager, kubectl is available as a [snap](https://snapcraft.io/) application. - -```shell -snap install kubectl --classic - -kubectl version --client -``` - -{{% /tab %}} - -{{% tab name="Homebrew" %}} -If you are on Linux and using [Homebrew](https://docs.brew.sh/Homebrew-on-Linux) package manager, kubectl is available for [installation](https://docs.brew.sh/Homebrew-on-Linux#install). - -```shell -brew install kubectl - -kubectl version --client -``` - -{{% /tab %}} - -{{< /tabs >}} - - -## Install kubectl on macOS - -### Install kubectl binary with curl on macOS - -1. Download the latest release: - - ```bash - curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl" - ``` - - {{< note >}} - To download a specific version, replace the `$(curl -L -s https://dl.k8s.io/release/stable.txt)` portion of the command with the specific version. 
- - For example, to download version {{< param "fullversion" >}} on macOS, type: - - ```bash - curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/darwin/amd64/kubectl - ``` - - {{< /note >}} - -1. Validate the binary (optional) - - Download the kubectl checksum file: - - ```bash - curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl.sha256" - ``` - - Validate the kubectl binary against the checksum file: - - ```bash - echo "$(}} - Download the same version of the binary and checksum. - {{< /note >}} - -1. Make the kubectl binary executable. - - ```bash - chmod +x ./kubectl - ``` - -1. Move the kubectl binary to a file location on your system `PATH`. - - ```bash - sudo mv ./kubectl /usr/local/bin/kubectl && \ - sudo chown root: /usr/local/bin/kubectl - ``` - -1. Test to ensure the version you installed is up-to-date: - - ```bash - kubectl version --client - ``` - -### Install with Homebrew on macOS - -If you are on macOS and using [Homebrew](https://brew.sh/) package manager, you can install kubectl with Homebrew. - -1. Run the installation command: - - ```bash - brew install kubectl - ``` - - or - - ```bash - brew install kubernetes-cli - ``` - -1. Test to ensure the version you installed is up-to-date: - - ```bash - kubectl version --client - ``` - -### Install with Macports on macOS - -If you are on macOS and using [Macports](https://macports.org/) package manager, you can install kubectl with Macports. - -1. Run the installation command: - - ```bash - sudo port selfupdate - sudo port install kubectl - ``` - -1. Test to ensure the version you installed is up-to-date: - - ```bash - kubectl version --client - ``` - -## Install kubectl on Windows - -### Install kubectl binary with curl on Windows - -1. Download the [latest release {{< param "fullversion" >}}](https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe). - - Or if you have `curl` installed, use this command: - - ```powershell - curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe - ``` - - {{< note >}} - To find out the latest stable version (for example, for scripting), take a look at [https://dl.k8s.io/release/stable.txt](https://dl.k8s.io/release/stable.txt). - {{< /note >}} - -1. Validate the binary (optional) - - Download the kubectl checksum file: - - ```powershell - curl -LO https://dl.k8s.io/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe.sha256 - ``` - - Validate the kubectl binary against the checksum file: - - - Using Command Prompt to manually compare `CertUtil`'s output to the checksum file downloaded: - - ```cmd - CertUtil -hashfile kubectl.exe SHA256 - type kubectl.exe.sha256 - ``` - - - Using PowerShell to automate the verification using the `-eq` operator to get a `True` or `False` result: - - ```powershell - $($(CertUtil -hashfile .\kubectl.exe SHA256)[1] -replace " ", "") -eq $(type .\kubectl.exe.sha256) - ``` - -1. Add the binary in to your `PATH`. - -1. Test to ensure the version of `kubectl` is the same as downloaded: - - ```cmd - kubectl version --client - ``` - -{{< note >}} -[Docker Desktop for Windows](https://docs.docker.com/docker-for-windows/#kubernetes) adds its own version of `kubectl` to `PATH`. -If you have installed Docker Desktop before, you may need to place your `PATH` entry before the one added by the Docker Desktop installer or remove the Docker Desktop's `kubectl`. 
-{{< /note >}} - -### Install with PowerShell from PSGallery - -If you are on Windows and using the [PowerShell Gallery](https://www.powershellgallery.com/) package manager, you can install and update kubectl with PowerShell. - -1. Run the installation commands (making sure to specify a `DownloadLocation`): - - ```powershell - Install-Script -Name 'install-kubectl' -Scope CurrentUser -Force - install-kubectl.ps1 [-DownloadLocation ] - ``` - - {{< note >}} - If you do not specify a `DownloadLocation`, `kubectl` will be installed in the user's `temp` Directory. - {{< /note >}} - - The installer creates `$HOME/.kube` and instructs it to create a config file. - -1. Test to ensure the version you installed is up-to-date: - - ```powershell - kubectl version --client - ``` - -{{< note >}} -Updating the installation is performed by rerunning the two commands listed in step 1. -{{< /note >}} - -### Install on Windows using Chocolatey or Scoop - -1. To install kubectl on Windows you can use either [Chocolatey](https://chocolatey.org) package manager or [Scoop](https://scoop.sh) command-line installer. - - {{< tabs name="kubectl_win_install" >}} - {{% tab name="choco" %}} - ```powershell - choco install kubernetes-cli - ``` - {{% /tab %}} - {{% tab name="scoop" %}} - ```powershell - scoop install kubectl - ``` - {{% /tab %}} - {{< /tabs >}} - - -1. Test to ensure the version you installed is up-to-date: - - ```powershell - kubectl version --client - ``` - -1. Navigate to your home directory: - - ```powershell - # If you're using cmd.exe, run: cd %USERPROFILE% - cd ~ - ``` - -1. Create the `.kube` directory: - - ```powershell - mkdir .kube - ``` - -1. Change to the `.kube` directory you just created: - - ```powershell - cd .kube - ``` - -1. Configure kubectl to use a remote Kubernetes cluster: - - ```powershell - New-Item config -type file - ``` - -{{< note >}} -Edit the config file with a text editor of your choice, such as Notepad. -{{< /note >}} - -## Download as part of the Google Cloud SDK - -You can install kubectl as part of the Google Cloud SDK. - -1. Install the [Google Cloud SDK](https://cloud.google.com/sdk/). - -1. Run the `kubectl` installation command: - - ```shell - gcloud components install kubectl - ``` - -1. Test to ensure the version you installed is up-to-date: - - ```shell - kubectl version --client - ``` - -## Verifying kubectl configuration - -In order for kubectl to find and access a Kubernetes cluster, it needs a -[kubeconfig file](/docs/concepts/configuration/organize-cluster-access-kubeconfig/), -which is created automatically when you create a cluster using -[kube-up.sh](https://github.com/kubernetes/kubernetes/blob/master/cluster/kube-up.sh) -or successfully deploy a Minikube cluster. -By default, kubectl configuration is located at `~/.kube/config`. - -Check that kubectl is properly configured by getting the cluster state: - -```shell -kubectl cluster-info -``` - -If you see a URL response, kubectl is correctly configured to access your cluster. - -If you see a message similar to the following, kubectl is not configured correctly or is not able to connect to a Kubernetes cluster. - -``` -The connection to the server was refused - did you specify the right host or port? -``` - -For example, if you are intending to run a Kubernetes cluster on your laptop (locally), you will need a tool like Minikube to be installed first and then re-run the commands stated above. 
- -If kubectl cluster-info returns the url response but you can't access your cluster, to check whether it is configured properly, use: - -```shell -kubectl cluster-info dump -``` - -## Optional kubectl configurations - -### Enabling shell autocompletion - -kubectl provides autocompletion support for Bash and Zsh, which can save you a lot of typing. - -Below are the procedures to set up autocompletion for Bash (including the difference between Linux and macOS) and Zsh. - -{{< tabs name="kubectl_autocompletion" >}} - -{{% tab name="Bash on Linux" %}} - -### Introduction - -The kubectl completion script for Bash can be generated with the command `kubectl completion bash`. Sourcing the completion script in your shell enables kubectl autocompletion. - -However, the completion script depends on [**bash-completion**](https://github.com/scop/bash-completion), which means that you have to install this software first (you can test if you have bash-completion already installed by running `type _init_completion`). - -### Install bash-completion - -bash-completion is provided by many package managers (see [here](https://github.com/scop/bash-completion#installation)). You can install it with `apt-get install bash-completion` or `yum install bash-completion`, etc. - -The above commands create `/usr/share/bash-completion/bash_completion`, which is the main script of bash-completion. Depending on your package manager, you have to manually source this file in your `~/.bashrc` file. - -To find out, reload your shell and run `type _init_completion`. If the command succeeds, you're already set, otherwise add the following to your `~/.bashrc` file: - -```bash -source /usr/share/bash-completion/bash_completion -``` - -Reload your shell and verify that bash-completion is correctly installed by typing `type _init_completion`. - -### Enable kubectl autocompletion - -You now need to ensure that the kubectl completion script gets sourced in all your shell sessions. There are two ways in which you can do this: - -- Source the completion script in your `~/.bashrc` file: - - ```bash - echo 'source <(kubectl completion bash)' >>~/.bashrc - ``` - -- Add the completion script to the `/etc/bash_completion.d` directory: - - ```bash - kubectl completion bash >/etc/bash_completion.d/kubectl - ``` - -If you have an alias for kubectl, you can extend shell completion to work with that alias: - -```bash -echo 'alias k=kubectl' >>~/.bashrc -echo 'complete -F __start_kubectl k' >>~/.bashrc -``` - -{{< note >}} -bash-completion sources all completion scripts in `/etc/bash_completion.d`. -{{< /note >}} - -Both approaches are equivalent. After reloading your shell, kubectl autocompletion should be working. - -{{% /tab %}} - - -{{% tab name="Bash on macOS" %}} - - -### Introduction - -The kubectl completion script for Bash can be generated with `kubectl completion bash`. Sourcing this script in your shell enables kubectl completion. - -However, the kubectl completion script depends on [**bash-completion**](https://github.com/scop/bash-completion) which you thus have to previously install. - -{{< warning>}} -There are two versions of bash-completion, v1 and v2. V1 is for Bash 3.2 (which is the default on macOS), and v2 is for Bash 4.1+. The kubectl completion script **doesn't work** correctly with bash-completion v1 and Bash 3.2. It requires **bash-completion v2** and **Bash 4.1+**. 
Thus, to be able to correctly use kubectl completion on macOS, you have to install and use Bash 4.1+ ([*instructions*](https://itnext.io/upgrading-bash-on-macos-7138bd1066ba)). The following instructions assume you use Bash 4.1 or newer.
-{{< /warning >}}
-
-### Upgrade Bash
-
-The instructions here assume you use Bash 4.1+. You can check your Bash version by running:
-
-```bash
-echo $BASH_VERSION
-```
-
-If it is too old, you can install or upgrade it using Homebrew:
-
-```bash
-brew install bash
-```
-
-Reload your shell and verify that the desired version is being used:
-
-```bash
-echo $BASH_VERSION $SHELL
-```
-
-Homebrew usually installs it at `/usr/local/bin/bash`.
-
-### Install bash-completion
-
-{{< note >}}
-As mentioned, these instructions assume you use Bash 4.1+, which means you will install bash-completion v2 (in contrast to Bash 3.2 and bash-completion v1, with which kubectl completion won't work).
-{{< /note >}}
-
-You can test if you have bash-completion v2 already installed with `type _init_completion`. If not, you can install it with Homebrew:
-
-```bash
-brew install bash-completion@2
-```
-
-As stated in the output of this command, add the following to your `~/.bash_profile` file:
-
-```bash
-export BASH_COMPLETION_COMPAT_DIR="/usr/local/etc/bash_completion.d"
-[[ -r "/usr/local/etc/profile.d/bash_completion.sh" ]] && . "/usr/local/etc/profile.d/bash_completion.sh"
-```
-
-Reload your shell and verify that bash-completion v2 is correctly installed with `type _init_completion`.
-
-### Enable kubectl autocompletion
-
-You now have to ensure that the kubectl completion script gets sourced in all your shell sessions. There are multiple ways to achieve this:
-
-- Source the completion script in your `~/.bash_profile` file:
-
-  ```bash
-  echo 'source <(kubectl completion bash)' >>~/.bash_profile
-  ```
-
-- Add the completion script to the `/usr/local/etc/bash_completion.d` directory:
-
-  ```bash
-  kubectl completion bash >/usr/local/etc/bash_completion.d/kubectl
-  ```
-
-- If you have an alias for kubectl, you can extend shell completion to work with that alias:
-
-  ```bash
-  echo 'alias k=kubectl' >>~/.bash_profile
-  echo 'complete -F __start_kubectl k' >>~/.bash_profile
-  ```
-
-- If you installed kubectl with Homebrew (as explained [above](#install-with-homebrew-on-macos)), then the kubectl completion script should already be in `/usr/local/etc/bash_completion.d/kubectl`. In that case, you don't need to do anything.
-
-  {{< note >}}
-  The Homebrew installation of bash-completion v2 sources all the files in the `BASH_COMPLETION_COMPAT_DIR` directory; that's why the latter two methods work.
-  {{< /note >}}
-
-In any case, after reloading your shell, kubectl completion should be working.
-{{% /tab %}}
-
-{{% tab name="Zsh" %}}
-
-The kubectl completion script for Zsh can be generated with the command `kubectl completion zsh`. Sourcing the completion script in your shell enables kubectl autocompletion.
-
-To do so in all your shell sessions, add the following to your `~/.zshrc` file:
-
-```zsh
-source <(kubectl completion zsh)
-```
-
-If you have an alias for kubectl, you can extend shell completion to work with that alias:
-
-```zsh
-echo 'alias k=kubectl' >>~/.zshrc
-echo 'complete -F __start_kubectl k' >>~/.zshrc
-```
-
-After reloading your shell, kubectl autocompletion should be working.
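To confirm that the completion script actually loaded, one quick hedged check (this assumes the entry function is named `__start_kubectl`, the same name the alias lines above reference):

```shell
# If completion is active, this prints where kubectl's completion entry function is defined.
type __start_kubectl
```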
-
-If you get an error like `complete:13: command not found: compdef`, then add the following to the beginning of your `~/.zshrc` file:
-
-```zsh
-autoload -Uz compinit
-compinit
-```
-{{% /tab %}}
-{{< /tabs >}}
-
-## {{% heading "whatsnext" %}}
-
-* [Install Minikube](https://minikube.sigs.k8s.io/docs/start/)
-* See the [getting started guides](/docs/setup/) for more about creating clusters.
-* [Learn how to launch and expose your application.](/docs/tasks/access-application-cluster/service-access-application-cluster/)
-* If you need access to a cluster you didn't create, see the
-  [Sharing Cluster Access document](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
-* Read the [kubectl reference docs](/docs/reference/kubectl/kubectl/).
-
diff --git a/content/en/docs/test.md b/content/en/docs/test.md
index aadfc9a9e3a1f..ae5bb447f1534 100644
--- a/content/en/docs/test.md
+++ b/content/en/docs/test.md
@@ -113,7 +113,7 @@ mind:
 two consecutive lists. **The HTML comment needs to be at the left margin.**
 
 2. Numbered lists can have paragraphs or block elements within them.
-   Just indent the content to be the same as the first line of the bullet
+   Indent the content to be the same as the first line of the bullet
    point. **This paragraph and the code block line up with the `N` in
    `Numbered` above.**
diff --git a/content/en/docs/tutorials/_index.md b/content/en/docs/tutorials/_index.md
index b4f0709a7698b..acd2a4363f484 100644
--- a/content/en/docs/tutorials/_index.md
+++ b/content/en/docs/tutorials/_index.md
@@ -27,13 +27,15 @@ Before walking through each tutorial, you may want to bookmark the
 
 ## Configuration
 
+* [Example: Configuring a Java Microservice](/docs/tutorials/configuration/configure-java-microservice/)
+
 * [Configuring Redis Using a ConfigMap](/docs/tutorials/configuration/configure-redis-using-configmap/)
 
 ## Stateless Applications
 
 * [Exposing an External IP Address to Access an Application in a Cluster](/docs/tutorials/stateless-application/expose-external-ip-address/)
 
-* [Example: Deploying PHP Guestbook application with Redis](/docs/tutorials/stateless-application/guestbook/)
+* [Example: Deploying PHP Guestbook application with MongoDB](/docs/tutorials/stateless-application/guestbook/)
 
 ## Stateful Applications
 
diff --git a/content/en/docs/tutorials/clusters/apparmor.md b/content/en/docs/tutorials/clusters/apparmor.md
index 8ca9f30bad47e..32f25ba48314e 100644
--- a/content/en/docs/tutorials/clusters/apparmor.md
+++ b/content/en/docs/tutorials/clusters/apparmor.md
@@ -168,8 +168,7 @@ k8s-apparmor-example-deny-write (enforce)
 
 *This example assumes you have already set up a cluster with AppArmor support.*
 
-First, we need to load the profile we want to use onto our nodes. The profile we'll use simply
-denies all file writes:
+First, we need to load the profile we want to use onto our nodes. This profile denies all file writes:
 
 ```shell
 #include <tunables/global>
@@ -185,7 +184,7 @@ profile k8s-apparmor-example-deny-write flags=(attach_disconnected) {
 ```
 
 Since we don't know where the Pod will be scheduled, we'll need to load the profile on all our
-nodes. For this example we'll just use SSH to install the profiles, but other approaches are
+nodes. For this example we'll use SSH to install the profiles, but other approaches are
 discussed in [Setting up nodes with profiles](#setting-up-nodes-with-profiles).
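A hedged sketch of what that SSH-based loading can look like (the node names below are placeholder assumptions, and the exact loop on this page may differ):

```shell
# Assumed list of SSH-reachable node names; replace with your own.
NODES=(node-1 node-2 node-3)

for NODE in "${NODES[@]}"; do
  # apparmor_parser loads the profile into the kernel on each node.
  ssh "$NODE" 'sudo apparmor_parser -q <<EOF
#include <tunables/global>

profile k8s-apparmor-example-deny-write flags=(attach_disconnected) {
  #include <abstractions/base>

  file,

  # Deny all file writes.
  deny /** w,
}
EOF'
done
```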
```shell @@ -323,7 +322,7 @@ Events: 23s 23s 1 {kubelet e2e-test-stclair-node-pool-t1f5} Warning AppArmor Cannot enforce AppArmor: profile "k8s-apparmor-example-allow-write" is not loaded ``` -Note the pod status is Failed, with a helpful error message: `Pod Cannot enforce AppArmor: profile +Note the pod status is Pending, with a helpful error message: `Pod Cannot enforce AppArmor: profile "k8s-apparmor-example-allow-write" is not loaded`. An event was also recorded with the same message. ## Administration diff --git a/content/en/docs/tutorials/clusters/seccomp.md b/content/en/docs/tutorials/clusters/seccomp.md index adb3d9c500434..971618cf554d8 100644 --- a/content/en/docs/tutorials/clusters/seccomp.md +++ b/content/en/docs/tutorials/clusters/seccomp.md @@ -37,7 +37,7 @@ profiles that give only the necessary privileges to your container processes. In order to complete all steps in this tutorial, you must install [kind](https://kind.sigs.k8s.io/docs/user/quick-start/) and -[kubectl](/docs/tasks/tools/install-kubectl/). This tutorial will show examples +[kubectl](/docs/tasks/tools/). This tutorial will show examples with both alpha (pre-v1.19) and generally available seccomp functionality, so make sure that your cluster is [configured correctly](https://kind.sigs.k8s.io/docs/user/quick-start/#setting-kubernetes-version) @@ -67,8 +67,8 @@ into the cluster. For simplicity, [kind](https://kind.sigs.k8s.io/) can be used to create a single node cluster with the seccomp profiles loaded. Kind runs Kubernetes in Docker, -so each node of the cluster is actually just a container. This allows for files -to be mounted in the filesystem of each container just as one might load files +so each node of the cluster is a container. This allows for files +to be mounted in the filesystem of each container similar to loading files onto a node. {{< codenew file="pods/security/seccomp/kind.yaml" >}} diff --git a/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md b/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md index 7555a582013a4..b29b352acabe0 100644 --- a/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md +++ b/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md @@ -15,10 +15,8 @@ This page provides a real world example of how to configure Redis using a Config ## {{% heading "objectives" %}} -* Create a `kustomization.yaml` file containing: - * a ConfigMap generator - * a Pod resource config using the ConfigMap -* Apply the directory by running `kubectl apply -k ./` +* Create a ConfigMap with Redis configuration values +* Create a Redis Pod that mounts and uses the created ConfigMap * Verify that the configuration was correctly applied. @@ -38,82 +36,218 @@ This page provides a real world example of how to configure Redis using a Config ## Real World Example: Configuring Redis using a ConfigMap -You can follow the steps below to configure a Redis cache using data stored in a ConfigMap. +Follow the steps below to configure a Redis cache using data stored in a ConfigMap. 
-First create a `kustomization.yaml` containing a ConfigMap from the `redis-config` file:
-
-{{< codenew file="pods/config/redis-config" >}}
+First create a ConfigMap with an empty configuration block:
 
 ```shell
-curl -OL https://k8s.io/examples/pods/config/redis-config
-
-cat <<EOF >./kustomization.yaml
-configMapGenerator:
-- name: example-redis-config
-  files:
-  - redis-config
+cat <<EOF >./example-redis-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: example-redis-config
+data:
+  redis-config: ""
 EOF
 ```
 
-Add the pod resource config to the `kustomization.yaml`:
+Apply the ConfigMap created above, along with a Redis pod manifest:
+
+```shell
+kubectl apply -f example-redis-config.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/pods/config/redis-pod.yaml
+```
+
+Examine the contents of the Redis pod manifest and note the following:
+
+* A volume named `config` is created by `spec.volumes[1]`
+* The `key` and `path` under `spec.volumes[1].items[0]` expose the `redis-config` key from the
+  `example-redis-config` ConfigMap as a file named `redis.conf` on the `config` volume.
+* The `config` volume is then mounted at `/redis-master` by `spec.containers[0].volumeMounts[1]`.
+
+This has the net effect of exposing the data in `data.redis-config` from the `example-redis-config`
+ConfigMap above as `/redis-master/redis.conf` inside the Pod.
 
 {{< codenew file="pods/config/redis-pod.yaml" >}}
 
+Examine the created objects:
+
 ```shell
-curl -OL https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/pods/config/redis-pod.yaml
+kubectl get pod/redis configmap/example-redis-config
+```
 
-cat <<EOF >>./kustomization.yaml
-resources:
-- redis-pod.yaml
-EOF
+You should see the following output:
+
+```shell
+NAME        READY   STATUS    RESTARTS   AGE
+pod/redis   1/1     Running   0          8s
+
+NAME                             DATA   AGE
+configmap/example-redis-config   1      14s
 ```
 
-Apply the kustomization directory to create both the ConfigMap and Pod objects:
+Recall that we left the `redis-config` key in the `example-redis-config` ConfigMap blank:
 
 ```shell
-kubectl apply -k .
+kubectl describe configmap/example-redis-config
 ```
 
-Examine the created objects by
+You should see an empty `redis-config` key:
+
 ```shell
-> kubectl get -k .
-NAME                                        DATA   AGE
-configmap/example-redis-config-dgh9dg555m   1      52s
+Name:         example-redis-config
+Namespace:    default
+Labels:       <none>
+Annotations:  <none>
+
+Data
+====
+redis-config:
+```
 
-NAME        READY   STATUS    RESTARTS   AGE
-pod/redis   1/1     Running   0          52s
+Use `kubectl exec` to enter the pod and run the `redis-cli` tool to check the current configuration:
+
+```shell
+kubectl exec -it redis -- redis-cli
+```
+
+Check `maxmemory`:
+
+```shell
+127.0.0.1:6379> CONFIG GET maxmemory
+```
+
+It should show the default value of 0:
+
+```shell
+1) "maxmemory"
+2) "0"
+```
+
+Similarly, check `maxmemory-policy`:
+
+```shell
+127.0.0.1:6379> CONFIG GET maxmemory-policy
+```
+
+Which should also yield its default value of `noeviction`:
+
+```shell
+1) "maxmemory-policy"
+2) "noeviction"
+```
+
+Now let's add some configuration values to the `example-redis-config` ConfigMap:
+
+{{< codenew file="pods/config/example-redis-config.yaml" >}}
+
+Apply the updated ConfigMap:
+
+```shell
+kubectl apply -f example-redis-config.yaml
+```
+
+Confirm that the ConfigMap was updated:
+
+```shell
+kubectl describe configmap/example-redis-config
 ```
 
-In the example, the config volume is mounted at `/redis-master`.
-It uses `path` to add the `redis-config` key to a file named `redis.conf`.
-The file path for the redis config, therefore, is `/redis-master/redis.conf`.
-This is where the image will look for the config file for the redis master.
+You should see the configuration values we just added:
 
-Use `kubectl exec` to enter the pod and run the `redis-cli` tool to verify that
-the configuration was correctly applied:
+```shell
+Name:         example-redis-config
+Namespace:    default
+Labels:       <none>
+Annotations:  <none>
+
+Data
+====
+redis-config:
+----
+maxmemory 2mb
+maxmemory-policy allkeys-lru
+```
+
+Check the Redis Pod again using `redis-cli` via `kubectl exec` to see whether the configuration was applied:
 
 ```shell
 kubectl exec -it redis -- redis-cli
+```
+
+Check `maxmemory`:
+
+```shell
 127.0.0.1:6379> CONFIG GET maxmemory
+```
+
+It remains at the default value of 0:
+
+```shell
 1) "maxmemory"
-2) "2097152"
+2) "0"
+```
+
+Similarly, `maxmemory-policy` remains at the `noeviction` default setting:
+
+```shell
 127.0.0.1:6379> CONFIG GET maxmemory-policy
+```
+
+Returns:
+
+```shell
 1) "maxmemory-policy"
-2) "allkeys-lru"
+2) "noeviction"
 ```
 
-Delete the created pod:
+The configuration values have not changed because the Pod needs to be restarted to pick up updated
+values from associated ConfigMaps. Let's delete and recreate the Pod:
+
 ```shell
 kubectl delete pod redis
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/pods/config/redis-pod.yaml
 ```
 
+Now re-check the configuration values one last time:
 
+```shell
+kubectl exec -it redis -- redis-cli
+```
 
-## {{% heading "whatsnext" %}}
+Check `maxmemory`:
 
+```shell
+127.0.0.1:6379> CONFIG GET maxmemory
+```
 
-* Learn more about [ConfigMaps](/docs/tasks/configure-pod-container/configure-pod-configmap/).
+It should now return the updated value of 2097152:
 
+```shell
+1) "maxmemory"
+2) "2097152"
+```
+
+Similarly, `maxmemory-policy` has also been updated:
+
+```shell
+127.0.0.1:6379> CONFIG GET maxmemory-policy
+```
+
+It now reflects the desired value of `allkeys-lru`:
+
+```shell
+1) "maxmemory-policy"
+2) "allkeys-lru"
+```
+
+Clean up your work by deleting the created resources:
+
+```shell
+kubectl delete pod/redis configmap/example-redis-config
+```
+
+## {{% heading "whatsnext" %}}
+
+
+* Learn more about [ConfigMaps](/docs/tasks/configure-pod-container/configure-pod-configmap/).
diff --git a/content/en/docs/tutorials/hello-minikube.md b/content/en/docs/tutorials/hello-minikube.md
index ba115b601ceca..d8ad753958ac3 100644
--- a/content/en/docs/tutorials/hello-minikube.md
+++ b/content/en/docs/tutorials/hello-minikube.md
@@ -46,7 +46,7 @@ This tutorial provides a container image that uses NGINX to echo back all the re
 {{< kat-button >}}
 
 {{< note >}}
-If you installed minikube locally, run `minikube start`.
+If you installed minikube locally, run `minikube start`. Before you run `minikube dashboard`, open a new terminal, start `minikube dashboard` there, and then switch back to the main terminal.
 {{< /note >}}
 
 2. Open the Kubernetes dashboard in a browser:
@@ -59,6 +59,22 @@
 
 4. Katacoda environment only: Type `30000`, and then click **Display Port**.
 
+{{< note >}}
+The `dashboard` command enables the dashboard add-on and opens the proxy in the default web browser. You can create Kubernetes resources on the dashboard, such as Deployments and Services.
+
+If you are running in an environment as the root user, see [Open Dashboard with URL](#open-dashboard-with-url).
+
+To stop the proxy, press `Ctrl+C` to exit the process. The dashboard remains running.
+{{< /note >}}
+
+## Open Dashboard with URL
+
+If you don't want to open a web browser, run the dashboard command with the `--url` flag to emit a URL:
+
+```shell
+minikube dashboard --url
+```
+
 ## Create a Deployment
 
 A Kubernetes [*Pod*](/docs/concepts/workloads/pods/) is a group of one or more Containers,
@@ -136,7 +152,7 @@ Kubernetes [*Service*](/docs/concepts/services-networking/service/).
 The application code inside the image `k8s.gcr.io/echoserver` only listens on TCP port 8080. If you used
 `kubectl expose` to expose a different port, clients could not connect to that other port.
 
-2. View the Service you just created:
+2. View the Service you created:
 
    ```shell
    kubectl get services
@@ -211,7 +227,7 @@ The minikube tool includes a set of built-in {{< glossary_tooltip text="addons"
    metrics-server was successfully enabled
    ```
 
-3. View the Pod and Service you just created:
+3. View the Pod and Service you created:
 
    ```shell
    kubectl get pod,svc -n kube-system
diff --git a/content/en/docs/tutorials/kubernetes-basics/_index.html b/content/en/docs/tutorials/kubernetes-basics/_index.html
index fe4b7cb9de4af..afec8f26a8fa6 100644
--- a/content/en/docs/tutorials/kubernetes-basics/_index.html
+++ b/content/en/docs/tutorials/kubernetes-basics/_index.html
@@ -41,7 +41,7 @@

Kubernetes Basics

What can Kubernetes do for you?

-

With modern web services, users expect applications to be available 24/7, and developers expect to deploy new versions of those applications several times a day. Containerization helps package software to serve these goals, enabling applications to be released and updated in an easy and fast way without downtime. Kubernetes helps you make sure those containerized applications run where and when you want, and helps them find the resources and tools they need to work. Kubernetes is a production-ready, open source platform designed with Google's accumulated experience in container orchestration, combined with best-of-breed ideas from the community.

+

With modern web services, users expect applications to be available 24/7, and developers expect to deploy new versions of those applications several times a day. Containerization helps package software to serve these goals, enabling applications to be released and updated without downtime. Kubernetes helps you make sure those containerized applications run where and when you want, and helps them find the resources and tools they need to work. Kubernetes is a production-ready, open source platform designed with Google's accumulated experience in container orchestration, combined with best-of-breed ideas from the community.

diff --git a/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html b/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html index 5ac682d7af020..47a2629feb835 100644 --- a/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html @@ -33,7 +33,7 @@

Kubernetes Clusters

A Kubernetes cluster consists of two types of resources:

    -
  • The Master coordinates the cluster
  • +
  • The Control Plane coordinates the cluster
  • Nodes are the workers that run applications

@@ -71,22 +71,22 @@

Cluster Diagram

-

The Master is responsible for managing the cluster. The master coordinates all activities in your cluster, such as scheduling applications, maintaining applications' desired state, scaling applications, and rolling out new updates.

-

A node is a VM or a physical computer that serves as a worker machine in a Kubernetes cluster. Each node has a Kubelet, which is an agent for managing the node and communicating with the Kubernetes master. The node should also have tools for handling container operations, such as containerd or Docker. A Kubernetes cluster that handles production traffic should have a minimum of three nodes.

+

The Control Plane is responsible for managing the cluster. The Control Plane coordinates all activities in your cluster, such as scheduling applications, maintaining applications' desired state, scaling applications, and rolling out new updates.

+

A node is a VM or a physical computer that serves as a worker machine in a Kubernetes cluster. Each node has a Kubelet, which is an agent for managing the node and communicating with the Kubernetes control plane. The node should also have tools for handling container operations, such as containerd or Docker. A Kubernetes cluster that handles production traffic should have a minimum of three nodes.
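For instance, once a cluster is up, you can list its nodes with kubectl; a small hedged sketch:

```shell
# List every node along with its status, roles, and version.
kubectl get nodes -o wide
```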

-

Masters manage the cluster and the nodes that are used to host the running applications.

+

The Control Plane manages the cluster and the nodes that are used to host the running applications.

-

When you deploy applications on Kubernetes, you tell the master to start the application containers. The master schedules the containers to run on the cluster's nodes. The nodes communicate with the master using the Kubernetes API, which the master exposes. End users can also use the Kubernetes API directly to interact with the cluster.

+

When you deploy applications on Kubernetes, you tell the control plane to start the application containers. The control plane schedules the containers to run on the cluster's nodes. The nodes communicate with the control plane using the Kubernetes API, which the control plane exposes. End users can also use the Kubernetes API directly to interact with the cluster.
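As a hedged illustration of talking to the Kubernetes API directly (`kubectl proxy` forwards local requests to the API server; the port shown is its default):

```shell
# Start a local proxy to the API server, then query the API root.
kubectl proxy --port=8001 &
curl http://localhost:8001/api
```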

-

A Kubernetes cluster can be deployed on either physical or virtual machines. To get started with Kubernetes development, you can use Minikube. Minikube is a lightweight Kubernetes implementation that creates a VM on your local machine and deploys a simple cluster containing only one node. Minikube is available for Linux, macOS, and Windows systems. The Minikube CLI provides basic bootstrapping operations for working with your cluster, including start, stop, status, and delete. For this tutorial, however, you'll use a provided online terminal with Minikube pre-installed.

+

A Kubernetes cluster can be deployed on either physical or virtual machines. To get started with Kubernetes development, you can use Minikube. Minikube is a lightweight Kubernetes implementation that creates a VM on your local machine and deploys a simple cluster containing only one node. Minikube is available for Linux, macOS, and Windows systems. The Minikube CLI provides basic bootstrapping operations for working with your cluster, including start, stop, status, and delete. For this tutorial, however, you'll use a provided online terminal with Minikube pre-installed.
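If you are trying this outside the provided online terminal, a hedged sketch of bootstrapping a local cluster with Minikube:

```shell
minikube start    # create the single-node cluster
minikube status   # confirm the cluster components are running
```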

Now that you know what Kubernetes is, let's go to the online tutorial and start our first cluster!

diff --git a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html index 2ee67382fd896..15b6d00a6c2da 100644 --- a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html @@ -31,7 +31,7 @@

Kubernetes Deployments

Once you have a running Kubernetes cluster, you can deploy your containerized applications on top of it. To do so, you create a Kubernetes Deployment configuration. The Deployment instructs Kubernetes how to create and update instances of your application. Once you've created a Deployment, the Kubernetes - master schedules the application instances included in that Deployment to run on individual Nodes in the + control plane schedules the application instances included in that Deployment to run on individual Nodes in the cluster.
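A hedged sketch of creating such a Deployment from the command line (the Deployment name and sample image are illustrative):

```shell
# Create a Deployment running one instance of the sample application.
kubectl create deployment kubernetes-bootcamp --image=gcr.io/google-samples/kubernetes-bootcamp:v1

# Verify that the Deployment exists and its Pod is being scheduled.
kubectl get deployments
```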

diff --git a/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html b/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html index c610b6e9f4db2..d7687bc7b1318 100644 --- a/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html @@ -37,7 +37,7 @@

Overview of Kubernetes Services

  • ClusterIP (default) - Exposes the Service on an internal IP in the cluster. This type makes the Service only reachable from within the cluster.
  • NodePort - Exposes the Service on the same port of each selected Node in the cluster using NAT. Makes a Service accessible from outside the cluster using <NodeIP>:<NodePort>. Superset of ClusterIP.
  • LoadBalancer - Creates an external load balancer in the current cloud (if supported) and assigns a fixed, external IP to the Service. Superset of NodePort.
  • -
  • ExternalName - Exposes the Service using an arbitrary name (specified by externalName in the spec) by returning a CNAME record with the name. No proxy is used. This type requires v1.7 or higher of kube-dns.
  • +
• ExternalName - Maps the Service to the contents of the externalName field (e.g. `foo.bar.example.com`) by returning a CNAME record with its value. No proxying of any kind is set up. This type requires v1.7 or higher of kube-dns, or CoreDNS version 0.0.8 or higher.
  • More information about the different types of Services can be found in the Using Source IP tutorial. Also see Connecting Applications with Services.

Additionally, note that there are some use cases with Services that involve not defining a selector in the spec. A Service created without a selector will also not create the corresponding Endpoints object. This allows users to manually map a Service to specific endpoints, as sketched below. Another reason there may be no selector is that you are strictly using type: ExternalName.
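A hedged sketch of that manual mapping (the Service name and backend IP are illustrative placeholders): you create the Service with no selector, plus an Endpoints object with the same name:

```shell
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: my-external-service   # hypothetical name
spec:
  ports:
    - port: 80
---
apiVersion: v1
kind: Endpoints
metadata:
  name: my-external-service   # must match the Service name
subsets:
  - addresses:
      - ip: 192.0.2.42        # placeholder backend address
    ports:
      - port: 80
EOF
```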

    @@ -63,13 +63,7 @@

    Summary

    Services and Labels

    - -
    -
    -

    -
    -
    - +

    A Service routes traffic across a set of Pods. Services are the abstraction that allow pods to die and replicate in Kubernetes without impacting your application. Discovery and routing among dependent Pods (such as the frontend and backend components in an application) is handled by Kubernetes Services.
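A hedged sketch of that label-based wiring (the `app` label value is illustrative):

```shell
# A Service selects its Pods by label; these queries show both sides of that link.
kubectl get pods -l app=kubernetes-bootcamp
kubectl get services -l app=kubernetes-bootcamp
```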

    diff --git a/content/en/docs/tutorials/kubernetes-basics/public/images/module_01_cluster.svg b/content/en/docs/tutorials/kubernetes-basics/public/images/module_01_cluster.svg index e1f92dace01b7..b18337746749a 100644 --- a/content/en/docs/tutorials/kubernetes-basics/public/images/module_01_cluster.svg +++ b/content/en/docs/tutorials/kubernetes-basics/public/images/module_01_cluster.svg @@ -1,6 +1,32 @@ - - - diff --git a/content/en/docs/tutorials/kubernetes-basics/public/images/module_02_first_app.svg b/content/en/docs/tutorials/kubernetes-basics/public/images/module_02_first_app.svg index e0ae8fa504df1..cf8c922916ff7 100644 --- a/content/en/docs/tutorials/kubernetes-basics/public/images/module_02_first_app.svg +++ b/content/en/docs/tutorials/kubernetes-basics/public/images/module_02_first_app.svg @@ -1,5 +1,32 @@ - -
    https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/ +Políticas de seguridad de Pod | https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +Calidad de servicio (y gestión de recursos del clúster) | https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/ +Políticas de Red | https://kubernetes.io/docs/concepts/services-networking/network-policies/ +TLS para Kubernetes Ingress | https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + +## Contenedor + +La seguridad de los contenedores está fuera del alcance de la guía. Aquí hay recomendaciones generales y +enlaces para explorar este tema: + +Área de Interés para Contenedores | Recomendación | +------------------------------ | -------------- | +Escáneres de vulnerabilidad de contenedores y seguridad de dependencia del sistema operativo | Como parte del paso de la creación de la imagen, se debe utilizar un escáner de contenedores para detectar vulnerabilidades. +Firma de Imágenes y Aplicación | Firma de imágenes de contenedores para mantener un sistema confiable para el contenido de sus contenedores. +Prohibir Usuarios Privilegiados | Al crear contenedores, consulte la documentación para crear usuarios dentro de los contenedores con el menor privilegio necesario para cumplir con el propósito del contenedor en el sistema operativo. +Utilice el contenedor de tiempo de ejecución con el aislamiento más fuerte | Seleccione [clases del contenedor runtime](/docs/concepts/containers/runtime-class/) con el proveedor de aislamiento más fuerte. + +## Código + +El código de la aplicación es una de las principales superficies de ataque sobre las que tenemos más control. +Aunque la protección del código de la aplicación está fuera del tema de seguridad de Kubernetes, aquí algunas +recomendaciones para proteger el código de su aplicación: + +### Seguridad del código + +{{< table caption="Code security" >}} + +Áreas de Atención para el Código | Recomendación | +-------------------------| -------------- | +Acceso solo a través de TLS | Si su código necesita comunicarse a través de TCP, ejecute un handshake TLS con el cliente anticipadamente. Con la excepción de algunos casos, encripte todo lo que está en tránsito. Yendo un paso más allá, es una buena idea cifrar el tráfico de red entre los servicios. Esto se puede hacer a través del proceso de autenticación mutua o [mTLS](https://en.wikipedia.org/wiki/Mutual_authentication), que realiza una verificación bilateral de la comunicación a través de los certificados en los servicios. | +Limitación de rangos de puertos de comunicación | Esta recomendación puede ser un poco evidente, pero siempre que sea posible, solo debe exponer los puertos de su servicio que son absolutamente esenciales para la comunicación o la recopilación de métricas. | +Seguridad en dependencia de terceros | Es una buena práctica comprobar periódicamente las bibliotecas de terceros de su aplicación en busca de vulnerabilidades de seguridad. Cada lenguaje de programación tiene una herramienta para realizar esta verificación de forma automática. | +Análisis de código estático | La mayoría de los lenguajes proporcionan una forma de analizar el código en busca de prácticas de codificación potencialmente inseguras. Siempre que sea posible, debe automatizar los escaneos utilizando herramientas que puedan escanear las bases del código en busca de errores de seguridad comunes. 
Algunas de las herramientas se pueden encontrar en [OWASP Source Code Analysis Tools](https://owasp.org/www-community/Source_Code_Analysis_Tools). | +Ataques de sondeo dinámico | Existen algunas herramientas automatizadas que puede ejecutar en su servicio para explorar algunos de los ataques más conocidos. Esto incluye la inyección de SQL, CSRF y XSS. Una de las herramientas de análisis dinámico más populares es la [OWASP Zed Attack proxy](https://owasp.org/www-project-zap/). | + +{{< /table >}} + +## {{% heading "whatsnext" %}} + +Obtenga más información sobre los temas de seguridad de Kubernetes: + +* [Estándares de seguridad del pod](/docs/concepts/security/pod-security-standards/) +* [Políticas de red para pods](/docs/concepts/services-networking/network-policies/) +* [Control de acceso a la API de Kubernetes](/docs/concepts/security/controlling-access) +* [Protegiendo su clúster](/docs/tasks/administer-cluster/securing-a-cluster/) +* [Criptografía de datos en tránsito](/docs/tasks/tls/managing-tls-in-a-cluster/) +* [Criptografía de datos en reposo](/docs/tasks/administer-cluster/encrypt-data/) +* [Secretos en Kubernetes](/docs/concepts/configuration/secret/) +* [Runtime class](/docs/concepts/containers/runtime-class) \ No newline at end of file diff --git a/content/es/docs/concepts/workloads/pods/ephemeral-containers.md b/content/es/docs/concepts/workloads/pods/ephemeral-containers.md index 1b939c969e230..a880e1cbc86ee 100644 --- a/content/es/docs/concepts/workloads/pods/ephemeral-containers.md +++ b/content/es/docs/concepts/workloads/pods/ephemeral-containers.md @@ -100,7 +100,7 @@ efímero a añadir como una lista de `EphemeralContainers`: "apiVersion": "v1", "kind": "EphemeralContainers", "metadata": { - "name": "example-pod" + "name": "example-pod" }, "ephemeralContainers": [{ "command": [ diff --git a/content/es/docs/contribute/_index.md b/content/es/docs/contribute/_index.md index 46157bab39a9c..24851d8db0e28 100644 --- a/content/es/docs/contribute/_index.md +++ b/content/es/docs/contribute/_index.md @@ -83,3 +83,7 @@ para proporcionar un punto de partida. - Proponer mejoras al sitio web de Kubernetes y otras herramientas +## {{% heading "whatsnext" %}} + +También puedes leer la +[guía de localización para español](/es/docs/contribute/localization_es/). diff --git a/content/es/docs/contribute/localization_es.md b/content/es/docs/contribute/localization_es.md new file mode 100644 index 0000000000000..22602d3ed939a --- /dev/null +++ b/content/es/docs/contribute/localization_es.md @@ -0,0 +1,43 @@ +--- +title: Contribuir a la documentación de Kubernetes en español +content_type: concept +--- + + + +¡Bienvenido(a)! + +En esta página encontrarás información sobre convenciones utilizadas en la documentación en castellano y un glosario de términos con sus traducciones. 
+
+
+
+## Glosario de terminología {#terminologia}
+
+| English           | Español                | Género    | Comentarios              |
+| ----------------- | ---------------------- | --------- | ------------------------ |
+| availability zone | zona de disponibilidad | femenino  |                          |
+| bearer token      | bearer token           | masculino |                          |
+| built-in          | incorporados           | masculino |                          |
+| conditions        | condiciones            | femenino  | para node conditions     |
+| container         | contenedor             | masculino |                          |
+| controller        | controlador            | masculino |                          |
+| deploy            | desplegar              |           |                          |
+| Deployment        | Deployment             | masculino | objeto Kubernetes        |
+| Endpoints         | Endpoints              | masculino | objeto Kubernetes        |
+| file              | archivo                | masculino |                          |
+| frontend          | frontend               | masculino |                          |
+| healthy           | operativo              |           |                          |
+| high availability | alta disponibilidad    |           |                          |
+| hook              | hook                   | masculino |                          |
+| instance          | instancia              | femenino  |                          |
+| Lease             | Lease                  | masculino | objeto Kubernetes        |
+| Pod               | Pod                    | masculino | objeto Kubernetes        |
+| ratio             | ritmo                  |           |                          |
+| runtime           | motor de ejecución     | masculino | Container Runtime        |
+| scheduler         | planificador           | masculino |                          |
+| Secret            | Secret                 | masculino | objeto Kubernetes        |
+| secret            | secreto                | masculino | información confidencial |
+| shell             | terminal               | femenino  |                          |
+| stateless         | stateless              |           |                          |
+| taint             | contaminación          |           |                          |
+| worker node       | nodo de trabajo        | masculino |                          |
diff --git a/content/es/docs/reference/glossary/limitrange.md b/content/es/docs/reference/glossary/limitrange.md
new file mode 100755
index 0000000000000..686ed9c34242a
--- /dev/null
+++ b/content/es/docs/reference/glossary/limitrange.md
@@ -0,0 +1,23 @@
+---
+title: LimitRange
+id: limitrange
+date: 2019-04-15
+full_link: /docs/concepts/policy/limit-range/
+short_description: >
+  Proporciona restricciones para limitar el consumo de recursos por Contenedores o Pods en un espacio de nombres
+
+aka:
+tags:
+  - core-object
+  - fundamental
+  - architecture
+related:
+  - pod
+  - container
+---
+
+Proporciona restricciones para limitar el consumo de recursos por {{< glossary_tooltip text="Contenedores" term_id="container" >}} o {{< glossary_tooltip text="Pods" term_id="pod" >}} en un espacio de nombres ({{< glossary_tooltip text="Namespace" term_id="namespace" >}}).
+
+
+
+LimitRange limita la cantidad de objetos que se pueden crear por tipo, así como la cantidad de recursos informáticos que pueden ser requeridos/consumidos por {{< glossary_tooltip text="Pods" term_id="pod" >}} o {{< glossary_tooltip text="Contenedores" term_id="container" >}} individuales en un {{< glossary_tooltip text="Namespace" term_id="namespace" >}}.
diff --git a/content/es/docs/tasks/manage-kubernetes-objects/_index.md b/content/es/docs/tasks/manage-kubernetes-objects/_index.md
new file mode 100644
index 0000000000000..5815f0c69ecab
--- /dev/null
+++ b/content/es/docs/tasks/manage-kubernetes-objects/_index.md
@@ -0,0 +1,5 @@
+---
+title: "Administrar Objetos en Kubernetes"
+description: Interactuando con la API de Kubernetes aplicando los paradigmas declarativo e imperativo.
+weight: 25
+---
diff --git a/content/es/docs/tasks/manage-kubernetes-objects/declarative-config.md b/content/es/docs/tasks/manage-kubernetes-objects/declarative-config.md
new file mode 100644
index 0000000000000..cb1ef2e7e5f64
--- /dev/null
+++ b/content/es/docs/tasks/manage-kubernetes-objects/declarative-config.md
@@ -0,0 +1,1016 @@
+---
+title: Administración declarativa de Objetos en Kubernetes usando archivos de Configuración
+content_type: task
+weight: 10
+---
+
+
+Los objetos en Kubernetes pueden ser creados, actualizados y eliminados utilizando
+archivos de configuración almacenados en un directorio. Usando el comando
+`kubectl apply` podrá crearlos o actualizarlos de manera recursiva según sea necesario.
+Este método retiene cualquier escritura realizada contra los objetos activos en el
+sistema, sin unir los cambios de regreso a los archivos de configuración. `kubectl diff` le
+permite visualizar de manera previa los cambios que `apply` realizará.
+
+## {{% heading "prerequisites" %}}
+
+
+Instale [`kubectl`](/es/docs/tasks/tools/install-kubectl/).
+
+{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
+
+
+
+
+## Modos de administración
+
+La herramienta `kubectl` soporta tres modos distintos para la administración de objetos:
+
+* Comandos imperativos
+* Configuración de objetos imperativa
+* Configuración de objetos declarativa
+
+Consulte [Administración de objetos de Kubernetes](/docs/concepts/overview/working-with-objects/object-management/)
+para una discusión de las ventajas y desventajas de cada modo de administración.
+
+## Visión general
+
+La configuración de objetos declarativa requiere una comprensión firme de la
+definición y configuración de objetos de Kubernetes. Si aún no lo ha hecho, lea
+y complete los siguientes documentos:
+
+* [Administración de Objetos de Kubernetes usando comandos imperativos](/docs/tasks/manage-kubernetes-objects/imperative-command/)
+* [Administración imperativa de los Objetos de Kubernetes usando archivos de Configuración](/docs/tasks/manage-kubernetes-objects/imperative-config/)
+
+{{< comment >}}
+TODO(lmurillo): Update the links above to the spanish versions of these documents once the
+localizations become available
+{{< /comment >}}
+
+A continuación, la definición de los términos usados en este documento:
+
+- *archivo de configuración de objeto / archivo de configuración*: Un archivo en el
+  que se define la configuración de un objeto de Kubernetes. Este tema muestra cómo
+  utilizar archivos de configuración con `kubectl apply`. Los archivos de configuración
+  por lo general se almacenan en un sistema de control de versiones, como Git.
+- *configuración activa de objeto / configuración activa*: Los valores de configuración
+  activos de un objeto, según son observados por el clúster. Esta configuración
+  se almacena en el sistema de almacenamiento de Kubernetes, usualmente etcd.
+- *escritor de configuración declarativo / escritor declarativo*: Una persona o
+  componente de software que actualiza un objeto activo. Los escritores declarativos a
+  los que se refiere este tema aplican cambios a los archivos de configuración de objetos
+  y ejecutan `kubectl apply` para aplicarlos.
+
+## Cómo crear objetos
+
+Utilice `kubectl apply` para crear todos los objetos definidos en los archivos
+de configuración existentes en un directorio específico, con excepción de aquellos que
+ya existen:
+
+```shell
+kubectl apply -f <directorio>/
+```
+
+Esto definirá la anotación `kubectl.kubernetes.io/last-applied-configuration: '{...}'`
+en cada objeto.
Esta anotación contiene el contenido del archivo de configuración
+utilizado para la creación del objeto.
+
+{{< note >}}
+Agregue la opción `-R` para procesar un directorio de manera recursiva.
+{{< /note >}}
+
+El siguiente es un ejemplo de archivo de configuración para un objeto:
+
+{{< codenew file="application/simple_deployment.yaml" >}}
+
+Ejecute `kubectl diff` para visualizar el objeto que será creado:
+
+```shell
+kubectl diff -f https://k8s.io/examples/application/simple_deployment.yaml
+```
+
+{{< note >}}
+`diff` utiliza [server-side dry-run](/docs/reference/using-api/api-concepts/#dry-run),
+que debe estar habilitado en el `kube-apiserver`.
+
+Dado que `diff` ejecuta una solicitud de `apply` en el servidor en modo de simulacro (dry-run),
+requiere permisos de `PATCH`, `CREATE` y `UPDATE`.
+Vea [Autorización Dry-Run](/docs/reference/using-api/api-concepts#dry-run-authorization)
+para más detalles.
+
+{{< /note >}}
+
+Cree el objeto usando `kubectl apply`:
+
+```shell
+kubectl apply -f https://k8s.io/examples/application/simple_deployment.yaml
+```
+
+Despliegue la configuración activa usando `kubectl get`:
+
+```shell
+kubectl get -f https://k8s.io/examples/application/simple_deployment.yaml -o yaml
+```
+
+La salida le mostrará que la anotación `kubectl.kubernetes.io/last-applied-configuration`
+fue escrita a la configuración activa, y es consistente con los contenidos del archivo
+de configuración:
+
+```yaml
+kind: Deployment
+metadata:
+  annotations:
+    # ...
+    # Esta es la representación JSON de simple_deployment.yaml
+    # Fue escrita por kubectl apply cuando el objeto fue creado
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"apps/v1","kind":"Deployment",
+      "metadata":{"annotations":{},"name":"nginx-deployment","namespace":"default"},
+      "spec":{"minReadySeconds":5,"selector":{"matchLabels":{"app":nginx}},"template":{"metadata":{"labels":{"app":"nginx"}},
+      "spec":{"containers":[{"image":"nginx:1.14.2","name":"nginx",
+      "ports":[{"containerPort":80}]}]}}}}
+  # ...
+spec:
+  # ...
+  minReadySeconds: 5
+  selector:
+    matchLabels:
+      # ...
+      app: nginx
+  template:
+    metadata:
+      # ...
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - image: nginx:1.14.2
+        # ...
+        name: nginx
+        ports:
+        - containerPort: 80
+        # ...
+      # ...
+    # ...
+  # ...
+```
+
+## Cómo actualizar objetos
+
+También puede usar `kubectl apply` para actualizar los objetos definidos en un directorio,
+aún cuando esos objetos ya existan en la configuración activa. Con este enfoque logrará
+lo siguiente:
+
+1. Definir los campos que aparecerán en la configuración activa.
+2. Eliminar de la configuración activa aquellos campos eliminados en el archivo de configuración.
+
+```shell
+kubectl diff -f <directorio>/
+kubectl apply -f <directorio>/
+```
+
+{{< note >}}
+Agregue la opción `-R` para procesar directorios de manera recursiva.
+{{< /note >}}
+
+Este es un ejemplo de archivo de configuración:
+
+{{< codenew file="application/simple_deployment.yaml" >}}
+
+Cree el objeto usando `kubectl apply`:
+
+```shell
+kubectl apply -f https://k8s.io/examples/application/simple_deployment.yaml
+```
+
+{{< note >}}
+Con el propósito de ilustrar, el comando anterior se refiere a un único archivo
+de configuración en vez de un directorio.
+{{< /note >}} + +Despliegue la configuración activa usando `kubectl get`: + +```shell +kubectl get -f https://k8s.io/examples/application/simple_deployment.yaml -o yaml +``` + +La salida le mostrará que la anotación `kubectl.kubernetes.io/last-applied-configuration` +fue escrita a la configuración activa, y es consistente con los contenidos del archivo +de configuración: + +```yaml +kind: Deployment +metadata: + annotations: + # ... + # Esta es la representación JSON de simple_deployment.yaml + # Fue escrita por kubectl apply cuando el objeto fue creado + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"apps/v1","kind":"Deployment", + "metadata":{"annotations":{},"name":"nginx-deployment","namespace":"default"}, + "spec":{"minReadySeconds":5,"selector":{"matchLabels":{"app":nginx}},"template":{"metadata":{"labels":{"app":"nginx"}}, + "spec":{"containers":[{"image":"nginx:1.14.2","name":"nginx", + "ports":[{"containerPort":80}]}]}}}} + # ... +spec: + # ... + minReadySeconds: 5 + selector: + matchLabels: + # ... + app: nginx + template: + metadata: + # ... + labels: + app: nginx + spec: + containers: + - image: nginx:1.14.2 + # ... + name: nginx + ports: + - containerPort: 80 + # ... + # ... + # ... + # ... +``` + +De manera directa, actualice el campo `replicas` en la configuración activa usando `kubectl scale`. +En este caso no se usa `kubectl apply`: + +```shell +kubectl scale deployment/nginx-deployment --replicas=2 +``` + +Despliegue la configuración activa usando `kubectl get`: + +```shell +kubectl get deployment nginx-deployment -o yaml +``` + +La salida le muestra que el campo `replicas` ha sido definido en 2, y que la +anotación `last-applied-configuration` no contiene el campo `replicas`: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + # ... + # note que la anotación no contiene replicas + # debido a que el objeto no fue actualizado usando apply + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"apps/v1","kind":"Deployment", + "metadata":{"annotations":{},"name":"nginx-deployment","namespace":"default"}, + "spec":{"minReadySeconds":5,"selector":{"matchLabels":{"app":nginx}},"template":{"metadata":{"labels":{"app":"nginx"}}, + "spec":{"containers":[{"image":"nginx:1.14.2","name":"nginx", + "ports":[{"containerPort":80}]}]}}}} + # ... +spec: + replicas: 2 # definido por scale + # ... + minReadySeconds: 5 + selector: + matchLabels: + # ... + app: nginx + template: + metadata: + # ... + labels: + app: nginx + spec: + containers: + - image: nginx:1.14.2 + # ... + name: nginx + ports: + - containerPort: 80 + # ... +``` + +Actualice el archivo de configuración `simple_deployment.yaml` para cambiar el campo `image` +de `nginx:1.14.2` a `nginx:1.16.1`, y elimine el campo `minReadySeconds`: + +{{< codenew file="application/update_deployment.yaml" >}} + +Aplique los cambios realizados al archivo de configuración: + +```shell +kubectl diff -f https://k8s.io/examples/application/update_deployment.yaml +kubectl apply -f https://k8s.io/examples/application/update_deployment.yaml +``` + +Despliegue la configuración activa usando `kubectl get`: + +```shell +kubectl get -f https://k8s.io/examples/application/update_deployment.yaml -o yaml +``` + +La salida le mostrará los siguientes cambios hechos a la configuración activa: + +* El campo `replicas` retiene el valor de 2 definido por `kubectl scale`. + Esto es posible ya que el campo fue omitido en el archivo de configuración. 
+* El campo `image` ha sido actualizado de `nginx:1.14.2` a `nginx:1.16.1`.
+* La anotación `last-applied-configuration` ha sido actualizada con la nueva imagen.
+* El campo `minReadySeconds` ha sido eliminado.
+* La anotación `last-applied-configuration` ya no contiene el campo `minReadySeconds`.
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    # ...
+    # La anotación contiene la imagen actualizada a nginx 1.16.1,
+    # pero no contiene la actualización de las replicas a 2
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"apps/v1","kind":"Deployment",
+      "metadata":{"annotations":{},"name":"nginx-deployment","namespace":"default"},
+      "spec":{"selector":{"matchLabels":{"app":nginx}},"template":{"metadata":{"labels":{"app":"nginx"}},
+      "spec":{"containers":[{"image":"nginx:1.16.1","name":"nginx",
+      "ports":[{"containerPort":80}]}]}}}}
+  # ...
+spec:
+  replicas: 2 # Definido por `kubectl scale`. Ignorado por `kubectl apply`.
+  # minReadySeconds fue eliminado por `kubectl apply`
+  # ...
+  selector:
+    matchLabels:
+      # ...
+      app: nginx
+  template:
+    metadata:
+      # ...
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - image: nginx:1.16.1 # Definido por `kubectl apply`
+        # ...
+        name: nginx
+        ports:
+        - containerPort: 80
+        # ...
+      # ...
+    # ...
+  # ...
+```
+
+{{< warning >}}
+No se puede combinar `kubectl apply` con comandos de configuración imperativa de objetos
+como `create` y `replace`. Esto se debe a que `create`
+y `replace` no retienen la anotación `kubectl.kubernetes.io/last-applied-configuration`
+que `kubectl apply` utiliza para calcular los cambios por realizar.
+{{< /warning >}}
+
+## Cómo eliminar objetos
+
+Hay dos opciones diferentes para eliminar objetos gestionados por `kubectl apply`.
+
+### Manera recomendada: `kubectl delete -f <archivo>`
+
+La manera recomendada de eliminar objetos de forma manual es utilizando el comando
+imperativo, ya que es más explícito en relación a lo que será eliminado, y es
+menos probable que resulte en algo siendo eliminado sin la intención del usuario.
+
+```shell
+kubectl delete -f <archivo>
+```
+
+### Manera alternativa: `kubectl apply -f <directorio/> --prune -l etiqueta=deseada`
+
+Únicamente utilice esta opción si está seguro de saber lo que está haciendo.
+
+{{< warning >}}
+`kubectl apply --prune` se encuentra aún en alpha, y cambios incompatibles con versiones previas
+podrían ser introducidos en lanzamientos futuros.
+{{< /warning >}}
+
+{{< warning >}}
+Sea cuidadoso(a) al usar este comando, para evitar eliminar objetos
+no intencionalmente.
+{{< /warning >}}
+
+Como una alternativa a `kubectl delete`, puede usar `kubectl apply` para identificar los objetos a
+eliminar, luego de que sus archivos de configuración han sido eliminados del directorio. El comando `apply` con `--prune`
+consulta a la API del servidor por todos los objetos que coincidan con un grupo de etiquetas, e intenta relacionar
+la configuración obtenida de los objetos activos contra los objetos según sus archivos de configuración.
+Si un objeto coincide con la consulta, no tiene un archivo de configuración en el directorio, pero sí
+tiene una anotación `last-applied-configuration`, entonces será eliminado.
+
+{{< comment >}}
+TODO(pwittrock): We need to change the behavior to prevent the user from running apply on subdirectories unintentionally.
+{{< /comment >}}
+
+```shell
+kubectl apply -f <directorio/> --prune -l <etiquetas>
+```
+
+{{< warning >}}
+`apply` con `--prune` debería ejecutarse únicamente contra el directorio
+raíz que contiene los archivos de configuración. Ejecutarlo contra subdirectorios
+podría causar que algunos objetos sean eliminados no intencionalmente, si son retornados en la
+consulta por selección de etiqueta usando `-l <etiquetas>` y no existen en el subdirectorio.
+{{< /warning >}}
+
+## Cómo visualizar un objeto
+
+Puede usar `kubectl get` con `-o yaml` para ver la configuración de un objeto activo:
+
+```shell
+kubectl get -f <archivo|url> -o yaml
+```
+
+## Cómo apply calcula y une las diferencias
+
+{{< caution >}}
+Un *patch* (parche) es una operación de actualización con alcance a campos específicos
+de un objeto, y no al objeto completo. Esto permite actualizar únicamente grupos de campos
+específicos en un objeto sin tener que leer el objeto primero.
+{{< /caution >}}
+
+Cuando `kubectl apply` actualiza la configuración activa para un objeto, lo hace enviando
+una solicitud de patch al servidor de API. El patch define actualizaciones para campos
+específicos de la configuración del objeto activo. El comando `kubectl apply` calcula esta solicitud
+de patch usando el archivo de configuración, la configuración activa, y la anotación `last-applied-configuration`
+almacenada en la configuración activa.
+
+### Calculando la unión de un patch
+
+El comando `kubectl apply` escribe el contenido de la configuración en la anotación
+`kubectl.kubernetes.io/last-applied-configuration`. Esto se usa para identificar aquellos campos
+que han sido eliminados de la configuración y que deben ser limpiados. Los siguientes pasos
+se usan para calcular qué campos deben ser eliminados o definidos:
+
+1. Cálculo de los campos por eliminar. Estos son los campos presentes en `last-applied-configuration` pero ausentes en el archivo de configuración.
+2. Cálculo de los campos por agregar o definir. Estos son los campos presentes en el archivo de configuración, con valores inconsistentes con la configuración activa.
+
+A continuación, un ejemplo. Suponga que este es el archivo de configuración para un objeto de tipo Deployment:
+
+{{< codenew file="application/update_deployment.yaml" >}}
+
+También, suponga que esta es la configuración activa para ese mismo objeto de tipo Deployment:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    # ...
+    # tome nota de que la anotación no contiene un valor para replicas
+    # dado que no fue actualizado usando el comando apply
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"apps/v1","kind":"Deployment",
+      "metadata":{"annotations":{},"name":"nginx-deployment","namespace":"default"},
+      "spec":{"minReadySeconds":5,"selector":{"matchLabels":{"app":nginx}},"template":{"metadata":{"labels":{"app":"nginx"}},
+      "spec":{"containers":[{"image":"nginx:1.14.2","name":"nginx",
+      "ports":[{"containerPort":80}]}]}}}}
+  # ...
+spec:
+  replicas: 2 # definidas por scale
+  # ...
+  minReadySeconds: 5
+  selector:
+    matchLabels:
+      # ...
+      app: nginx
+  template:
+    metadata:
+      # ...
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - image: nginx:1.14.2
+        # ...
+        name: nginx
+        ports:
+        - containerPort: 80
+        # ...
+```
+
+Estos son los cálculos de unión que serían realizados por `kubectl apply`:
+
+1. Calcular los campos por eliminar, leyendo los valores de `last-applied-configuration`
+   y comparándolos con los valores en el archivo de configuración.
+   Limpiar los campos definidos explícitamente en null en el archivo de configuración,
+   sin importar si se encuentran presentes en la anotación `last-applied-configuration`.
+   En este ejemplo, `minReadySeconds` aparece en la anotación
+   `last-applied-configuration` pero no aparece en el archivo de configuración.
+   **Acción:** Limpiar `minReadySeconds` de la configuración activa.
+2. Calcular los campos por definir, leyendo los valores del archivo de configuración
+   y comparándolos con los valores en la configuración activa. En este ejemplo, el valor de `image`
+   en el archivo de configuración no coincide con el valor en la configuración activa.
+   **Acción:** Definir el campo `image` en la configuración activa.
+3. Definir el valor de la anotación `last-applied-configuration` para que sea consistente
+   con el archivo de configuración.
+4. Unir los resultados de 1, 2 y 3 en una única solicitud de patch para enviar al servidor de API.
+
+Esta es la configuración activa como resultado de esta unión:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    # ...
+    # La anotación contiene la imagen actualizada a nginx 1.16.1,
+    # pero no contiene la actualización a 2 replicas
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"apps/v1","kind":"Deployment",
+      "metadata":{"annotations":{},"name":"nginx-deployment","namespace":"default"},
+      "spec":{"selector":{"matchLabels":{"app":nginx}},"template":{"metadata":{"labels":{"app":"nginx"}},
+      "spec":{"containers":[{"image":"nginx:1.16.1","name":"nginx",
+      "ports":[{"containerPort":80}]}]}}}}
+  # ...
+spec:
+  selector:
+    matchLabels:
+      # ...
+      app: nginx
+  replicas: 2 # Definido por `kubectl scale`. Ignorado por `kubectl apply`.
+  # minReadySeconds eliminado por `kubectl apply`
+  # ...
+  template:
+    metadata:
+      # ...
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - image: nginx:1.16.1 # Definido por `kubectl apply`
+        # ...
+        name: nginx
+        ports:
+        - containerPort: 80
+        # ...
+      # ...
+    # ...
+  # ...
+```
+
+### Cómo se unen los diferentes tipos de campos
+
+La manera en la que los campos de un archivo de configuración son unidos con la
+configuración activa depende del tipo de campo. Existen varios tipos de campos:
+
+- *primitivo*: Campos de tipo cadena de texto (string), entero (integer) o lógico (boolean).
+  Por ejemplo, `image` y `replicas` son campos de tipo primitivo. **Acción:** Reemplazarlos.
+
+- *mapa*, también llamado *objeto*: Campo de tipo mapa o un tipo
+  complejo que contiene sub-campos. Por ejemplo, `labels`,
+  `annotations`, `spec` y `metadata` son todos mapas. **Acción:** Unir los elementos o sub-campos.
+
+- *lista*: Campos que contienen una lista de elementos que pueden ser de tipo primitivo o mapa.
+  Como ejemplos, `containers`, `ports` y `args` son listas. **Acción:** Varía.
+
+Cuando `kubectl apply` actualiza un campo de tipo mapa o lista, típicamente no reemplaza
+el campo completo, sino que actualiza los sub-elementos individuales.
+Por ejemplo, cuando se hace una unión del campo `spec` en un Deployment, el `spec`
+completo no es reemplazado; por el contrario, únicamente los sub-campos de `spec`, como
+`replicas`, son comparados y unidos.
+
+### Uniendo cambios en campos primitivos
+
+Los campos primitivos son limpiados o reemplazados.
+
+{{< note >}}
+`-` indica "no aplica", debido a que el valor no es utilizado.
+{{< /note >}} + +| Campo en el archivo de configuración | Campo en la configuración activa | Campo en last-applied-configuration | Acción | +|---------------------------------------|------------------------------------|-------------------------------------|--------------------------------------------------------------| +| Si | Si | - | Define el valor en el archivo de configuración como activo. | +| Si | No | - | Define el valor a la configuración local. | +| No | - | Si | Elimina de la configuración activa. | +| No | - | No | No hacer nada. Mantiene el valor activo. | + +### Uniendo cambios en campos de un mapa + +Los campos que conjuntamente representan un mapa, son unidos al comparar cada uno de los subcampos o elementos del mapa: + +{{< note >}} +`-` determina que "no aplica" debido a que el valor no es utilizado. +{{< /note >}} + +| Propiedad en archivo de configuración | Propiedad en configuración activa | Campo en last-applied-configuration | Acción | +|---------------------------------------|-----------------------------------|-------------------------------------|------------------------------------------| +| Si | Si | - | Comparar valores de sub-propiedades. | +| Si | No | - | Usar configuración local. | +| No | - | Si | Eliminar de la configuración activa. | +| No | - | No | No hacer nada. Mantener el valor activo. | + +### Uniendo cambios en campos de tipo lista + +El unir cambios en una lista utiliza una de tres posibles estrategias: + +* Reemplazar la lista si todos sus elementos son primitivos. +* Unir elementos individuales en líneas de elementos complejos. +* Unir una lista de elementos primitivos. + +Se define la estrategia elegida con base en cada campo. + +#### Reemplazar una lista si todos sus elementos son primitivos + +Trata la lista como si fuese un campo primitivo. Reemplaza o elimina la lista completa. +Esto preserva el orden de los elementos. + + +**Ejemplo:** Usando `kubectl apply` para actualizar el campo `args` de un Contenedor en un Pod. +Esto define el valor de `args` en la configuración activa, al valor en el archivo de configuración. +Cualquier elemento de `args` que haya sido previamente agregado a la configuración activa se perderá. +El orden de los elementos definidos en `args` en el archivo de configuración, serán conservados +en la configuración activa. + +```yaml +# valor en last-applied-configuration + args: ["a", "b"] + +# valores en archivo de configuración + args: ["a", "c"] + +# configuración activa + args: ["a", "b", "d"] + +# resultado posterior a la unión + args: ["a", "c"] +``` + +**Explicación:** La unión utilizó los valores del archivo de configuración para definir los nuevos valores de la lista. + +#### Unir elementos individuales en una lista de elementos complejos + +Trata la lista como un mapa, y trata cada campo específico de cada elemento como una llave. +Agrega, elimina o actualiza elementos individuales. Esta operación no conserva el orden. + +Esta estrategia de unión utiliza una etiqueta especial en cada campo llamada `patchMergeKey`. La etiqueta +`patchMergeKey` es definida para cada campo en el código fuente de Kubernetes: +[types.go](https://github.com/kubernetes/api/blob/d04500c8c3dda9c980b668c57abc2ca61efcf5c4/core/v1/types.go#L2747) +Al unir una lista de mapas, el campo especificado en `patchMergeKey` para el elemento dado +se utiliza como un mapa de llaves para ese elemento. + +**Ejemplo:** Utilice `kubectl apply` para actualizar el campo `containers` de un PodSpec. 
+
+**Ejemplo:** Utilice `kubectl apply` para actualizar el campo `containers` de un PodSpec.
+Esto une la lista como si fuese un mapa donde cada elemento utiliza `name` como llave.
+
+```yaml
+# valor en last-applied-configuration
+  containers:
+  - name: nginx
+    image: nginx:1.16
+  - name: nginx-helper-a # llave: nginx-helper-a; será eliminado en resultado
+    image: helper:1.3
+  - name: nginx-helper-b # llave: nginx-helper-b; será conservado
+    image: helper:1.3
+
+# valor en archivo de configuración
+  containers:
+  - name: nginx
+    image: nginx:1.16
+  - name: nginx-helper-b
+    image: helper:1.3
+  - name: nginx-helper-c # llave: nginx-helper-c; será agregado en el resultado
+    image: helper:1.3
+
+# configuración activa
+  containers:
+  - name: nginx
+    image: nginx:1.16
+  - name: nginx-helper-a
+    image: helper:1.3
+  - name: nginx-helper-b
+    image: helper:1.3
+    args: ["run"] # Campo será conservado
+  - name: nginx-helper-d # llave: nginx-helper-d; será conservado
+    image: helper:1.3
+
+# resultado posterior a la unión
+  containers:
+  - name: nginx
+    image: nginx:1.16
+    # Elemento nginx-helper-a fue eliminado
+  - name: nginx-helper-b
+    image: helper:1.3
+    args: ["run"] # Campo fue conservado
+  - name: nginx-helper-c # Elemento fue agregado
+    image: helper:1.3
+  - name: nginx-helper-d # Elemento fue conservado
+    image: helper:1.3
+```
+
+**Explicación:**
+
+- El contenedor llamado "nginx-helper-a" fue eliminado al no aparecer ningún
+  contenedor llamado "nginx-helper-a" en el archivo de configuración.
+- El contenedor llamado "nginx-helper-b" mantiene los cambios existentes en `args`
+  en la configuración activa. `kubectl apply` pudo identificar que
+  el contenedor "nginx-helper-b" en la configuración activa es el mismo
+  "nginx-helper-b" que aparece en el archivo de configuración, aun teniendo diferentes
+  valores en los campos (no existe `args` en el archivo de configuración). Esto sucede
+  debido a que el valor del campo `patchMergeKey` (name) es idéntico en ambos.
+- El contenedor llamado "nginx-helper-c" fue agregado ya que no existe ningún contenedor
+  con ese nombre en la configuración activa, pero sí existe uno con ese nombre
+  en el archivo de configuración.
+- El contenedor llamado "nginx-helper-d" fue conservado debido a que no aparece
+  ningún elemento con ese nombre en last-applied-configuration.
+
+#### Unir una lista de elementos primitivos
+
+A partir de Kubernetes 1.5, el unir listas de elementos primitivos no es soportado.
+
+{{< note >}}
+La etiqueta `patchStrategy` en [types.go](https://github.com/kubernetes/api/blob/d04500c8c3dda9c980b668c57abc2ca61efcf5c4/core/v1/types.go#L2748) es la que
+determina cuál de las estrategias aplica para cualquier campo en particular.
+Para campos de tipo lista, el campo será reemplazado cuando no exista una especificación de `patchStrategy`.
+{{< /note >}}
+
+{{< comment >}}
+TODO(pwittrock): Uncomment this for 1.6
+
+- Treat the list as a set of primitives. Replace or delete individual
+  elements. Does not preserve ordering. Does not preserve duplicates.
+
+**Example:** Using apply to update the `finalizers` field of ObjectMeta
+keeps elements added to the live configuration. Ordering of finalizers
+is lost.
+{{< /comment >}}
+
+## Valores de campo por defecto
+
+El Servidor de API define algunos campos a sus valores por defecto si no son especificados
+al momento de crear un objeto.
+
+Aquí puede ver un archivo de configuración para un Deployment.
+Este archivo no especifica el campo `strategy`:
+
+{{< codenew file="application/simple_deployment.yaml" >}}
+
+Cree el objeto usando `kubectl apply`:
+
+```shell
+kubectl apply -f https://k8s.io/examples/application/simple_deployment.yaml
+```
+
+Imprima la configuración activa usando `kubectl get`:
+
+```shell
+kubectl get -f https://k8s.io/examples/application/simple_deployment.yaml -o yaml
+```
+
+La salida muestra que el servidor de API definió varios campos con los valores por defecto
+en la configuración activa. Estos campos no fueron especificados en el archivo de
+configuración.
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+# ...
+spec:
+  selector:
+    matchLabels:
+      app: nginx
+  minReadySeconds: 5
+  replicas: 1 # valor por defecto definido por apiserver
+  strategy:
+    rollingUpdate: # valor por defecto definido por apiserver - derivado de strategy.type
+      maxSurge: 1
+      maxUnavailable: 1
+    type: RollingUpdate # valor por defecto definido por apiserver
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - image: nginx:1.14.2
+        imagePullPolicy: IfNotPresent # valor por defecto definido por apiserver
+        name: nginx
+        ports:
+        - containerPort: 80
+          protocol: TCP # valor por defecto definido por apiserver
+        resources: {} # valor por defecto definido por apiserver
+        terminationMessagePath: /dev/termination-log # valor por defecto definido por apiserver
+      dnsPolicy: ClusterFirst # valor por defecto definido por apiserver
+      restartPolicy: Always # valor por defecto definido por apiserver
+      securityContext: {} # valor por defecto definido por apiserver
+      terminationGracePeriodSeconds: 30 # valor por defecto definido por apiserver
+# ...
+```
+
+En una solicitud de patch, los campos definidos a valores por defecto no son redefinidos,
+a excepción de cuando hayan sido limpiados de manera explícita como parte de la solicitud de patch.
+Esto puede causar comportamientos no esperados para campos cuyo valor por defecto está basado
+en los valores de otros campos. Cuando el otro campo cambia, los valores por defecto derivados
+de él no serán actualizados de no ser que sean limpiados de manera explícita.
+
+Por esta razón, se recomienda que algunos campos que reciben un valor por defecto del
+servidor sean definidos de manera explícita en los archivos de configuración, aun cuando
+el valor definido sea idéntico al valor por defecto. Esto facilita la identificación
+de valores conflictivos que podrían no ser revertidos a valores por defecto por parte
+del servidor.
+
+**Ejemplo:**
+
+```yaml
+# last-applied-configuration
+spec:
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: nginx:1.14.2
+        ports:
+        - containerPort: 80
+
+# archivo de configuración
+spec:
+  strategy:
+    type: Recreate # valor actualizado
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: nginx:1.14.2
+        ports:
+        - containerPort: 80
+
+# configuración activa
+spec:
+  strategy:
+    type: RollingUpdate # valor por defecto
+    rollingUpdate: # valor por defecto derivado del campo type
+      maxSurge: 1
+      maxUnavailable: 1
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: nginx:1.14.2
+        ports:
+        - containerPort: 80
+
+# resultado posterior a la unión - ¡ERROR!
+spec:
+  strategy:
+    type: Recreate # valor actualizado: incompatible con RollingUpdate
+    rollingUpdate: # valor por defecto: incompatible con "type: Recreate"
+      maxSurge: 1
+      maxUnavailable: 1
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: nginx:1.14.2
+        ports:
+        - containerPort: 80
+```
+
+**Explicación:**
+
+1. El usuario crea un Deployment sin definir `strategy.type`.
+2. El servidor define `strategy.type` a su valor por defecto de `RollingUpdate` y
+   agrega los valores por defecto a `strategy.rollingUpdate`.
+3. El usuario cambia `strategy.type` a `Recreate`. Los valores de `strategy.rollingUpdate`
+   se mantienen en su configuración por defecto, sin embargo el servidor espera que se limpien.
+   Si los valores de `strategy.rollingUpdate` hubiesen sido definidos inicialmente en el archivo
+   de configuración, hubiese sido más claro que requerían ser eliminados.
+4. Apply fallará debido a que `strategy.rollingUpdate` no fue eliminado. El campo `strategy.rollingUpdate`
+   no puede estar definido si el valor de `strategy.type` es `Recreate`.
+
+Recomendación: Estos campos deberían ser definidos de manera explícita en el archivo de configuración:
+
+- Etiquetas de Selectors y PodTemplate en cargas de trabajo como Deployment, StatefulSet, Job, DaemonSet,
+  ReplicaSet, y ReplicationController
+- Estrategia de rollout para un Deployment
+
+### Cómo limpiar campos definidos a valores por defecto por el servidor, o definidos por otros escritores
+
+Campos que no aparecen en el archivo de configuración pueden ser limpiados si se define su valor
+a `null` y luego se aplica el archivo de configuración.
+Para los campos definidos a valores por defecto por el servidor, esto provoca que se restablezcan
+a sus valores por defecto.
+
+## Cómo cambiar al propietario de un campo entre un archivo de configuración y un escritor imperativo
+
+Estos son los únicos métodos que debe usar para cambiar un campo individual de un objeto:
+
+- Usando `kubectl apply`.
+- Escribiendo de manera directa a la configuración activa sin modificar el archivo de configuración:
+  por ejemplo, usando `kubectl scale`.
+
+### Cambiando al propietario de un campo de un escritor imperativo a un archivo de configuración
+
+Añada el campo al archivo de configuración, y no realice nuevas actualizaciones a la configuración
+activa que no sucedan por medio de `kubectl apply`.
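+
+Por ejemplo (boceto con nombres hipotéticos): si `replicas` fue definido con un escritor
+imperativo como `kubectl scale`, agregue el valor actual de `replicas` al archivo de
+configuración y aplíquelo; a partir de ese momento el campo pertenece al archivo:
+
+```shell
+# hipotético: replicas fue definido previamente de manera imperativa
+kubectl scale deployment/nginx-deployment --replicas=3
+
+# agregue "replicas: 3" al spec del archivo de configuración (nombre hipotético)
+# y luego aplique el archivo para transferir la propiedad del campo
+kubectl apply -f nginx-deployment.yaml
+```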
+{{< /comment >}} + +### Migrando de gestión imperativa con comandos a configuración declarativa de objetos + +El migrar de gestión imperativa utilizando comandos a la gestión declarativa de objetos +requiere varios pasos manuales: + +1. Exporte el objeto activo a un archivo local de configuración: + + ```shell + kubectl get / -o yaml > _.yaml + ``` + +1. Elimine de manera manual el campo `status` del archivo de configuración. + + {{< note >}} + Este paso es opcional, ya que `kubectl apply` no actualiza el campo `status` + aunque este presente en el archivo de configuración. + {{< /note >}} + +1. Defina la anotación `kubectl.kubernetes.io/last-applied-configuration` en el objeto: + + ```shell + kubectl replace --save-config -f _.yaml + ``` + +1. Modifique el proceso para usar `kubectl apply` para gestionar el objeto de manera exclusiva. + +{{< comment >}} +TODO(pwittrock): Why doesn't export remove the status field? Seems like it should. +{{< /comment >}} + +### Migrando de gestión imperativa de la configuración de objetos a gestión declarativa + +1. Defina la anotación `kubectl.kubernetes.io/last-applied-configuration` en el objeto: + + ```shell + kubectl replace --save-config -f _.yaml + ``` + +1. Modifique el proceso para usar `kubectl apply` para gestionar el objeto de manera exclusiva. + +## Definiendo los selectores para el controlador y las etiquetas de PodTemplate + +{{< warning >}} +Se desaconseja encarecidamente actualizar los selectores en controladores. +{{< /warning >}} + +La forma recomendada es definir una etiqueta única e inmutable para PodTemplate usada +únicamente por el selector del controlador sin tener ningún otro significado semántico. + +**Ejemplo:** + +```yaml +selector: + matchLabels: + controller-selector: "apps/v1/deployment/nginx" +template: + metadata: + labels: + controller-selector: "apps/v1/deployment/nginx" +``` + +## {{% heading "whatsnext" %}} + + +* [Administración de Objetos de Kubernetes usando comandos imperativos](/docs/tasks/manage-kubernetes-objects/imperative-command/) +* [Administración imperativa de los Objetos de Kubernetes usando archivos de configuración](/docs/tasks/manage-kubernetes-objects/imperative-config/) +* [Referencia del comando Kubectl](/docs/reference/generated/kubectl/kubectl-commands/) +* [Referencia de la API de Kubernetes](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) + + diff --git a/content/es/docs/tasks/run-application/run-stateless-application-deployment.md b/content/es/docs/tasks/run-application/run-stateless-application-deployment.md index 4bbe221adf454..f6e82d85a9701 100644 --- a/content/es/docs/tasks/run-application/run-stateless-application-deployment.md +++ b/content/es/docs/tasks/run-application/run-stateless-application-deployment.md @@ -49,7 +49,6 @@ Puedes correr una aplicación creando un `deployment` de Kubernetes, y puedes de El resultado es similar a esto: - user@computer:~/website$ kubectl describe deployment nginx-deployment Name: nginx-deployment Namespace: default CreationTimestamp: Tue, 30 Aug 2016 18:11:37 -0700 diff --git a/content/es/docs/tasks/tools/install-kubectl.md b/content/es/docs/tasks/tools/install-kubectl.md index af1abeb63dc79..fdca3db92c58f 100644 --- a/content/es/docs/tasks/tools/install-kubectl.md +++ b/content/es/docs/tasks/tools/install-kubectl.md @@ -188,7 +188,7 @@ Si estás en macOS y utilizas el gestor de paquetes [Macports](https://macports. ### Instalar el binario de kubectl con curl en Windows -1. 
Descargar la última entrega {{< param "fullversion" >}} de [este link]((https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe). +1. Descargar la última entrega {{< param "fullversion" >}} de [este link](https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe). o si tiene `curl` instalada, utiliza este comando: diff --git a/content/es/examples/application/simple_deployment.yaml b/content/es/examples/application/simple_deployment.yaml new file mode 100644 index 0000000000000..d9c74af8c577b --- /dev/null +++ b/content/es/examples/application/simple_deployment.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + minReadySeconds: 5 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 diff --git a/content/es/examples/application/update_deployment.yaml b/content/es/examples/application/update_deployment.yaml new file mode 100644 index 0000000000000..7230cc432332b --- /dev/null +++ b/content/es/examples/application/update_deployment.yaml @@ -0,0 +1,18 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.16.1 # actualice el valor de image + ports: + - containerPort: 80 diff --git a/content/fr/community/static/cncf-code-of-conduct.md b/content/fr/community/static/cncf-code-of-conduct.md index 8d67a9c21add8..93ac77947797c 100644 --- a/content/fr/community/static/cncf-code-of-conduct.md +++ b/content/fr/community/static/cncf-code-of-conduct.md @@ -24,7 +24,7 @@ Ce Code de conduite s’applique à la fois dans le cadre du projet et dans le c Des cas de conduite abusive, de harcèlement ou autre pratique inacceptable ayant cours sur Kubernetes peuvent être signalés en contactant le [comité pour le code de conduite de Kubernetes](https://git.k8s.io/community/committee-code-of-conduct) via l'adresse . Pour d'autres projets, bien vouloir contacter un responsable de projet CNCF ou notre médiateur, Mishi Choudhary à l'adresse . -Ce Code de conduite est inspiré du « Contributor Covenant » (http://contributor-covenant.org) version 1.2.0, disponible à l’adresse http://contributor-covenant.org/version/1/2/0/. +Ce Code de conduite est inspiré du « Contributor Covenant » (https://contributor-covenant.org) version 1.2.0, disponible à l’adresse https://contributor-covenant.org/version/1/2/0/. ### Code de conduite pour les événements de la CNCF diff --git a/content/fr/docs/concepts/cluster-administration/logging.md b/content/fr/docs/concepts/cluster-administration/logging.md index 465ec742b1332..0932de7406790 100644 --- a/content/fr/docs/concepts/cluster-administration/logging.md +++ b/content/fr/docs/concepts/cluster-administration/logging.md @@ -82,7 +82,7 @@ conteneur a crashé. Si le Pod a plusieurs conteneurs, il faut spécifier le nom du conteneur dont on veut récupérer le journal d'évènement. Dans notre exemple le conteneur s'appelle -`count` donc vous pouvez utiliser `kubectl logs counter count`. Plus de détails +`count` donc vous pouvez utiliser `kubectl logs counter count`. 
Plus de détails dans la [documentation de `kubectl logs`] (/docs/reference/generated/kubectl/kubectl-commands#logs) diff --git a/content/fr/docs/concepts/workloads/controllers/statefulset.md b/content/fr/docs/concepts/workloads/controllers/statefulset.md new file mode 100644 index 0000000000000..87286aeaa43ce --- /dev/null +++ b/content/fr/docs/concepts/workloads/controllers/statefulset.md @@ -0,0 +1,278 @@ +--- +title: StatefulSets +content_type: concept +weight: 30 +--- + + + +StatefulSet est l'objet de l'API de charge de travail utilisé pour gérer des applications avec état (*stateful*). + +{{< glossary_definition term_id="statefulset" length="all" >}} + + + + +## Utiliser des StatefulSets + +Les StatefulSets sont utiles pour des applications qui nécessitent une ou plusieurs des choses suivantes : + +* Des identifiants réseau stables et uniques. +* Un stockage persistant stable. +* Un déploiement et une mise à l'échelle ordonnés et contrôlés. +* Des mises à jour continues (*rolling update*) ordonnées et automatisées. + +Ci-dessus, stable est synonyme de persistance suite au (re)scheduling de Pods. +Si une application ne nécessite aucun identifiant stable ou de déploiement, suppression ou +mise à l'échelle stables, vous devriez déployer votre application en utilisant un objet de charge de travail +fournissant un ensemble de réplicas sans état (*stateless*). + +Un [Deployment](/fr/docs/concepts/workloads/controllers/deployment/) ou +[ReplicaSet](/fr/docs/concepts/workloads/controllers/replicaset/) peut être mieux adapté pour vos applications sans état. + +## Limitations + +* Le stockage pour un Pod donné doit être provisionné soit par un [approvisionneur de PersistentVolume](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/persistent-volume-provisioning/README.md) basé sur un `storage class` donné, soit pré-provisionné par un admin. +* Supprimer et/ou réduire l'échelle d'un StatefulSet à zéro ne supprimera *pas* les volumes associés avec le StatefulSet. Ceci est fait pour garantir la sécurité des données, ce qui a généralement plus de valeur qu'une purge automatique de toutes les ressources relatives à un StatefulSet. +* Les StatefulSets nécessitent actuellement un [Service Headless](/fr/docs/concepts/services-networking/service/#headless-services) qui est responsable de l'identité réseau des Pods. Vous êtes responsable de la création de ce Service. +* Les StatefulSets ne fournissent aucune garantie de la terminaison des pods lorsqu'un StatefulSet est supprimé. Pour avoir une terminaison ordonnée et maîtrisée des pods du StatefulSet, il est possible de réduire l'échelle du StatefulSet à 0 avant de le supprimer. +* Lors de l'utilisation de [Rolling Updates](#rolling-updates) avec la + [Politique de gestion des Pods](#politiques-de-gestion-dun-pod) par défaut (`OrderedReady`), + il est possible de tomber dans un état indéfini nécessitant une + [intervention manuelle pour réparer](#rollback-forcé). + +## Composants + +L'exemple ci-dessous décrit les composants d'un StatefulSet. 
+ +```yaml +apiVersion: v1 +kind: Service +metadata: + name: nginx + labels: + app: nginx +spec: + ports: + - port: 80 + name: web + clusterIP: None + selector: + app: nginx +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: web +spec: + selector: + matchLabels: + app: nginx # doit correspondre à .spec.template.metadata.labels + serviceName: "nginx" + replicas: 3 # est 1 par défaut + template: + metadata: + labels: + app: nginx # doit correspondre à .spec.selector.matchLabels + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: nginx + image: k8s.gcr.io/nginx-slim:0.8 + ports: + - containerPort: 80 + name: web + volumeMounts: + - name: www + mountPath: /usr/share/nginx/html + volumeClaimTemplates: + - metadata: + name: www + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "my-storage-class" + resources: + requests: + storage: 1Gi +``` + +Dans l'exemple ci-dessus : + +* Un Service Headless, appelé `nginx`, est utilisé pour contrôler le domaine réseau. +* Le StatefulSet, appelé `web`, a une Spec indiquant que 3 réplicas du container nginx seront démarrés dans des Pods. +* Le `volumeClaimTemplates` fournira un stockage stable utilisant des [PersistentVolumes](/docs/concepts/storage/persistent-volumes/) provisionnés par un approvisionneur de PersistentVolume. + +Le nom d'un objet StatefulSet doit être un +[nom de sous-domaine DNS](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) valide. + +## Sélecteur de Pod + +Vous devez renseigner le champ `.spec.selector` d'un StatefulSet pour qu'il corresponde aux labels de son `.spec.template.metadata.labels`. Avant Kubernetes 1.8, le champ `.spec.selector` était mis par défaut s'il était omis. Pour les versions 1.8 et ultérieures, ne pas spécifier de sélecteur de Pod résulte en une erreur de validation lors de la création du StatefulSet. + +## Identité du Pod + +Les Pods d'un StatefulSet ont une identité unique comprenant un ordinal, une identité réseau stable et un stockage stable. +L'identité est accrochée au Pod, indépendamment du noeud sur lequel il est (re)programmé. + +### Index Ordinal + +Pour un StatefulSet avec N réplicas, chaque Pod du StatefulSet se verra assigné un ordinal entier, de 0 à N-1, +unique sur l'ensemble des pods. + +### ID réseau stable + +Chaque Pod dans un StatefulSet dérive son nom d'hôte du nom du StatefulSet +et de l'ordinal du Pod. Le modèle pour le nom d'hôte généré est +`$(nom statefulset)-$(ordinal)`. L'exemple ci-dessus créera trois Pods +nommés `web-0,web-1,web-2`. +Un StatefulSet peut utiliser un [Service Headless](/docs/concepts/services-networking/service/#headless-services) +pour contrôler le domaine de ses Pods. Le domaine pris en charge par ce Service prend la forme : +`$(nom service).$(namespace).svc.cluster.local`, où "cluster.local" est le domaine du cluster. +Chaque fois qu'un Pod est créé, il obtient un sous-domaine DNS correspondant, prenant la forme : +`$(nom pod).$(domaine du service gouvernant)`, où le service gouvernant est défini par le +champ `serviceName` du StatefulSet. + +En fonction de la façon dont est configuré le DNS dans votre cluster, vous ne pourrez peut-être pas rechercher immédiatement +le nom DNS d'un pod nouvellement exécuté. Ce problème peut se produire lorsque d'autres clients dans le +cluster ont déjà envoyé des requêtes pour le nom d'hôte du Pod avant sa création. 
+La mise en cache négative (normale pour le DNS) signifie que les résultats des recherches précédentes ayant échoué sont
+mémorisés et réutilisés, même après que le Pod a démarré, pendant au moins quelques secondes.
+
+Si vous avez besoin de découvrir les Pods rapidement après leur création, vous avez plusieurs options :
+
+- Interrogez directement l'API Kubernetes (par exemple, à l'aide d'un watch) plutôt que de vous fier aux recherches DNS.
+- Réduisez le temps de mise en cache dans votre fournisseur de DNS Kubernetes (cela signifie généralement modifier le ConfigMap de CoreDNS, qui met actuellement en cache pendant 30 secondes).
+
+Comme mentionné dans la section [limitations](#limitations), vous êtes responsable de
+créer le [Service Headless](/docs/concepts/services-networking/service/#headless-services)
+responsable de l'identité réseau des Pods.
+
+Voici quelques exemples de choix pour le domaine du cluster, le nom du service,
+le nom du StatefulSet et comment cela affecte les noms DNS des pods du StatefulSet.
+
+Domaine Cluster | Service (ns/nom)  | StatefulSet (ns/nom) | Domaine StatefulSet             | DNS Pod                                      | Nom d'hôte   |
+--------------- | ----------------- | -------------------- | ------------------------------- | -------------------------------------------- | ------------ |
+ cluster.local  | default/nginx     | default/web          | nginx.default.svc.cluster.local | web-{0..N-1}.nginx.default.svc.cluster.local | web-{0..N-1} |
+ cluster.local  | foo/nginx         | foo/web              | nginx.foo.svc.cluster.local     | web-{0..N-1}.nginx.foo.svc.cluster.local     | web-{0..N-1} |
+ kube.local     | foo/nginx         | foo/web              | nginx.foo.svc.kube.local        | web-{0..N-1}.nginx.foo.svc.kube.local        | web-{0..N-1} |
+
+{{< note >}}
+Le domaine cluster sera `cluster.local` à moins qu'il soit
+[configuré autrement](/docs/concepts/services-networking/dns-pod-service/).
+{{< /note >}}
+
+### Stockage stable
+
+Kubernetes crée un [PersistentVolume](/docs/concepts/storage/persistent-volumes/) pour chaque
+VolumeClaimTemplate. Dans l'exemple nginx ci-dessus, chaque Pod se verra affecter un unique PersistentVolume
+avec un StorageClass de `my-storage-class` et 1 GiB de stockage provisionné. Si aucun StorageClass
+n'est spécifié, alors le StorageClass par défaut sera utilisé. Lorsqu'un Pod est (re)schedulé
+sur un noeud, ses `volumeMounts` montent les PersistentVolumes associés aux
+PersistentVolumeClaims. Notez que les PersistentVolumes associés avec les PersistentVolumeClaims des Pods
+ne sont pas supprimés lorsque les Pods, ou le StatefulSet, sont supprimés.
+Ceci doit être fait manuellement.
+
+### Étiquette du nom de Pod
+
+Lorsque le StatefulSet {{< glossary_tooltip term_id="controller" >}} crée un Pod,
+il ajoute une étiquette, `statefulset.kubernetes.io/pod-name`, renseignée avec le nom du Pod.
+Cette étiquette vous permet d'attacher un Service à un Pod spécifique du StatefulSet.
+
+## Garanties de déploiement et de mise à l'échelle
+
+* Pour un StatefulSet avec N réplicas, lorsque les Pods sont déployés, ils sont créés de manière séquentielle, dans l'ordre {0..N-1}.
+* Lorsque les Pods sont supprimés, ils sont terminés dans l'ordre inverse, {N-1..0}.
+* Avant qu'une opération de mise à l'échelle soit appliquée à un Pod, tous ses prédécesseurs doivent être Running et Ready.
+* Avant qu'un Pod soit terminé, tous ses successeurs doivent être complètement arrêtés.
+
+Le StatefulSet ne devrait pas spécifier un `pod.Spec.TerminationGracePeriodSeconds` à 0. Cette pratique
+est dangereuse et fortement déconseillée. Pour plus d'explications, veuillez vous référer à
+[forcer la suppression de Pods de StatefulSet](/docs/tasks/run-application/force-delete-stateful-set-pod/).
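+
+À titre d'illustration (esquisse, en supposant que le StatefulSet `web` de l'exemple
+ci-dessus vient d'être créé), vous pouvez observer la création ordonnée des Pods :
+
+```shell
+# affiche les Pods web-0, web-1, web-2 au fur et à mesure qu'ils deviennent Ready
+kubectl get pods -w -l app=nginx
+```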
+
+Lorsque l'exemple nginx ci-dessus est créé, trois Pods seront déployés dans l'ordre
+web-0, web-1, web-2. web-1 ne sera pas déployé avant que web-0 soit
+[Running et Ready](/fr/docs/concepts/workloads/pods/pod-lifecycle/), et web-2 ne sera pas déployé avant que
+web-1 soit Running et Ready. Si web-0 venait à échouer, après que web-1 soit Running et Ready, mais avant que
+web-2 soit lancé, web-2 ne serait pas lancé avant que web-0 soit correctement relancé et redevienne Running et Ready.
+
+Si un utilisateur venait à mettre à l'échelle l'exemple déployé en patchant le StatefulSet pour que
+`replicas=1`, web-2 serait terminé en premier. web-1 ne serait pas terminé avant que web-2
+ne soit complètement arrêté et supprimé. Si web-0 venait à échouer après que web-2 soit terminé et complètement arrêté,
+mais avant que web-1 soit terminé, web-1 ne serait pas terminé avant que web-0 soit Running et Ready.
+
+### Politiques de gestion d'un Pod
+
+Dans Kubernetes 1.7 et ultérieurs, le StatefulSet vous permet d'assouplir ses garanties d'ordre,
+tout en préservant ses garanties d'unicité et d'identité via son champ `.spec.podManagementPolicy`.
+
+#### Gestion de Pod OrderedReady
+
+La gestion de Pod `OrderedReady` est la valeur par défaut pour les StatefulSets. Elle implémente le comportement décrit [ci-dessus](#garanties-de-déploiement-et-de-mise-à-l-échelle).
+
+#### Gestion de Pod Parallel
+
+La gestion de Pod `Parallel` indique au contrôleur de StatefulSet de lancer ou
+terminer tous les Pods en parallèle, et de ne pas attendre que les Pods deviennent Running
+et Ready ou complètement terminés avant de lancer ou terminer un autre
+Pod. Cette option affecte seulement le comportement pour les opérations de mise à l'échelle.
+Les mises à jour ne sont pas affectées.
+
+## Stratégies de mise à jour
+
+Dans Kubernetes 1.7 et ultérieurs, le champ `.spec.updateStrategy` d'un StatefulSet vous permet
+de configurer et désactiver les rolling updates automatisés pour les conteneurs, étiquettes,
+requêtes/limites de ressources, et annotations pour les Pods d'un StatefulSet.
+
+### On Delete
+
+La stratégie de mise à jour `OnDelete` implémente l'ancien comportement (1.6 et précédents). Lorsque
+`.spec.updateStrategy.type` d'un StatefulSet est mis à `OnDelete`, le contrôleur de StatefulSet
+ne mettra pas à jour automatiquement les Pods dans un StatefulSet.
+Les utilisateurs doivent supprimer manuellement les Pods pour forcer le contrôleur à créer de nouveaux
+Pods qui reflètent les modifications faites au `.spec.template` du StatefulSet.
+
+### Rolling Updates
+
+La stratégie de mise à jour `RollingUpdate` implémente le rolling update automatisé pour les Pods d'un
+StatefulSet. C'est la stratégie par défaut lorsque `.spec.updateStrategy` n'est pas spécifié.
+Lorsque `.spec.updateStrategy.type` d'un StatefulSet est mis à `RollingUpdate`, le contrôleur de
+StatefulSet va supprimer et recréer chaque Pod d'un StatefulSet. Il va procéder dans le même ordre
+que pour la terminaison d'un Pod (de l'ordinal le plus grand au plus petit), mettant à jour chaque Pod,
+un seul à la fois. Il va attendre qu'un Pod mis à jour soit Running et Ready avant de mettre à jour
+son prédécesseur.
+
+#### Partitions
+
+La stratégie de mise à jour `RollingUpdate` peut être partitionnée, en spécifiant une
+`.spec.updateStrategy.rollingUpdate.partition`. Si une partition est spécifiée, tous les Pods ayant un
+ordinal plus grand ou égal à la partition seront mis à jour lorsque le
+`.spec.template` du StatefulSet sera mis à jour. Tous les Pods ayant un ordinal inférieur à la partition
+ne seront pas mis à jour et, même s'ils sont supprimés, ils seront recréés avec l'ancienne version. Si une
+`.spec.updateStrategy.rollingUpdate.partition` d'un StatefulSet est plus grande que son `.spec.replicas`,
+les mises à jour de son `.spec.template` ne seront pas propagées à ses Pods.
+Dans la plupart des cas vous n'aurez pas à utiliser de partition, mais elles sont utiles si vous désirez
+organiser une mise à jour, déployer une version canari, ou effectuer un déploiement par étapes.
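+
+Par exemple (esquisse indicative, en supposant le StatefulSet `web` de l'exemple ci-dessus),
+une partition peut être définie avec `kubectl patch` :
+
+```shell
+# seuls les Pods dont l'ordinal est >= 2 (ici web-2) recevront le nouveau .spec.template
+kubectl patch statefulset web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate","rollingUpdate":{"partition":2}}}}'
+```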
+
+#### Rollback forcé
+
+En utilisant des [Rolling Updates](#rolling-updates) avec la
+[politique de gestion d'un Pod](#politiques-de-gestion-dun-pod) par défaut (`OrderedReady`),
+il est possible de se retrouver dans un état inconsistant nécessitant une intervention manuelle pour réparation.
+
+Si vous mettez à jour le template de Pod dans une configuration qui ne devient jamais Running et
+Ready (par exemple, du fait d'un mauvais binaire ou d'une erreur de configuration au niveau de l'application),
+le StatefulSet va arrêter le rollout et attendre.
+
+Dans cet état, il n'est pas suffisant de revenir à une bonne configuration du template de Pod.
+En raison d'une [erreur connue](https://github.com/kubernetes/kubernetes/issues/67250),
+le StatefulSet va continuer à attendre que le Pod en échec devienne Ready
+(ce qui n'arrive jamais) avant qu'il ne tente de revenir à la bonne configuration.
+
+Après être revenu au bon template, vous devez aussi supprimer tous les Pods que le StatefulSet
+avait déjà essayé de démarrer avec la mauvaise configuration.
+Le StatefulSet va alors commencer à recréer les Pods en utilisant le bon template.
+
+## {{% heading "whatsnext" %}}
+
+* Suivre un exemple de [déploiement d'une application stateful](/docs/tutorials/stateful-application/basic-stateful-set/).
+* Suivre un exemple de [déploiement de Cassandra avec des Stateful Sets](/docs/tutorials/stateful-application/cassandra/).
+* Suivre un exemple d'[exécution d'une application stateful redondante](/docs/tasks/run-application/run-replicated-stateful-application/).
diff --git a/content/fr/docs/reference/glossary/statefulset.md b/content/fr/docs/reference/glossary/statefulset.md
new file mode 100755
index 0000000000000..2acf3370a193d
--- /dev/null
+++ b/content/fr/docs/reference/glossary/statefulset.md
@@ -0,0 +1,22 @@
+---
+title: StatefulSet
+id: statefulset
+date: 2018-04-12
+full_link: /fr/docs/concepts/workloads/controllers/statefulset/
+short_description: >
+  Gère le déploiement et la mise à l'échelle d'un ensemble de Pods, avec un stockage durable et des identifiants persistants pour chaque Pod.
+
+aka:
+tags:
+- fundamental
+- core-object
+- workload
+- storage
+---
+ Gère le déploiement et la mise à l'échelle d'un ensemble de {{< glossary_tooltip text="Pods" term_id="pod" >}}, *et fournit des garanties sur l'ordre et l'unicité* de ces Pods.
+
+
+
+Comme un {{< glossary_tooltip term_id="deployment" >}}, un StatefulSet gère des Pods qui sont basés sur une même spécification de conteneur. Contrairement à un Deployment, un StatefulSet maintient une identité pour chacun de ces Pods. Ces Pods sont créés à partir de la même spec, mais ne sont pas interchangeables : chacun a un identifiant persistant qu'il garde à travers tous ses re-scheduling.
+ +Si vous voulez utiliser des volumes de stockage pour fournir de la persistance à votre charge de travail, vous pouvez utiliser un StatefulSet comme partie de la solution. Même si des Pods individuels d'un StatefulSet sont susceptibles d'échouer, les identifiants persistants des Pods rendent plus facile de faire correspondre les volumes existants aux nouveaux Pods remplaçant ceux ayant échoué. diff --git a/content/fr/docs/setup/_index.md b/content/fr/docs/setup/_index.md index 37161dbc0cd20..5424c448a7f5b 100644 --- a/content/fr/docs/setup/_index.md +++ b/content/fr/docs/setup/_index.md @@ -35,7 +35,7 @@ Vous devriez choisir une solution locale si vous souhaitez : * Essayer ou commencer à apprendre Kubernetes * Développer et réaliser des tests sur des clusters locaux -Choisissez une [solution locale] (/fr/docs/setup/pick-right-solution/#solutions-locales). +Choisissez une [solution locale](/fr/docs/setup/pick-right-solution/#solutions-locales). ## Solutions hébergées @@ -49,7 +49,7 @@ Vous devriez choisir une solution hébergée si vous : * N'avez pas d'équipe de Site Reliability Engineering (SRE) dédiée, mais que vous souhaitez une haute disponibilité. * Vous n'avez pas les ressources pour héberger et surveiller vos clusters -Choisissez une [solution hébergée] (/fr/docs/setup/pick-right-solution/#solutions-hebergées). +Choisissez une [solution hébergée](/fr/docs/setup/pick-right-solution/#solutions-hebergées). ## Solutions cloud clés en main @@ -63,7 +63,7 @@ Vous devriez choisir une solution cloud clés en main si vous : * Voulez plus de contrôle sur vos clusters que ne le permettent les solutions hébergées * Voulez réaliser vous même un plus grand nombre d'operations -Choisissez une [solution clé en main] (/fr/docs/setup/pick-right-solution/#solutions-clés-en-main) +Choisissez une [solution clé en main](/fr/docs/setup/pick-right-solution/#solutions-clés-en-main) ## Solutions clés en main sur site @@ -76,7 +76,7 @@ Vous devriez choisir une solution de cloud clé en main sur site si vous : * Disposez d'une équipe SRE dédiée * Avez les ressources pour héberger et surveiller vos clusters -Choisissez une [solution clé en main sur site] (/fr/docs/setup/pick-right-solution/#solutions-on-premises-clés-en-main). +Choisissez une [solution clé en main sur site](/fr/docs/setup/pick-right-solution/#solutions-on-premises-clés-en-main). ## Solutions personnalisées @@ -84,11 +84,11 @@ Les solutions personnalisées vous offrent le maximum de liberté sur vos cluste d'expertise. Ces solutions vont du bare-metal aux fournisseurs de cloud sur différents systèmes d'exploitation. -Choisissez une [solution personnalisée] (/fr/docs/setup/pick-right-solution/#solutions-personnalisées). +Choisissez une [solution personnalisée](/fr/docs/setup/pick-right-solution/#solutions-personnalisées). ## {{% heading "whatsnext" %}} -Allez à [Choisir la bonne solution] (/fr/docs/setup/pick-right-solution/) pour une liste complète de solutions. +Allez à [Choisir la bonne solution](/fr/docs/setup/pick-right-solution/) pour une liste complète de solutions. diff --git a/content/fr/docs/setup/learning-environment/minikube.md b/content/fr/docs/setup/learning-environment/minikube.md index 77be61831fc14..2ab0b3ae5ae99 100644 --- a/content/fr/docs/setup/learning-environment/minikube.md +++ b/content/fr/docs/setup/learning-environment/minikube.md @@ -48,10 +48,10 @@ Suivez les étapes ci-dessous pour commencer et explorer Minikube. Starting local Kubernetes cluster... 
``` - Pour plus d'informations sur le démarrage de votre cluster avec une version spécifique de Kubernetes, une machine virtuelle ou un environnement de conteneur, voir [Démarrage d'un cluster].(#starting-a-cluster). + Pour plus d'informations sur le démarrage de votre cluster avec une version spécifique de Kubernetes, une machine virtuelle ou un environnement de conteneur, voir [Démarrage d'un cluster](#starting-a-cluster). 2. Vous pouvez maintenant interagir avec votre cluster à l'aide de kubectl. - Pour plus d'informations, voir [Interagir avec votre cluster.](#interacting-with-your-cluster). + Pour plus d'informations, voir [Interagir avec votre cluster](#interacting-with-your-cluster). Créons un déploiement Kubernetes en utilisant une image existante nommée `echoserver`, qui est un serveur HTTP, et exposez-la sur le port 8080 à l’aide de `--port`. @@ -529,5 +529,3 @@ Les contributions, questions et commentaires sont les bienvenus et sont encourag Les développeurs de minikube sont dans le canal #minikube du [Slack](https://kubernetes.slack.com) de Kubernetes (recevoir une invitation [ici](http://slack.kubernetes.io/)). Nous avons également la liste de diffusion [kubernetes-dev Google Groupes](https://groups.google.com/forum/#!forum/kubernetes-dev). Si vous publiez sur la liste, veuillez préfixer votre sujet avec "minikube:". - - diff --git a/content/fr/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md b/content/fr/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md index 746602d8b4f7c..34ae40d7cc4ae 100644 --- a/content/fr/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md +++ b/content/fr/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md @@ -81,7 +81,7 @@ metadata: name: 1.13-sample scheduler: extraArgs: - address: 0.0.0.0 + bind-address: 0.0.0.0 config: /home/johndoe/schedconfig.yaml kubeconfig: /home/johndoe/kubeconfig.yaml ``` diff --git a/content/fr/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/fr/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index bcf325df432b4..06c6d2f53c1d0 100644 --- a/content/fr/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/fr/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -40,7 +40,7 @@ Pour plus d'informations sur la création d'un cluster avec kubeadm, une fois qu ## Vérifiez que les adresses MAC et product_uuid sont uniques pour chaque nœud {#verify-mac-address} * Vous pouvez obtenir l'adresse MAC des interfaces réseau en utilisant la commande `ip link` ou` ifconfig -a` -* Le product_uuid peut être vérifié en utilisant la commande `sudo cat/sys/class/dmi/id/product_uuid` +* Le product_uuid peut être vérifié en utilisant la commande `sudo cat /sys/class/dmi/id/product_uuid` Il est très probable que les périphériques matériels aient des adresses uniques, bien que certaines machines virtuelles puissent avoir des valeurs identiques. Kubernetes utilise ces valeurs pour identifier de manière unique les nœuds du cluster. 
diff --git a/content/fr/docs/tasks/configure-pod-container/assign-cpu-resource.md b/content/fr/docs/tasks/configure-pod-container/assign-cpu-resource.md
index 0845cf2806c34..284024b425a07 100644
--- a/content/fr/docs/tasks/configure-pod-container/assign-cpu-resource.md
+++ b/content/fr/docs/tasks/configure-pod-container/assign-cpu-resource.md
@@ -114,7 +114,7 @@ cpu-demo 974m
 Souvenez-vous qu'en réglant `-cpu "2"`, vous avez configuré le conteneur pour faire en sorte qu'il utilise 2 CPU, mais que le conteneur ne peut utiliser qu'environ 1 CPU. L'utilisation du CPU du conteneur est entravée, car le conteneur tente d'utiliser plus de ressources CPU que sa limite.
 
 {{< note >}}
-Une autre explication possible de la la restriction du CPU est que le Nœud pourrait ne pas avoir
+Une autre explication possible de la restriction du CPU est que le Nœud pourrait ne pas avoir
 suffisamment de ressources CPU disponibles. Rappelons que les conditions préalables à cet exercice exigent que chacun de vos Nœuds doit avoir au moins 1 CPU.
 Si votre conteneur fonctionne sur un nœud qui n'a qu'un seul CPU, le conteneur ne peut pas utiliser plus que 1 CPU, quelle que soit la limite de CPU spécifiée pour le conteneur.
 {{< /note >}}
diff --git a/content/fr/docs/tasks/tools/install-minikube.md b/content/fr/docs/tasks/tools/install-minikube.md
index b079eba6ec666..745f1e4c227ba 100644
--- a/content/fr/docs/tasks/tools/install-minikube.md
+++ b/content/fr/docs/tasks/tools/install-minikube.md
@@ -84,7 +84,7 @@ Vous pouvez télécharger les packages `.deb` depuis [Docker](https://www.docker
 
 {{< caution >}}
 Le pilote VM `none` peut entraîner des problèmes de sécurité et de perte de données.
-Avant d'utiliser `--driver=none`, consultez [cette documentation] (https://minikube.sigs.k8s.io/docs/reference/drivers/none/) pour plus d'informations.
+Avant d'utiliser `--driver=none`, consultez [cette documentation](https://minikube.sigs.k8s.io/docs/reference/drivers/none/) pour plus d'informations.
 {{< /caution >}}
 
 Minikube prend également en charge un `vm-driver=podman` similaire au pilote Docker. Podman est exécuté en tant que superutilisateur (utilisateur root), c'est le meilleur moyen de garantir que vos conteneurs ont un accès complet à toutes les fonctionnalités disponibles sur votre système.
diff --git a/content/id/docs/concepts/architecture/controller.md b/content/id/docs/concepts/architecture/controller.md
index 6cf90cf9e6189..096dd75085543 100644
--- a/content/id/docs/concepts/architecture/controller.md
+++ b/content/id/docs/concepts/architecture/controller.md
@@ -60,7 +60,7 @@ Job adalah sumber daya dalam Kubernetes yang menjalankan a
 {{< glossary_tooltip term_id="pod" >}}, atau mungkin beberapa Pod sekaligus,
 untuk melakukan sebuah pekerjaan dan kemudian berhenti.
 
-(Setelah [dijadwalkan](../../../../en/docs/concepts/scheduling/), objek Pod
+(Setelah [dijadwalkan](../../../../en/docs/concepts/scheduling-eviction/), objek Pod
 akan menjadi bagian dari keadaan yang diinginkan oleh kubelet).
 
 Ketika _controller job_ melihat tugas baru, maka _controller_ itu memastikan bahwa,
@@ -102,9 +102,8 @@ yang diinginkannya melalui server API, dan kemudian berkomunikasi langsung
 dengan sistem eksternal untuk membawa keadaan saat ini mendekat
 keadaan yang diinginkan.
 
-(Sebenarnya ada sebuah _controller_ yang melakukan penskalaan node secara
-horizontal dalam klaster kamu. Silahkan lihat
-[_autoscaling_ klaster](/docs/tasks/administer-cluster/cluster-management/#cluster-autoscaling)).
+(Sebenarnya ada sebuah [_controller_](https://github.com/kubernetes/autoscaler/) yang melakukan penskalaan node secara
+horizontal dalam klaster kamu.)
 
 ## Status sekarang berbanding status yang diinginkan {#sekarang-banding-diinginkan}
 
diff --git a/content/id/docs/concepts/architecture/nodes.md b/content/id/docs/concepts/architecture/nodes.md
index ab13cf122a5b6..685d54ddddb8f 100644
--- a/content/id/docs/concepts/architecture/nodes.md
+++ b/content/id/docs/concepts/architecture/nodes.md
@@ -134,6 +134,7 @@ Jika sudah tidak tersedia, kontroler node menghilangkan node tersebut dari dafta
 Ketiga, melakukan monitor terhadap kondisi kesehatan (health) node. Kontroler node bertanggung jawab untuk mengubah status `NodeReady condition` pada `NodeStatus` menjadi `ConditionUnknown`, ketika sebuah node terputus jaringannya (kontroler node tidak lagi mendapat heartbeat karena suatu hal, contohnya karena node tidak hidup), dan saat kemudian melakukan eviction terhadap semua pod yang ada pada node tersebut (melalui terminasi halus -- graceful) jika node masih terus terputus. (Timeout standar adalah 40 detik untuk mulai melaporkan `ConditionUnknown` dan 5 menit setelah itu untuk mulai melakukan eviction terhadap pod.)
+
 Kontroler node memeriksa state masing-masing node untuk durasi yang ditentukan oleh argumen `--node-monitor-period`.
 
 Pada versi Kubernetes sebelum 1.13, `NodeStatus` adalah heartbeat yang diberikan oleh node.
@@ -154,6 +155,7 @@ Perlakuan eviction pada node berubah ketika sebuah node menjadi tidak seh
 Kontroler node memeriksa berapa persentase node di dalam zona tersebut yang tidak sehat (saat `NodeReady condition` menjadi `ConditionUnknown` atau `ConditionFalse`) pada saat yang bersamaan. Jika persentase node yang tidak sehat bernilai `--unhealthy-zone-threshold` (default-nya 0.55), maka rate eviction berkurang: untuk ukuran klaster yang kecil (saat jumlahnya lebih kecil atau sama dengan jumlah node `--large-cluster-size-threshold` - default-nya 50), maka eviction akan berhenti dilakukan. Jika masih besar jumlahnya, rate eviction dikurangi menjadi `--secondary-node-eviction-rate` (default-nya 0.01) per detik.
+
 Alasan kenapa hal ini diimplementasi untuk setiap zona availability adalah karena satu zona bisa saja terputus dari master, saat yang lainnya masih terhubung. Jika klaster tidak menjangkau banyak zona availability yang disediakan oleh penyedia cloud, maka hanya ada satu zona (untuk semua node di dalam klaster).
@@ -164,7 +166,7 @@ Pada kasus ini, kontroler node berasumsi ada masalah pada jaringan master, dan m
 Mulai dari Kubernetes 1.6, kontroler node juga bertanggung jawab untuk melakukan eviction pada pod-pod yang berjalan di atas node dengan taints `NoExecute`, ketika pod-pod tersebut sudah tidak lagi tolerate terhadap taints.
 Sebagai tambahan, hal ini di-nonaktifkan secara default pada fitur alpha, kontroler node bertanggung jawab untuk menambahkan taints yang berhubungan dengan masalah pada node, seperti terputus atau `NotReady`.
-Lihat [dokumentasi ini](/id/docs/concepts/configuration/taint-and-toleration/) untuk bahasan detail tentang taints `NoExecute` dan fitur alpha.
+Lihat [dokumentasi ini](/id/docs/concepts/scheduling-eviction/taint-and-toleration/) untuk bahasan detail tentang taints `NoExecute` dan fitur alpha.
 
 Mulai dari versi 1.8, kontroler node bisa diatur untuk bertanggung jawab pada pembuatan taints yang merepresentasikan node condition. Ini merupakan fitur alpha untuk versi 1.8.
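+
+Sebagai gambaran (sketsa dengan nama node dan key hipotetis), taint `NoExecute` dapat
+ditambahkan secara manual menggunakan `kubectl taint`:
+
+```shell
+# menambahkan taint NoExecute pada node bernama node1;
+# pod tanpa toleration yang sesuai akan di-eviction dari node tersebut
+kubectl taint nodes node1 key1=value1:NoExecute
+```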
diff --git a/content/id/docs/concepts/cluster-administration/cloud-providers.md b/content/id/docs/concepts/cluster-administration/cloud-providers.md index 9a32af1eb89f8..4eb6a474b8dc0 100644 --- a/content/id/docs/concepts/cluster-administration/cloud-providers.md +++ b/content/id/docs/concepts/cluster-administration/cloud-providers.md @@ -11,7 +11,7 @@ Laman ini akan menjelaskan bagaimana cara mengelola Kubernetes yang berjalan pad ### Kubeadm -[Kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) merupakan salah satu cara yang banyak digunakan untuk membuat klaster Kubernetes. +[Kubeadm](/docs/reference/setup-tools/kubeadm/) merupakan salah satu cara yang banyak digunakan untuk membuat klaster Kubernetes. Kubeadm memiliki beragam opsi untuk mengatur konfigurasi spesifik untuk penyedia layanan cloud. Salah satu contoh yang biasa digunakan pada penyedia cloud *in-tree* yang dapat diatur dengan kubeadm adalah sebagai berikut: ```yaml @@ -347,4 +347,4 @@ Penyedia layanan IBM Cloud Kubernetes Service memanfaatkan Kubernetes-native *pe ### Nama Node Penyedia layanan cloud Baidu menggunakan alamat IP privat dari *node* (yang ditentukan oleh kubelet atau menggunakan `--hostname-override`) sebagai nama dari objek Kubernetes Node. -Perlu diperhatikan bahwa nama Kubernetes Node harus sesuai dengan alamat IP privat dari Baidu VM. \ No newline at end of file +Perlu diperhatikan bahwa nama Kubernetes Node harus sesuai dengan alamat IP privat dari Baidu VM. diff --git a/content/id/docs/concepts/cluster-administration/monitoring.md b/content/id/docs/concepts/cluster-administration/monitoring.md deleted file mode 100644 index f4917496a98a3..0000000000000 --- a/content/id/docs/concepts/cluster-administration/monitoring.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -title: Metrik-Metrik untuk Control Plane Kubernetes -content_type: concept -weight: 60 -aliases: -- controller-metrics.md ---- - - - -Metrik dari komponen sistem dapat memberikan pandangan yang lebih baik tentang apa -yang sedang terjadi di dalam sistem. Metrik sangat berguna untuk membuat dasbor (_dashboard_) -dan peringatan (_alert_). - -Metrik di dalam _control plane_ Kubernetes disajikan dalam [format prometheus](https://prometheus.io/docs/instrumenting/exposition_formats/) -dan dapat terbaca oleh manusia. - - - - - -## Metrik-Metrik pada Kubernetes - -Dalam kebanyakan kasus, metrik tersedia pada _endpoint_ `/metrics` dari server HTTP. -Untuk komponen yang tidak mengekspos _endpoint_ secara bawaan, _endpoint_ tersebut dapat diaktifkan -dengan menggunakan opsi `--bind-address`. - -Contoh-contoh untuk komponen tersebut adalah: - -* {{< glossary_tooltip term_id="kube-controller-manager" text="kube-controller-manager" >}} -* {{< glossary_tooltip term_id="kube-proxy" text="kube-proxy" >}} -* {{< glossary_tooltip term_id="kube-apiserver" text="kube-apiserver" >}} -* {{< glossary_tooltip term_id="kube-scheduler" text="kube-scheduler" >}} -* {{< glossary_tooltip term_id="kubelet" text="kubelet" >}} - -Di dalam lingkungan produksi, kamu mungkin ingin mengonfigurasi [Server Prometheus](https://prometheus.io/) -atau _scraper_ metrik (pengambil metrik) lainnya untuk mengumpulkan metrik-metrik ini secara berkala -dan membuatnya tersedia dalam semacam basis data yang _time series_. - -Perlu dicatat bahwa {{< glossary_tooltip term_id="kubelet" text="kubelet" >}} -juga mengekspos metrik pada _endpoint-endpoint_ seperti `/metrics/cadvisor`, -`/metrics/resource` dan `/metrics/probes`. Metrik-metrik tersebut tidak memiliki -siklus hidup yang sama. 
- -Jika klaster kamu menggunakan {{< glossary_tooltip term_id="rbac" text="RBAC" >}}, -untuk membaca metrik memerlukan otorisasi melalui sebuah User, Group, atau -ServiceAccount dengan ClusterRole yang memperbolehkan mengakses `/metrics`. - -Sebagai contoh: - -``` -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: prometheus -rules: - - nonResourceURLs: - - "/metrics" - verbs: - - get -``` - -## Siklus Hidup (_Lifecycle_) Metrik - -Alpha metric → Stable metric → Deprecated metric → Hidden metric → Deletion - -Metrik-metrik _Alpha_ tidak memiliki jaminan stabilitas; dengan demikian mereka -dapat dimodifikasi atau dihapus kapan saja. - -Metrik-metrik _Stable_ dijamin tidak berubah (dijamin stabilitasnya); Secara khusus, stabilitas berarti: - -* metrik itu sendiri tidak akan dihapus (atau diganti namanya) -* jenis metrik tidak akan dimodifikasi - -Metrik _Deprecated_ memberi penanda bahwa metrik tersebut suatu saat akan dihapus; untuk -menemukan versi yang mana, kamu perlu memeriksa anotasi, yang mencakup dari versi -Kubernetes mana yang metrik tersebut akan dianggap _deprecated_. - -Sebelum _deprecation_: - -``` -# HELP some_counter this counts things -# TYPE some_counter counter -some_counter 0 -``` - -Sesudah _deprecation_: - -``` -# HELP some_counter (Deprecated since 1.15.0) this counts things -# TYPE some_counter counter -some_counter 0 -``` - -Setelah metrik disembunyikan maka secara bawaan metrik tidak dipublikasikan -untuk _scraping_ (pengambilan metrik). Untuk menggunakan metrik yang tersembunyi, kamu perlu mengganti (_override_) -konfigurasi untuk komponen klaster yang relevan. - -Setelah metrik dihapus, metrik tidak dipublikasikan. Kamu tidak dapat mengubah -metrik tersebut dengan menggunakan _override_. - -## Melihat Metrik tersembunyi - -Seperti dijelaskan di atas, para admin dapat mengaktifkan metrik tersembunyi -melalui opsi pada baris perintah (_command line_) untuk _binary_ (program) tertentu. Ini ditujukan untuk -digunakan sebagai solusi bagi para admin apabila mereka gagal memigrasi -metrik yang sudah _deprecated_ dalam rilis terakhir. - -Opsi `show-hidden-metrics-for-version` menunjukkan versi yang ingin kamu tampilkan -metrik yang sudah _deprecated_ dalam rilis tersebut. Versi ini ditampilkan dalam bentuk x.y, -di mana x adalah versi _major_, y ​​adalah versi minor. Versi _patch_ tidak -diperlukan meskipun metrik dapat di_-deprecate_ dalam rilis _patch_, hal ini -adalah karena kebijakan _deprecation_ untuk metrik hanya berlaku terhadap rilis minor. - -Opsi tersebut hanya dapat menggunakan versi minor sebelumnya sebagai parameternya. Semua -metrik yang disembunyikan di versi sebelumnya akan dikeluarkan jika para admin -mengatur versi sebelumnya ke `show-hidden-metrics-for-version`. Versi yang -terlalu lama tidak diizinkan karena hal ini melanggar kebijakan untuk metrik yang -sudah _deprecated_. - -Ambil metrik `A` sebagai contoh, di sini diasumsikan bahwa `A` sudah _deprecated_ -pada rilis 1.n. Menurut kebijakan metrik yang sudah _deprecated_, kita dapat mencapai kesimpulan -sebagai berikut: - -* Pada rilis `1.n`, metrik sudah di_-deprecated_, dan dapat diperlihatkan secara bawaan. -* Pada rilis `1.n + 1`, metrik disembunyikan secara bawaan dan dapat - diperlihatkan dengan baris perintah `show-hidden-metrics-for-version=1.n`. -* Pada rilis `1.n + 2`, metrik harus dihapus dari _codebase_. Tidak ada jalan - keluar lagi. 
- -Jika kamu meng-_upgrade_ dari rilis `1.12` ke` 1.13`, tetapi masih bergantung pada -metrik `A` yang di-_deprecate_ dalam` 1.12`, kamu harus mengatur metrik -tersembunyi melalui baris perintah: `--show-hidden-metrics=1.12` dan ingatlah -untuk menghapus ketergantungan terhadap metrik ini sebelum meng-_upgrade_ ke `1.14`. - -## Metrik komponen - -### Metrik kube-controller-manager - -Metrik Controller Manager memberikan pandangan penting -tentang kinerja dan kesehatan Controller Manager. Metrik ini mencakup metrik -_runtime_ berbahasa Go yang umum seperti jumlah _go_routine_ dan metrik khusus -pengontrol seperti latensi _request etcd_ atau latensi API dari Cloud provider -(AWS, GCE, OpenStack) yang dapat digunakan untuk mengukur kesehatan klaster. - -Mulai dari Kubernetes 1.7, metrik Cloud provider yang detail tersedia untuk -operasi penyimpanan untuk GCE, AWS, Vsphere, dan OpenStack. -Metrik ini dapat digunakan untuk memantau kesehatan operasi PersistentVolume. - -Misalnya, untuk GCE metrik tersebut adalah: - -``` -cloudprovider_gce_api_request_duration_seconds { request = "instance_list"} -cloudprovider_gce_api_request_duration_seconds { request = "disk_insert"} -cloudprovider_gce_api_request_duration_seconds { request = "disk_delete"} -cloudprovider_gce_api_request_duration_seconds { request = "attach_disk"} -cloudprovider_gce_api_request_duration_seconds { request = "detach_disk"} -cloudprovider_gce_api_request_duration_seconds { request = "list_disk"} -``` - - - -## {{% heading "whatsnext" %}} - -* Baca tentang [format teks Prometheus](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format) untuk berbagai metrik -* Lihat daftar [metrik Kubernetes yang _stable_](https://github.com/kubernetes/kubernetes/blob/master/test/instrumentation/testdata/stable-metrics-list.yaml) -* Baca tentang [kebijakan _deprecation_ Kubernetes](https://kubernetes.io/docs/reference/using-api/deprecation-policy/#deprecating-a-feature-or-behavior ) - diff --git a/content/id/docs/concepts/cluster-administration/system-metrics.md b/content/id/docs/concepts/cluster-administration/system-metrics.md new file mode 100644 index 0000000000000..f731ccfd7767e --- /dev/null +++ b/content/id/docs/concepts/cluster-administration/system-metrics.md @@ -0,0 +1,166 @@ +--- +title: Metrik untuk Komponen Sistem Kubernetes +content_type: concept +weight: 60 +--- + + + +Metrik dari komponen sistem dapat memberikan gambaran yang lebih baik tentang apa +yang sedang terjadi di dalam sistem. Metrik sangat berguna untuk membuat dasbor (_dashboard_) +dan peringatan (_alert_). + +Komponen Kubernetes mengekspos metrik dalam [format Prometheus](https://prometheus.io/docs/instrumenting/exposition_formats/). +Format ini berupa teks biasa yang terstruktur, dirancang agar orang dan mesin dapat membacanya. + + + +## Metrik-metrik dalam Kubernetes + +Dalam kebanyakan kasus, metrik tersedia pada _endpoint_ `/metrics` dari server HTTP. +Untuk komponen yang tidak mengekspos _endpoint_ secara bawaan, _endpoint_ tersebut dapat diaktifkan +dengan menggunakan opsi `--bind-address`. 
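Sebagai sketsa minimal, dengan asumsi komponen _control plane_ berjalan sebagai Pod statis (seperti pada klaster yang dibuat dengan kubeadm), opsi `--bind-address` dapat diatur melalui manifesnya; jalur berkas, versi _image_, dan nilai di bawah ini hanyalah asumsi untuk ilustrasi:

```yaml
# Sketsa: cuplikan manifes Pod statis kube-controller-manager
# (jalur yang diasumsikan: /etc/kubernetes/manifests/kube-controller-manager.yaml)
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  containers:
  - name: kube-controller-manager
    image: k8s.gcr.io/kube-controller-manager:v1.21.0 # versi hanya contoh
    command:
    - kube-controller-manager
    # mengikat endpoint /metrics ke semua antarmuka agar dapat di-scrape
    - --bind-address=0.0.0.0
```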
+ +Contoh-contoh untuk komponen tersebut adalah: + +* {{< glossary_tooltip term_id="kube-controller-manager" text="kube-controller-manager" >}} +* {{< glossary_tooltip term_id="kube-proxy" text="kube-proxy" >}} +* {{< glossary_tooltip term_id="kube-apiserver" text="kube-apiserver" >}} +* {{< glossary_tooltip term_id="kube-scheduler" text="kube-scheduler" >}} +* {{< glossary_tooltip term_id="kubelet" text="kubelet" >}} + +Di dalam lingkungan produksi, kamu mungkin ingin mengonfigurasi [Server Prometheus](https://prometheus.io/) +atau pengambil metrik (_metrics scraper_) lainnya untuk mengumpulkan metrik-metrik ini secara berkala +dan membuatnya tersedia dalam semacam pangkalan data deret waktu (_time series database_). + +Perlu dicatat bahwa {{< glossary_tooltip term_id="kubelet" text="kubelet" >}} +juga mengekspos metrik pada _endpoint-endpoint_ seperti `/metrics/cadvisor`, +`/metrics/resource` dan `/metrics/probes`. Metrik-metrik tersebut tidak memiliki +siklus hidup yang sama. + +Jika klastermu menggunakan {{< glossary_tooltip term_id="rbac" text="RBAC" >}}, +maka membaca metrik memerlukan otorisasi melalui _user_, _group_, atau +ServiceAccount dengan ClusterRole yang memperbolehkan untuk mengakses `/metrics`. + +Sebagai contoh: +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus +rules: + - nonResourceURLs: + - "/metrics" + verbs: + - get +``` + +## Siklus hidup metrik + +Metrik alfa (_alpha_) → Metrik stabil → Metrik usang (_deprecated_) → Metrik tersembunyi → Metrik terhapus + +Metrik alfa tidak memiliki jaminan stabilitas. Metrik ini +dapat dimodifikasi atau dihapus kapan saja. + +Metrik stabil dijamin tidak akan mengalami perubahan. Hal ini berarti: +* Metrik stabil tanpa penanda usang (_deprecated signature_) tidak akan dihapus ataupun diganti namanya +* Jenis metrik stabil tidak akan dimodifikasi + +Metrik usang dijadwalkan untuk dihapus, tetapi masih tersedia untuk digunakan. +Metrik ini mencakup anotasi versi di mana metrik ini dianggap menjadi usang. + +Sebagai contoh: + +* Sebelum menjadi usang + + ``` + # HELP some_counter this counts things + # TYPE some_counter counter + some_counter 0 + ``` + +* Setelah menjadi usang + + ``` + # HELP some_counter (Deprecated since 1.15.0) this counts things + # TYPE some_counter counter + some_counter 0 + ``` + +Metrik tersembunyi tidak lagi dipublikasikan untuk pengambilan metrik (_scraping_), tetapi masih tersedia untuk digunakan. Untuk menggunakan metrik tersembunyi, lihat bagian [Menampilkan metrik tersembunyi](#menampilkan-metrik-tersembunyi). + +Metrik yang terhapus tidak lagi dipublikasikan dan tidak dapat digunakan lagi. + +## Menampilkan metrik tersembunyi + +Seperti yang dijelaskan di atas, admin dapat mengaktifkan metrik tersembunyi melalui opsi baris perintah pada biner (program) tertentu. Ini dimaksudkan untuk digunakan sebagai jalan keluar bagi admin jika mereka melewatkan migrasi metrik usang dalam rilis terakhir. + +Opsi `show-hidden-metrics-for-version` menerima input versi yang kamu inginkan untuk menampilkan metrik usang dalam rilis tersebut. Versi tersebut dinyatakan sebagai x.y, di mana x adalah versi mayor, y adalah versi minor. Versi _patch_ tidak diperlukan meskipun metrik dapat menjadi usang dalam rilis _patch_, alasannya adalah kebijakan penandaan metrik usang dijalankan terhadap rilis minor. + +Opsi tersebut hanya dapat menerima input versi minor sebelumnya sebagai nilai. 
Semua metrik yang disembunyikan di versi sebelumnya akan dikeluarkan jika admin mengatur versi sebelumnya ke `show-hidden-metrics-for-version`. Versi yang terlalu lama tidak diperbolehkan karena melanggar kebijakan untuk metrik usang. + +Ambil metrik `A` sebagai contoh, di sini diasumsikan bahwa `A` sudah menjadi usang di versi 1.n. Berdasarkan kebijakan metrik usang, kita dapat mencapai kesimpulan berikut: + +* Pada rilis `1.n`, metrik menjadi usang, dan dapat dikeluarkan secara bawaan. +* Pada rilis `1.n+1`, metrik disembunyikan secara bawaan dan dapat dikeluarkan dengan baris perintah `show-hidden-metrics-for-version=1.n`. +* Pada rilis `1.n+2`, metrik harus dihapus dari _codebase_. Tidak ada jalan keluar lagi. + +Jika kamu meningkatkan versi dari rilis `1.12` ke `1.13`, tetapi masih bergantung pada metrik `A` yang usang di `1.12`, kamu harus mengatur metrik tersembunyi melalui baris perintah: `--show-hidden-metrics=1.12` dan ingatlah untuk menghapus ketergantungan terhadap metrik ini sebelum meningkatkan versi rilis ke `1.14`. + +## Menonaktifkan metrik akselerator + +kubelet mengumpulkan metrik akselerator melalui cAdvisor. Untuk mengumpulkan metrik ini, untuk akselerator seperti GPU NVIDIA, kubelet membuka koneksi dengan _driver_ GPU. Ini berarti untuk melakukan perubahan infrastruktur (misalnya, pemutakhiran _driver_), administrator klaster perlu menghentikan agen kubelet. + +Pengumpulan metrik akselerator sekarang menjadi tanggung jawab vendor dibandingkan kubelet. Vendor harus menyediakan sebuah kontainer untuk mengumpulkan metrik dan mengeksposnya ke layanan metrik (misalnya, Prometheus). + +[Gerbang fitur `DisableAcceleratorUsageMetrics`](/docs/reference/command-line-tools-reference/feature-gates/) menonaktifkan metrik yang dikumpulkan oleh kubelet, dengan [lini masa (_timeline_) untuk mengaktifkan fitur ini secara bawaan](https://github.com/kubernetes/enhancements/tree/411e51027db842355bd489691af897afc1a41a5e/keps/sig-node/1867-disable-accelerator-usage-metrics#graduation-criteria). + +## Metrik komponen + +### Metrik kube-controller-manager + +Metrik _controller manager_ memberikan gambaran penting +tentang kinerja dan kesehatan _controller manager_. Metrik ini mencakup metrik +_runtime_ bahasa Go yang umum seperti jumlah _goroutine_ dan metrik khusus +pengontrol seperti latensi permintaan etcd atau latensi API Cloudprovider +(AWS, GCE, OpenStack) yang dapat digunakan untuk mengukur kesehatan klaster. + +Mulai dari Kubernetes 1.7, metrik Cloudprovider yang detail tersedia untuk +operasi penyimpanan untuk GCE, AWS, Vsphere, dan OpenStack. +Metrik ini dapat digunakan untuk memantau kesehatan operasi _persistent volume_. + +Misalnya, untuk GCE metrik-metrik berikut ini dipanggil: + +``` +cloudprovider_gce_api_request_duration_seconds { request = "instance_list"} +cloudprovider_gce_api_request_duration_seconds { request = "disk_insert"} +cloudprovider_gce_api_request_duration_seconds { request = "disk_delete"} +cloudprovider_gce_api_request_duration_seconds { request = "attach_disk"} +cloudprovider_gce_api_request_duration_seconds { request = "detach_disk"} +cloudprovider_gce_api_request_duration_seconds { request = "list_disk"} +``` + +### Metrik kube-scheduler + +{{< feature-state for_k8s_version="v1.20" state="alpha" >}} + +Penjadwal mengekspos metrik opsional yang melaporkan sumber daya yang diminta dan limit yang diinginkan dari semua pod yang berjalan.
Metrik ini dapat digunakan untuk membangun dasbor perencanaan kapasitas, mengevaluasi limit penjadwalan yang digunakan saat ini atau secara historis, dengan cepat mengidentifikasi beban kerja yang tidak dapat dijadwalkan karena kurangnya sumber daya, dan membandingkan permintaan sumber daya oleh pod dengan penggunaannya yang aktual. + +kube-scheduler mengidentifikasi [permintaan dan limit](/docs/concepts/configuration/manage-resources-containers/) sumber daya yang dikonfigurasi untuk setiap Pod; jika permintaan atau limit bukan nol, kube-scheduler akan melaporkan deret waktu (_timeseries_) metrik. Deret waktu diberi label dengan: +- namespace +- nama pod +- node di mana pod dijadwalkan atau _string_ kosong jika belum dijadwalkan +- prioritas +- penjadwal yang ditugaskan untuk pod itu +- nama dari sumber daya (misalnya, `cpu`) +- satuan dari sumber daya jika diketahui (misalnya, `cores`) + +Setelah pod selesai (memiliki `restartPolicy` `Never` atau `OnFailure` dan berada dalam fase pod `Succeeded` atau `Failed`, atau telah dihapus dan semua kontainer dalam keadaan Terminated) deret metrik tidak lagi dilaporkan karena penjadwal sekarang sudah dibebaskan untuk menjadwalkan pod lain untuk dijalankan. Metrik yang dibahas pada bagian ini dikenal sebagai `kube_pod_resource_request` dan `kube_pod_resource_limit`. + +Metrik diekspos melalui _endpoint_ HTTP `/metrics/resources` dan memerlukan otorisasi yang sama seperti _endpoint_ `/metrics` +pada penjadwal. Kamu harus menggunakan opsi `--show-hidden-metrics-for-version=1.20` untuk mengekspos metrik-metrik stabilitas alfa ini. + +## {{% heading "whatsnext" %}} + +* Baca tentang [format teks Prometheus](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format) untuk berbagai metrik +* Baca tentang [kebijakan _deprecation_ Kubernetes](/docs/reference/using-api/deprecation-policy/#deprecating-a-feature-or-behavior) diff --git a/content/id/docs/concepts/containers/runtime-class.md b/content/id/docs/concepts/containers/runtime-class.md index 73252a03e4af3..539fbb7038cfd 100644 --- a/content/id/docs/concepts/containers/runtime-class.md +++ b/content/id/docs/concepts/containers/runtime-class.md @@ -45,7 +45,7 @@ soal bagaimana melakukan konfigurasi untuk implementasi CRI yang kamu miliki. Untuk saat ini, RuntimeClass berasumsi bahwa semua _node_ di dalam klaster punya konfigurasi yang sama (homogen). Jika ada _node_ yang punya konfigurasi berbeda dari yang lain (heterogen), maka perbedaan ini harus diatur secara independen di luar RuntimeClass -melalui fitur _scheduling_ (lihat [Menempatkan Pod pada Node](/id/docs/concepts/configuration/assign-pod-node/)). +melalui fitur _scheduling_ (lihat [Menempatkan Pod pada Node](/id/docs/concepts/scheduling-eviction/assign-pod-node/)). {{< /note >}} Seluruh konfigurasi memiliki nama `handler` yang terkait, dijadikan referensi oleh RuntimeClass. diff --git a/content/id/docs/concepts/overview/working-with-objects/labels.md b/content/id/docs/concepts/overview/working-with-objects/labels.md index 306edc0bfbdf3..7b4b125062120 100644 --- a/content/id/docs/concepts/overview/working-with-objects/labels.md +++ b/content/id/docs/concepts/overview/working-with-objects/labels.md @@ -220,6 +220,6 @@ selector: #### Memilih kumpulan Node Salah satu contoh penggunaan pemilihan dengan menggunakan label yaitu untuk membatasi suatu kumpulan Node tertentu yang dapat digunakan oleh Pod.
-Lihat dokumentasi pada [pemilihan Node](/id/docs/concepts/configuration/assign-pod-node/) untuk informasi lebih lanjut. +Lihat dokumentasi pada [pemilihan Node](/id/docs/concepts/scheduling-eviction/assign-pod-node/) untuk informasi lebih lanjut. diff --git a/content/id/docs/concepts/scheduling-eviction/_index.md b/content/id/docs/concepts/scheduling-eviction/_index.md new file mode 100644 index 0000000000000..0d080ed79dd4f --- /dev/null +++ b/content/id/docs/concepts/scheduling-eviction/_index.md @@ -0,0 +1,5 @@ +--- +title: "Penjadwalan dan Pengusiran" +weight: 90 +--- + diff --git a/content/id/docs/concepts/configuration/assign-pod-node.md b/content/id/docs/concepts/scheduling-eviction/assign-pod-node.md similarity index 98% rename from content/id/docs/concepts/configuration/assign-pod-node.md rename to content/id/docs/concepts/scheduling-eviction/assign-pod-node.md index ee9e8bf2f4a88..f8654f42769fb 100644 --- a/content/id/docs/concepts/configuration/assign-pod-node.md +++ b/content/id/docs/concepts/scheduling-eviction/assign-pod-node.md @@ -114,7 +114,7 @@ Berikut ini contoh dari pod yang menggunakan afinitas node: Aturan afinitas node tersebut menyatakan pod hanya bisa ditugaskan pada node dengan label yang memiliki kunci `kubernetes.io/e2e-az-name` dan bernilai `e2e-az1` atau `e2e-az2`. Selain itu, dari semua node yang memenuhi kriteria tersebut, mode dengan label dengan kunci `another-node-label-key` and bernilai `another-node-label-value` harus lebih diutamakan. -Kamu dapat meilhat operator `In` digunakan dalam contoh berikut. Sitaksis afinitas node yang baru mendukung operator-operator berikut: `In`, `NotIn`, `Exists`, `DoesNotExist`, `Gt`, `Lt`. Kamu dapat menggunakan `NotIn` dan `DoesNotExist` untuk mewujudkan perilaku node anti-afinitas, atau menggunakan [node taints](/id/docs/concepts/configuration/taint-and-toleration/) untuk menolak pod dari node tertentu. +Kamu dapat melihat operator `In` digunakan dalam contoh berikut. Sintaksis afinitas node yang baru mendukung operator-operator berikut: `In`, `NotIn`, `Exists`, `DoesNotExist`, `Gt`, `Lt`. Kamu dapat menggunakan `NotIn` dan `DoesNotExist` untuk mewujudkan perilaku node anti-afinitas, atau menggunakan [node taints](/id/docs/concepts/scheduling-eviction/taint-and-toleration/) untuk menolak pod dari node tertentu. Jika kamu menyatakan `nodeSelector` dan `nodeAffinity`. *keduanya* harus dipenuhi agar pod dapat dijadwalkan pada node kandidat. @@ -284,7 +284,7 @@ Lihat [tutorial ZooKeeper](/docs/tutorials/stateful-application/zookeeper/#toler Untuk informasi lebih lanjut tentang afinitas/anti-afinitas antar pod, lihat [design doc](https://git.k8s.io/community/contributors/design-proposals/scheduling/podaffinity.md). -Kamu juga dapat mengecek [Taints](/id/docs/concepts/configuration/taint-and-toleration/), yang memungkinkan sebuah *node* untuk *menolak* sekumpulan pod. +Kamu juga dapat mengecek [Taints](/id/docs/concepts/scheduling-eviction/taint-and-toleration/), yang memungkinkan sebuah *node* untuk *menolak* sekumpulan pod.
## nodeName diff --git a/content/id/docs/concepts/scheduling/kube-scheduler.md b/content/id/docs/concepts/scheduling-eviction/kube-scheduler.md similarity index 99% rename from content/id/docs/concepts/scheduling/kube-scheduler.md rename to content/id/docs/concepts/scheduling-eviction/kube-scheduler.md index 6f7efab3d9773..f55b1d942d716 100644 --- a/content/id/docs/concepts/scheduling/kube-scheduler.md +++ b/content/id/docs/concepts/scheduling-eviction/kube-scheduler.md @@ -94,7 +94,7 @@ penilaian oleh penjadwal: ## {{% heading "whatsnext" %}} -* Baca tentang [penyetelan performa penjadwal](/id/docs/concepts/scheduling/scheduler-perf-tuning/) +* Baca tentang [penyetelan performa penjadwal](/id/docs/concepts/scheduling-eviction/scheduler-perf-tuning/) * Baca tentang [pertimbangan penyebarang topologi pod](/id/docs/concepts/workloads/pods/pod-topology-spread-constraints/) * Baca [referensi dokumentasi](/docs/reference/command-line-tools-reference/kube-scheduler/) untuk _kube-scheduler_ * Pelajari tentang [mengkonfigurasi beberapa penjadwal](/docs/tasks/administer-cluster/configure-multiple-schedulers/) diff --git a/content/id/docs/concepts/configuration/pod-overhead.md b/content/id/docs/concepts/scheduling-eviction/pod-overhead.md similarity index 100% rename from content/id/docs/concepts/configuration/pod-overhead.md rename to content/id/docs/concepts/scheduling-eviction/pod-overhead.md diff --git a/content/id/docs/concepts/configuration/resource-bin-packing.md b/content/id/docs/concepts/scheduling-eviction/resource-bin-packing.md similarity index 100% rename from content/id/docs/concepts/configuration/resource-bin-packing.md rename to content/id/docs/concepts/scheduling-eviction/resource-bin-packing.md diff --git a/content/id/docs/concepts/scheduling/scheduler-perf-tuning.md b/content/id/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md similarity index 97% rename from content/id/docs/concepts/scheduling/scheduler-perf-tuning.md rename to content/id/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md index 3689ecf7cb6f7..3e94be54329a2 100644 --- a/content/id/docs/concepts/scheduling/scheduler-perf-tuning.md +++ b/content/id/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md @@ -8,7 +8,7 @@ weight: 70 {{< feature-state for_k8s_version="v1.14" state="beta" >}} -[kube-scheduler](/id/docs/concepts/scheduling/kube-scheduler/#kube-scheduler) +[kube-scheduler](/id/docs/concepts/scheduling-eviction/kube-scheduler/#kube-scheduler) merupakan penjadwal (_scheduler_) Kubernetes bawaan yang bertanggung jawab terhadap penempatan Pod-Pod pada seluruh Node di dalam sebuah klaster. @@ -66,7 +66,7 @@ Kamu bisa mengatur ambang batas untuk menentukan berapa banyak jumlah Node minim persentase bagian dari seluruh Node di dalam klaster kamu. kube-scheduler akan mengubahnya menjadi bilangan bulat berisi jumlah Node. Saat penjadwalan, jika kube-scheduler mengidentifikasi cukup banyak Node-Node layak untuk melewati jumlah persentase yang diatur, maka kube-scheduler -akan berhenti mencari Node-Node layak dan lanjut ke [fase penskoran] (/id/docs/concepts/scheduling/kube-scheduler/#kube-scheduler-implementation). +akan berhenti mencari Node-Node layak dan lanjut ke [fase penskoran](/id/docs/concepts/scheduling-eviction/kube-scheduler/#kube-scheduler-implementation). [Bagaimana penjadwal mengecek Node](#bagaimana-penjadwal-mengecek-node) menjelaskan proses ini secara detail.
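Sebagai sketsa minimal untuk melengkapi penjelasan ambang batas di atas, persentase tersebut dapat diatur melalui kolom `percentageOfNodesToScore` pada KubeSchedulerConfiguration; `apiVersion` dan nilainya di bawah ini hanyalah asumsi yang perlu disesuaikan dengan versi klaster:

```yaml
# Berkas ini diberikan ke kube-scheduler melalui opsi --config
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
# kube-scheduler berhenti mencari Node layak setelah menemukan
# Node layak sebanyak 50% dari seluruh Node, lalu lanjut ke fase penskoran
percentageOfNodesToScore: 50
```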
diff --git a/content/id/docs/concepts/scheduling/scheduling-framework.md b/content/id/docs/concepts/scheduling-eviction/scheduling-framework.md similarity index 100% rename from content/id/docs/concepts/scheduling/scheduling-framework.md rename to content/id/docs/concepts/scheduling-eviction/scheduling-framework.md diff --git a/content/id/docs/concepts/configuration/taint-and-toleration.md b/content/id/docs/concepts/scheduling-eviction/taint-and-toleration.md similarity index 100% rename from content/id/docs/concepts/configuration/taint-and-toleration.md rename to content/id/docs/concepts/scheduling-eviction/taint-and-toleration.md diff --git a/content/id/docs/concepts/scheduling/_index.md b/content/id/docs/concepts/scheduling/_index.md deleted file mode 100644 index 8903577124f58..0000000000000 --- a/content/id/docs/concepts/scheduling/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: "Penjadwalan" -weight: 90 ---- - diff --git a/content/id/docs/concepts/services-networking/service.md b/content/id/docs/concepts/services-networking/service.md index 5136a00415e95..3c85c5abbf49a 100644 --- a/content/id/docs/concepts/services-networking/service.md +++ b/content/id/docs/concepts/services-networking/service.md @@ -745,7 +745,7 @@ dan tidak akan menerima trafik apa pun. Untuk menghasilkan distribusi trafik yang merata, kamu dapat menggunakan _DaemonSet_ atau melakukan spesifikasi -[pod anti-affinity](/id/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) +[pod anti-affinity](/id/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) agar `Pod` tidak di-_assign_ ke _node_ yang sama. NLB juga dapat digunakan dengan anotasi [internal load balancer](/id/docs/concepts/services-networking/service/#internal-load-balancer). diff --git a/content/id/docs/concepts/storage/storage-classes.md b/content/id/docs/concepts/storage/storage-classes.md index 2897399e80995..c5fc71a8de8a4 100644 --- a/content/id/docs/concepts/storage/storage-classes.md +++ b/content/id/docs/concepts/storage/storage-classes.md @@ -149,10 +149,10 @@ PersistentVolumeClaim dibuat. PersistentVolume akan dipilih atau di-_provisionin sesuai dengan topologi yang dispesifikasikan oleh limitasi yang diberikan oleh mekanisme _scheduling_ Pod. Hal ini termasuk, tetapi tidak hanya terbatas pada, [persyaratan sumber daya](/id/docs/concepts/configuration/manage-compute-resources-container), -[_node selector_](/id/docs/concepts/configuration/assign-pod-node/#nodeselector), +[_node selector_](/id/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector), [afinitas dan -anti-afinitas Pod](/id/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity), -serta [_taint_ dan _toleration_](/id/docs/concepts/configuration/taint-and-toleration). +anti-afinitas Pod](/id/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity), +serta [_taint_ dan _toleration_](/id/docs/concepts/scheduling-eviction/taint-and-toleration). Beberapa _plugin_ di bawah ini mendukung `WaitForFirstConsumer` dengan _provisioning_ dinamis: diff --git a/content/id/docs/concepts/workloads/controllers/daemonset.md b/content/id/docs/concepts/workloads/controllers/daemonset.md index 0b1c0e71e92f1..905a0a193aa9f 100644 --- a/content/id/docs/concepts/workloads/controllers/daemonset.md +++ b/content/id/docs/concepts/workloads/controllers/daemonset.md @@ -97,8 +97,8 @@ membuat Pod dengan nilai yang berbeda di sebuah Node untuk _testing_.
Jika kamu menspesifikasikan `.spec.template.spec.nodeSelector`, maka _controller_ DaemonSet akan membuat Pod pada Node yang cocok dengan [selektor -Node](/id/docs/concepts/configuration/assign-pod-node/). Demikian juga, jika kamu menspesifikasikan `.spec.template.spec.affinity`, -maka _controller_ DaemonSet akan membuat Pod pada Node yang cocok dengan [Node affinity](/id/docs/concepts/configuration/assign-pod-node/). +Node](/id/docs/concepts/scheduling-eviction/assign-pod-node/). Demikian juga, jika kamu menspesifikasikan `.spec.template.spec.affinity`, +maka _controller_ DaemonSet akan membuat Pod pada Node yang cocok dengan [Node affinity](/id/docs/concepts/scheduling-eviction/assign-pod-node/). Jika kamu tidak menspesifikasikan sama sekali, maka _controller_ DaemonSet akan membuat Pod pada semua Node. diff --git a/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md index 0e1b36ccc5f6f..97aa5a47f3cb4 100644 --- a/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md +++ b/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md @@ -85,6 +85,6 @@ Perhatikan bahwa hal ini dapat terjadi apabila TTL diaktifkan dengan nilai selai [Membersikan Job secara Otomatis](/id/docs/concepts/workloads/controllers/jobs-run-to-completion/#clean-up-finished-jobs-automatically) -[Dokumentasi Rancangan](https://github.com/kubernetes/enhancements/blob/master/keps/sig-apps/0026-ttl-after-finish.md) +[Dokumentasi Rancangan](https://github.com/kubernetes/enhancements/blob/master/keps/sig-apps/592-ttl-after-finish/README.md) diff --git a/content/id/docs/concepts/workloads/pods/ephemeral-containers.md b/content/id/docs/concepts/workloads/pods/ephemeral-containers.md index e952bdd19bec5..2d0c515859f03 100644 --- a/content/id/docs/concepts/workloads/pods/ephemeral-containers.md +++ b/content/id/docs/concepts/workloads/pods/ephemeral-containers.md @@ -106,7 +106,7 @@ deskripsikan kontainer sementara untuk ditambahkan dalam daftar "apiVersion": "v1", "kind": "EphemeralContainers", "metadata": { - "name": "example-pod" + "name": "example-pod" }, "ephemeralContainers": [{ "command": [ diff --git a/content/id/docs/contribute/_index.md b/content/id/docs/contribute/_index.md index d793a789672d9..6762aa97d12a8 100644 --- a/content/id/docs/contribute/_index.md +++ b/content/id/docs/contribute/_index.md @@ -75,5 +75,5 @@ terhadap dokumentasi Kubernetes, tetapi daftar ini dapat membantumu memulainya. - Untuk berkontribusi ke komunitas Kubernetes melalui forum-forum daring seperti Twitter atau Stack Overflow, atau mengetahui tentang pertemuan komunitas (_meetup_) lokal dan acara-acara Kubernetes, kunjungi [situs komunitas Kubernetes](/community/). - Untuk mulai berkontribusi ke pengembangan fitur, baca [_cheatseet_ kontributor](https://github.com/kubernetes/community/tree/master/contributors/guide/contributor-cheatsheet). 
- +- Untuk kontribusi khusus ke halaman Bahasa Indonesia, baca [Dokumentasi Khusus Untuk Translasi Bahasa Indonesia](/docs/contribute/localization_id.md) diff --git a/content/id/docs/contribute/localization_id.md b/content/id/docs/contribute/localization_id.md new file mode 100644 index 0000000000000..5a9c491297bc8 --- /dev/null +++ b/content/id/docs/contribute/localization_id.md @@ -0,0 +1,178 @@ +--- +title: Dokumentasi Khusus Untuk Translasi Bahasa Indonesia +content_type: concept +--- + + + +Panduan khusus untuk bergabung ke komunitas SIG DOC Indonesia dan melakukan +kontribusi untuk mentranslasikan dokumentasi Kubernetes ke dalam Bahasa +Indonesia. + + + +## Manajemen _Milestone_ Tim {#manajemen-milestone-tim} + +Secara umum siklus translasi dokumentasi ke Bahasa Indonesia akan dilakukan +3 kali dalam setahun (sekitar setiap 4 bulan). Untuk menentukan dan mengevaluasi +pencapaian atau _milestone_ dalam kurun waktu tersebut [jadwal rapat daring +reguler tim Bahasa Indonesia](https://zoom.us/j/6072809193) dilakukan secara +konsisten setiap dua minggu sekali. Dalam [agenda rapat ini](https://docs.google.com/document/d/1Qrj-WUAMA11V6KmcfxJsXcPeWwMbFsyBGV4RGbrSRXY) +juga dilakukan pemilihan PR _Wrangler_ untuk dua minggu ke depan. Tugas PR +_Wrangler_ tim Bahasa Indonesia serupa dengan PR _Wrangler_ dari proyek +_upstream_. + +Target pencapaian atau _milestone_ tim akan dirilis sebagai +[_issue tracking_ seperti ini](https://github.com/kubernetes/website/issues/22296) +pada Kubernetes GitHub Website setiap 4 bulan. Bersama dengan informasi +PR _Wrangler_ yang dipilih setiap dua minggu, keduanya akan diumumkan di Slack +_channel_ [#kubernetes-docs-id](https://kubernetes.slack.com/archives/CJ1LUCUHM) +dari Komunitas Kubernetes. + +## Cara Memulai Translasi + +Untuk menerjemahkan satu halaman Bahasa Inggris ke Bahasa Indonesia, lakukan +langkah-langkah berikut ini: + +* Periksa halaman _issue_ di GitHub dan pastikan tidak ada orang lain yang sudah +mengklaim halaman kamu dalam daftar periksa atau komentar-komentar sebelumnya. +* Klaim halaman kamu pada _issue_ di GitHub dengan memberikan komentar di bawah +dengan nama halaman yang ingin kamu terjemahkan dan ambillah hanya satu halaman +dalam satu waktu. +* _Fork_ [repo ini](https://github.com/kubernetes/website), buat terjemahan +kamu, dan kirimkan PR (_pull request_) dengan label `language/id`. +* Setelah dikirim, pengulas akan memberikan komentar dalam beberapa hari, dan +tolong jawab semua komentar. Direkomendasikan juga untuk melakukan +[_squash_](https://github.com/wprig/wprig/wiki/How-to-squash-commits) _commit_ +kamu dengan pesan _commit_ yang baik. + + +## Informasi Acuan Untuk Translasi + +Tidak ada panduan gaya khusus untuk menulis translasi ke bahasa Indonesia. +Namun, secara umum kita dapat mengikuti panduan gaya bahasa Inggris dengan +beberapa tambahan untuk kata-kata impor yang dicetak miring. + +Harap berkomitmen dengan terjemahan kamu dan pada saat kamu mendapatkan komentar +dari pengulas, silakan atasi sebaik-baiknya. Kami berharap halaman yang +diklaim akan diterjemahkan dalam waktu kurang lebih dua minggu. Jika ternyata +kamu tidak dapat berkomitmen lagi, beri tahu para pengulas agar mereka dapat +memberikan halaman tersebut ke orang lain.
+ +Beberapa acuan tambahan dalam melakukan translasi silakan lihat informasi +berikut ini: + +### Daftar Glosarium Translasi dari tim SIG DOC Indonesia +Untuk kata-kata selengkapnya silakan baca glosariumnya +[di sini](#glosarium-indonesia) + +### KBBI +Konsultasikan dengan KBBI (Kamus Besar Bahasa Indonesia) +[di sini](https://kbbi.web.id/) dari +[Kemendikbud](https://kbbi.kemdikbud.go.id/). + +### RSNI Glosarium dari Ivan Lanin +[RSNI Glosarium](https://github.com/jk8s/sig-docs-id-localization-how-tos/blob/master/resources/RSNI-glossarium.pdf) +dapat digunakan untuk memahami bagaimana menerjemahkan berbagai istilah teknis +dan khusus Kubernetes. + + +## Panduan Penulisan _Source Code_ + +### Mengikuti kode asli dari dokumentasi bahasa Inggris + +Untuk kenyamanan pemeliharaan, ikuti lebar teks asli dalam kode bahasa Inggris. +Dengan kata lain, jika teks asli ditulis dalam baris yang panjang tanpa putus +satu baris, maka teks tersebut ditulis panjang dalam satu baris meskipun dalam +bahasa Indonesia. Jagalah agar tetap serupa. + +### Hapus nama reviewer di kode asli bahasa Inggris + +Terkadang _reviewer_ ditentukan di bagian atas kode di teks asli Bahasa Inggris. +Secara umum, _reviewer-reviewer_ halaman aslinya akan kesulitan untuk meninjau +halaman dalam bahasa Indonesia, jadi hapus kode yang terkait dengan informasi +_reviewer_ dari metadata kode tersebut. + + +## Panduan Penulisan Kata-kata Translasi + +### Panduan umum + +* Gunakan "kamu" daripada "Anda" sebagai subyek agar lebih bersahabat dengan +para pembaca dokumentasi. +* Tulislah miring untuk kata-kata bahasa Inggris yang diimpor jika kamu tidak +dapat menemukan kata-kata tersebut dalam bahasa Indonesia. +*Benar*: _controller_. *Salah*: controller, `controller` + +### Panduan untuk kata-kata API Objek Kubernetes + +Gunakan gaya "CamelCase" untuk menulis objek API Kubernetes, lihat daftar +lengkapnya [di sini](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/). +Sebagai contoh: + +* *Benar*: PersistentVolume. *Salah*: volume persisten, `PersistentVolume`, +persistentVolume +* *Benar*: Pod. *Salah*: pod, `pod`, "pod" + +*Tips*: Biasanya API objek sudah ditulis dalam huruf kapital pada halaman asli +bahasa Inggris. + +### Panduan untuk kata-kata yang sama dengan API Objek Kubernetes + +Ada beberapa kata-kata yang serupa dengan nama API objek dari Kubernetes dan +dapat mengacu ke arti yang lebih umum (tidak selalu dalam konteks Kubernetes). +Sebagai contoh: _service_, _container_, _node_, dan lain sebagainya. Kata-kata +ini sebaiknya ditranslasikan ke Bahasa Indonesia, sebagai contoh _service_ menjadi +layanan, _container_ menjadi kontainer. + +*Tips*: Biasanya kata-kata yang mengacu ke arti yang lebih umum sudah *tidak* +ditulis dalam huruf kapital pada halaman asli bahasa Inggris. + +### Panduan untuk "Feature Gate" Kubernetes + +Istilah [_feature gate_](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/) +Kubernetes tidak perlu diterjemahkan ke dalam bahasa Indonesia dan tetap +dipertahankan dalam bentuk aslinya. + +Contoh dari _feature gate_ adalah sebagai berikut: + +- Accelerators +- AdvancedAuditing +- AffinityInAnnotations +- AllowExtTrafficLocalEndpoints +- ...
+ +### Glosarium Indonesia {#glosarium-indonesia} + +Inggris | Tipe Kata | Indonesia | Sumber | Contoh Kalimat +---|---|---|---|--- +cluster | | klaster | | +container | | kontainer | | +node | kata benda | node | | +file | | berkas | | +service | kata benda | layanan | | +set | | sekumpulan | | +resource | | sumber daya | | +default | | bawaan atau standar (tergantung konteks) | | Secara bawaan, ...; Pada konfigurasi dan instalasi standar, ... +deploy | | menggelar | | +image | | _image_ | | +request | | permintaan | | +object | kata benda | objek | https://kbbi.web.id/objek | +command | | perintah | https://kbbi.web.id/perintah | +view | | tampilan | | +support | | tersedia atau dukungan (tergantung konteks) | "This feature is supported on version X; Fitur ini tersedia pada versi X; Supported by community; Didukung oleh komunitas" +release | kata benda | rilis | https://kbbi.web.id/rilis | +tool | | perangkat | | +deployment | | penggelaran | | +client | | klien | | +reference | | rujukan | | +update | | pembaruan | | The latest update... ; Pembaruan terkini... +state | | _state_ | | +task | | _task_ | | +certificate | | sertifikat | | +install | | instalasi | https://kbbi.web.id/instalasi | +scale | | skala | | +process | kata kerja | memproses | https://kbbi.web.id/proses | +replica | kata benda | replika | https://kbbi.web.id/replika | +flag | | tanda, parameter, argumen | | +event | | _event_ | | \ No newline at end of file diff --git a/content/id/docs/home/supported-doc-versions.md b/content/id/docs/home/supported-doc-versions.md index 6cecfdaec171e..c28dec9fab326 100644 --- a/content/id/docs/home/supported-doc-versions.md +++ b/content/id/docs/home/supported-doc-versions.md @@ -1,29 +1,12 @@ --- -title: Versi Kubernetes yang Termasuk dalam Dokumentasi -content_type: concept +title: Versi Dokumentasi yang Tersedia +content_type: custom +layout: supported-versions card: name: about weight: 10 - title: Versi Kubernetes yang Termasuk dalam Dokumentasi + title: Versi Dokumentasi yang Tersedia --- - - -Situs ini merupakan dokumentasi dari Kubernetes versi saat ini dan 4 versi sebelumnya. - - - - - -## Versi saat ini - -Versi saat ini adalah -[{{< param "version" >}}](/). - -## Versi sebelumnya - -{{< versions-other >}} - - - - +Situs web ini berisi dokumentasi untuk versi terkini Kubernetes +dan empat versi sebelumnya. diff --git a/content/id/docs/reference/glossary/taint.md b/content/id/docs/reference/glossary/taint.md new file mode 100644 index 0000000000000..1545a025fde4f --- /dev/null +++ b/content/id/docs/reference/glossary/taint.md @@ -0,0 +1,18 @@ +--- +title: Taint +id: taint +date: 2019-01-11 +full_link: /id/docs/concepts/scheduling-eviction/taint-and-toleration/ +short_description: > + Objek inti yang terdiri dari tiga properti yang diperlukan: _key_ (kunci), _value_ (nilai), dan _effect_ (efek). Taint mencegah penjadwalan Pod pada Node atau grup Node. + +aka: +tags: +- core-object +- fundamental +--- + Objek inti yang terdiri dari tiga properti yang diperlukan: _key_ (kunci), _value_ (nilai), dan _effect_ (efek). Taint mencegah penjadwalan {{< glossary_tooltip text="Pod" term_id="pod" >}} pada {{< glossary_tooltip text="Node" term_id="node" >}} atau grup dari Node. + + + +Taint dan {{< glossary_tooltip text="toleransi" term_id="toleration" >}} bekerja sama untuk memastikan bahwa Pod tidak dijadwalkan ke Node yang tidak sesuai. Satu atau lebih taint dapat diterapkan pada Node. Sebuah Node seharusnya hanya menjadwalkan Pod dengan toleransi yang cocok untuk taint yang dikonfigurasi.
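Sebagai sketsa minimal yang melengkapi definisi glosarium di atas (nama `key`, `value`, dan Node di bawah ini hanyalah contoh hipotetis), sebuah Pod mendeklarasikan toleransi yang cocok dengan taint pada Node:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-dengan-toleransi
spec:
  containers:
  - name: app
    image: nginx
  tolerations:
  # cocok dengan taint yang dibuat, misalnya dengan:
  # kubectl taint nodes node1 contoh-key=contoh-value:NoSchedule
  - key: "contoh-key"
    operator: "Equal"
    value: "contoh-value"
    effect: "NoSchedule"
```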
diff --git a/content/id/docs/reference/glossary/toleration.md b/content/id/docs/reference/glossary/toleration.md new file mode 100644 index 0000000000000..45a2ac7f7c7dc --- /dev/null +++ b/content/id/docs/reference/glossary/toleration.md @@ -0,0 +1,17 @@ +--- +title: Toleransi (Toleration) +id: toleration +date: 2019-01-11 +full_link: /docs/concepts/scheduling-eviction/taint-and-toleration/ +short_description: > + Objek inti yang terdiri dari tiga properti yang diperlukan: _key_ (kunci), _value_ (nilai), dan _effect_ (efek). Toleransi memungkinkan penjadwalan Pod pada Node atau grup dari Node yang memiliki taint yang cocok. +aka: +tags: +- core-object +- fundamental +--- + Objek inti yang terdiri dari tiga properti yang diperlukan: _key_ (kunci), _value_ (nilai), dan _effect_ (efek). Toleransi memungkinkan penjadwalan Pod pada Node atau grup dari Node yang memiliki {{< glossary_tooltip text="taints" term_id="taint" >}} yang cocok. + + + +Toleransi dan {{< glossary_tooltip text="taints" term_id="taint" >}} bekerja sama untuk memastikan bahwa Pod tidak dijadwalkan ke Node yang tidak sesuai. Satu atau lebih taint dapat diterapkan pada Node. Sebuah Node seharusnya hanya menjadwalkan Pod dengan toleransi yang cocok untuk taint yang dikonfigurasi. diff --git a/content/id/docs/reference/setup-tools/kubeadm/_index.md b/content/id/docs/reference/setup-tools/kubeadm/_index.md new file mode 100644 index 0000000000000..6e858c541adfd --- /dev/null +++ b/content/id/docs/reference/setup-tools/kubeadm/_index.md @@ -0,0 +1,30 @@ +--- +title: "Kubeadm" +weight: 10 +no_list: true +content_type: concept +card: + name: reference + weight: 40 +--- + +Kubeadm adalah perangkat yang dibuat untuk menyediakan `kubeadm init` dan `kubeadm join` sebagai "jalur cepat" dengan praktik terbaik untuk membuat klaster Kubernetes. + +kubeadm melakukan tindakan yang diperlukan untuk membuat klaster minimum yang layak untuk aktif dan berjalan. Secara desain, kubeadm hanya peduli pada proses *bootstrap*, bukan pada *provisioning* mesin. Demikian pula, instalasi berbagai *addon* tambahan yang bagus untuk dimiliki, seperti Dasbor Kubernetes, solusi pemantauan, dan *addon* khusus cloud, tidak termasuk dalam cakupannya. + +Sebaliknya, kita mengharapkan perangkat dengan tingkat yang lebih tinggi dan lebih disesuaikan untuk dibangun di atas kubeadm, dan idealnya, menggunakan kubeadm sebagai dasar dari semua penerapan untuk mempermudah pembuatan klaster yang sesuai. + +## Cara Instalasi + +Untuk menginstal kubeadm, silakan lihat [Petunjuk Instalasi](/docs/setup/production-environment/tools/kubeadm/install-kubeadm).
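Sebagai sketsa minimal tentang bagaimana kubeadm dikonfigurasi secara deklaratif (versi API, versi Kubernetes, dan subnet di bawah ini hanyalah asumsi), sebuah berkas konfigurasi dapat diberikan melalui `kubeadm init --config`:

```yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.21.0 # versi hanya contoh
networking:
  # subnet Pod; sesuaikan dengan add-on jaringan (CNI) yang digunakan
  podSubnet: 10.244.0.0/16
```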
+ +## {{% heading "whatsnext" %}} + +* [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init) untuk melakukan **bootstrap** pada node control-plane Kubernetes +* [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join) untuk melakukan **bootstrap** pada worker node Kubernetes dan menggabungkannya ke dalam klaster +* [kubeadm upgrade](/docs/reference/setup-tools/kubeadm/kubeadm-upgrade) untuk melakukan pembaruan klaster Kubernetes ke versi yang lebih baru +* [kubeadm config](/docs/reference/setup-tools/kubeadm/kubeadm-config) untuk mengonfigurasi klaster kamu untuk `kubeadm upgrade`, jika kamu menggunakan kubeadm v1.7.x atau di bawahnya untuk menginisialisasi klaster kamu +* [kubeadm token](/docs/reference/setup-tools/kubeadm/kubeadm-token) untuk mengatur token-token untuk `kubeadm join` +* [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset) untuk mengembalikan semua perubahan yang dibuat untuk host dengan `kubeadm init` atau `kubeadm join` +* [kubeadm version](/docs/reference/setup-tools/kubeadm/kubeadm-version) untuk mencetak versi kubeadm +* [kubeadm alpha](/docs/reference/setup-tools/kubeadm/kubeadm-alpha) untuk melihat sekilas sekumpulan fitur yang ada untuk mengumpulkan *feedback* atau umpan balik dari komunitas diff --git a/content/id/docs/setup/learning-environment/_index.md b/content/id/docs/setup/learning-environment/_index.md index 4c116a9bc08a6..cdfe637e6402a 100644 --- a/content/id/docs/setup/learning-environment/_index.md +++ b/content/id/docs/setup/learning-environment/_index.md @@ -2,3 +2,34 @@ title: Lingkungan Pembelajaran weight: 20 --- + +## kind + +[`kind`](https://kind.sigs.k8s.io/docs/) memberikan kamu kemampuan untuk +menjalankan Kubernetes pada komputer lokal kamu. Perangkat ini membutuhkan +[Docker](https://docs.docker.com/get-docker/) yang sudah diinstal dan +terkonfigurasi. + +Halaman [Memulai Cepat](https://kind.sigs.k8s.io/docs/user/quick-start/) `kind` +memperlihatkan kepada kamu tentang apa yang perlu kamu lakukan agar `kind` dapat +berjalan dan bekerja. + +Lihat Memulai Cepat Kind + +## minikube + +Seperti halnya dengan `kind`, [`minikube`](https://minikube.sigs.k8s.io/) +merupakan perangkat yang memungkinkan kamu untuk menjalankan Kubernetes +secara lokal. `minikube` menjalankan sebuah klaster Kubernetes dengan +satu node saja dalam komputer pribadi (termasuk Windows, macOS dan Linux) +sehingga kamu dapat mencoba Kubernetes atau untuk pekerjaan pengembangan +sehari-hari. + +Kamu bisa mengikuti petunjuk resmi +[Memulai!](https://minikube.sigs.k8s.io/docs/start/) +`minikube` jika kamu ingin fokus agar perangkat ini terinstal. + +Lihat Panduan Memulai! Minikube + +Setelah kamu memiliki `minikube` yang bekerja, kamu bisa menggunakannya +untuk [menjalankan aplikasi contoh](/id/docs/tutorials/hello-minikube/). diff --git a/content/id/docs/setup/learning-environment/minikube.md b/content/id/docs/setup/learning-environment/minikube.md deleted file mode 100644 index 8729968738903..0000000000000 --- a/content/id/docs/setup/learning-environment/minikube.md +++ /dev/null @@ -1,514 +0,0 @@ ---- -title: Instalasi Kubernetes dengan Minikube -weight: 30 -content_type: concept ---- - - - -Minikube adalah alat yang memudahkan untuk menjalankan Kubernetes pada komputer lokal. Minikube menjalankan satu Node klaster Kubernetes di dalam _Virtual Machine_ (VM) pada laptop kamu untuk pengguna yang ingin mencoba Kubernetes atau mengembangkannya.
- - - - -## Fitur Minikube - -Minikube mendukung fitur Kubernetes berikut: - -* DNS -* NodePort -* {{< glossary_tooltip text="ConfigMap" term_id="configmap" >}} dan {< glossary_tooltip text="Secret" term_id="secret" >}} -* _Dashboard_ -* _Container runtime_: [Docker](https://www.docker.com/), [CRI-O](https://cri-o.io/), dan [containerd](https://github.com/containerd/containerd) -* {{< glossary_tooltip text="CNI" term_id="cni" >}} -* Ingress - -## Instalasi - -Lihat [Instalasi Minikube](/id/docs/tasks/tools/install-minikube/). - -## Memulai Cepat - -Demonstrasi singkat ini memandu kamu tentang bagaimana memulai, menggunakan dan menghapus Minikube secara lokal. Ikuti langkah berikut untuk memulai dan menjelajahi Minikube. - -1. Mulailah Minikube dan buatlah sebuah klaster: - - ```shell - minikube start - ``` - - Keluaran menyerupai: - - ``` - Starting local Kubernetes cluster... - Running pre-create checks... - Creating machine... - Starting local Kubernetes cluster... - ``` - Untuk informasi lebih lanjut mengenai bagaimana memulai klaster pada versi Kubernetes tertentu, VM atau Container _runtime_, lihatlah [Memulai klaster](#memulai-klaster). - -2. Kini kamu bisa berinteraksi dengan klaster kamu dengan kubectl. Untuk informasi lebih lanjut, lihatlah [Interaksi dengan klaster kamu](#interaksi-dengan-klaster-kamu). - - Mari kita buat Kubernetes Deployment menggunakan _image_ bernama `echoserver`, yaitu sebuah server HTTP sederhana dan buka layanan pada porta 8080 dengan menggunakan opsi `--port`. - - ```shell - kubectl create deployment hello-minikube --image=k8s.gcr.io/echoserver:1.10 - ``` - - Keluaran menyerupai: - - ``` - deployment.apps/hello-minikube created - ``` -3. Untuk mengakses Deployment `hello-minikube`, bukalah dia sebagai sebuah Service: - - ```shell - kubectl expose deployment hello-minikube --type=NodePort --port=8080 - ``` - - Opsi `--type=NodePort` menentukan tipe Service. - - Keluarannya menyerupai: - - ``` - service/hello-minikube exposed - ``` - -4. Pod `hello-minikube` saat ini telah dibuat namun kamu harus menunggu hingga Pod selesai dijalankan sebelum dapat mengaksesnya melalui Service yang telah dibuka. - - Cek apakah Pod sudah berjalan dan beroperasi: - - ```shell - kubectl get pod - ``` - - Jika keluaran menampilkan `STATUS` sebagai `ContainerCreating`, maka Pod sedang dalam proses pembuatan: - - ``` - NAME READY STATUS RESTARTS AGE - hello-minikube-3383150820-vctvh 0/1 ContainerCreating 0 3s - ``` - - Jika keluaran menampilkan `STATUS` sebagai `Running`, maka Pod sudah berjalan dan beroperasi: - - ``` - NAME READY STATUS RESTARTS AGE - hello-minikube-3383150820-vctvh 1/1 Running 0 13s - ``` - -5. Ambil URL Service yang telah dibuka untuk melihat Service secara detail: - - ```shell - minikube service hello-minikube --url - ``` - -6. Untuk melihat detail dari klaster lokal kamu, salin dan tempel URL yang kamu dapatkan dari keluaran pada peramban kamu. - - Keluarannya menyerupai: - - ``` - Hostname: hello-minikube-7c77b68cff-8wdzq - - Pod Information: - -no pod information available- - - Server values: - server_version=nginx: 1.13.3 - lua: 10008 - - Request Information: - client_address=172.17.0.1 - method=GET - real path=/ - query= - request_version=1.1 - request_scheme=http - request_uri=http://192.168.99.100:8080/ - - Request Headers: - accept=*/* - host=192.168.99.100:30674 - user-agent=curl/7.47.0 - - Request Body: - -no body in request- - ``` - - Jika kamu tidak lagi membutuhkan Service dan klaster, maka kamu bisa menghapusnya. - -7. 
Hapuslah Service `hello-minikube`: - - ```shell - kubectl delete services hello-minikube - ``` - - Keluarannya menyerupai: - - ``` - service "hello-minikube" deleted - ``` - -8. Hapuslah Deployment `hello-minikube`: - - ```shell - kubectl delete deployment hello-minikube - ``` - - Keluarannya menyerupai: - - ``` - deployment.extensions "hello-minikube" deleted - ``` - -9. Hentikanlah klaster Minikube lokal: - - ```shell - minikube stop - ``` - - Keluarannya menyerupai: - - ``` - Stopping "minikube"... - "minikube" stopped. - ``` - - Untuk informasi lebih lanjut, lihatlah [Menghentikan Klaster](#menghentikan-klaster). - -10. Hapuslah klaster Minikube lokal - - ```shell - minikube delete - ``` - Keluarannya menyerupai: - ``` - Deleting "minikube" ... - The "minikube" cluster has been deleted. - ``` - Untuk informasi lebih lanjut, lihat [Menghapus Klaster](#menghapus-klaster). - -## Mengelola Klaster - -### Memulai Klaster - -Perintah `minikube start` bisa digunakan untuk memulai klaster kamu. -Perintah ini membuat dan mengonfigurasi sebuah mesin virtual yang menjalankan klaster Kubernetes dengan satu Node. -Perintah ini juga mengonfigurasi instalasi [kubectl](/id/docs/user-guide/kubectl-overview/) untuk berkomunikasi dengan klaster ini. - -{{< note >}} -Jika kamu menggunakan proksi web, maka kamu harus meneruskan informasi berikut ini ke perintah `minikube start`: - -```shell -https_proxy= minikube start --docker-env http_proxy= --docker-env https_proxy= --docker-env no_proxy=192.168.99.0/24 -``` -Sayangnya, pengaturan dengan _environment variable_ saja tidak berguna. - -Minikube juga membuat konteks "minikube", dan menetapkannya sebagai bawaan di kubectl. -Untuk kembali menggunakan konteks ini, jalankan perintah: `kubectl config use-context minikube`. -{{< /note >}} - -#### Menentukan Versi Kubernetes - -Kamu bisa menentukan versi Kubernetes yang digunakan oleh Minikube dengan -menambahkan `--kubernetes-version` ke perintah `minikube start`. Sebagai -contoh, untuk menjalankan versi {{}}, kamu akan menjalankan perintah berikut: - -``` -minikube start --kubernetes-version {{< param "fullversion" >}} -``` -#### Menentukan _driver_ VM - -Kamu bisa mengubah _driver_ VM dengan menambahkan tanda `--driver=` pada `minikube start`. -Sebagai contoh: -```shell -minikube start --driver= -``` - -Minikube mendukung _driver_ berikut ini: -{{< note >}} -Lihat [_DRIVER_](https://minikube.sigs.k8s.io/docs/reference/drivers/) untuk detail tentang _driver_ yang didukung dan proses instalasi _plugin_. -{{< /note >}} - -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/drivers/docker/)) docker -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/drivers/virtualbox/)) virtualbox -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/drivers/podman/)) podman (TAHAP EXPERIMEN) -* vmwarefusion -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/)) kvm2 -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/reference/drivers/hyperkit/)) hyperkit -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/reference/drivers/hyperv/)) hyperv -Perlu diingat bahwa IP dibawah adalah dinamik dan bisa berubah. IP ini bisa diambil dengan `minikube ip`. -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/)) vmware (_driver_ VMware terpadu) -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/reference/drivers/parallels/)) parallels -* none (menjalankan komponen Kubernetes pada hos dan bukan pada mesin virtual. 
Kamu harus menjalankan Linux dan harus menginstal {{}}.) - -{{< caution >}} -Jika kamu menggunakan _driver_ `none`, beberapa komponen Kubernetes dijalankan sebagai Container istimewa yang memiliki efek samping di luar lingkungan Minikube. Efek samping tersebut berarti bahwa _driver_ `none` tidak direkomendasikan untuk komputer pribadi. -{{< /caution >}} - -#### Memulai klaster pada _runtime_ kontainer alternatif -Kamu bisa memulai Minikube pada _runtime_ kontainer berikut. -{{< tabs name="container_runtimes" >}} -{{% tab name="containerd" %}} -Untuk menggunakan [containerd](https://github.com/containerd/containerd) sebagai _runtime_ kontainer, jalankan: -```bash -minikube start \ - --network-plugin=cni \ - --enable-default-cni \ - --container-runtime=containerd \ - --bootstrapper=kubeadm -``` - -Atau kamu bisa menggunakan versi yang diperpanjang: - -```bash -minikube start \ - --network-plugin=cni \ - --enable-default-cni \ - --extra-config=kubelet.container-runtime=remote \ - --extra-config=kubelet.container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --extra-config=kubelet.image-service-endpoint=unix:///run/containerd/containerd.sock \ - --bootstrapper=kubeadm -``` -{{% /tab %}} -{{% tab name="CRI-O" %}} -Untuk menggunakan [CRI-O](https://cri-o.io/) sebagain _runtime_ kontainer, jalankan: -```bash -minikube start \ - --network-plugin=cni \ - --enable-default-cni \ - --container-runtime=cri-o \ - --bootstrapper=kubeadm -``` -Atau kamu bisa menggunakan versi yang diperpanjang: - -```bash -minikube start \ - --network-plugin=cni \ - --enable-default-cni \ - --extra-config=kubelet.container-runtime=remote \ - --extra-config=kubelet.container-runtime-endpoint=/var/run/crio.sock \ - --extra-config=kubelet.image-service-endpoint=/var/run/crio.sock \ - --bootstrapper=kubeadm -``` -{{% /tab %}} -{{< /tabs >}} - -#### Menggunakan _image_ lokal degan menggunakan kembali _daemon_ Docker - -Saat menggunakan sebuah VM untuk Kubernetes, akan lebih baik jika _daemon_ Docker bawaan Minikube digunakan kembali. Menggunakan kembali _daemon_ bawaan membuat kamu tidak perlu membangun registri Docker pada mesin hos kamu dan mengunggah _image_ ke dalamnya. Namun, kamu dapat membangun di dalam _daemon_ Docker yang sama dengan Minikube, yang tentunya dapat mempercepat percobaan lokal. - -{{< note >}} -Pastikan untuk memberi _tag_ pada Docker _image_ kamu dengan sesuatu selain `latest` dan gunakan _tag_ tersebut untuk menarik _image_. Karena `:latest` adalah bawaan, dengan kebijakan penarikan _image_ bawaan, yaitu `Always`, kesalahan penarikan _image_ (`ErrImagePull`) akhirnya dapat terjadi jika kamu tidak memiliki _image_ Docker di register Docker bawaan (biasanya DockerHub). -{{< /note >}} - -Untuk bekerja dengan _daemon_ Docker pada mesin Mac/Linux, jalankan baris terakhir dari `minikube docker-env`. 
- -Kamu sekarang dapat menggunakan Docker di terminal mesin Mac/Linux kamu untuk berkomunikasi dengan _daemon_ Docker di dalam VM Minikube: - -```shell -docker ps -``` - -{{< note >}} -Pada Centos 7, Docker bisa memberikan kesalahan berikut: - -``` -Could not read CA certificate "/etc/docker/ca.pem": open /etc/docker/ca.pem: no such file or directory -``` - -Kamu bisa memperbaikinya dengan memperbaharui /etc/sysconfig/docker untuk memastikan bahwa lingkungan Minikube dikenali: - -```shell -< DOCKER_CERT_PATH=/etc/docker ---- -> if [ -z "${DOCKER_CERT_PATH}" ]; then -> DOCKER_CERT_PATH=/etc/docker -> fi -``` -{{< /note >}} - -### Mengonfigurasi Kubernetes - -Minikube memiliki sebuah fitur "pengonfigurasi" yang memperbolehkan pengguna untuk mengkonfigurasi komponen Kubernetes dengan sembarang nilai. -Untuk menggunakan fitur ini, kamu bisa menggunakan _flag_ `--extra-config` pada perintah `minikube start`. - -_Flag_ ini berulang, jadi kamu bisa menggunakannya beberapa kali dengan beberapa nilai yang berbeda untuk mengatur beberapa opsi. - -_Flag_ ini menerima sebuah _string_ dalam format `component.key=value`, di mana `component` adalah salah satu _string_ dari list di bawah, `key` adalah nilai dari _struct_ configurasi dan `value` adalah nilai yang digunakan. - -Kunci yang valid bisa ditemukan dengan memeriksa dokumentasi `componentconfigs` Kubernetes untuk setiap komponen. -Berikut adalah dokumentasi untuk setiap konfigurasi yang didukung: - -* [kubelet](https://godoc.org/k8s.io/kubernetes/pkg/kubelet/apis/config#KubeletConfiguration) -* [apiserver](https://godoc.org/k8s.io/kubernetes/cmd/kube-apiserver/app/options#ServerRunOptions) -* [proxy](https://godoc.org/k8s.io/kubernetes/pkg/proxy/apis/config#KubeProxyConfiguration) -* [controller-manager](https://godoc.org/k8s.io/kubernetes/pkg/controller/apis/config#KubeControllerManagerConfiguration) -* [etcd](https://godoc.org/github.com/coreos/etcd/etcdserver#ServerConfig) -* [scheduler](https://godoc.org/k8s.io/kubernetes/pkg/scheduler/apis/config#KubeSchedulerConfiguration) - -#### Contoh - -Untuk mengubah pengaturan `MaxPods` menjadi 5 pada Kubelet, gunakan _flag_ ini: `--extra-config=kubelet.MaxPods=5`. - -Fitur ini juga mendukung _struct_ yang berulang. Untuk mengubah pengaturan `LeaderElection.LeaderElect` menjadi `true` pada penjadwal, gunakan _flag_: `--extra-config=scheduler.LeaderElection.LeaderElect=true`. - -Untuk mengatur `AuthorizationMode` pada `apiserver` menjadi `RBAC`, kamu bisa menggunakan: `--extra-config=apiserver.authorization-mode=RBAC`. - -### Menghentikan klaster -Perintah `minikube stop` bisa digunakan untuk menghentikan klaster kamu. -Perintah ini menghentikan mesin virtual Minikube, tapi mempertahankan semua status dan data klaster. -Memulai klaster lagi akan mengembalikannya ke keadaan sebelumnya. - -### Menghapus klaster -Perintah `minikube delete` bisa digunakan untuk menghapus klaster kamu. -Perintah ini menghentikan dan menghapus mesin virtual Minikube. Tidak ada data atau _state_ yang dipertahankan. - -### Memperbaharui Minikube -Jika kamu menggunakan MacOS dan [Brew Package Manager](https://brew.sh/) sudah terpasang, jalankan: - -```shell -brew update -brew upgrade minikube -``` - -## Interaksi dengan Klaster Kamu - -### Kubectl - -Perintah `minikube start` membuat sebuah [konteks kubectl](/id/docs/reference/generated/kubectl/kubectl-commands#-em-set-context-em-) yang disebut "minikube". -Konteks ini menyimpan pengaturan untuk berkomunikasi dengan klaster Minikube kamu. 
- -Minikube menetapkan konteks ini sebagai bawaan secara otomatis, tetapi jika kamu ingin mengubah kembali ke konteks tersebut di kemudian hari, gunakan: - -`kubectl config use-context minikube` - -Atau berikan konteks untuk setiap perintah seperti ini: - -`kubectl get pods --context=minikube` - -### Dashboard - -Untuk mengakses [Kubernetes Dashboard](/docs/tasks/access-application-cluster/web-ui-dashboard/), gunakan perintah ini pada terminal setelah memulai Minikube untuk mendapatkan alamatnya: - -```shell -minikube dashboard -``` - -### Service - -Untuk mengakses Service yang dibuka via NodePort, jalankan perintah ini pada terminal setelah memulai Minikube untuk mendapatkan alamat: - -```shell -minikube service [-n NAMESPACE] [--url] NAME -``` - -## Jaringan - -Mesin virtual Minikube dibuka ke sistem hos melalui alamat IP _host-only_ , yang bisa didapatkan dengan perintah `minikube ip`. -Seluruh Service dengan jenis `NodePort` bisa diakses melalui alamat IP pada NodePort. - -Untuk mementukan NodePort pada Service kamu, kamu bisa menggunakan perintah `kubectl` sebagai berikut: - -`kubectl get service $SERVICE --output='jsonpath="{.spec.ports[0].nodePort}"'` - -## PersistentVolume - -Minikube mendukung [PersistentVolume](/id/docs/concepts/storage/persistent-volumes/) dengan jenis `hostPath`. -PersistenVolume ini dipetakan ke direktori di dalam mesin virtual Minikube. - -Mesin virtual Minikube melakukan _booting_ ke tmpfs, sehingga sebagian besar direktori tidak akan bertahan setelah di _reboot_ (`minikube stop`). - -Namun, Minikube diatur untuk mempertahankan berkas yang tersimpan didalam direktori hos berikut: - -* `/data` -* `/var/lib/minikube` -* `/var/lib/docker` - -Ini adalah contoh pengaturan PersistentVolume untuk mempertahankan data di dalam direktori `/data`: - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - name: pv0001 -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 5Gi - hostPath: - path: /data/pv0001/ -``` - -## Folder hos yang di _mount_ -Beberapa _driver_ akan memasang folder _hos_ dalam VM sehingga kamu dapat dengan mudah berbagi berkas antara VM dan hos. Saat ini, hal tersebut tidak dapat dikonfigurasi dan berbeda untuk setiap _driver_ dan sistem operasi yang kamu gunakan. - -{{< note >}} -Berbagi folder hos belum diimplementasikan pada _driver_ KVM. -{{< /note >}} - -| Driver | OS | HostFolder | VM | -| --- | --- | --- | --- | -| VirtualBox | Linux | /home | /hosthome | -| VirtualBox | macOS | /Users | /Users | -| VirtualBox | Windows | C://Users | /c/Users | -| VMware Fusion | macOS | /Users | /mnt/hgfs/Users | -| Xhyve | macOS | /Users | /Users | - -## Registri Container Pribadi - -Untuk mengakses registri Container pribadi, ikuti langkah berikut pada [halaman ini](/id/docs/concepts/containers/images/). - -Kami merekomendasi penggunaan `ImagePullSecrets`, tetapi jika kamu ingin mengonfigurasi akses pada virtual mesin Minikube, kamu bisa menempatkan `.dockercfg` pada direktori `/home/docker` atau `config.json` dalam direktori `/home/docker/.docker`. - -## Tambahan (_Add-on_) - -Supaya Minikube memulai atau memulai kembali kustom tambahan dengan benar, -tempatkan tambahan yang ingin kamu jalankan di dalam direktori `~/.minikube/addons`. -Tambahan dalam folder akan dipindahkan ke virtual mesin Minikube dan dijalankan setiap kali Minikube -dimulai atau dimulai ulang. - -## Menggunakan Minikube dengan Proksi HTTP - -Minikube membuat sebuah mesin virtual yang memasukkan Kubernetes dan _daemon_ Docker. 
-Ketika Kubernetes berusaha untuk menjadwalkan Container dengan Docker, _daemon_ Docker mungkin membutuhkan
-akses jaringan eksternal untuk menarik Container.
-
-Jika kamu berada di belakang _proxy_ HTTP, kamu mungkin perlu menyediakan Docker dengan pengaturan proksi.
-Untuk melakukan ini, berikan _environment variable_ yang dibutuhkan sebagai _flag_ pada saat `minikube start`.
-
-Contoh:
-
-```shell
-minikube start --docker-env http_proxy=http://$YOURPROXY:PORT \
-               --docker-env https_proxy=https://$YOURPROXY:PORT
-```
-
-Jika alamat mesin virtual kamu adalah 192.168.99.100, maka ada kemungkinan pengaturan proksi kamu akan mencegah `kubectl` untuk mencapainya.
-Untuk melewati konfigurasi _proxy_ untuk alamat IP ini, kamu harus memodifikasi pengaturan `no_proxy` kamu. Kamu bisa melakukannya dengan:
-
-```shell
-export no_proxy=$no_proxy,$(minikube ip)
-```
-
-## Masalah yang Diketahui
-
-Fitur yang memerlukan banyak Node tidak akan berfungsi dalam Minikube.
-
-## Desain
-
-Minikube menggunakan [libmachine](https://github.com/docker/machine/tree/master/libmachine) untuk menyediakan mesin virtual, dan [kubeadm](https://github.com/kubernetes/kubeadm) untuk menyediakan klaster Kubernetes.
-
-Untuk info lebih lanjut tentang Minikube, lihat [proposal](https://git.k8s.io/community/contributors/design-proposals/cluster-lifecycle/local-cluster-ux.md).
-
-## Tautan Tambahan
-
-* **Tujuan dan Non-Tujuan**: Untuk tujuan dan non-tujuan dari proyek Minikube, lihat [roadmap](https://minikube.sigs.k8s.io/docs/contrib/roadmap/).
-* **Petunjuk Pengembangan**: Lihat [Berkontribusi](https://minikube.sigs.k8s.io/docs/contrib/) untuk ikhtisar bagaimana cara mengirimkan _pull request_.
-* **Membangun Minikube**: Untuk instruksi bagaimana membangun atau mengetes Minikube dari sumber kode, lihat [petunjuk membangun](https://minikube.sigs.k8s.io/docs/contrib/building/).
-* **Menambahkan Dependensi Baru**: Untuk instruksi bagaimana menambahkan dependensi baru ke Minikube, lihat [petunjuk penambahan dependensi](https://minikube.sigs.k8s.io/docs/contrib/drivers/).
-* **Menambahkan Addon Baru**: Untuk instruksi bagaimana menambahkan tambahan baru untuk Minikube, lihat [petunjuk menambahkan addon baru](https://minikube.sigs.k8s.io/docs/contrib/addons/).
-* **MicroK8s**: Pengguna Linux yang ingin menghindari penggunaan mesin virtual, bisa mempertimbangkan [MicroK8s](https://microk8s.io/) sebagai alternatif.
-
-## Komunitas
-
-Kontribusi, pertanyaan, dan komentar sangat diharapkan! Pengembang Minikube berkumpul dalam [Slack](https://kubernetes.slack.com) di _channel_ #minikube (dapatkan undangan [di sini](http://slack.kubernetes.io/)). Kami juga memiliki [milis kubernetes-dev Google Groups](https://groups.google.com/forum/#!forum/kubernetes-dev). Jika kamu memposting sesuatu, awali subjek kamu dengan "minikube: ".
diff --git a/content/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md
index 8a345296a394a..6bbf23b53e9fc 100644
--- a/content/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md
+++ b/content/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md
@@ -134,7 +134,7 @@ tidak didukung oleh kubeadm.
 
 ### Informasi lebih lanjut
 
-Untuk informasi lebih lanjut mengenai argumen-argumen `kubeadm init`, lihat [panduan referensi kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/).
+Untuk informasi lebih lanjut mengenai argumen-argumen `kubeadm init`, lihat [panduan referensi kubeadm](/docs/reference/setup-tools/kubeadm/). Untuk daftar pengaturan konfigurasi yang lengkap, lihat [dokumentasi berkas konfigurasi](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file). @@ -569,7 +569,7 @@ opsinya. * Pastikan klaster berjalan dengan benar menggunakan [Sonobuoy](https://github.com/heptio/sonobuoy) * Lihat [Memperbaharui klaster kubeadm](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) untuk detail mengenai pembaruan klaster menggunakan `kubeadm`. -* Pelajari penggunaan `kubeadm` lebih lanjut pada [dokumentasi referensi kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm) +* Pelajari penggunaan `kubeadm` lebih lanjut pada [dokumentasi referensi kubeadm](/docs/reference/setup-tools/kubeadm) * Pelajari lebih lanjut mengenai [konsep-konsep](/docs/concepts/) Kubernetes dan [`kubectl`](/docs/user-guide/kubectl-overview/). * Lihat halaman [Cluster Networking](/id/docs/concepts/cluster-administration/networking/) untuk daftar _add-on_ jaringan Pod yang lebih banyak. diff --git a/content/id/docs/sitemap.md b/content/id/docs/sitemap.md deleted file mode 100644 index 56b0ac30af930..0000000000000 --- a/content/id/docs/sitemap.md +++ /dev/null @@ -1,114 +0,0 @@ ---- ---- - - - -Pilih tag atau gunakan drop down untuk melakukan filter. Pilih header pada tabel untuk mengurutkan. - -

-[Widget sitemap interaktif yang dihapus bersama halaman ini: filter berdasarkan Konsep, Obyek, dan Perintah, serta tabel yang dapat diurutkan.]
    diff --git a/content/id/docs/tasks/administer-cluster/cluster-management.md b/content/id/docs/tasks/administer-cluster/cluster-management.md deleted file mode 100644 index 0473dde9f3d8b..0000000000000 --- a/content/id/docs/tasks/administer-cluster/cluster-management.md +++ /dev/null @@ -1,221 +0,0 @@ ---- -title: Manajemen Klaster -content_type: concept ---- - - - -Dokumen ini menjelaskan beberapa topik yang terkait dengan siklus hidup sebuah klaster: membuat klaster baru, -memperbarui Node _control plane_ dan Node pekerja dari klaster kamu, -melakukan pemeliharaan Node (misalnya pembaruan kernel), dan meningkatkan versi API Kubernetes dari -klaster yang berjalan. - - - - -## Membuat dan mengonfigurasi klaster - -Untuk menginstal Kubernetes dalam sekumpulan mesin, konsultasikan dengan salah satu [Panduan Memulai](/id/docs/setup) tergantung dengan lingkungan kamu. - -## Memperbarui klaster - -Status saat ini pembaruan klaster bergantung pada penyedia, dan beberapa rilis yang mungkin memerlukan perhatian khusus saat memperbaruinya. Direkomendasikan agar admin membaca [Catatan Rilis](https://git.k8s.io/kubernetes/CHANGELOG/README.md), serta catatan khusus pembaruan versi sebelum memperbarui klaster mereka. - -### Memperbarui klaster Azure Kubernetes Service (AKS) - -Azure Kubernetes Service memungkinkan pembaruan layanan mandiri yang mudah dari _control plane_ dan Node pada klaster kamu. Prosesnya adalah -saat ini dimulai oleh pengguna dan dijelaskan dalam [Azure AKS documentation](https://docs.microsoft.com/en-us/azure/aks/upgrade-cluster). - -### Memperbarui klaster Google Compute Engine - -Google Compute Engine Open Source (GCE-OSS) mendukung pembaruan _control plane_ dengan menghapus dan -membuat ulang _control plane_, sambil mempertahankan _Persistent Disk_ (PD) yang sama untuk memastikan bahwa data disimpan pada berkas -untuk setiap kali pembaruan. - -Pembaruan Node untuk GCE menggunakan [grup _instance_ yang di-_manage_](https://cloud.google.com/compute/docs/instance-groups/), dimana setiap Node -dihancurkan secara berurutan dan kemudian dibuat ulang dengan perangkat lunak baru. Semua Pod yang berjalan di Node tersebut harus -dikontrol oleh pengontrol replikasi (_Replication Controller_), atau dibuat ulang secara manual setelah peluncuran. - -Pembaruan versi pada klaster open source Google Compute Engine (GCE) yang dikontrol oleh skrip `cluster/gce/upgrade.sh`. - -Dapatkan penggunaan dengan menjalankan `cluster/gce/upgrade.sh -h`. - -Misalnya, untuk meningkatkan hanya _control plane_ kamu ke versi tertentu (v1.0.2): - -```shell -cluster/gce/upgrade.sh -M v1.0.2 -``` - -Sebagai alternatif, untuk meningkatkan seluruh klaster kamu ke rilis yang stabil terbaru gunakan: - -```shell -cluster/gce/upgrade.sh release/stable -``` - -### Memperbarui klaster Google Kubernetes Engine - -Google Kubernetes Engine secara otomatis memperbarui komponen _control plane_ (misalnya, `kube-apiserver`, ` kube-scheduler`) ke versi yang terbaru. Ini juga menangani pembaruan sistem operasi dan komponen lain yang dijalankan oleh _control plane_. - -Proses pembaruan Node dimulai oleh pengguna dan dijelaskan dalam [Dokumentasi Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/docs/clusters/upgrade). - -### Memperbarui klaster Amazon EKS - -Komponen _control plane_ klaster pada Amazon EKS dapat diperbarui dengan menggunakan eksctl, AWS Management Console, atau AWS CLI. 
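-
-Sebagai sketsa untuk jalur eksctl (dengan asumsi eksctl sudah terpasang; nama klaster `my-cluster` hanyalah contoh):
-
-```shell
-# Tanpa --approve, eksctl hanya menampilkan rencana pembaruannya (dry-run).
-eksctl upgrade cluster --name my-cluster
-eksctl upgrade cluster --name my-cluster --approve
-```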
Prosesnya dimulai oleh pengguna dan dijelaskan di [Dokumentasi Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html). - -### Memperbarui klaster Oracle Cloud Infrastructure Container Engine untuk Kubernetes (OKE) - -Oracle membuat dan mengelola sekumpulan Node _control plane_ pada _control plane_ Oracle atas nama kamu (dan infrastruktur Kubernetes terkait seperti Node etcd) untuk memastikan kamu memiliki Kubernetes _control plane_ yang terkelola dengan ketersedian tinggi. Kamu juga dapat memperbarui Node _control plane_ ini dengan mulus ke versi Kubernetes baru tanpa berhenti. Tindakan ini dijelaskan dalam [Dokumentasi OKE](https://docs.cloud.oracle.com/iaas/Content/ContEng/Tasks/contengupgradingk8smasternode.htm). - -### Memperbarui klaster pada platform yang lain - -Penyedia dan alat yang berbeda akan mengelola pembaruan secara berbeda. Kamu disarankan untuk membaca dokumentasi utama mereka terkait pembaruan. - -* [kops](https://github.com/kubernetes/kops) -* [kubespray](https://github.com/kubernetes-incubator/kubespray) -* [CoreOS Tectonic](https://coreos.com/tectonic/docs/latest/admin/upgrade.html) -* [Digital Rebar](https://provision.readthedocs.io/en/tip/doc/content-packages/krib.html) -* ... - -Untuk memperbarukan sebuah klaster pada platform yang tidak disebutkan dalam daftar di atas, periksa urutan pembaruan komponen pada -halaman [Versi Skewed](/docs/setup/release/version-skew-policy/#supported-component-upgrade-order). - -## Merubah ukuran klaster - -Jika klaster kamu kekurangan sumber daya, kamu dapat dengan mudah menambahkan lebih banyak mesin ke klaster tersebut jika klaster kamu -menjalankan [Mode Node Registrasi Sendiri](/docs/concepts/architecture/nodes/#self-registration-of-nodes). -Jika kamu menggunakan GCE atau Google Kubernetes Engine, itu dilakukan dengan mengubah ukuran grup _instance_ yang mengelola Node kamu. -Ini dapat dilakukan dengan mengubah jumlah _instance_ pada -`Compute > Compute Engine > Instance groups > your group > Edit group` -[Laman Google Cloud Console](https://console.developers.google.com) atau dengan baris perintah gcloud: - -```shell -gcloud compute instance-groups managed resize kubernetes-node-pool --size=42 --zone=$ZONE -``` - -Grup _instance_ akan menangani penempatan _image_ yang sesuai pada mesin baru dan memulainya, -sedangkan Kubelet akan mendaftarkan Node-nya ke server API agar tersedia untuk penjadwalan. -Jika kamu menurunkan skala grup _instance_, sistem akan secara acak memilih Node untuk dimatikan. - -Di lingkungan lain kamu mungkin perlu mengonfigurasi mesin sendiri dan memberi tahu Kubelet di mana server API mesin itu berjalan. - -### Merubah ukuran klaster Azure Kubernetes Service (AKS) - -Azure Kubernetes Service memungkinkan perubahan ukuran klaster yang dimulai oleh pengguna dari CLI atau -portal Azure dan dijelaskan dalam [Dokumentasi Azure AKS](https://docs.microsoft.com/en-us/azure/aks/scale-cluster). - - -### Penyekalaan otomatis klaster - -Jika kamu menggunakan GCE atau Google Kubernetes Engine, kamu dapat mengonfigurasi klaster kamu sehingga secara otomatis diskalakan berdasarkan -kebutuhan Pod. - -Seperti yang dideskripsikan dalam [Sumber daya komputasi](/id/docs/concepts/configuration/manage-resources-containers/), -pengguna dapat memesan berapa banyak CPU dan memori yang dialokasikan ke Pod. -Informasi ini digunakan oleh penjadwal Kubernetes untuk menemukan tempat menjalankan Pod. 
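-
-Sebagai gambaran (sketsa; nama dan nilai hanyalah contoh, bukan dari halaman ini), permintaan sumber daya dideklarasikan seperti ini:
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: resource-demo        # nama hipotetis
-spec:
-  containers:
-  - name: app
-    image: nginx
-    resources:
-      requests:
-        cpu: "250m"          # seperempat inti CPU
-        memory: "128Mi"
-```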
Jika -tidak ada Node yang memiliki kapasitas kosong yang cukup (atau tidak sesuai dengan persyaratan Pod yang lainnya) maka Pod -menunggu sampai beberapa Pod dihentikan atau Node baru ditambahkan. - -Penyekala otomatis klaster mencari Pod yang tidak dapat dijadwalkan dan memeriksa apakah perlu menambahkan Node baru, yang serupa -dengan Node yang lain dalam klaster untuk membantu. Jika ya, maka itu mengubah ukuran klaster agar dapat mengakomodasi Pod yang menunggu. - -Penyekala otomatis klaster juga menurunkan skala klaster jika mengetahui bahwa satu atau beberapa Node tidak diperlukan lagi untuk -periode waktu tambahan (selama 10 menit tetapi dapat berubah di masa mendatang). - -Penyekala otomatis klaster dikonfigurasikan untuk per grup _instance_ (GCE) atau kumpulan Node (Google Kubernetes Engine). - -Jika kamu menggunakan GCE, kamu dapat mengaktifkannya sambil membuat klaster dengan skrip kube-up.sh. -Untuk mengonfigurasi penyekala otomatis klaster, kamu harus menyetel tiga variabel lingkungan: - -* `KUBE_ENABLE_CLUSTER_AUTOSCALER` - mengaktifkan penyekala otomatis klaster kalau di setel menjadi _true_. -* `KUBE_AUTOSCALER_MIN_NODES` - minimal jumlah Node dalam klaster. -* `KUBE_AUTOSCALER_MAX_NODES` - maksimal jumlah Node dalam klaster. - -Contoh: - -```shell -KUBE_ENABLE_CLUSTER_AUTOSCALER=true KUBE_AUTOSCALER_MIN_NODES=3 KUBE_AUTOSCALER_MAX_NODES=10 NUM_NODES=5 ./cluster/kube-up.sh -``` - -Pada Google Kubernetes Engine, kamu mengonfigurasi penyekala otomatis klaster baik saat pembuatan atau pembaruan klaster atau saat membuat kumpulan Node tertentu -(yang ingin kamu skalakan secara otomatis) dengan meneruskan _flag_ `--enable-autoscaling`, `--min-nodes` dan `--max-nodes` -yang sesuai dengan perintah `gcloud`. - -Contoh: - -```shell -gcloud container clusters create mytestcluster --zone=us-central1-b --enable-autoscaling --min-nodes=3 --max-nodes=10 --num-nodes=5 -``` - -```shell -gcloud container clusters update mytestcluster --enable-autoscaling --min-nodes=1 --max-nodes=15 -``` - -**Penyekala otomatis klaster mengharapkan bahwa Node belum dimodifikasi secara manual (misalnya dengan menambahkan label melalui kubectl) karena properti tersebut tidak akan disebarkan ke Node baru dalam grup _instance_ yang sama.** - -Untuk detail selengkapnya tentang cara penyekala otomatis klaster memutuskan apakah, kapan dan bagaimana -melakukan penyekalaan sebuah klaster, silahkan lihat dokumentasi [FAQ](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md) -dari proyek penyekala otomatis klaster. - -## Memelihara dalam Node - -Jika kamu perlu memulai ulang Node (seperti untuk pembaruan kernel, pembaruan libc, pembaruan perangkat keras, dll.) dan waktu kegagalan (_downtime_) yang -singkat, lalu ketika Kubelet memulai ulang, maka ia akan mencoba untuk memulai ulang Pod yang dijadwalkan. Jika mulai ulang membutuhkan waktu yang lebih lama -(waktu bawaan adalah 5 menit, yang dikontrol oleh `--pod-eviction-timeout` pada _controller-manager_), -maka pengontrol Node akan menghentikan Pod yang terikat ke Node yang tidak tersedia. Jika ada yang sesuai dengan -kumpulan replika (atau pengontrol replikasi), maka salinan baru dari Pod akan dimulai pada Node yang berbeda. Jadi, dalam kasus di mana semua -Pod direplikasi, pembaruan dapat dilakukan tanpa koordinasi khusus, dengan asumsi bahwa tidak semua Node akan mati pada saat yang bersamaan. 
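-
-Sebelum memulai pemeliharaan, ada baiknya melihat apa saja yang sedang berjalan pada Node tersebut (sketsa; _field selector_ ini adalah fitur standar kubectl, dan `$NODENAME` seperti yang digunakan di bawah):
-
-```shell
-# Menampilkan semua Pod yang sedang dijadwalkan pada Node yang akan dipelihara.
-kubectl get pods --all-namespaces --field-selector spec.nodeName=$NODENAME -o wide
-```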
-
-Jika kamu ingin lebih mengontrol proses pembaruan, kamu dapat menggunakan alur kerja berikut ini:
-
-Gunakan `kubectl drain` untuk menghentikan perlahan-lahan semua Pod dalam Node ketika menandai Node sebagai _unschedulable_:
-
-```shell
-kubectl drain $NODENAME
-```
-
-Ini mencegah Pod baru mendarat pada Node saat kamu mencoba melepaskannya.
-
-Untuk Pod dengan sebuah kumpulan replika, Pod tersebut akan diganti dengan Pod baru yang akan dijadwalkan ke Node baru. Selain itu, jika Pod adalah bagian dari layanan, maka klien akan secara otomatis dialihkan ke Pod baru.
-
-Untuk Pod yang tidak memiliki replika, kamu perlu memunculkan salinan baru dari Pod tersebut, dan menganggapnya bukan bagian dari layanan, alihkan klien ke Pod tersebut.
-
-Lakukan pekerjaan pemeliharaan pada Node.
-
-Buat Node dapat dijadwalkan lagi:
-
-```shell
-kubectl uncordon $NODENAME
-```
-
-Jika kamu menghapus Node dari _instance_ VM dan membuat yang baru, maka sumber daya Node baru yang dapat dijadwalkan akan
-dibuat secara otomatis (jika kamu menggunakan penyedia cloud yang mendukung
-pencarian Node; saat ini hanya Google Compute Engine, tidak termasuk CoreOS di Google Compute Engine menggunakan kube-register).
-Lihatlah [Node](/docs/concepts/architecture/nodes/) untuk lebih detail.
-
-## Topik lebih lanjut
-
-### Mengaktifkan atau menonaktifkan versi API untuk klaster kamu
-
-Versi API spesifik dapat dinyalakan dan dimatikan dengan meneruskan _flag_ `--runtime-config=api/<versi>` ketika menjalankan server API. Sebagai contoh: untuk mematikan API v1, teruskan `--runtime-config=api/v1=false`.
-_runtime-config_ juga mendukung 2 kunci khusus: `api/all` dan `api/legacy` yang masing-masing untuk mengontrol semua dan API lama.
-Sebagai contoh, untuk mematikan semua versi API kecuali v1, teruskan `--runtime-config=api/all=false,api/v1=true`.
-Untuk tujuan _flag_ ini, API lama adalah API yang sudah tidak digunakan lagi secara eksplisit (misalnya, `v1beta3`).
-
-### Mengalihkan versi API penyimpanan dari klaster kamu
-
-Objek yang disimpan ke diska untuk representasi internal klaster dari sumber daya Kubernetes yang aktif dalam klaster ditulis menggunakan versi API tertentu.
-Saat API yang didukung berubah, objek ini mungkin perlu ditulis ulang dalam API yang lebih baru. Kegagalan melakukan ini pada akhirnya akan menghasilkan sumber daya yang tidak lagi dapat didekodekan atau digunakan
-oleh server API Kubernetes.
-
-### Mengalihkan berkas konfigurasi kamu ke versi API baru
-
-Kamu dapat menggunakan perintah `kubectl convert` untuk mengubah berkas konfigurasi di antara versi API berbeda.
-
-```shell
-kubectl convert -f pod.yaml --output-version v1
-```
-
-Untuk opsi yang lainnya, silakan merujuk pada penggunaan dari perintah [kubectl convert](/docs/reference/generated/kubectl/kubectl-commands#convert).
-
diff --git a/content/id/docs/tasks/administer-cluster/sysctl-cluster.md b/content/id/docs/tasks/administer-cluster/sysctl-cluster.md
index 9adbb50a9f9fb..7120f087fa44f 100644
--- a/content/id/docs/tasks/administer-cluster/sysctl-cluster.md
+++ b/content/id/docs/tasks/administer-cluster/sysctl-cluster.md
@@ -156,7 +156,7 @@ Sangat disarankan untuk menggunakan Kubernetes [fitur _taints and toleration_](/
 Pod dengan sysctl _unsafe_ akan gagal diluncurkan pada sembarang Node yang belum mengaktifkan kedua sysctl _unsafe_ secara eksplisit.
Seperti halnya sysctl _node-level_ sangat disarankan untuk menggunakan [fitur _taints and toleration_](/docs/reference/generated/kubectl/kubectl-commands/#taint) atau -[pencemaran dalam Node](/docs/concepts/scheduling-eviction/taint-and-toleration/) +[pencemaran dalam Node](/id/docs/concepts/scheduling-eviction/taint-and-toleration/) untuk Pod dalam Node yang tepat. ## PodSecurityPolicy diff --git a/content/id/docs/tasks/example-task-template.md b/content/id/docs/tasks/example-task-template.md deleted file mode 100644 index a1873501f3517..0000000000000 --- a/content/id/docs/tasks/example-task-template.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Contoh Template Tugas (Task) -content_type: task -toc_hide: true ---- - - - -{{< note >}} -Pastikan juga kamu [membuat isian di daftar isi](/docs/home/contribute/write-new-topic/#creating-an-entry-in-the-table-of-contents) untuk dokumen baru kamu. -{{< /note >}} - -Halaman ini menunjukkan bagaimana ... - - - -## {{% heading "prerequisites" %}} - - -* {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -* Lakukan ini. -* Lakukan ini juga. - - - - - -## Menjalankan ... - -1. Lakukan ini. -1. Selanjutnya lakukan ini. Bila mungkin silahkan baca [penjelasan terkait](...). - - - - - -## Memahami ... -**[Bagian opsional]** - -Berikut ini hal-hal yang menarik untuk diketahui tentang langkah-langkah yang baru saja kamu lakukan. - - - -## {{% heading "whatsnext" %}} - - -**[Bagian optional]** - -* Pelajari tentang [menulis topik baru](/docs/home/contribute/write-new-topic/). -* Lihat [menggunakan _template_ halaman - _template_ tugas](/docs/home/contribute/page-templates/#task_template) untuk mengetahui cara menggunakan _template_ ini. - - - - diff --git a/content/id/docs/tasks/run-application/run-stateless-application-deployment.md b/content/id/docs/tasks/run-application/run-stateless-application-deployment.md index a069188de66b8..74e76c827be57 100644 --- a/content/id/docs/tasks/run-application/run-stateless-application-deployment.md +++ b/content/id/docs/tasks/run-application/run-stateless-application-deployment.md @@ -51,7 +51,6 @@ YAML berikut mendeskripsikan sebuah Deployment yang menjalankan _image_ Docker n Keluaran dari perintah tersebut akan menyerupai: - user@computer:~/website$ kubectl describe deployment nginx-deployment Name: nginx-deployment Namespace: default CreationTimestamp: Tue, 30 Aug 2016 18:11:37 -0700 diff --git a/content/id/docs/tasks/tools/_index.md b/content/id/docs/tasks/tools/_index.md index 9bbd67d8fb1ce..8d9056c50fc41 100755 --- a/content/id/docs/tasks/tools/_index.md +++ b/content/id/docs/tasks/tools/_index.md @@ -1,5 +1,67 @@ --- title: "Menginstal Peralatan" +description: Peralatan untuk melakukan instalasi Kubernetes dalam komputer kamu. weight: 10 +no_list: true --- +## kubectl + + + +Perangkat baris perintah Kubernetes, [kubectl](/id/docs/reference/kubectl/kubectl/), +memungkinkan kamu untuk menjalankan perintah pada klaster Kubernetes. +Kamu dapat menggunakan kubectl untuk menerapkan aplikasi, memeriksa dan mengelola sumber daya klaster, +dan melihat *log* (catatan). Untuk informasi lebih lanjut termasuk daftar lengkap operasi kubectl, lihat +[referensi dokumentasi `kubectl`](/id/docs/reference/kubectl/). + +kubectl dapat diinstal pada berbagai platform Linux, macOS dan Windows. +Pilihlah sistem operasi pilihan kamu di bawah ini. 
+ +- [Instalasi kubectl pada Linux](/en/docs/tasks/tools/install-kubectl-linux) +- [Instalasi kubectl pada macOS](/en/docs/tasks/tools/install-kubectl-macos) +- [Instalasi kubectl pada Windows](/en/docs/tasks/tools/install-kubectl-windows) + +## kind + +[`kind`](https://kind.sigs.k8s.io/docs/) memberikan kamu kemampuan untuk +menjalankan Kubernetes pada komputer lokal kamu. Perangkat ini membutuhkan +[Docker](https://docs.docker.com/get-docker/) yang sudah diinstal dan +terkonfigurasi. + +Halaman [Memulai Cepat](https://kind.sigs.k8s.io/docs/user/quick-start/) `kind` +memperlihatkan kepada kamu tentang apa yang perlu kamu lakukan untuk `kind` +berjalan dan bekerja. + +Melihat Memulai Cepat Kind + +## minikube + +Seperti halnya dengan `kind`, [`minikube`](https://minikube.sigs.k8s.io/) +merupakan perangkat yang memungkinkan kamu untuk menjalankan Kubernetes +secara lokal. `minikube` menjalankan sebuah klaster Kubernetes dengan +satu node saja dalam komputer pribadi (termasuk Windows, macOS dan Linux) +sehingga kamu dapat mencoba Kubernetes atau untuk pekerjaan pengembangan +sehari-hari. + +Kamu bisa mengikuti petunjuk resmi +[Memulai!](https://minikube.sigs.k8s.io/docs/start/) +`minikube` jika kamu ingin fokus agar perangkat ini terinstal. + +Lihat Panduan Memulai! Minikube + +Setelah kamu memiliki `minikube` yang bekerja, kamu bisa menggunakannya +untuk [menjalankan aplikasi contoh](/id/docs/tutorials/hello-minikube/). + +## kubeadm + +Kamu dapat menggunakan {{< glossary_tooltip term_id="kubeadm" text="kubeadm" >}} +untuk membuat dan mengatur klaster Kubernetes. +`kubeadm` menjalankan langkah-langkah yang diperlukan untuk mendapatkan klaster +dengan kelaikan dan keamanan minimum, aktif dan berjalan dengan cara yang mudah +bagi pengguna. + +[Instalasi kubeadm](/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) memperlihatkan tentang bagaimana melakukan instalasi kubeadm. +Setelah terinstal, kamu dapat menggunakannya untuk [membuat klaster](/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/). + +Lihat panduan instalasi kubeadm diff --git a/content/id/docs/tasks/tools/install-minikube.md b/content/id/docs/tasks/tools/install-minikube.md deleted file mode 100644 index b674c52b6d0f3..0000000000000 --- a/content/id/docs/tasks/tools/install-minikube.md +++ /dev/null @@ -1,254 +0,0 @@ ---- -title: Menginstal Minikube -content_type: task -weight: 20 -card: - name: tasks - weight: 10 ---- - - - -Halaman ini menunjukkan cara instalasi [Minikube](/id/docs/tutorials/hello-minikube), sebuah alat untuk menjalankan sebuah klaster Kubernetes dengan satu Node pada mesin virtual yang ada di komputer kamu. - - - -## {{% heading "prerequisites" %}} - - -{{< tabs name="minikube_before_you_begin" >}} -{{% tab name="Linux" %}} -Untuk mengecek jika virtualisasi didukung pada Linux, jalankan perintah berikut dan pastikan keluarannya tidak kosong: -``` -grep -E --color 'vmx|svm' /proc/cpuinfo -``` -{{% /tab %}} - -{{% tab name="macOS" %}} -Untuk mengecek jika virtualisasi didukung di macOS, jalankan perintah berikut di terminal kamu. -``` -sysctl -a | grep -E --color 'machdep.cpu.features|VMX' -``` -Jika kamu melihat `VMX` pada hasil keluaran (seharusnya berwarna), artinya fitur VT-x sudah diaktifkan di mesin kamu. -{{% /tab %}} - -{{% tab name="Windows" %}} -Untuk mengecek jika virtualisasi didukung di Windows 8 ke atas, jalankan perintah berikut di terminal Windows atau _command prompt_ kamu. 
- -``` -systeminfo -``` -Jika kamu melihat keluaran berikut, maka virtualisasi didukung di Windows kamu. -``` -Hyper-V Requirements: VM Monitor Mode Extensions: Yes - Virtualization Enabled In Firmware: Yes - Second Level Address Translation: Yes - Data Execution Prevention Available: Yes -``` -Jika kamu melihat keluaran berikut, sistem kamu sudah memiliki sebuah Hypervisor yang terinstal dan kamu bisa melewati langkah berikutnya. -``` -Hyper-V Requirements: A hypervisor has been detected. Features required for Hyper-V will not be displayed. -``` - - -{{% /tab %}} -{{< /tabs >}} - - - - - -## Menginstal minikube - -{{< tabs name="tab_with_md" >}} -{{% tab name="Linux" %}} - -### Instalasi kubectl - -Pastikan kamu sudah menginstal kubectl. Kamu bisa menginstal kubectl dengan mengikuti instruksi pada halaman [Menginstal dan Menyiapkan kubectl](/id/docs/tasks/tools/install-kubectl/#menginstal-kubectl-pada-linux). - -### Menginstal sebuah Hypervisor - -Jika kamu belum menginstal sebuah Hypervisor, silakan instal salah satu dari: - -• [KVM](https://www.linux-kvm.org/), yang juga menggunakan QEMU - -• [VirtualBox](https://www.virtualbox.org/wiki/Downloads) - -Minikube juga mendukung sebuah opsi `--driver=none` untuk menjalankan komponen-komponen Kubernetes pada _host_, bukan di dalam VM. Untuk menggunakan _driver_ ini maka diperlukan [Docker](https://www.docker.com/products/docker-desktop) dan sebuah lingkungan Linux, bukan sebuah hypervisor. - -Jika kamu menggunakan _driver_ `none` pada Debian atau turunannya, gunakan paket (_package_) `.deb` untuk Docker daripada menggunakan paket _snap_-nya, karena paket _snap_ tidak berfungsi dengan Minikube. -Kamu bisa mengunduh paket `.deb` dari [Docker](https://www.docker.com/products/docker-desktop). - -{{< caution >}} -*Driver* VM `none` dapat menyebabkan masalah pada keamanan dan kehilangan data. Sebelum menggunakan opsi `--driver=none`, periksa [dokumentasi ini](https://minikube.sigs.k8s.io/docs/reference/drivers/none/) untuk informasi lebih lanjut. -{{< /caution >}} - -Minikube juga mendukung opsi `vm-driver=podman` yang mirip dengan _driver_ Docker. Podman yang berjalan dengan hak istimewa _superuser_ (pengguna _root_) adalah cara terbaik untuk memastikan kontainer-kontainer kamu memiliki akses penuh ke semua fitur yang ada pada sistem kamu. - -{{< caution >}} -_Driver_ `podman` memerlukan kontainer yang berjalan dengan akses _root_ karena akun pengguna biasa tidak memiliki akses penuh ke semua fitur sistem operasi yang mungkin diperlukan oleh kontainer. -{{< /caution >}} - -### Menginstal Minikube menggunakan sebuah paket - -Tersedia paket uji coba untuk Minikube, kamu bisa menemukan paket untuk Linux (AMD64) di laman [rilisnya](https://github.com/kubernetes/minikube/releases) Minikube di GitHub. - -Gunakan alat instalasi paket pada distribusi Linux kamu untuk menginstal paket yang sesuai. - -### Menginstal Minikube melalui pengunduhan langsung - -Jika kamu tidak menginstal melalui sebuah paket, kamu bisa mengunduh sebuah _stand-alone binary_ dan menggunakannya. - - -```shell -curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 \ - && chmod +x minikube -``` - -Berikut adalah cara mudah untuk menambahkan program Minikube ke _path_ kamu. 
- -```shell -sudo mkdir -p /usr/local/bin/ -sudo install minikube /usr/local/bin/ -``` - -### Menginstal Minikube menggunakan Homebrew - -Sebagai alternatif, kamu bisa menginstal Minikube menggunakan Linux [Homebrew](https://docs.brew.sh/Homebrew-on-Linux): - -```shell -brew install minikube -``` - -{{% /tab %}} -{{% tab name="macOS" %}} -### Instalasi kubectl - -Pastikan kamu sudah menginstal kubectl. Kamu bisa menginstal kubectl dengan mengikuti instruksi pada halaman [Menginstal dan Menyiapkan kubectl](/id/docs/tasks/tools/install-kubectl/#menginstal-kubectl-pada-macos). - -### Instalasi sebuah Hypervisor - -Jika kamu belum menginstal sebuah Hypervisor, silakan instal salah satu dari: - -• [HyperKit](https://github.com/moby/hyperkit) - -• [VirtualBox](https://www.virtualbox.org/wiki/Downloads) - -• [VMware Fusion](https://www.vmware.com/products/fusion) - -### Instalasi Minikube -Cara paling mudah untuk menginstal Minikube pada macOS adalah menggunakan [Homebrew](https://brew.sh): - -```shell -brew install minikube -``` - -Kamu juga bisa menginstalnya dengan mengunduh _stand-alone binary_-nya: - -```shell -curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-amd64 \ - && chmod +x minikube -``` - -Berikut adalah cara mudah untuk menambahkan program Minikube ke _path_ kamu. - -```shell -sudo mv minikube /usr/local/bin -``` - -{{% /tab %}} -{{% tab name="Windows" %}} -### Instalasi kubectl - -Pastikan kamu sudah menginstal kubectl. Kamu bisa menginstal kubectl dengan mengikuti instruksi pada halaman [Menginstal dan Menyiapkan kubectl](/id/docs/tasks/tools/install-kubectl/#menginstal-kubectl-pada-windows). - -### Menginstal sebuah Hypervisor - -Jika kamu belum menginstal sebuah Hypervisor, silakan instal salah satu dari: - -• [Hyper-V](https://msdn.microsoft.com/en-us/virtualization/hyperv_on_windows/quick_start/walkthrough_install) - -• [VirtualBox](https://www.virtualbox.org/wiki/Downloads) - -{{< note >}} -Hyper-V hanya dapat berjalan pada tiga versi dari Windows 10: Windows 10 Enterprise, Windows 10 Professional, dan Windows 10 Education. -{{< /note >}} - -### Menginstal Minikube menggunakan Chocolatey - -Cara paling mudah untuk menginstal Minikube pada Windows adalah menggunakan [Chocolatey](https://chocolatey.org/) (jalankan sebagai administrator): - -```shell -choco install minikube -``` - -Setelah Minikube telah selesai diinstal, tutup sesi CLI dan hidupkan ulang CLI-nya. Minikube akan ditambahkan ke _path_ kamu secara otomatis. - -### Menginstal Minikube menggunakan sebuah program penginstal - -Untuk menginstal Minikube secara manual pada Windows menggunakan [Windows Installer](https://docs.microsoft.com/en-us/windows/desktop/msi/windows-installer-portal), unduh [`minikube-installer.exe`](https://github.com/kubernetes/minikube/releases/latest/download/minikube-installer.exe) dan jalankan program penginstal tersebut. - -### Menginstal Minikube melalui pengunduhan langsung - -Untuk menginstal Minikube secara manual pada Windows, unduh [`minikube-windows-amd64`](https://github.com/kubernetes/minikube/releases/latest), ubah nama menjadi `minikube.exe`, dan tambahkan ke _path_ kamu. 
- -{{% /tab %}} -{{< /tabs >}} - - -## Memastikan instalasi - -Untuk memastikan keberhasilan kedua instalasi hypervisor dan Minikube, kamu bisa menjalankan perintah berikut untuk memulai sebuah klaster Kubernetes lokal: -{{< note >}} - -Untuk pengaturan `--driver` dengan `minikube start`, masukkan nama hypervisor `` yang kamu instal dengan huruf kecil seperti yang ditunjukan dibawah. Daftar lengkap nilai `--driver` tersedia di [dokumentasi menentukan *driver* VM](/docs/setup/learning-environment/minikube/#specifying-the-vm-driver). - -{{< /note >}} - -```shell -minikube start --driver= -``` - -Setelah `minikube start` selesai, jalankan perintah di bawah untuk mengecek status klaster: - -```shell -minikube status -``` - -Jika klasternya berjalan, keluaran dari `minikube status` akan mirip seperti ini: - -``` -host: Running -kubelet: Running -apiserver: Running -kubeconfig: Configured -``` - -Setelah kamu memastikan bahwa Minikube berjalan sesuai dengan hypervisor yang telah kamu pilih, kamu dapat melanjutkan untuk menggunakan Minikube atau menghentikan klaster kamu. Untuk menghentikan klaster, jalankan: - -```shell -minikube stop -``` - -## Membersihkan *state* lokal {#cleanup-local-state} - -Jika sebelumnya kamu pernah menginstal Minikube, dan menjalankan: -```shell -minikube start -``` - -dan `minikube start` memberikan pesan kesalahan: -``` -machine does not exist -``` - -maka kamu perlu membersihkan _state_ lokal Minikube: -```shell -minikube delete -``` - -## {{% heading "whatsnext" %}} - - -* [Menjalanakan Kubernetes secara lokal dengan Minikube](/docs/setup/learning-environment/minikube/) diff --git a/content/id/docs/tasks/tools/kubeadm/_index.md b/content/id/docs/tasks/tools/kubeadm/_index.md deleted file mode 100644 index e342c2da513aa..0000000000000 --- a/content/id/docs/tasks/tools/kubeadm/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Membangun klaster menggunakan kubeadm" -weight: 10 ---- diff --git a/content/id/docs/templates/feature-state-alpha.txt b/content/id/docs/templates/feature-state-alpha.txt deleted file mode 100644 index 35689778fa273..0000000000000 --- a/content/id/docs/templates/feature-state-alpha.txt +++ /dev/null @@ -1,7 +0,0 @@ -Fitur ini berada di dalam tingkatan *Alpha*, yang artinya: - -* Nama dari versi ini mengandung string `alpha` (misalnya, `v1alpha1`). -* Bisa jadi terdapat *bug*. Secara *default* fitur ini tidak diekspos. -* Ketersediaan untuk fitur yang ada bisa saja dihilangkan pada suatu waktu tanpa pemberitahuan sebelumnya. -* API yang ada mungkin saja berubah tanpa memperhatikan kompatibilitas dengan versi perangkat lunak sebelumnya. -* Hanya direkomendasikan untuk klaster yang digunakan untuk tujuan *testing*. diff --git a/content/id/docs/templates/feature-state-beta.txt b/content/id/docs/templates/feature-state-beta.txt deleted file mode 100644 index a70034e0560f6..0000000000000 --- a/content/id/docs/templates/feature-state-beta.txt +++ /dev/null @@ -1,10 +0,0 @@ -Fitur ini berada dalam tingkatan beta, yang artinya: - -* Nama dari versi ini mengandung string `beta` (misalnya `v2beta3`). -* Kode yang ada sudah melalui mekanisme *testing* yang cukup baik. Menggunakan fitur ini dianggap cukup aman. Fitur ini diekspos secara *default*. -* Ketersediaan untuk fitur secara menyeluruh tidak akan dihapus, meskipun begitu detail untuk suatu fitur bisa saja berubah. -* Skema dan/atau semantik dari suatu obyek mungkin saja berubah tanpa memerhatikan kompatibilitas pada rilis *beta* selanjutnya. 
- Jika hal ini terjadi, kami akan menyediakan suatu instruksi untuk melakukan migrasi di versi rilis selanjutnya. Hal ini bisa saja terdiri dari penghapusan, pengubahan, ataupun pembuatan - obyek API. Proses pengubahan mungkin saja membutuhkan pemikiran yang matang. Dampak proses ini bisa saja menyebabkan *downtime* aplikasi yang bergantung pada fitur ini. -* **Kami mohon untuk mencoba versi *beta* yang kami sediakan dan berikan masukan terhadap fitur yang kamu pakai! Apabila fitur tersebut sudah tidak lagi berada di dalam tingkatan *beta* perubahan yang kami buat terhadap fitur tersebut bisa jadi tidak lagi dapat digunakan** - diff --git a/content/id/docs/templates/feature-state-deprecated.txt b/content/id/docs/templates/feature-state-deprecated.txt deleted file mode 100644 index 599fe098cde2c..0000000000000 --- a/content/id/docs/templates/feature-state-deprecated.txt +++ /dev/null @@ -1,2 +0,0 @@ - -Fitur ini *deprecated*. Untuk informasi lebih lanjut mengenai tingkatan ini, silahkan merujuk pada [Kubernetes Deprecation Policy](/docs/reference/deprecation-policy/) diff --git a/content/id/docs/templates/feaure-state-stable.txt b/content/id/docs/templates/feaure-state-stable.txt deleted file mode 100644 index ee4e17373fd6f..0000000000000 --- a/content/id/docs/templates/feaure-state-stable.txt +++ /dev/null @@ -1,4 +0,0 @@ -Fitur ini berada di dalam tingkatan stabil, yang artinya: - -* Versi ini mengandung string `vX` dimana `X` merupakan bilangan bulat. -* Fitur yang ada pada tingkatan ini akan selalu muncul di rilis berikutnya. diff --git a/content/id/docs/tutorials/hello-minikube.md b/content/id/docs/tutorials/hello-minikube.md index faba283d89bdf..398c5a3a3f8d9 100644 --- a/content/id/docs/tutorials/hello-minikube.md +++ b/content/id/docs/tutorials/hello-minikube.md @@ -15,11 +15,11 @@ card: -Tutorial ini menunjukkan bagaimana caranya menjalankan aplikasi sederhana Node.js Halo Dunia di Kubernetes, dengan [Minikube](/docs/getting-started-guides/minikube) dan Katacoda. +Tutorial ini menunjukkan bagaimana caranya menjalankan aplikasi sederhana Node.js Halo Dunia di Kubernetes, dengan [`minikube`](/docs/getting-started-guides/minikube) dan Katacoda. Katacoda menyediakan environment Kubernetes secara gratis di dalam browser. {{< note >}} -Kamupun bisa mengikuti tutorial ini kalau sudah instalasi [Minikube di lokal](/id/docs/tasks/tools/install-minikube/) kamu. +Kamupun bisa mengikuti tutorial ini kalau sudah instalasi minikube di lokal. Silakan lihat [memulai `minikube`](https://minikube.sigs.k8s.io/docs/start/) untuk instruksi instalasi. {{< /note >}} @@ -27,7 +27,7 @@ Kamupun bisa mengikuti tutorial ini kalau sudah instalasi [Minikube di lokal](/i ## {{% heading "objectives" %}} -* Deploy aplikasi halo dunia pada Minikube. +* Deploy aplikasi halo dunia pada minikube. * Jalankan aplikasinya. * Melihat log aplikasi. @@ -54,7 +54,7 @@ Untuk info lebih lanjut tentang perintah `docker build`, baca [dokumentasi Docke {{< kat-button >}} - {{< note >}}Kalau kamu memilih instalasi Minikube secara lokal, jalankan `minikube start`.{{< /note >}} + {{< note >}}Kalau kamu memilih instalasi minikube secara lokal, jalankan `minikube start`.{{< /note >}} 2. Buka dasbor Kubernetes di dalam browser: @@ -147,7 +147,7 @@ Supaya Kontainer `hello-node` bisa diakses dari luar jaringan virtual Kubernetes ``` Untuk penyedia cloud yang memiliki load balancer, sebuah alamat IP eksternal akan disediakan untuk mengakses Servis tersebut. 
- Pada Minikube, tipe `LoadBalancer` membuat Servis tersebut dapat diakses melalui perintah `minikube service`. + Pada minikube, tipe `LoadBalancer` membuat Servis tersebut dapat diakses melalui perintah `minikube service`. 3. Jalankan perintah berikut: @@ -163,7 +163,7 @@ Supaya Kontainer `hello-node` bisa diakses dari luar jaringan virtual Kubernetes ## Aktifkan addons -Minikube punya beberapa addons yang bisa diaktifkan, dinon-aktifkan, maupun dibuka di dalam environment Kubernetes lokal. +Perangkat minikube meliputi sekumpulan {{< glossary_tooltip text="addons" term_id="addons" >}} bawaan yang bisa diaktifkan, dinonaktifkan, maupun dibuka di dalam environment Kubernetes lokal. 1. Daftar addons yang ada saat ini: @@ -249,13 +249,13 @@ kubectl delete service hello-node kubectl delete deployment hello-node ``` -Kamu juga boleh mematikan mesin virtual (VM) untuk Minikube: +Kamu juga boleh mematikan mesin virtual atau _virtual machine_ (VM) untuk minikube: ```shell minikube stop ``` -Kamu juga boleh menghapus Minikube VM: +Kamu juga boleh menghapus minikube VM: ```shell minikube delete diff --git a/content/id/examples/application/job/cronjob.yaml b/content/id/examples/application/job/cronjob.yaml index c9d38930278c3..34ab2a3f06007 100644 --- a/content/id/examples/application/job/cronjob.yaml +++ b/content/id/examples/application/job/cronjob.yaml @@ -1,4 +1,4 @@ -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: hello @@ -11,7 +11,7 @@ spec: containers: - name: hello image: busybox - args: + command: - /bin/sh - -c - date; echo Hello from the Kubernetes cluster diff --git a/content/id/includes/task-tutorial-prereqs.md b/content/id/includes/task-tutorial-prereqs.md index 3c8abb5b091ae..5afea1cb95068 100644 --- a/content/id/includes/task-tutorial-prereqs.md +++ b/content/id/includes/task-tutorial-prereqs.md @@ -1,8 +1,8 @@ -Kamu harus memiliki klaster Kubernetes, dan perangkat baris perintah `kubectl` -juga harus dikonfigurasikan untuk berkomunikasi dengan klaster kamu. Jika kamu -belum punya klaster, kamu dapat membuatnya dengan menggunakan -[Minikube](/docs/setup/learning-environment/minikube/), -atau kamu dapat menggunakan salah satu tempat bermain Kubernetes ini: +Kamu harus memiliki klaster Kubernetes, dan perangkat baris perintah kubectl +juga harus dikonfigurasikan untuk berkomunikasi dengan klastermu. Jika kamu +belum memiliki klaster, kamu dapat membuatnya dengan menggunakan +[minikube](/id/docs/tasks/tools/#minikube), +atau kamu juga dapat menggunakan salah satu dari tempat mencoba Kubernetes berikut ini: * [Katacoda](https://www.katacoda.com/courses/kubernetes/playground) * [Bermain dengan Kubernetes](http://labs.play-with-k8s.com/) diff --git a/content/id/docs/search.md b/content/id/search.md similarity index 100% rename from content/id/docs/search.md rename to content/id/search.md diff --git a/content/it/docs/concepts/containers/container-lifecycle-hooks.md b/content/it/docs/concepts/containers/container-lifecycle-hooks.md new file mode 100644 index 0000000000000..59140ec333be7 --- /dev/null +++ b/content/it/docs/concepts/containers/container-lifecycle-hooks.md @@ -0,0 +1,128 @@ +--- +title: Container Lifecycle Hooks +content_type: concept +weight: 30 +--- + + +Questa pagina descrive come i Container gestiti con kubelet possono utilizzare il lifecycle +hook framework dei Container per l'esecuzione di codice eseguito in corrispondenza di alcuni +eventi durante il loro ciclo di vita. 
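+
+Un esempio minimo di come questi hook appaiono in una specifica di Pod (l'immagine nginx e i comandi degli handler sono puramente illustrativi, ripresi dagli esempi dei task di Kubernetes):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: lifecycle-demo            # nome ipotetico
+spec:
+  containers:
+  - name: lifecycle-demo-container
+    image: nginx
+    lifecycle:
+      postStart:
+        exec:
+          command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
+      preStop:
+        exec:
+          command: ["/bin/sh", "-c", "nginx -s quit; while killall -0 nginx; do sleep 1; done"]
+```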
+ + + +## Overview + +Analogamente a molti framework di linguaggi di programmazione che hanno degli hooks legati al ciclo di +vita dei componenti, come ad esempio Angular, Kubernetes fornisce ai Container degli hook legati al loro ciclo di +vita dei Container. +Gli hook consentono ai Container di essere consapevoli degli eventi durante il loro ciclo di +gestione ed eseguire del codice implementato in un handler quando il corrispondente hook viene +eseguito. + +## Container hooks + +Esistono due tipi di hook che vengono esposti ai Container: + +`PostStart` + +Questo hook viene eseguito successivamente alla creazione del container. +Tuttavia, non vi è garanzia che questo hook venga eseguito prima dell'ENTRYPOINT del container. +Non vengono passati parametri all'handler. + +`PreStop` + +Questo hook viene eseguito prima della terminazione di un container a causa di una richiesta API o +di un evento di gestione, come ad esempio un fallimento delle sonde di liveness/startup, preemption, +risorse contese e altro. Una chiamata all'hook di `PreStop` fallisce se il container è in stato +terminated o completed e l'hook deve finire prima che possa essere inviato il segnale di TERM per +fermare il container. Il conto alla rovescia per la terminazione del Pod (grace period) inizia prima dell'esecuzione +dell'hook `PreStop`, quindi indipendentemente dall'esito dell'handler, il container terminerà entro +il grace period impostato. Non vengono passati parametri all'handler. + +Una descrizione più dettagliata riguardante al processo di terminazione dei Pod può essere trovata in +[Terminazione dei Pod](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination). + +### Implementazione degli hook handler + +I Container possono accedere a un hook implementando e registrando un handler per tale hook. +Ci sono due tipi di handler che possono essere implementati per i Container: + +* Exec - Esegue un comando specifico, tipo `pre-stop.sh`, all'interno dei cgroup e namespace del Container. +Le risorse consumate dal comando vengono contate sul Container. +* HTTP - Esegue una richiesta HTTP verso un endpoint specifico del Container. + +### Esecuzione dell'hook handler + +Quando viene richiamato l'hook legato al lifecycle del Container, il sistema di gestione di Kubernetes +esegue l'handler secondo l'azione dell'hook, `httpGet` e `tcpSocket` vengono eseguiti dal processo kubelet, +mentre `exec` è eseguito nel Container. + +Le chiamate agli handler degli hook sono sincrone rispetto al contesto del Pod che contiene il Container. +Questo significa che per un hook `PostStart`, l'ENTRYPOINT e l'hook si attivano in modo asincrono. +Tuttavia, se l'hook impiega troppo tempo per essere eseguito o si blocca, il container non può raggiungere lo +stato di `running`. + +Gli hook di `PreStop` non vengono eseguiti in modo asincrono dall'evento di stop del container; l'hook +deve completare la sua esecuzione prima che l'evento TERM possa essere inviato. Se un hook di `PreStop` +si blocca durante la sua esecuzione, la fase del Pod rimarrà `Terminating` finchè il Pod non sarà rimosso forzatamente +dopo la scadenza del suo `terminationGracePeriodSeconds`. Questo grace period si applica al tempo totale +necessario per effettuare sia l'esecuzione dell'hook di `PreStop` che per l'arresto normale del container. 
+Se, per esempio, il `terminationGracePeriodSeconds` è di 60, e l'hook impiega 55 secondi per essere completato,
+e il container impiega 10 secondi per fermarsi normalmente dopo aver ricevuto il segnale, allora il container
+verrà terminato prima di poter completare il suo arresto, poiché `terminationGracePeriodSeconds` è inferiore al tempo
+totale (55+10) necessario perché queste due cose accadano.
+
+Se un hook `PostStart` o `PreStop` fallisce, allora il container viene terminato.
+
+Gli utenti dovrebbero mantenere i loro handler degli hook il più leggeri possibile.
+Ci sono casi, tuttavia, in cui i comandi di lunga durata hanno senso,
+come il salvataggio dello stato del container prima della sua fine.
+
+### Garanzia della chiamata dell'hook
+
+La chiamata degli hook avviene *almeno una volta*, il che significa
+che un hook può essere chiamato più volte da un dato evento, come per `PostStart`
+o `PreStop`.
+Sta all'implementazione dell'hook gestire correttamente questo aspetto.
+
+Generalmente, vengono effettuate singole chiamate agli hook.
+Se, per esempio, la destinazione di un hook HTTP non è momentaneamente in grado di ricevere traffico,
+non c'è alcun tentativo di reinvio.
+In alcuni rari casi, tuttavia, può verificarsi una doppia chiamata.
+Per esempio, se un kubelet si riavvia durante l'invio di un hook, questo potrebbe essere
+chiamato per una seconda volta dopo che il kubelet è tornato in funzione.
+
+### Debugging Hook handlers
+
+I log di un handler di hook non sono esposti negli eventi del Pod.
+Se un handler fallisce per qualche ragione, trasmette un evento.
+Per il `PostStart`, questo è l'evento di `FailedPostStartHook`,
+e per il `PreStop`, questo è l'evento di `FailedPreStopHook`.
+Puoi vedere questi eventi eseguendo `kubectl describe pod <nome-del-pod>`.
+Ecco alcuni esempi di output di eventi dall'esecuzione di questo comando: + +``` +Events: + FirstSeen LastSeen Count From SubObjectPath Type Reason Message + --------- -------- ----- ---- ------------- -------- ------ ------- + 1m 1m 1 {default-scheduler } Normal Scheduled Successfully assigned test-1730497541-cq1d2 to gke-test-cluster-default-pool-a07e5d30-siqd + 1m 1m 1 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Normal Pulling pulling image "test:1.0" + 1m 1m 1 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Normal Created Created container with docker id 5c6a256a2567; Security:[seccomp=unconfined] + 1m 1m 1 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Normal Pulled Successfully pulled image "test:1.0" + 1m 1m 1 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Normal Started Started container with docker id 5c6a256a2567 + 38s 38s 1 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Normal Killing Killing container with docker id 5c6a256a2567: PostStart handler: Error executing in Docker Container: 1 + 37s 37s 1 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Normal Killing Killing container with docker id 8df9fdfd7054: PostStart handler: Error executing in Docker Container: 1 + 38s 37s 2 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} Warning FailedSync Error syncing pod, skipping: failed to "StartContainer" for "main" with RunContainerError: "PostStart handler: Error executing in Docker Container: 1" + 1m 22s 2 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Warning FailedPostStartHook +``` + + + +## {{% heading "whatsnext" %}} + + +* Approfondisci [Container environment](/docs/concepts/containers/container-environment/). +* Esegui un tutorial su come + [definire degli handlers per i Container lifecycle events](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). + diff --git a/content/it/docs/concepts/containers/images.md b/content/it/docs/concepts/containers/images.md new file mode 100644 index 0000000000000..43fe439ee0b88 --- /dev/null +++ b/content/it/docs/concepts/containers/images.md @@ -0,0 +1,316 @@ +--- +title: Immagini +content_type: concept +weight: 10 +--- + + + +L'immagine di un container rappresenta dati binari che incapsulano un'applicazione e +tutte le sue dipendenze software. Le immagini sono costituite da pacchetti software +eseguibili che possono essere avviati in modalità standalone e su cui si possono fare +ipotesi ben precise circa l'ambiente in cui vengono eseguiti. + +Tipicamente viene creata un'immagine di un'applicazione ed effettuato il _push_ +su un registry (un repository pubblico di immagini) prima di poterne fare riferimento esplicito in un +{{< glossary_tooltip text="Pod" term_id="pod" >}} + +Questa pagina va a delineare nello specifico il concetto di immagine di un container. + + + +## I nomi delle immagini + +Alle immagini dei container vengono normalmente attribuiti nomi come `pause`, `example/mycontainer`, o `kube-apiserver`. +Le immagini possono anche contenere l'hostname del registry in cui le immagini sono pubblicate; +ad esempio: `registro.fittizio.esempio/nomeimmagine`, +ed è possibile che sia incluso nel nome anche il numero della porta; ad esempio: `registro.fittizio.esempio:10443/nomeimmagine`. + +Se non si specifica l'hostname di un registry, Kubernetes assume che ci si riferisca al registry pubblico di Docker. 
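+
+Ad esempio (uno schema indicativo della consueta convenzione di risoluzione, non testo di questa pagina), un nome senza hostname viene espanso all'incirca così:
+
+```
+busybox             -> docker.io/library/busybox:latest
+example/mycontainer -> docker.io/example/mycontainer:latest
+```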
+
+Dopo la parte relativa al nome dell'immagine si può aggiungere un _tag_ (come comunemente avviene per comandi come `docker` e `podman`).
+I tag permettono l'identificazione di differenti versioni della stessa serie di immagini.
+
+I tag delle immagini sono composti da lettere minuscole e maiuscole, numeri, underscore (`_`),
+punti (`.`), e trattini (`-`).
+Esistono regole aggiuntive relative a dove i caratteri separatori (`_`, `-` e `.`)
+possano essere inseriti nel tag di un'immagine.
+Se non si specifica un tag, Kubernetes assume il tag `latest` che va a definire l'immagine disponibile più recente.
+
+{{< caution >}}
+Evitate di utilizzare il tag `latest` quando si rilasciano dei container in produzione,
+in quanto risulta difficile tracciare quale versione dell'immagine sia stata avviata e persino più difficile
+effettuare un rollback ad una versione precedente.
+
+Invece, meglio specificare un tag specifico come ad esempio `v1.42.0`.
+{{< /caution >}}
+
+## Aggiornamento delle immagini
+
+Quando un {{< glossary_tooltip text="Deployment" term_id="deployment" >}},
+{{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}}, Pod, o qualsiasi altro
+oggetto che includa un Pod template viene creato per la prima volta, la policy di default per il pull di tutti i container nel Pod
+è impostata su `IfNotPresent` (se non presente) se non specificato diversamente.
+Questa policy permette al
+{{< glossary_tooltip text="kubelet" term_id="kubelet" >}} di evitare di fare il pull
+di un'immagine se questa è già presente.
+
+Se necessario, si può forzare il pull in ogni occasione in uno dei seguenti modi:
+
+- impostando `imagePullPolicy` (specifica per il pull delle immagini) del container su `Always` (sempre).
+- omettendo `imagePullPolicy` ed usando il tag `:latest` (più recente) per l'immagine da utilizzare;
+  Kubernetes imposterà la policy su `Always` (sempre).
+- omettendo `imagePullPolicy` ed il tag per l'immagine da utilizzare.
+- abilitando l'admission controller [AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages).
+
+{{< note >}}
+Il valore dell'impostazione `imagePullPolicy` del container è sempre presente quando l'oggetto viene creato per la prima volta
+e non viene aggiornato se il tag dell'immagine dovesse cambiare successivamente.
+
+Ad esempio, creando un Deployment con un'immagine il cui tag _non_ è
+`:latest`, e successivamente aggiornando il tag di quell'immagine a `:latest`, il campo
+`imagePullPolicy` _non_ cambierà su `Always`.
+È necessario modificare manualmente la policy di pull di ogni oggetto dopo la sua creazione.
+{{< /note >}}
+
+Quando `imagePullPolicy` è definito senza un valore specifico, esso è impostato su `Always`.
+
+## Multi-architecture support nelle immagini
+
+Oltre a fornire immagini binarie, un _container registry_ può fornire un [indice delle immagini disponibili per un container](https://github.com/opencontainers/image-spec/blob/master/image-index.md).
+L'indice di un'immagine può puntare a più [file manifest](https://github.com/opencontainers/image-spec/blob/master/manifest.md) ciascuno per una versione specifica dell'architettura di un container.
+L'idea è che si può avere un unico nome per una stessa immagine (ad esempio: `pause`, `example/mycontainer`, `kube-apiserver`) e permettere a diversi sistemi di recuperare l'immagine binaria corretta a seconda dell'architettura della macchina che la sta utilizzando.
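+
+Per vederlo in pratica (esempio indicativo: `docker manifest inspect` è un sottocomando della CLI di Docker che nelle versioni più datate può richiedere l'abilitazione delle funzionalità sperimentali; il nome dell'immagine è solo un esempio):
+
+```shell
+# Stampa la manifest list: una voce per ogni coppia os/architettura supportata.
+docker manifest inspect k8s.gcr.io/pause:3.5
+```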
+ + +Kubernetes stesso tipicamente nomina le immagini dei container tramite il suffisso `-$(ARCH)`. +Per la garantire la retrocompatibilità è meglio generare le vecchie immagini con dei suffissi. +L'idea è quella di generare, ad esempio, l'immagine `pause` con un manifest che include tutte le architetture supportate, +affiancata, ad esempio, da `pause-amd64` che è retrocompatibile per le vecchie configurazioni o per quei file YAML +in cui sono specificate le immagini con i suffissi. + +## Utilizzare un private registry + +I private registry possono richiedere l'utilizzo di chiavi per accedere alle immagini in essi contenute. +Le credenziali possono essere fornite in molti modi: + - configurando i nodi in modo tale da autenticarsi al private registry + - tutti i pod possono acquisire informazioni da qualsiasi private registry configurato + - è necessario che l'amministratore del cluster configuri i nodi in tal senso + - tramite pre-pulled images (immagini pre-caricate sui nodi) + - tutti i pod possono accedere alle immagini salvate sulla cache del nodo a cui si riferiscono + - è necessario effettuare l'accesso come root di sistema su ogni nodo per inserire questa impostazione + - specificando _ImagePullSecrets_ su un determinato pod + - solo i pod che forniscono le proprie chiavi hanno la possibilità di accedere al private registry + - tramite estensioni locali o specifiche di un _Vendor_ + - se si sta utilizzando una configurazione personalizzata del nodo oppure se manualmente, o tramite il _cloud provider_, + si implementa un meccanismo di autenticazione del nodo presso il _container registry_. + +Di seguito la spiegazione dettagliata di queste opzioni. + +### Configurazione dei nodi per l'autenticazione ad un private registry + +Se si sta utilizzando Docker sui nodi, si può configurare il _Docker container runtime_ +per autenticare il nodo presso un private container registry. + +Questo è un approccio possibile se si ha il controllo sulle configurazioni del nodo. + +{{< note >}} +Kubernetes di default supporta solo le sezioni `auths` e `HttpHeaders` nelle configurazioni relative a Docker. +Eventuali _helper_ per le credenziali di Docker (`credHelpers` o `credsStore`) non sono supportati. +{{< /note >}} + + +Docker salva le chiavi per i registri privati in `$HOME/.dockercfg` oppure nel file `$HOME/.docker/config.json`. +Inserendo lo stesso file nella lista seguente, kubelet lo utilizzerà per recuperare le credenziali quando deve fare il _pull_ delle immagini. + +* `{--root-dir:-/var/lib/kubelet}/config.json` +* `{cwd of kubelet}/config.json` +* `${HOME}/.docker/config.json` +* `/.docker/config.json` +* `{--root-dir:-/var/lib/kubelet}/.dockercfg` +* `{cwd of kubelet}/.dockercfg` +* `${HOME}/.dockercfg` +* `/.dockercfg` + +{{< note >}} +Potrebbe essere necessario impostare `HOME=/root` esplicitamente come variabile d'ambiente del processo _kubelet_. +{{< /note >}} + +Di seguito i passi consigliati per configurare l'utilizzo di un private registry da parte dei nodi del _cluster_. +In questo esempio, eseguire i seguenti comandi sul proprio desktop/laptop: + + 1. Esegui `docker login [server]` per ogni _set_ di credenziali che vuoi utilizzare. Questo comando aggiornerà `$HOME/.docker/config.json` sul tuo PC. + 1. Controlla il file `$HOME/.docker/config.json` in un editor di testo per assicurarti che contenga le credenziali che tu voglia utilizzare. + 1. 
+    - se vuoi utilizzare i nomi: `nodes=$( kubectl get nodes -o jsonpath='{range .items[*].metadata}{.name} {end}' )`
+    - se vuoi recuperare gli indirizzi IP: `nodes=$( kubectl get nodes -o jsonpath='{range .items[*].status.addresses[?(@.type=="ExternalIP")]}{.address} {end}' )`
+ 1. Copia il tuo file locale `.docker/config.json` in uno dei path sopra riportati nella lista di ricerca.
+    - ad esempio, per testare il tutto: `for n in $nodes; do scp ~/.docker/config.json root@"$n":/var/lib/kubelet/config.json; done`
+
+{{< note >}}
+Per i cluster di produzione, utilizza un configuration management tool per poter applicare le impostazioni su tutti i nodi laddove necessario.
+{{< /note >}}
+
+Puoi fare una verifica creando un Pod che faccia uso di un'immagine privata; ad esempio:
+
+```shell
+# sostituisci PRIVATE_IMAGE_NAME con il nome di un'immagine nel tuo private registry
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: private-image-test-1
+spec:
+  containers:
+    - name: uses-private-image
+      image: $PRIVATE_IMAGE_NAME
+      imagePullPolicy: Always
+      command: [ "echo", "SUCCESS" ]
+EOF
+```
+
+### Immagini pre-scaricate (pre-pulled images)
+
+{{< note >}}
+Questo approccio è possibile se si ha il controllo sulla configurazione del nodo.
+Non funzionerà qualora il cloud provider gestisca i nodi e li sostituisca automaticamente.
+{{< /note >}}
+
+Kubelet di default prova a fare il pull di ogni immagine dal registry specificato.
+Tuttavia, qualora la proprietà `imagePullPolicy` (specifica di pull dell'immagine) del container sia impostata su `IfNotPresent` (vale a dire, se non è già presente) oppure su `Never` (mai),
+allora l'immagine locale è utilizzata (in via preferenziale o esclusiva, rispettivamente).
+
+Se si vuole fare affidamento su immagini pre-scaricate per non dover incorrere in una fase di autenticazione presso il registry,
+bisogna assicurarsi che tutti i nodi nel cluster abbiano scaricato le stesse versioni delle immagini.
+
+Questa procedura può essere utilizzata per accelerare il processo di creazione delle istanze o come alternativa all'autenticazione presso un private registry.
+
+Tutti i pod avranno accesso in lettura a qualsiasi immagine pre-scaricata.
+
+### Specificare la proprietà imagePullSecrets su un Pod
+
+{{< note >}}
+Questo approccio è quello consigliato per l'avvio di container a partire da immagini presenti in registri privati.
+{{< /note >}}
+
+Kubernetes dà la possibilità di specificare le chiavi del _container registry_ su un Pod.
+
+#### Creare un Secret tramite Docker config
+
+Esegui il comando seguente, sostituendo i valori riportati in maiuscolo con quelli corretti:
+
+```shell
+kubectl create secret docker-registry <name> --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL
+```
+
+Se possiedi il file delle credenziali per Docker, anziché utilizzare il comando qui sopra
+puoi importare il file di credenziali come un Kubernetes
+{{< glossary_tooltip text="Secret" term_id="secret" >}}.
+[Creare un Secret a partire da credenziali Docker](/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials) fornisce la spiegazione dettagliata su come fare.
+
+Ciò è particolarmente utile se si utilizzano più _container registry_ privati,
+in quanto il comando `kubectl create secret docker-registry` genera un Secret che
+funziona con un solo private registry.
+
+{{< note >}}
+I Pod possono fare riferimento ai Secret per il pull delle immagini soltanto nel proprio _namespace_,
+quindi questo procedimento deve essere svolto per ogni _namespace_.
+{{< /note >}}
+
+#### Fare riferimento ad imagePullSecrets in un Pod
+
+È possibile creare pod che referenzino quel Secret aggiungendo la sezione `imagePullSecrets` alla definizione del Pod.
+
+Ad esempio:
+
+```shell
+cat <<EOF > pod.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: foo
+  namespace: awesomeapps
+spec:
+  containers:
+    - name: foo
+      image: janedoe/awesomeapp:v1
+  imagePullSecrets:
+    - name: myregistrykey
+EOF
+
+cat <<EOF >> ./kustomization.yaml
+resources:
+- pod.yaml
+EOF
+```
+
+Questo deve essere fatto per ogni Pod che utilizzi un private registry.
+
+Comunque, le impostazioni relative a questo campo possono essere automatizzate inserendo la sezione _imagePullSecrets_
+nella definizione della risorsa [ServiceAccount](/docs/tasks/configure-pod-container/configure-service-account/).
+
+Visitare la pagina [Aggiungere ImagePullSecrets ad un Service Account](/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account) per istruzioni più dettagliate.
+
+Puoi utilizzare questo approccio in congiunzione con il file `.docker/config.json` configurato per ogni nodo. In questo caso, si applicherà un _merge_ delle credenziali.
+
+## Casi d'uso
+
+Ci sono varie soluzioni per configurare i private registry. Di seguito, alcuni casi d'uso comuni e le soluzioni suggerite.
+
+1. Cluster in cui sono utilizzate soltanto immagini non proprietarie (ovvero _open-source_). In questo caso non sussiste il bisogno di nascondere le immagini.
+   - Utilizza immagini pubbliche da Docker Hub.
+     - Nessuna configurazione richiesta.
+     - Alcuni _cloud provider_ mettono in _cache_ o effettuano il _mirror_ di immagini pubbliche, il che migliora la disponibilità delle immagini e ne riduce il tempo di _pull_.
+1. Cluster con container avviati a partire da immagini proprietarie che dovrebbero essere nascoste a chi è esterno all'organizzazione, ma
+   visibili a tutti gli utenti abilitati nel cluster.
+   - Utilizza un private [Docker registry](https://docs.docker.com/registry/).
+     - Esso può essere ospitato da [Docker Hub](https://hub.docker.com/signup), o da qualche altra piattaforma.
+     - Configura manualmente il file .docker/config.json su ogni nodo come descritto sopra.
+   - Oppure, avvia un private registry dietro il tuo firewall con accesso in lettura libero.
+     - Non è necessaria alcuna configurazione di Kubernetes.
+   - Utilizza un servizio di _container registry_ che controlli l'accesso alle immagini
+     - Esso funzionerà meglio con una configurazione del cluster basata su _autoscaling_ che con una configurazione manuale del nodo.
+   - Oppure, su un cluster dove la modifica delle configurazioni del nodo non è conveniente, utilizza `imagePullSecrets`.
+1. Cluster con immagini proprietarie, alcune delle quali richiedono un controllo sugli accessi.
+   - Assicurati che l'_admission controller_ [AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) sia attivo. Altrimenti, tutti i Pod potenzialmente possono avere accesso a tutte le immagini.
+   - Sposta i dati sensibili in un _Secret_, invece di inserirli in un'immagine.
+1. Un cluster multi-tenant dove ogni tenant necessiti di un private registry.
+   - Assicurati che l'_admission controller_ [AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) sia attivo. Altrimenti, tutti i Pod di tutti i tenant potrebbero potenzialmente avere accesso a tutte le immagini.
+   - Avvia un private registry che richieda un'autorizzazione all'accesso.
+   - Genera delle credenziali di registry per ogni tenant, inseriscile in dei _Secret_, e popola i _Secret_ per ogni _namespace_ relativo ad ognuno dei tenant.
+ - Il singolo tenant aggiunge così quel _Secret_ all'impostazione _imagePullSecrets_ di ogni _namespace_. + + +Se si ha la necessità di accedere a più registri, si può generare un _Secret_ per ognuno di essi. +Kubelet farà il _merge_ di ogni `imagePullSecrets` in un singolo file virtuale `.docker/config.json`. + +## {{% heading "whatsnext" %}} + +* Leggi [OCI Image Manifest Specification](https://github.com/opencontainers/image-spec/blob/master/manifest.md) \ No newline at end of file diff --git a/content/ja/community/static/cncf-code-of-conduct.md b/content/ja/community/static/cncf-code-of-conduct.md index f61005e38790e..61255d92d9a9f 100644 --- a/content/ja/community/static/cncf-code-of-conduct.md +++ b/content/ja/community/static/cncf-code-of-conduct.md @@ -25,7 +25,7 @@ CNCF コミュニティ行動規範 v1.0 Kubernetesで虐待的、嫌がらせ、または許されない行動があった場合には、から[Kubernetes Code of Conduct Committee](https://git.k8s.io/community/committee-code-of-conduct)(行動規範委員会)にご連絡ください。その他のプロジェクトにつきましては、CNCFプロジェクト管理者または仲介者にご連絡ください。 -本行動規範は、コントリビューターの合意 (http://contributor-covenant.org) バージョン 1.2.0 http://contributor-covenant.org/version/1/2/0/ から適応されています。 +本行動規範は、コントリビューターの合意 (https://contributor-covenant.org) バージョン 1.2.0 https://contributor-covenant.org/version/1/2/0/ から適応されています。 ### CNCF イベント行動規範 diff --git a/content/ja/docs/_index.md b/content/ja/docs/_index.md index a1651d8457ee3..ca2621b2c20c7 100644 --- a/content/ja/docs/_index.md +++ b/content/ja/docs/_index.md @@ -1,3 +1,4 @@ --- -title: ドキュメント +linktitle: Kubernetesドキュメント +title: ドキュメント --- diff --git a/content/ja/docs/concepts/architecture/nodes.md b/content/ja/docs/concepts/architecture/nodes.md index 05793ebde8e3e..9c47531a37a85 100644 --- a/content/ja/docs/concepts/architecture/nodes.md +++ b/content/ja/docs/concepts/architecture/nodes.md @@ -6,11 +6,12 @@ weight: 10 -Kubernetesはコンテナを_Node_上で実行されるPodに配置することで、ワークロードを実行します。 +Kubernetesはコンテナを _Node_ 上で実行されるPodに配置することで、ワークロードを実行します。 ノードはクラスターによりますが、1つのVMまたは物理的なマシンです。 各ノードは{{< glossary_tooltip text="Pod" term_id="pod" >}}やそれを制御する{{< glossary_tooltip text="コントロールプレーン" term_id="control-plane" >}}を実行するのに必要なサービスを含んでいます。 通常、1つのクラスターで複数のノードを持ちます。学習用途やリソースの制限がある環境では、1ノードかもしれません。 + 1つのノード上の[コンポーネント](/ja/docs/concepts/overview/components/#node-components)には、{{< glossary_tooltip text="kubelet" term_id="kubelet" >}}、{{< glossary_tooltip text="コンテナランタイム" term_id="container-runtime" >}}、{{< glossary_tooltip text="kube-proxy" term_id="kube-proxy" >}}が含まれます。 @@ -22,7 +23,7 @@ Kubernetesはコンテナを_Node_上で実行されるPodに配置すること 1. ノード上のkubeletが、コントロールプレーンに自己登録する。 2. 
あなた、もしくは他のユーザーが手動でNodeオブジェクトを追加する。
 
-Nodeオブジェクトの作成、もしくはノード上のkubeketによる自己登録の後、コントロールプレーンはNodeオブジェクトが有効かチェックします。例えば、下記のjsonマニフェストでノードを作成してみましょう。
+Nodeオブジェクトの作成、もしくはノード上のkubeletによる自己登録の後、コントロールプレーンはNodeオブジェクトが有効かチェックします。例えば、下記のjsonマニフェストでノードを作成してみましょう:
 
 ```json
 {
@@ -72,9 +73,9 @@ kubeletのフラグ `--register-node`がtrue(デフォルト)のとき、kub
 管理者が手動でNodeオブジェクトを作成したい場合は、kubeletフラグ `--register-node = false`を設定してください。
 管理者は`--register-node`の設定に関係なくNodeオブジェクトを変更することができます。
-変更には、ノードにラベルを設定し、それをunschedulableとしてマークすることが含まれます。
+例えば、ノードにラベルを設定し、それをunschedulableとしてマークすることが含まれます。
 
-ノード上のラベルは、スケジューリングを制御するためにPod上のノードセレクタと組み合わせて使用できます。
+ノード上のラベルは、スケジューリングを制御するためにPod上のノードセレクターと組み合わせて使用できます。
 例えば、Podをノードのサブセットでのみ実行する資格があるように制限します。
 
 ノードをunschedulableとしてマークすると、新しいPodがそのノードにスケジュールされるのを防ぎますが、ノード上の既存のPodには影響しません。
@@ -124,7 +125,7 @@ kubectl describe node <ノード名をここに挿入>
 {{< table caption = "ノードのConditionと、各condition適用時の概要" >}}
 | ノードのCondition | 概要 |
 |----------------------|-------------|
-| `Ready` | ノードの状態がHealthyでPodを配置可能な場合に`True`になります。ノードの状態に問題があり、Podが配置できない場合に`False`になります。ノードコントローラーが、`node-monitor-grace-period`で設定された時間内(デフォルトでは40秒)に該当ノードと疎通できない場合、`Unknown`になります。 |
+| `Ready` | ノードの状態が有効でPodを配置可能な場合に`True`になります。ノードの状態に問題があり、Podが配置できない場合に`False`になります。ノードコントローラーが、`node-monitor-grace-period`で設定された時間内(デフォルトでは40秒)に該当ノードと疎通できない場合、`Unknown`になります。 |
 | `DiskPressure` | ノードのディスク容量が圧迫されているときに`True`になります。圧迫とは、ディスクの空き容量が少ないことを指します。それ以外のときは`False`です。 |
 | `MemoryPressure` | ノードのメモリが圧迫されているときに`True`になります。圧迫とは、メモリの空き容量が少ないことを指します。それ以外のときは`False`です。 |
 | `PIDPressure` | プロセスが圧迫されているときに`True`になります。圧迫とは、プロセス数が多すぎることを指します。それ以外のときは`False`です。 |
@@ -241,7 +242,7 @@ kubeletが`NodeStatus`とLeaseオブジェクトの作成および更新を担
 このような場合、ノードコントローラーはマスター接続に問題があると見なし、接続が回復するまですべての退役を停止します。
 ノードコントローラーは、Podがtaintを許容しない場合、 `NoExecute`のtaintを持つノード上で実行されているPodを排除する責務もあります。
-さらに、デフォルトで無効になっているアルファ機能として、ノードコントローラーはノードに到達できない、または準備ができていないなどのノードの問題に対応する{{< glossary_tooltip text="taint" term_id="taint" >}}を追加する責務があります。これはスケジューラーが、問題のあるノードにPodを配置しない事を意味しています。
+さらに、ノードコントローラーはノードに到達できない、または準備ができていないなどのノードの問題に対応する{{< glossary_tooltip text="taint" term_id="taint" >}}を追加する責務があります。これはスケジューラーが、問題のあるノードにPodを配置しない事を意味しています。
 
 {{< caution >}}
 `kubectl cordon`はノードに'unschedulable'としてマークします。それはロードバランサーのターゲットリストからノードを削除するという
@@ -254,8 +255,7 @@ Nodeオブジェクトはノードのリソースキャパシティ(CPUの数
 [自己登録](#self-registration-of-nodes)したノードは、Nodeオブジェクトを作成するときにキャパシティを報告します。
 [手動によるノード管理](#manual-node-administration)を実行している場合は、ノードを追加するときにキャパシティを設定する必要があります。
 
-Kubernetes{{< glossary_tooltip text="スケジューラー" term_id="kube-scheduler" >}}は、ノード上のすべてのPodに十分なリソースがあることを確認します。
-ノード上のコンテナが要求するリソースの合計がノードキャパシティ以下であることを確認します。
+Kubernetes{{< glossary_tooltip text="スケジューラー" term_id="kube-scheduler" >}}は、ノード上のすべてのPodに十分なリソースがあることを確認します。スケジューラーは、ノード上のコンテナが要求するリソースの合計がノードキャパシティ以下であることを確認します。
 これは、kubeletによって管理されたすべてのコンテナを含みますが、コンテナランタイムによって直接開始されたコンテナやkubeletの制御外で実行されているプロセスは含みません。
 
 {{< note >}}
diff --git a/content/ja/docs/concepts/cluster-administration/manage-deployment.md b/content/ja/docs/concepts/cluster-administration/manage-deployment.md
index 90f96547d5a23..cb9c7c0fc33ae 100644
--- a/content/ja/docs/concepts/cluster-administration/manage-deployment.md
+++ b/content/ja/docs/concepts/cluster-administration/manage-deployment.md
@@ -237,7 +237,7 @@ guestbook-redis-slave-qgazl 1/1 Running 0 3m
         image: gb-frontend:v3
 ```
 
-そして2つの異なるPodのセットを上書きしないようにするため、`track`ラベルに異なる値を持つ(例: `canary`)ようなguestbookフロントエンドの新しいリリースを作成できます。
+そして2つの異なるPodのセットを上書きしないようにするため、`track`ラベルに異なる値を持つ(例: `canary`)ようなguestbookフロントエンドの新しいリリースを作成できます。
 
 ```yaml
      name: frontend-canary
diff
--git a/content/ja/docs/concepts/cluster-administration/networking.md b/content/ja/docs/concepts/cluster-administration/networking.md index 05af3690d10e8..b81c51f437215 100644 --- a/content/ja/docs/concepts/cluster-administration/networking.md +++ b/content/ja/docs/concepts/cluster-administration/networking.md @@ -45,7 +45,7 @@ KubernetesのIPアドレスは`Pod`スコープに存在します。`Pod`内の `Pod`に転送する`ノード`自体のポート(ホストポートと呼ばれる)を要求することは可能ですが、これは非常にニッチな操作です。このポート転送の実装方法も、コンテナランタイムの詳細部分です。`Pod`自体は、ホストポートの有無を認識しません。 -## Kubernetesネットワークモデルの実装方法 +## Kubernetesネットワークモデルの実装方法 {#how-to-implement-the-kubernetes-networking-model} このネットワークモデルを実装する方法はいくつかあります。このドキュメントは、こうした方法を網羅的にはカバーしませんが、いくつかの技術の紹介として、また出発点として役立つことを願っています。 diff --git a/content/ja/docs/concepts/configuration/manage-resources-containers.md b/content/ja/docs/concepts/configuration/manage-resources-containers.md index f94513b8e7ba8..761999d93740f 100644 --- a/content/ja/docs/concepts/configuration/manage-resources-containers.md +++ b/content/ja/docs/concepts/configuration/manage-resources-containers.md @@ -84,7 +84,7 @@ CPUは常に相対量としてではなく、絶対量として要求されま ### メモリーの意味 `メモリー`の制限と要求はバイト単位で測定されます。 -E、P、T、G、M、Kのいずれかのサフィックスを使用して、メモリーを整数または固定小数点整数として表すことができます。 +E、P、T、G、M、Kのいずれかのサフィックスを使用して、メモリーを整数または固定小数点数として表すことができます。 また、Ei、Pi、Ti、Gi、Mi、Kiのような2の累乗の値を使用することもできます。 たとえば、以下はほぼ同じ値を表しています。 @@ -104,11 +104,9 @@ metadata: name: frontend spec: containers: - - name: db - image: mysql + - name: app + image: images.my-company.example/app:v4 env: - - name: MYSQL_ROOT_PASSWORD - value: "password" resources: requests: memory: "64Mi" @@ -116,8 +114,8 @@ spec: limits: memory: "128Mi" cpu: "500m" - - name: wp - image: wordpress + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 resources: requests: memory: "64Mi" @@ -185,7 +183,7 @@ kubeletは、ローカルのエフェメラルストレージを使用して、P また、kubeletはこの種類のストレージを使用して、[Nodeレベルのコンテナログ](/docs/concepts/cluster-administration/logging/#logging-at-the-node-level)、コンテナイメージ、実行中のコンテナの書き込み可能なレイヤーを保持します。 {{< caution >}} -Nodeに障害が発生すると、そのエフェメラルストレージ内のデータが失われる可能性があります。 +Nodeに障害が発生すると、そのエフェメラルストレージ内のデータが失われる可能性があります。 アプリケーションは、ローカルのエフェメラルストレージにパフォーマンスのサービス品質保証(ディスクのIOPSなど)を期待することはできません。 {{< /caution >}} @@ -242,7 +240,7 @@ Podの各コンテナは、次の1つ以上を指定できます。 * `spec.containers[].resources.requests.ephemeral-storage` `ephemeral-storage`の制限と要求はバイト単位で記します。 -ストレージは、次のいずれかの接尾辞を使用して、通常の整数または固定小数点整数として表すことができます。 +ストレージは、次のいずれかの接尾辞を使用して、通常の整数または固定小数点数として表すことができます。 E、P、T、G、M、K。Ei、Pi、Ti、Gi、Mi、Kiの2のべき乗を使用することもできます。 たとえば、以下はほぼ同じ値を表しています。 @@ -262,18 +260,15 @@ metadata: name: frontend spec: containers: - - name: db - image: mysql - env: - - name: MYSQL_ROOT_PASSWORD - value: "password" + - name: app + image: images.my-company.example/app:v4 resources: requests: ephemeral-storage: "2Gi" limits: ephemeral-storage: "4Gi" - - name: wp - image: wordpress + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 resources: requests: ephemeral-storage: "2Gi" @@ -300,6 +295,7 @@ kubeletがローカルのエフェメラルストレージをリソースとし Podが許可するよりも多くのエフェメラルストレージを使用している場合、kubeletはPodの排出をトリガーするシグナルを設定します。 コンテナレベルの分離の場合、コンテナの書き込み可能なレイヤーとログ使用量がストレージの制限を超えると、kubeletはPodに排出のマークを付けます。 + Podレベルの分離の場合、kubeletはPod内のコンテナの制限を合計し、Podの全体的なストレージ制限を計算します。 このケースでは、すべてのコンテナからのローカルのエフェメラルストレージの使用量とPodの`emptyDir`ボリュームの合計がPod全体のストレージ制限を超過する場合、 kubeletはPodをまた排出対象としてマークします。 @@ -345,7 +341,7 @@ Kubernetesでは、`1048576`から始まるプロジェクトIDを使用しま Kubernetesが使用しないようにする必要があります。 クォータはディレクトリスキャンよりも高速で正確です。 -ディレクトリがプロジェクトに割り当てられると、ディレクトリ配下に作成されたファイルはすべてそのプロジェクト内に作成され、カーネルはそのプロジェクト内のファイルによって使用されているブロックの数を追跡するだけです。 
+ディレクトリがプロジェクトに割り当てられると、ディレクトリ配下に作成されたファイルはすべてそのプロジェクト内に作成され、カーネルはそのプロジェクト内のファイルによって使用されているブロックの数を追跡するだけです。
 ファイルが作成されて削除されても、開いているファイルディスクリプタがあれば、スペースを消費し続けます。
 クォータトラッキングはそのスペースを正確に記録しますが、ディレクトリスキャンは削除されたファイルが使用するストレージを見落としてしまいます。
@@ -354,7 +350,7 @@ Kubernetesが使用しないようにする必要があります。
 * kubelet設定で、`LocalStorageCapacityIsolationFSQuotaMonitoring=true`[フィーチャーゲート](/ja/docs/reference/command-line-tools-reference/feature-gate/)を有効にします。
 * ルートファイルシステム(またはオプションのランタイムファイルシステム)がプロジェクトクォータを有効にしていることを確認してください。
-  すべてのXFSファイルシステムはプロジェクトクォータをサポートしています。 
+  すべてのXFSファイルシステムはプロジェクトクォータをサポートしています。
   ext4ファイルシステムでは、ファイルシステムがマウントされていない間は、プロジェクトクォータ追跡機能を有効にする必要があります。
 
 ```bash
 # ext4の場合、/dev/block-deviceがマウントされていません
diff --git a/content/ja/docs/concepts/configuration/pod-priority-preemption.md b/content/ja/docs/concepts/configuration/pod-priority-preemption.md
new file mode 100644
index 0000000000000..c06b3b41b0a0b
--- /dev/null
+++ b/content/ja/docs/concepts/configuration/pod-priority-preemption.md
@@ -0,0 +1,240 @@
+---
+title: Podの優先度とプリエンプション
+content_type: concept
+weight: 70
+---
+
+
+
+{{< feature-state for_k8s_version="v1.14" state="stable" >}}
+
+[Pod](/ja/docs/concepts/workloads/pods/)は _priority_(優先度)を持つことができます。
+優先度は他のPodに対する相対的なPodの重要度を示します。
+もしPodをスケジューリングできないときには、スケジューラーはそのPodをスケジューリングできるようにするため、優先度の低いPodをプリエンプトする(追い出す)ことを試みます。
+
+
+
+
+
+
+{{< warning >}}
+クラスターの全てのユーザーが信用されていない場合、悪意のあるユーザーが可能な範囲で最も高い優先度のPodを作成することが可能です。これは他のPodが追い出されたりスケジューリングできない状態を招きます。
+管理者はResourceQuotaを使用して、ユーザーがPodを高い優先度で作成することを防ぐことができます。
+
+詳細は[デフォルトで優先度クラスの消費を制限する](/ja/docs/concepts/policy/resource-quotas/#limit-priority-class-consumption-by-default)
+を参照してください。
+{{< /warning >}}
+
+## 優先度とプリエンプションを使う方法
+
+優先度とプリエンプションを使うには、
+
+1. 1つまたは複数の[PriorityClass](#priorityclass)を追加します
+
+1. 
追加したPriorityClassを[`priorityClassName`](#pod-priority)に設定したPodを作成します。 + もちろんPodを直接作る必要はありません。 + 一般的には`priorityClassName`をDeploymentのようなコレクションオブジェクトのPodテンプレートに追加します。 + +これらの手順のより詳しい情報については、この先を読み進めてください。 + +{{< note >}} +Kubernetesには最初から既に2つのPriorityClassが設定された状態になっています。 +`system-cluster-critical`と`system-node-critical`です。 +これらは汎用のクラスであり、[重要なコンポーネントが常に最初にスケジュールされることを保証する](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/)ために使われます。 +{{< /note >}} + +## PriorityClass + +PriorityClassはnamespaceによらないオブジェクトで、優先度クラスの名称から優先度を表す整数値への対応を定義します。 +PriorityClassオブジェクトのメタデータの`name`フィールドにて名称を指定します。 +値は`value`フィールドで指定し、必須です。 +値が大きいほど、高い優先度を示します。 +PriorityClassオブジェクトの名称は[DNSサブドメイン名](/ja/docs/concepts/overview/working-with-objects/names#dns-subdomain-names)として適切であり、かつ`system-`から始まってはいけません。 + +PriorityClassオブジェクトは10億以下の任意の32ビットの整数値を持つことができます。 +それよりも大きな値は通常はプリエンプトや追い出すべきではない重要なシステム用のPodのために予約されています。 +クラスターの管理者は割り当てたい優先度に対して、PriorityClassオブジェクトを1つずつ作成すべきです。 + +PriorityClassは任意でフィールド`globalDefault`と`description`を設定可能です。 +`globalDefault`フィールドは`priorityClassName`が指定されないPodはこのPriorityClassを使うべきであることを示します。`globalDefault`がtrueに設定されたPriorityClassはシステムで一つのみ存在可能です。`globalDefault`が設定されたPriorityClassが存在しない場合は、`priorityClassName`が設定されていないPodの優先度は0に設定されます。 + +`description`フィールドは任意の文字列です。クラスターの利用者に対して、PriorityClassをどのような時に使うべきか示すことを意図しています。 + +### PodPriorityと既存のクラスターに関する注意 + +- もし既存のクラスターをこの機能がない状態でアップグレードすると、既存のPodの優先度は実質的に0になります。 + +- `globalDefault`が`true`に設定されたPriorityClassを追加しても、既存のPodの優先度は変わりません。PriorityClassのそのような値は、PriorityClassが追加された以後に作成されたPodのみに適用されます。 + +- PriorityClassを削除した場合、削除されたPriorityClassの名前を使用する既存のPodは変更されませんが、削除されたPriorityClassの名前を使うPodをそれ以上作成することはできなくなります。 + +### PriorityClassの例 + +```yaml +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: high-priority +value: 1000000 +globalDefault: false +description: "この優先度クラスはXYZサービスのPodに対してのみ使用すべきです。" +``` + +## 非プリエンプトのPriorityClass {#non-preempting-priority-class} + +{{< feature-state for_k8s_version="v1.19" state="beta" >}} + +`PreemptionPolicy: Never`と設定されたPodは、スケジューリングのキューにおいて他の優先度の低いPodよりも優先されますが、他のPodをプリエンプトすることはありません。 +スケジューリングされるのを待つ非プリエンプトのPodは、リソースが十分に利用可能になるまでスケジューリングキューに残ります。 +非プリエンプトのPodは、他のPodと同様に、スケジューラーのバックオフの対象になります。これは、スケジューラーがPodをスケジューリングしようと試みたものの失敗した場合、低い頻度で再試行するようにして、より優先度の低いPodが先にスケジューリングされることを許します。 + +非プリエンプトのPodは、他の優先度の高いPodにプリエンプトされる可能性はあります。 + +`PreemptionPolicy`はデフォルトでは`PreemptLowerPriority`に設定されており、これが設定されているPodは優先度の低いPodをプリエンプトすることを許容します。これは既存のデフォルトの挙動です。 +`PreemptionPolicy`を`Never`に設定すると、これが設定されたPodはプリエンプトを行わないようになります。 + +ユースケースの例として、データサイエンスの処理を挙げます。 +ユーザーは他の処理よりも優先度を高くしたいジョブを追加できますが、そのとき既存の実行中のPodの処理結果をプリエンプトによって破棄させたくはありません。 +`PreemptionPolicy: Never`が設定された優先度の高いジョブは、他の既にキューイングされたPodよりも先に、クラスターのリソースが「自然に」開放されたときにスケジューリングされます。 + +### 非プリエンプトのPriorityClassの例 + +```yaml +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: high-priority-nonpreempting +value: 1000000 +preemptionPolicy: Never +globalDefault: false +description: "この優先度クラスは他のPodをプリエンプトさせません。" +``` + +## Podの優先度 {#pod-priority} + +一つ以上のPriorityClassがあれば、仕様にPriorityClassを指定したPodを作成することができるようになります。優先度のアドミッションコントローラーは`priorityClassName`フィールドを使用し、優先度の整数値を設定します。PriorityClassが見つからない場合、そのPodの作成は拒否されます。 + +下記のYAMLは上記の例で作成したPriorityClassを使用するPodの設定の例を示します。優先度のアドミッションコントローラーは仕様を確認し、このPodの優先度は1000000であると設定します。 + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx + labels: + env: test +spec: + containers: + - name: nginx + image: nginx + imagePullPolicy: IfNotPresent + priorityClassName: high-priority +``` 
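+
+なお、この例の`high-priority`というPriorityClass名は、上で作成したPriorityClassを前提とした仮の名前です。クラスターにどのようなPriorityClassが定義されているかは、例えば次のように確認できます(動作確認のための一例です):
+
+```shell
+# 定義済みのPriorityClassと、その値やglobalDefaultの一覧を表示します
+kubectl get priorityclasses
+```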
+ +### スケジューリング順序におけるPodの優先度の効果 + +Podの優先度が有効な場合、スケジューラーは待機状態のPodをそれらの優先度順に並べ、スケジューリングキューにおいてより優先度の低いPodよりも前に来るようにします。その結果、その条件を満たしたときには優先度の高いPodは優先度の低いPodより早くスケジューリングされます。優先度の高いPodがスケジューリングできない場合は、スケジューラーは他の優先度の低いPodのスケジューリングも試みます。 + +## プリエンプション + +Podが作成されると、スケジューリング待ちのキューに入り待機状態になります。スケジューラーはキューからPodを取り出し、ノードへのスケジューリングを試みます。Podに指定された条件を全て満たすノードが見つからない場合は、待機状態のPodのためにプリエンプションロジックが発動します。待機状態のPodをPと呼ぶことにしましょう。プリエンプションロジックはPよりも優先度の低いPodを一つ以上追い出せばPをスケジューリングできるようになるノードを探します。そのようなノードがあれば、優先度の低いPodはノードから追い出されます。Podが追い出された後に、Pはノードへスケジューリング可能になります。 + +### ユーザーへ開示される情報 + +Pod PがノードNのPodをプリエンプトした場合、ノードNの名称がPのステータスの`nominatedNodeName`フィールドに設定されます。このフィールドはスケジューラーがPod Pのために予約しているリソースの追跡を助け、ユーザーにクラスターにおけるプリエンプトに関する情報を与えます。 + +Pod Pは必ずしも「指名したノード」へスケジューリングされないことに注意してください。Podがプリエンプトされると、そのPodは終了までの猶予期間を得ます。スケジューラーがPodの終了を待つ間に他のノードが利用可能になると、スケジューラーは他のノードをPod Pのスケジューリング先にします。この結果、Podの`nominatedNodeName`と`nodeName`は必ずしも一致しません。また、スケジューラーがノードNのPodをプリエンプトさせた後に、Pod Pよりも優先度の高いPodが来た場合、スケジューラーはノードNをその新しい優先度の高いPodへ与えます。このような場合は、スケジューラーはPod Pの`nominatedNodeName`を消去します。これによって、スケジューラーはPod Pが他のノードのPodをプリエンプトさせられるようにします。 + +### プリエンプトの制限 + +#### プリエンプトされるPodの正常終了 + +Podがプリエンプトされると、[猶予期間](/ja/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination)が与えられます。 + +Podは作業を完了し、終了するために十分な時間が与えられます。仮にそうでない場合、強制終了されます。この猶予期間によって、スケジューラーがPodをプリエンプトした時刻と、待機状態のPod Pがノード Nにスケジュール可能になるまでの時刻の間に間が開きます。この間、スケジューラーは他の待機状態のPodをスケジュールしようと試みます。プリエンプトされたPodが終了したら、スケジューラーは待ち行列にあるPodをスケジューリングしようと試みます。そのため、Podがプリエンプトされる時刻と、Pがスケジュールされた時刻には間が開くことが一般的です。この間を最小にするには、優先度の低いPodの猶予期間を0または小さい値にする方法があります。 + +#### PodDisruptionBudgetは対応するが、保証されない + +[PodDisruptionBudget](/docs/concepts/workloads/pods/disruptions/) (PDB)は、アプリケーションのオーナーが冗長化されたアプリケーションのPodが意図的に中断される数の上限を設定できるようにするものです。KubernetesはPodをプリエンプトする際にPDBに対応しますが、PDBはベストエフォートで考慮します。スケジューラーはプリエンプトさせたとしてもPDBに違反しないPodを探します。そのようなPodが見つからない場合でもプリエンプションは実行され、PDBに反しますが優先度の低いPodが追い出されます。 + +#### 優先度の低いPodにおけるPod間のアフィニティ + +次の条件が真の場合のみ、ノードはプリエンプションの候補に入ります。 +「待機状態のPodよりも優先度の低いPodをノードから全て追い出したら、待機状態のPodをノードへスケジュールできるか」 + +{{< note >}} +プリエンプションは必ずしも優先度の低いPodを全て追い出しません。 +優先度の低いPodを全て追い出さなくても待機状態のPodがスケジューリングできる場合、一部のPodのみ追い出されます。 +このような場合であったとしても、上記の条件は真である必要があります。偽であれば、そのノードはプリエンプションの対象とはされません。 +{{< /note >}} + +待機状態のPodが、優先度の低いPodとの間でPod間のアフィニティを持つ場合、Pod間のアフィニティはそれらの優先度の低いPodがなければ満たされません。この場合、スケジューラーはノードのどのPodもプリエンプトしようとはせず、代わりに他のノードを探します。スケジューラーは適切なノードを探せる場合と探せない場合があります。この場合、待機状態のPodがスケジューリングされる保証はありません。 + +この問題に対して推奨される解決策は、優先度が同一または高いPodに対してのみPod間のアフィニティを作成することです。 + +#### 複数ノードに対するプリエンプション + +Pod PがノードNにスケジューリングできるよう、ノードNがプリエンプションの対象となったとします。 +他のノードのPodがプリエンプトされた場合のみPが実行可能になることもあります。下記に例を示します。 + +* Pod PをノードNに配置することを検討します。 +* Pod QはノードNと同じゾーンにある別のノードで実行中です。 +* Pod Pはゾーンに対するQへのアンチアフィニティを持ちます (`topologyKey: topology.kubernetes.io/zone`)。 +* Pod Pと、ゾーン内の他のPodに対しては他のアンチアフィニティはない状態です。 +* Pod PをノードNへスケジューリングするには、Pod Qをプリエンプトすることが考えられますが、スケジューラーは複数ノードにわたるプリエンプションは行いません。そのため、Pod PはノードNへはスケジューリングできないとみなされます。 + +Pod Qがそのノードから追い出されると、Podアンチアフィニティに違反しなくなるので、Pod PはノードNへスケジューリング可能になります。 + +複数ノードに対するプリエンプションに関しては、十分な需要があり、合理的な性能を持つアルゴリズムを見つけられた場合に、追加することを検討する可能性があります。 + +## トラブルシューティング + +Podの優先度とプリエンプションは望まない副作用をもたらす可能性があります。 +いくつかの起こりうる問題と、その対策について示します。 + +### Podが不必要にプリエンプトされる + +プリエンプションは、リソースが不足している場合に優先度の高い待機状態のPodのためにクラスターの既存のPodを追い出します。 +誤って高い優先度をPodに割り当てると、意図しない高い優先度のPodはクラスター内でプリエンプションを引き起こす可能性があります。Podの優先度はPodの仕様の`priorityClassName`フィールドにて指定されます。優先度を示す整数値へと変換された後、`podSpec`の`priority`へ設定されます。 + 
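+実際にPodへ設定された優先度の整数値は、例えば次のように確認できます(Pod名`nginx`は前述の例を前提とした仮のものです):
+
+```shell
+# Podの.spec.priorityに解決された整数値を表示します
+kubectl get pod nginx -o jsonpath='{.spec.priority}'
+```
+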
+この問題に対処するには、Podの`priorityClassName`をより低い優先度に変更するか、このフィールドを未設定にすることができます。`priorityClassName`が未設定の場合、デフォルトでは優先度は0とされます。 + +Podがプリエンプトされたとき、プリエンプトされたPodのイベントが記録されます。 +プリエンプションはPodに必要なリソースがクラスターにない場合のみ起こるべきです。 +このような場合、プリエンプションはプリエンプトされるPodよりも待機状態のPodの優先度が高い場合のみ発生します。 +プリエンプションは待機状態のPodがない場合や待機状態のPodがプリエンプト対象のPod以下の優先度を持つ場合には決して発生しません。そのような状況でプリエンプションが発生した場合、問題を報告してください。 + +### Podはプリエンプトされたが、プリエンプトさせたPodがスケジューリングされない + +Podがプリエンプトされると、それらのPodが要求した猶予期間が与えられます。そのデフォルトは30秒です。 +Podがその期間内に終了しない場合、強制終了されます。プリエンプトされたPodがなくなれば、プリエンプトさせたPodはスケジューリング可能です。 + +プリエンプトさせたPodがプリエンプトされたPodの終了を待っている間に、より優先度の高いPodが同じノードに対して作成されることもあります。この場合、スケジューラーはプリエンプトさせたPodの代わりに優先度の高いPodをスケジューリングします。 + +これは予期された挙動です。優先度の高いPodは優先度の低いPodに取って代わります。 + +### 優先度の高いPodが優先度の低いPodより先にプリエンプトされる + +スケジューラーは待機状態のPodが実行可能なノードを探します。ノードが見つからない場合、スケジューラーは任意のノードから優先度の低いPodを追い出し、待機状態のPodのためのリソースを確保しようとします。 +仮に優先度の低いPodが動いているノードが待機状態のPodを動かすために適切ではない場合、スケジューラーは他のノードで動いているPodと比べると、優先度の高いPodが動いているノードをプリエンプションの対象に選ぶことがあります。この場合もプリエンプトされるPodはプリエンプトを起こしたPodよりも優先度が低い必要があります。 + +複数のノードがプリエンプションの対象にできる場合、スケジューラーは優先度が最も低いPodのあるノードを選ぼうとします。しかし、そのようなPodがPodDisruptionBudgetを持っており、プリエンプトするとPDBに反する場合はスケジューラーは優先度の高いPodのあるノードを選ぶこともあります。 + +複数のノードがプリエンプションの対象として利用可能で、上記の状況に当てはまらない場合、スケジューラーは優先度の最も低いノードを選択します。 + +## Podの優先度とQoSの相互作用 {#interactions-of-pod-priority-and-qos} + +Podの優先度と{{< glossary_tooltip text="QoSクラス" term_id="qos-class" >}}は直交する機能で、わずかに相互作用がありますが、デフォルトではQoSクラスによる優先度の設定の制約はありません。スケジューラーのプリエンプションのロジックはプリエンプションの対象を決めるときにQoSクラスは考慮しません。 +プリエンプションはPodの優先度を考慮し、優先度が最も低いものを候補とします。より優先度の高いPodは優先度の低いPodを追い出すだけではプリエンプトを起こしたPodのスケジューリングに不十分な場合と、`PodDisruptionBudget`により優先度の低いPodが保護されている場合のみ対象になります。 + +QoSとPodの優先度の両方を考慮するコンポーネントは[リソース不足によりkubeletがPodを追い出す](/docs/tasks/administer-cluster/out-of-resource/)のみです。 +kubeletは追い出すPodの順位付けを次の順で行います。枯渇したリソースを要求以上に使用しているか、優先度、枯渇したリソースの消費量の複数のPodの要求に対する相対値。 +詳細は[エンドユーザーのPodの追い出し](/docs/tasks/administer-cluster/out-of-resource/#evicting-end-user-pods)を参照してください。 + + +kubeletによるリソース不足時のPodの追い出しでは、リソースの消費が要求を超えないPodは追い出されません。優先度の低いPodのリソースの利用量がその要求を超えていなければ、追い出されることはありません。より優先度が高く、要求を超えてリソースを使用しているPodが追い出されます。 + + +## {{% heading "whatsnext" %}} + +* PriorityClassと関連付けてResourceQuotaを使用することに関して [デフォルトで優先度クラスの消費を制限する](/ja/docs/concepts/policy/resource-quotas/#limit-priority-class-consumption-by-default) diff --git a/content/ja/docs/concepts/containers/_index.md b/content/ja/docs/concepts/containers/_index.md index 5b10416c0f83c..fd3506ea40658 100755 --- a/content/ja/docs/concepts/containers/_index.md +++ b/content/ja/docs/concepts/containers/_index.md @@ -21,7 +21,7 @@ no_list: true ## コンテナイメージ [コンテナイメージ](/docs/concepts/containers/images/)はすぐに実行可能なソフトウェアパッケージで、アプリケーションの実行に必要なものをすべて含んています。コードと必要なランタイム、アプリケーションとシステムのライブラリ、そして必須な設定項目のデフォルト値を含みます。 -設計上、コンテナは不変で、既に実行中のコンテナのコードを変更することはできません。コンテナ化されたアプリケーションがあり変更したい場合は、変更を含んだ新しいコンテナをビルドし、コンテナを再作成して、更新されたイメージから起動する必要があります。 +設計上、コンテナは不変で、既に実行中のコンテナのコードを変更することはできません。コンテナ化されたアプリケーションがあり変更したい場合は、変更を含んだ新しいイメージをビルドし、コンテナを再作成して、更新されたイメージから起動する必要があります。 ## コンテナランタイム diff --git a/content/ja/docs/concepts/overview/what-is-kubernetes.md b/content/ja/docs/concepts/overview/what-is-kubernetes.md index d1b792c0da9ce..dab17c9b1bad8 100644 --- a/content/ja/docs/concepts/overview/what-is-kubernetes.md +++ b/content/ja/docs/concepts/overview/what-is-kubernetes.md @@ -17,7 +17,7 @@ card: Kubernetesは、宣言的な構成管理と自動化を促進し、コンテナ化されたワークロードやサービスを管理するための、ポータブルで拡張性のあるオープンソースのプラットフォームです。Kubernetesは巨大で急速に成長しているエコシステムを備えており、それらのサービス、サポート、ツールは幅広い形で利用可能です。 
-Kubernetesの名称は、ギリシャ語に由来し、操舵手やパイロットを意味しています。Googleは2014年にKubernetesプロジェクトをオープンソース化しました。Kubernetesは、本番環境で大規模なワークロードを稼働させた[Googleの15年以上の経験](/blog/2015/04/borg-predecessor-to-kubernetes/)と、コミュニティからの最高のアイディアや実践を組み合わせています。 +Kubernetesの名称は、ギリシャ語に由来し、操舵手やパイロットを意味しています。Googleは2014年にKubernetesプロジェクトをオープンソース化しました。Kubernetesは、本番環境で大規模なワークロードを稼働させた[Googleの15年以上の経験](/blog/2015/04/borg-predecessor-to-kubernetes/)と、コミュニティからの最高のアイディアや実践を組み合わせています。 ## 過去を振り返ってみると @@ -57,7 +57,7 @@ Kubernetesの名称は、ギリシャ語に由来し、操舵手やパイロッ Kubernetesは以下を提供します。 * **サービスディスカバリーと負荷分散** -Kubernetesは、DNS名または独自のIPアドレスを使ってコンテナを公開することができます。コンテナへのトラフィックが多い場合は、Kubernetesは負荷分散し、ネットワークトラフィックを振り分けることができるたため、デプロイが安定します。 +Kubernetesは、DNS名または独自のIPアドレスを使ってコンテナを公開することができます。コンテナへのトラフィックが多い場合は、Kubernetesは負荷分散し、ネットワークトラフィックを振り分けることができるため、デプロイが安定します。 * **ストレージ オーケストレーション** Kubernetesは、ローカルストレージやパブリッククラウドプロバイダーなど、選択したストレージシステムを自動でマウントすることができます。 * **自動化されたロールアウトとロールバック** diff --git a/content/ja/docs/concepts/policy/resource-quotas.md b/content/ja/docs/concepts/policy/resource-quotas.md index e7368a8f94c76..e9381ce89de71 100644 --- a/content/ja/docs/concepts/policy/resource-quotas.md +++ b/content/ja/docs/concepts/policy/resource-quotas.md @@ -22,7 +22,7 @@ weight: 10 - 異なる名前空間で異なるチームが存在するとき。現時点ではこれは自主的なものですが、将来的にはACLsを介してリソースクォータの設定を強制するように計画されています。 - 管理者は各名前空間で1つの`ResourceQuota`を作成します。 - ユーザーが名前空間内でリソース(Pod、Serviceなど)を作成し、クォータシステムが`ResourceQuota`によって定義されたハードリソースリミットを超えないことを保証するために、リソースの使用量をトラッキングします。 -- リソースの作成や更新がクォータの制約に違反しているとき、そのリクエストはHTTPステータスコード`403 FORBIDDEN`で失敗し、違反した制約を説明するメッセージが表示されます。 +- リソースの作成や更新がクォータの制約に違反しているとき、そのリクエストはHTTPステータスコード`403 FORBIDDEN`で失敗し、違反した制約を説明するメッセージが表示されます。 - `cpu`や`memory`といったコンピューターリソースに対するクォータが名前空間内で有効になっているとき、ユーザーはそれらの値に対する`requests`や`limits`を設定する必要があります。設定しないとクォータシステムがPodの作成を拒否します。 ヒント: コンピュートリソースの要求を設定しないPodに対してデフォルト値を強制するために、`LimitRanger`アドミッションコントローラーを使用してください。この問題を解決する例は[walkthrough](/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace/)で参照できます。 `ResourceQuota`のオブジェクト名は、有効な[DNSサブドメイン名](/ja/docs/concepts/overview/working-with-objects/names#dns-subdomain-names)である必要があります. 
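+
+例えば、名前空間内のPod数とメモリー要求量の合計を制限する最小限のResourceQuotaは次のように作成できます(名前空間名`myspace`や各値は説明のための仮のものです):
+
+```shell
+# 名前空間myspaceにResourceQuotaを作成します
+kubectl apply -n myspace -f - <<EOF
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: compute-quota
+spec:
+  hard:
+    pods: "10"
+    requests.memory: 20Gi
+EOF
+```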
@@ -484,7 +484,7 @@ count/secrets 1 4 リソースクォータは集約されたクラスターリソースを分割しますが、ノードに対しては何の制限も行わないことに注意して下さい。例: 複数の名前空間のPodは同一のノード上で稼働する可能性があります。 -## デフォルトで優先度クラスの消費を制限する +## デフォルトで優先度クラスの消費を制限する {#limit-priority-class-consumption-by-default} 例えば"cluster-services"のように、条件に一致するクォータオブジェクトが存在する場合に限り、特定の優先度のPodを名前空間で許可することが望ましい場合があります。 diff --git a/content/ja/docs/concepts/scheduling-eviction/assign-pod-node.md b/content/ja/docs/concepts/scheduling-eviction/assign-pod-node.md index 0733690f0b1b0..18b767cbb4cdf 100644 --- a/content/ja/docs/concepts/scheduling-eviction/assign-pod-node.md +++ b/content/ja/docs/concepts/scheduling-eviction/assign-pod-node.md @@ -140,9 +140,9 @@ Nodeアフィニティでは、`In`、`NotIn`、`Exists`、`DoesNotExist`、`Gt` `nodeSelector`と`nodeAffinity`の両方を指定した場合、Podは**両方の**条件を満たすNodeにスケジュールされます。 -`nodeAffinity`内で複数の`nodeSelectorTerms`を指定した場合、Podは**全ての**`nodeSelectorTerms`を満たしたNodeへスケジュールされます。 +`nodeAffinity`内で複数の`nodeSelectorTerms`を指定した場合、Podは**いずれかの**`nodeSelectorTerms`を満たしたNodeへスケジュールされます。 -`nodeSelectorTerms`内で複数の`matchExpressions`を指定した場合にはPodは**いずれかの**`matchExpressions`を満たしたNodeへスケジュールされます。 +`nodeSelectorTerms`内で複数の`matchExpressions`を指定した場合にはPodは**全ての**`matchExpressions`を満たしたNodeへスケジュールされます。 PodがスケジュールされたNodeのラベルを削除したり変更しても、Podは削除されません。 言い換えると、アフィニティはPodをスケジュールする際にのみ考慮されます。 diff --git a/content/ja/docs/concepts/security/overview.md b/content/ja/docs/concepts/security/overview.md index b50a4ea1a520c..0157b28f78cad 100644 --- a/content/ja/docs/concepts/security/overview.md +++ b/content/ja/docs/concepts/security/overview.md @@ -77,7 +77,7 @@ Kubernetesを保護する為には2つの懸念事項があります。 ### クラスター内のコンポーネント(アプリケーション) {#cluster-applications} -アプリケーションを対象にした攻撃に応じて、セキュリティの特定側面に焦点をあてたい場合があります。例:他のリソースとの連携で重要なサービス(サービスA)と、リソース枯渇攻撃に対して脆弱な別のワークロード(サービスB)が実行されている場合、サービスBのリソースを制限していないとサービスAが危険にさらされるリスクが高くなります。次の表はセキュリティの懸念事項とKubernetesで実行されるワークロードを保護するための推奨事項を示しています。 +アプリケーションを対象にした攻撃に応じて、セキュリティの特定側面に焦点をあてたい場合があります。例:他のリソースとの連携で重要なサービス(サービスA)と、リソース枯渇攻撃に対して脆弱な別のワークロード(サービスB)が実行されている場合、サービスBのリソースを制限していないとサービスAが危険にさらされるリスクが高くなります。次の表はセキュリティの懸念事項とKubernetesで実行されるワークロードを保護するための推奨事項を示しています。 ワークロードセキュリティに関する懸念事項 | 推奨事項 | diff --git a/content/ja/docs/concepts/services-networking/endpoint-slices.md b/content/ja/docs/concepts/services-networking/endpoint-slices.md index 5a678baec723d..24a588c29eb44 100644 --- a/content/ja/docs/concepts/services-networking/endpoint-slices.md +++ b/content/ja/docs/concepts/services-networking/endpoint-slices.md @@ -20,7 +20,7 @@ Serviceのすべてのネットワークエンドポイントが単一のEndpoin ## EndpointSliceリソース {#endpointslice-resource} -Kubernetes内ではEndpointSliceにはネットワークエンドポイントの集合へのリファレンスが含まれます。EndpointSliceコントローラーは、{{< glossary_tooltip text="セレクター" term_id="selector" >}}が指定されると、Kubernetes Serviceに対するEndpointSliceを自動的に作成します。これらのEndpointSliceにはServiceセレクターに一致する任意のPodへのリファレクンスが含まれます。EndpointSliceはネットワークエンドポイントをユニークなServiceとPortの組み合わせでグループ化します。EndpointSliceオブジェクトの名前は有効な[DNSサブドメイン名](/ja/docs/concepts/overview/working-with-objects/names#dns-subdomain-names)である必要があります。 +Kubernetes内ではEndpointSliceにはネットワークエンドポイントの集合へのリファレンスが含まれます。EndpointSliceコントローラーは、{{< glossary_tooltip text="セレクター" term_id="selector" >}}が指定されると、Kubernetes Serviceに対するEndpointSliceを自動的に作成します。これらのEndpointSliceにはServiceセレクターに一致する任意のPodへのリファレンスが含まれます。EndpointSliceはネットワークエンドポイントをユニークなServiceとPortの組み合わせでグループ化します。EndpointSliceオブジェクトの名前は有効な[DNSサブドメイン名](/ja/docs/concepts/overview/working-with-objects/names#dns-subdomain-names)である必要があります。 一例として、以下に`example`というKubernetes Serviceに対するサンプルのEndpointSliceリソースを示します。 diff --git 
a/content/ja/docs/concepts/services-networking/ingress-controllers.md b/content/ja/docs/concepts/services-networking/ingress-controllers.md
index f1af46c3a2a68..cbd652d17e38f 100644
--- a/content/ja/docs/concepts/services-networking/ingress-controllers.md
+++ b/content/ja/docs/concepts/services-networking/ingress-controllers.md
@@ -37,7 +37,7 @@ Ingressリソースが動作するためには、クラスターでIngressコン
 
 ## 複数のIngressコントローラーの使用 {#using-multiple-ingress-controllers}
 
-[Ingressコントローラーは、好きな数だけ](https://git.k8s.io/ingress-nginx/docs/user-guide/multiple-ingress.md#multiple-ingress-controllers))クラスターにデプロイすることができます。Ingressを作成する際には、クラスター内に複数のIngressコントローラーが存在する場合にどのIngressコントローラーを使用するかを示すために適切な[`ingress.class`](https://git.k8s.io/ingress-gce/docs/faq/README.md#how-do-i-run-multiple-ingress-controllers-in-the-same-cluster)のアノテーションを指定します。
+[Ingressコントローラーは、好きな数だけ](https://git.k8s.io/ingress-nginx/docs/user-guide/multiple-ingress.md#multiple-ingress-controllers)クラスターにデプロイすることができます。Ingressを作成する際には、クラスター内に複数のIngressコントローラーが存在する場合にどのIngressコントローラーを使用するかを示すために適切な[`ingress.class`](https://git.k8s.io/ingress-gce/docs/faq/README.md#how-do-i-run-multiple-ingress-controllers-in-the-same-cluster)のアノテーションを指定します。
 
 クラスを定義しない場合、クラウドプロバイダーはデフォルトのIngressコントローラーを使用する場合があります。
 
diff --git a/content/ja/docs/concepts/services-networking/service.md b/content/ja/docs/concepts/services-networking/service.md
index 2b3d6b26e0511..d6894e959eb39 100644
--- a/content/ja/docs/concepts/services-networking/service.md
+++ b/content/ja/docs/concepts/services-networking/service.md
@@ -712,7 +712,7 @@ NLBの背後にあるインスタンスに対してクライアントのトラ
 |------|----------|---------|------------|---------------------|
 | ヘルスチェック | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | VPC CIDR | kubernetes.io/rule/nlb/health=\<loadBalancerName\> |
 | クライアントのトラフィック | TCP | NodePort(s) | `.spec.loadBalancerSourceRanges` (デフォルト: `0.0.0.0/0`) | kubernetes.io/rule/nlb/client=\<loadBalancerName\> |
-| MTCによるサービスディスカバリー | ICMP | 3,4 | `.spec.loadBalancerSourceRanges` (デフォルト: `0.0.0.0/0`) | kubernetes.io/rule/nlb/mtu=\<loadBalancerName\> |
+| MTUによるサービスディスカバリー | ICMP | 3,4 | `.spec.loadBalancerSourceRanges` (デフォルト: `0.0.0.0/0`) | kubernetes.io/rule/nlb/mtu=\<loadBalancerName\> |
 
 どのクライアントIPがNLBにアクセス可能かを制限するためには、`loadBalancerSourceRanges`を指定してください。
 
diff --git a/content/ja/docs/concepts/workloads/controllers/deployment.md b/content/ja/docs/concepts/workloads/controllers/deployment.md
index 94e04e56ee8b6..e2d720323d138 100644
--- a/content/ja/docs/concepts/workloads/controllers/deployment.md
+++ b/content/ja/docs/concepts/workloads/controllers/deployment.md
@@ -31,8 +31,9 @@ Deploymentによって作成されたReplicaSetを管理しないでください
 
 * ReplicaSetをロールアウトするために[Deploymentの作成](#creating-a-deployment)を行う: ReplicaSetはバックグラウンドでPodを作成します。Podの作成が完了したかどうかは、ロールアウトのステータスを確認してください。
 * DeploymentのPodTemplateSpecを更新することにより[Podの新しい状態を宣言する](#updating-a-deployment): 新しいReplicaSetが作成され、Deploymentは指定された頻度で古いReplicaSetから新しいReplicaSetへのPodの移行を管理します。新しいReplicaSetはDeploymentのリビジョンを更新します。
 * Deploymentの現在の状態が不安定な場合、[Deploymentのロールバック](#rolling-back-a-deployment)をする: ロールバックによる各更新作業は、Deploymentのリビジョンを更新します。
-* より多くの負荷をさばけるように、[Deploymentをスケールアップ](#scaling-a-deployment)する
+* より多くの負荷をさばけるように、[Deploymentをスケールアップ](#scaling-a-deployment)する。
 * PodTemplateSpecに対する複数の修正を適用するために[Deploymentを停止(Pause)し](#pausing-and-resuming-a-deployment)、それを再開して新しいロールアウトを開始します。
+* [Deploymentのステータス](#deployment-status) をロールアウトが失敗したサインとして利用する。
 * 今後必要としない[古いReplicaSetのクリーンアップ](#clean-up-policy)
 
 ## Deploymentの作成 {#creating-a-deployment}
@@ -82,7 +83,7 @@ Deploymentによって作成されたReplicaSetを管理しないでください
 ```
クラスターにてDeploymentを調査するとき、以下のフィールドが出力されます。 * `NAME`は、クラスター内にあるDeploymentの名前一覧です。 - * `READY`は、ユーザーが使用できるアプリケーションのレプリカの数です。 + * `READY`は、ユーザーが使用できるアプリケーションのレプリカの数です。使用可能な数/理想的な数の形式で表示されます。 * `UP-TO-DATE`は、理想的な状態を満たすためにアップデートが完了したレプリカの数です。 * `AVAILABLE`は、ユーザーが利用可能なレプリカの数です。 * `AGE`は、アプリケーションが稼働してからの時間です。 @@ -133,7 +134,7 @@ Deploymentによって作成されたReplicaSetを管理しないでください {{< note >}} Deploymentに対して適切なセレクターとPodテンプレートのラベルを設定する必要があります(このケースでは`app: nginx`)。 -ラベルやセレクターを他のコントローラーと重複させないでください(他のDeploymentやStatefulSetを含む)。Kubernetesはユーザーがラベルを重複させることを止めないため、複数のコントローラーでセレクターの重複が発生すると、コントローラー間で衝突し予期せぬふるまいをすることになります。 +ラベルやセレクターを他のコントローラーと重複させないでください(他のDeploymentやStatefulSetを含む)。Kubernetesはユーザーがラベルを重複させることを阻止しないため、複数のコントローラーでセレクターの重複が発生すると、コントローラー間で衝突し予期せぬふるまいをすることになります。 {{< /note >}} ### pod-template-hashラベル @@ -146,7 +147,7 @@ Deploymentに対して適切なセレクターとPodテンプレートのラベ このラベルはDeploymentが管理するReplicaSetが重複しないことを保証します。このラベルはReplicaSetの`PodTemplate`をハッシュ化することにより生成され、生成されたハッシュ値はラベル値としてReplicaSetセレクター、Podテンプレートラベル、ReplicaSetが作成した全てのPodに対して追加されます。 -## Deploymentの更新 +## Deploymentの更新 {#updating-a-deployment} {{< note >}} Deploymentのロールアウトは、DeploymentのPodテンプレート(この場合`.spec.template`)が変更された場合にのみトリガーされます。例えばテンプレートのラベルもしくはコンテナーイメージが更新された場合です。Deploymentのスケールのような更新では、ロールアウトはトリガーされません。 @@ -589,13 +590,11 @@ Deploymentのローリングアップデートは、同時に複数のバージ ``` * クラスター内で、解決できない新しいイメージに更新します。 -* You update to a new image which happens to be unresolvable from inside the cluster. ```shell kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:sometag ``` 実行結果は以下のとおりです。 - The output is similar to this: ``` deployment.apps/nginx-deployment image updated ``` @@ -604,7 +603,8 @@ Deploymentのローリングアップデートは、同時に複数のバージ ```shell kubectl get rs ``` - 実行結果は以下のとおりです。 + + 実行結果は以下のとおりです。 ``` NAME DESIRED CURRENT READY AGE nginx-deployment-1989198191 5 5 0 9s @@ -615,24 +615,26 @@ Deploymentのローリングアップデートは、同時に複数のバージ 上記の例では、3つのレプリカが古いReplicaSetに追加され、2つのレプリカが新しいReplicaSetに追加されました。ロールアウトの処理では、新しいレプリカ数のPodが正常になったと仮定すると、最終的に新しいReplicaSetに全てのレプリカを移動させます。これを確認するためには以下のコマンドを実行して下さい。 - ```shell - kubectl get deploy - ``` - 実行結果は以下のとおりです。 - ``` - NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE - nginx-deployment 15 18 7 8 7m - ``` -  ロールアウトのステータスでレプリカがどのように各ReplicaSetに追加されるか確認できます。 - ```shell - kubectl get rs - ``` - 実行結果は以下のとおりです。 - ``` - NAME DESIRED CURRENT READY AGE - nginx-deployment-1989198191 7 7 0 7m - nginx-deployment-618515232 11 11 11 7m - ``` +```shell +kubectl get deploy +``` + +実行結果は以下のとおりです。 +``` +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +nginx-deployment 15 18 7 8 7m +``` +ロールアウトのステータスでレプリカがどのように各ReplicaSetに追加されるか確認できます。 +```shell +kubectl get rs +``` + +実行結果は以下のとおりです。 +``` +NAME DESIRED CURRENT READY AGE +nginx-deployment-1989198191 7 7 0 7m +nginx-deployment-618515232 11 11 11 7m +``` ## Deployment更新の一時停止と再開 {#pausing-and-resuming-a-deployment} @@ -752,7 +754,7 @@ Deploymentのローリングアップデートは、同時に複数のバージ nginx-3926361531 3 3 3 28s ``` {{< note >}} -一時停止したDeploymentの稼働を再開させない限り、Deploymentをロールバックすることはできません。 +Deploymentの稼働を再開させない限り、一時停止したDeploymentをロールバックすることはできません。 {{< /note >}} ## Deploymentのステータス {#deployment-status} @@ -937,13 +939,13 @@ Deploymentが管理する古いReplicaSetをいくつ保持するかを指定す ## カナリアパターンによるデプロイ -Deploymentを使って一部のユーザーやサーバーに対してリリースのロールアウトをしたい場合、[リソースの管理](/docs/concepts/cluster-administration/manage-deployment/#canary-deployments)に記載されているカナリアパターンに従って、リリース毎に1つずつ、複数のDeploymentを作成できます。 
+Deploymentを使って一部のユーザーやサーバーに対してリリースのロールアウトをしたい場合、[リソースの管理](/ja/docs/concepts/cluster-administration/manage-deployment/#canary-deployments-カナリアデプロイ)に記載されているカナリアパターンに従って、リリース毎に1つずつ、複数のDeploymentを作成できます。 ## Deployment Specの記述 他の全てのKubernetesの設定と同様に、Deploymentは`.apiVersion`、`.kind`や`.metadata`フィールドを必要とします。 -設定ファイルの利用に関する情報は[アプリケーションのデプロイ](/ja/docs/tasks/run-application/run-stateless-application-deployment/)を参照してください。コンテナーの設定に関しては[リソースを管理するためのkubectlの使用](/docs/concepts/overview/working-with-objects/object-management/)を参照してください。 -Deploymentオブジェクトの名前は、有効な[DNSサブドメイン名](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names)でなければなりません。 +設定ファイルの利用に関する情報は[アプリケーションのデプロイ](/ja/docs/tasks/run-application/run-stateless-application-deployment/)を参照してください。コンテナーの設定に関しては[リソースを管理するためのkubectlの使用](/ja/docs/concepts/overview/working-with-objects/object-management/)を参照してください。 +Deploymentオブジェクトの名前は、有効な[DNSサブドメイン名](/ja/docs/concepts/overview/working-with-objects/names#dns-subdomain-names)でなければなりません。 Deploymentは[`.spec`セクション](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status)も必要とします。 ### Podテンプレート @@ -992,25 +994,25 @@ Deploymentのセレクターに一致するラベルを持つPodを直接作成 `.spec.strategy.type==RollingUpdate`と指定されているとき、DeploymentはローリングアップデートによりPodを更新します。ローリングアップデートの処理をコントロールするために`maxUnavailable`と`maxSurge`を指定できます。 -##### maxUnavailable +##### Max Unavailable {#max-unavailable} `.spec.strategy.rollingUpdate.maxUnavailable`はオプションのフィールドで、更新処理において利用不可となる最大のPod数を指定します。値は絶対値(例: 5)を指定するか、理想状態のPodのパーセンテージを指定します(例: 10%)。パーセンテージを指定した場合、絶対値は小数切り捨てされて計算されます。`.spec.strategy.rollingUpdate.maxSurge`が0に指定されている場合、この値を0にできません。デフォルトでは25%です。 例えば、この値が30%と指定されているとき、ローリングアップデートが開始すると古いReplicaSetはすぐに理想状態の70%にスケールダウンされます。一度新しいPodが稼働できる状態になると、古いReplicaSetはさらにスケールダウンされ、続いて新しいReplicaSetがスケールアップされます。この間、利用可能なPodの総数は理想状態のPodの少なくとも70%以上になるように保証されます。 -##### maxSurge +##### Max Surge {#max-surge} `.spec.strategy.rollingUpdate.maxSurge`はオプションのフィールドで、理想状態のPod数を超えて作成できる最大のPod数を指定します。値は絶対値(例: 5)を指定するか、理想状態のPodのパーセンテージを指定します(例: 10%)。パーセンテージを指定した場合、絶対値は小数切り上げで計算されます。`MaxUnavailable`が0に指定されている場合、この値を0にできません。デフォルトでは25%です。 例えば、この値が30%と指定されているとき、ローリングアップデートが開始すると新しいReplicaSetはすぐに更新されます。このとき古いPodと新しいPodの総数は理想状態の130%を超えないように更新されます。一度古いPodが削除されると、新しいReplicaSetはさらにスケールアップされます。この間、利用可能なPodの総数は理想状態のPodに対して最大130%になるように保証されます。 -### progressDeadlineSeconds +### Progress Deadline Seconds `.spec.progressDeadlineSeconds`はオプションのフィールドで、システムがDeploymentの[更新に失敗](#failed-deployment)したと判断するまでに待つ秒数を指定します。更新に失敗したと判断されたとき、リソースのステータスは`Type=Progressing`、`Status=False`かつ`Reason=ProgressDeadlineExceeded`となるのを確認できます。DeploymentコントローラーはDeploymentの更新のリトライし続けます。デフォルト値は600です。今後、自動的なロールバックが実装されたとき、更新失敗状態になるとすぐにDeploymentコントローラーがロールバックを行うようになります。 この値が指定されているとき、`.spec.minReadySeconds`より大きい値を指定する必要があります。 -### minReadySeconds {#min-ready-seconds} +### Min Ready Seconds {#min-ready-seconds} `.spec.minReadySeconds`はオプションのフィールドで、新しく作成されたPodが利用可能となるために、最低どれくらいの秒数コンテナーがクラッシュすることなく稼働し続ければよいかを指定するものです。デフォルトでは0です(Podは作成されるとすぐに利用可能と判断されます)。Podが利用可能と判断された場合についてさらに学ぶために[Container Probes](/ja/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)を参照してください。 @@ -1020,7 +1022,7 @@ Deploymentのリビジョン履歴は、Deploymentが管理するReplicaSetに `.spec.revisionHistoryLimit`はオプションのフィールドで、ロールバック可能な古いReplicaSetの数を指定します。この古いReplicaSetは`etcd`内のリソースを消費し、`kubectl get rs`の出力結果を見にくくします。Deploymentの各リビジョンの設定はReplicaSetに保持されます。このため一度古いReplicaSetが削除されると、そのリビジョンのDeploymentにロールバックすることができなくなります。デフォルトでは10もの古いReplicaSetが保持されます。しかし、この値の最適値は新しいDeploymentの更新頻度と安定性に依存します。 
-さらに詳しく言うと、この値を0にすると、0のレプリカを持つ古い全てのReplicaSetが削除されます。このケースでは、リビジョン履歴が完全に削除されているため新しいDeploymentのロールアウトを完了することができません。 +さらに詳しく言うと、この値を0にすると、0のレプリカを持つ古い全てのReplicaSetが削除されます。このケースでは、リビジョン履歴が完全に削除されているため新しいDeploymentのロールアウトを元に戻すことができません。 ### paused diff --git a/content/ja/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/ja/docs/concepts/workloads/controllers/ttlafterfinished.md index f863cb4e92820..728e0346fea2b 100644 --- a/content/ja/docs/concepts/workloads/controllers/ttlafterfinished.md +++ b/content/ja/docs/concepts/workloads/controllers/ttlafterfinished.md @@ -52,4 +52,4 @@ Kubernetesにおいてタイムスキューを避けるために、全てのNode * [Jobの自動クリーンアップ](/ja/docs/concepts/workloads/controllers/job/#clean-up-finished-jobs-automatically) -* [設計ドキュメント](https://github.com/kubernetes/enhancements/blob/master/keps/sig-apps/0026-ttl-after-finish.md) +* [設計ドキュメント](https://github.com/kubernetes/enhancements/blob/master/keps/sig-apps/592-ttl-after-finish/README.md) diff --git a/content/ja/docs/concepts/workloads/pods/ephemeral-containers.md b/content/ja/docs/concepts/workloads/pods/ephemeral-containers.md index 0fa45de94bb19..beb92b3b882cd 100644 --- a/content/ja/docs/concepts/workloads/pods/ephemeral-containers.md +++ b/content/ja/docs/concepts/workloads/pods/ephemeral-containers.md @@ -42,7 +42,7 @@ weight: 80 エフェメラルコンテナを利用する場合には、他のコンテナ内のプロセスにアクセスできるように、[プロセス名前空間の共有](/ja/docs/tasks/configure-pod-container/share-process-namespace/)を有効にすると便利です。 -エフェメラルコンテナを利用してトラブルシューティングを行う例については、[デバッグ用のエフェメラルコンテナを使用してデバッグする](/docs/tasks/debug-application-cluster/debug-running-pod/#debugging-with-ephemeral-debug-container)を参照してください。 +エフェメラルコンテナを利用してトラブルシューティングを行う例については、[デバッグ用のエフェメラルコンテナを使用してデバッグする](/docs/tasks/debug-application-cluster/debug-running-pod/#ephemeral-container)を参照してください。 ## Ephemeral containers API @@ -59,7 +59,7 @@ weight: 80 "apiVersion": "v1", "kind": "EphemeralContainers", "metadata": { - "name": "example-pod" + "name": "example-pod" }, "ephemeralContainers": [{ "command": [ diff --git a/content/ja/docs/contribute/new-content/_index.md b/content/ja/docs/contribute/new-content/_index.md new file mode 100644 index 0000000000000..f9cb2e1301495 --- /dev/null +++ b/content/ja/docs/contribute/new-content/_index.md @@ -0,0 +1,4 @@ +--- +title: 新しいコンテンツの貢献 +weight: 20 +--- diff --git a/content/ja/docs/contribute/new-content/overview.md b/content/ja/docs/contribute/new-content/overview.md new file mode 100644 index 0000000000000..e3db3744e92a2 --- /dev/null +++ b/content/ja/docs/contribute/new-content/overview.md @@ -0,0 +1,54 @@ +--- +title: 新しいコンテンツの貢献の概要 +linktitle: 概要 +content_type: concept +main_menu: true +weight: 5 +--- + + + +このセクションでは、新しいコンテンツの貢献を行う前に知っておくべき情報を説明します。 + + + +## 貢献の基本 + +- KubernetesのドキュメントはMarkdownで書き、Kubernetesのウェブサイトは[Hugo](https://gohugo.io/)を使ってビルドします。 +- ソースは[GitHub](https://github.com/kubernetes/website)にあります。Kubernetesのドキュメントは`/content/en/docs/`にあります。リファレンスドキュメントの一部は、`update-imported-docs/`ディレクトリ内のスクリプトから自動的に生成されます。 +- [Page content types](/docs/contribute/style/page-content-types/)にHugoによるドキュメントのコンテンツの見え方を記述しています。 +- 標準のHugoのshortcodeに加えて、多数の[カスタムのHugo shortcode](/docs/contribute/style/hugo-shortcodes/)を使用してコンテンツの見え方をコントロールしています。 +- ドキュメントのソースは`/content/`内にある複数の言語で利用できます。各言語はそれぞれ[ISO 639-1標準](https://www.loc.gov/standards/iso639-2/php/code_list.php)で定義された2文字のコードの名前のフォルダを持ちます。たとえば、英語のドキュメントのソースは`/content/en/docs/`内に置かれています。 +- 複数言語でのドキュメントへの貢献や新しい翻訳の開始に関する情報については、[Kubernetesのドキュメントを翻訳する](/docs/contribute/localization)を参照してください。 + +## 始める前に {#before-you-begin} + 
+### CNCF CLAに署名する {#sign-the-cla}
+
+すべてのKubernetesのコントリビューターは、[コントリビューターガイド](https://github.com/kubernetes/community/blob/master/contributors/guide/README.md)を読み、[Contributor License Agreement(コントリビューターライセンス契約、CLA)への署名](https://github.com/kubernetes/community/blob/master/CLA.md)を**必ず行わなければなりません**。
+
+CLAへの署名が完了していないコントリビューターからのpull requestは、自動化されたテストで失敗します。名前とメールアドレスは`git config`コマンドで表示されるものに一致し、gitの名前とメールアドレスはCNCF CLAで使われたものに一致しなければなりません。
+
+### どのGitブランチを使用するかを選ぶ
+
+pull requestをオープンするときは、どのブランチをベースにして作業するかをあらかじめ知っておく必要があります。
+
+シナリオ | ブランチ
+:---------|:------------
+現在のリリースに対する既存または新しい英語のコンテンツ | `master`
+機能変更のリリースに対するコンテンツ | 機能変更が含まれるメジャーおよびマイナーバージョンに対応する、`dev-`というパターンのブランチを使います。たとえば、機能変更が`v{{< skew nextMinorVersion >}}`に含まれる場合、ドキュメントの変更は``dev-{{< skew nextMinorVersion >}}``ブランチに追加します。
+他の言語内のコンテンツ(翻訳) | 各翻訳対象の言語のルールに従います。詳しい情報は、[翻訳のブランチ戦略](/docs/contribute/localization/#branching-strategy)を読んでください。
+
+それでも選ぶべきブランチがわからないときは、Slack上の`#sig-docs`チャンネルで質問してください。
+
+{{< note >}}
+すでにpull requestを作成していて、ベースブランチが間違っていたことに気づいた場合は、作成者であるあなただけがベースブランチを変更できます。
+{{< /note >}}
+
+### 言語ごとのPR
+
+pull requestはPRごとに1つの言語に限定してください。複数の言語に同一の変更を行う必要がある場合は、言語ごとに別々のPRを作成してください。
+
+## コントリビューターのためのツール
+
+`kubernetes/website`リポジトリ内の[doc contributors tools](https://github.com/kubernetes/website/tree/master/content/en/docs/doc-contributor-tools)ディレクトリには、コントリビューターとしての旅を楽にしてくれるツールがあります。
diff --git a/content/ja/docs/contribute/participate/_index.md b/content/ja/docs/contribute/participate/_index.md
new file mode 100644
index 0000000000000..bf21148c40757
--- /dev/null
+++ b/content/ja/docs/contribute/participate/_index.md
@@ -0,0 +1,102 @@
+---
+title: SIG Docsへの参加
+content_type: concept
+weight: 60
+card:
+  name: contribute
+  weight: 60
+---
+
+
+
+SIG Docsは、Kubernetesプロジェクト内の
+[special interest groups](https://github.com/kubernetes/community/blob/master/sig-list.md)の1つであり、
+Kubernetes全体のドキュメントの作成、更新、および保守に重点を置いています。
+SIGの詳細については、[SIG DocsのGitHubリポジトリ](https://github.com/kubernetes/community/blob/master/sig-list.md)を参照してください。
+
+SIG Docsは、すべての寄稿者からのコンテンツとレビューを歓迎します。
+誰でもPull Request(PR)を開くことができ、コンテンツに関するissueを提出したり、進行中のPull Requestにコメントしたりできます。
+
+あなたは、[member](/docs/contribute/participate/roles-and-responsibilities/#members)や、
+[reviewer](/docs/contribute/participate/roles-and-responsibilities/#reviewers)、
+[approver](/docs/contribute/participate/roles-and-responsibilities/#approvers)になることもできます。
+これらの役割にはより多くのアクセスが必要であり、変更を承認およびコミットするための特定の責任が伴います。
+Kubernetesコミュニティ内でメンバーシップがどのように機能するかについての詳細は、
+[community-membership](https://github.com/kubernetes/community/blob/master/community-membership.md)
+をご覧ください。
+
+このドキュメントの残りの部分では、kubernetesの中で最も広く公開されている
+Kubernetesのウェブサイトとドキュメントの管理を担当しているSIG Docsの中で、これらの役割がどのように機能するのかを概説します。
+
+
+
+
+## SIG Docs chairperson
+
+SIG Docsを含む各SIGは、議長として機能する1人以上のSIGメンバーを選択します。
+これらは、SIG DocsとKubernetes organizationの他の部分との連絡先です。
+それらには、Kubernetesプロジェクト全体の構造と、SIG Docsがその中でどのように機能するかについての広範な知識が必要です。
+現在のchairpersonのリストについては、
+[Leadership](https://github.com/kubernetes/community/tree/master/sig-docs#leadership)
+を参照してください。
+
+## SIG Docs teamsと自動化
+
+SIG Docsの自動化は、GitHub teamsとOWNERSファイルの2つの異なるメカニズムに依存しています。
+
+### GitHub teams
+
+GitHubには、二つのSIG Docs
+[teams](https://github.com/orgs/kubernetes/teams?query=sig-docs)
+カテゴリがあります:
+
+- `@sig-docs-{language}-owners`は承認者かつリードです。
+- `@sig-docs-{language}-reviewers` はレビュアーです。
+
+それぞれをGitHubコメントの`@name`で参照して、そのグループの全員とコミュニケーションできます。
+
+ProwチームとGitHub teamsが完全に一致せずに重複する場合があります。
+問題の割り当て、Pull Request、およびPR承認のサポートのために、自動化ではOWNERSファイルからの情報を使用します。
+
+### OWNERSファイルとfront-matter
+
+Kubernetesプロジェクトは、GitHubのissueとPull Requestに関連する自動化のためにprowと呼ばれる自動化ツールを使用します。
+[Kubernetes Webサイトリポジトリ](https://github.com/kubernetes/website)
+は、2つの[prowプラグイン](https://github.com/kubernetes/test-infra/tree/master/prow/plugins)を使用します:
+
+- blunderbuss
+- approve
+
+これらの2つのプラグインは`kubernetes/website`のGitHubリポジトリのトップレベルにある
+[OWNERS](https://github.com/kubernetes/website/blob/master/OWNERS)ファイルと、
+[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS_ALIASES)ファイルを使用して、
+リポジトリ内でのprowの動作を制御します。
+
+OWNERSファイルには、SIG Docsのレビュー担当者および承認者であるユーザーのリストが含まれています。
+OWNERSファイルはサブディレクトリに存在することもでき、そのサブディレクトリとその子孫のファイルのレビュー担当者または承認者として機能できるユーザーを上書きできます。
+一般的なOWNERSファイルの詳細については、
+[OWNERS](https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md)を参照してください。
+
+さらに、個々のMarkdownファイルは、個々のGitHubユーザー名またはGitHubグループを一覧表示することにより、そのfront-matterでレビュー担当者と承認者を一覧表示できます。
+
+OWNERSファイルとMarkdownファイルのfront-matterの組み合わせにより、PRの技術的および編集上のレビューを誰に依頼するかについてPRの所有者が自動化システムから得るアドバイスが決まります。
+
+## マージの仕組み
+
+Pull Requestがコンテンツの公開に使用されるブランチにマージされると、そのコンテンツは https://kubernetes.io に公開されます。
+公開されたコンテンツの品質を高くするために、Pull RequestのマージはSIG Docsの承認者に限定しています。仕組みは次のとおりです。
+
+- Pull Requestに`lgtm`ラベルと`approve`ラベルの両方があり、`hold`ラベルがなく、すべてのテストに合格すると、Pull Requestは自動的にマージされます。
+- Kubernetes organizationのメンバーとSIG Docsの承認者はコメントを追加して、特定のPull Requestが自動的にマージされないようにすることができます(`/hold`コメントを追加するか、`/lgtm`コメントを保留します)。
+- Kubernetesメンバーは誰でも、`/lgtm`コメントを追加することで`lgtm`ラベルを追加できます。
+- `/approve`コメントを追加してPull Requestをマージできるのは、SIG Docsの承認者だけです。一部の承認者は、[PR Wrangler](/docs/contribute/participate/pr-wranglers/)や[SIG Docsのchairperson](#sig-docs-chairperson)など、追加の特定の役割も実行します。
+
+
+
+## {{% heading "whatsnext" %}}
+
+Kubernetesドキュメントへの貢献の詳細については、以下を参照してください:
+
+- [Contributing new content](/docs/contribute/new-content/overview/)
+- [Reviewing content](/docs/contribute/review/reviewing-prs)
+- [ドキュメントスタイルの概要](/ja/docs/contribute/style/)
diff --git a/content/ja/docs/contribute/review/reviewing-prs.md b/content/ja/docs/contribute/review/reviewing-prs.md
new file mode 100644
index 0000000000000..6659d4635463f
--- /dev/null
+++ b/content/ja/docs/contribute/review/reviewing-prs.md
@@ -0,0 +1,86 @@
+---
+title: プルリクエストのレビュー
+content_type: concept
+main_menu: true
+weight: 10
+---
+
+
+
+ドキュメントのプルリクエストは誰でもレビューすることができます。Kubernetesのwebsiteリポジトリで[pull requests](https://github.com/kubernetes/website/pulls)のセクションに移動し、open状態のプルリクエストを確認してください。
+
+ドキュメントのプルリクエストのレビューは、Kubernetesコミュニティに自分を知ってもらうためのよい方法の1つです。コードベースについて学んだり、他のコントリビューターとの信頼関係を築く助けともなるはずです。
+
+レビューを行う前には、以下のことを理解しておくとよいでしょう。
+
+- [コンテンツガイド](/docs/contribute/style/content-guide/)と[スタイルガイド](/docs/contribute/style/style-guide/)を読んで、有益なコメントを残せるようにする。
+- Kubernetesのドキュメントコミュニティにおける[役割と責任](/docs/contribute/participate/roles-and-responsibilities/)の違いを理解する。
+
+
+
+## はじめる前に
+
+レビューを始める前に、以下のことを心に留めてください。
+
+- [CNCFの行動規範](https://github.com/cncf/foundation/blob/master/code-of-conduct.md)を読み、いかなる時にも行動規範にしたがって行動するようにする。
+- 礼儀正しく、思いやりを持ち、助け合う気持ちを持つ。
+- 変更点だけでなく、PRのポジティブな側面についてもコメントする。
+- 相手の気持ちに共感して、自分のレビューが相手にどのように受け取られるのかをよく意識する。
+- 相手の善意を前提として、疑問点を明確にする質問をする。
+- 経験を積んだコントリビューターの場合、コンテンツに大幅な変更が必要な新規のコントリビューターとペアを組んで作業に取り組むことを考える。
+
+## レビューのプロセス
+
+一般に、コンテンツや文体に対するプルリクエストは、英語でレビューを行います。
+
+1. 
[https://github.com/kubernetes/website/pulls](https://github.com/kubernetes/website/pulls)に移動します。Kubernetesのウェブサイトとドキュメントに対するopen状態のプルリクエスト一覧が表示されます。 + +2. open状態のPRに、以下に示すラベルを1つ以上使って絞り込みます。 + + - `cncf-cla: yes` (推奨): CLAにサインしていないコントリビューターが提出したPRはマージできません。詳しい情報は、[CLAの署名](/docs/contribute/new-content/overview/#sign-the-cla)を読んでください。 + - `language/en` (推奨): 英語のPRだけに絞り込みます。 + - `size/`: 特定の大きさのPRだけに絞り込みます。レビューを始めたばかりの人は、小さなPRから始めてください。 + + さらに、PRがwork in progressとしてマークされていないことも確認してください。`work in progress`ラベルの付いたPRは、まだレビューの準備ができていない状態です。 + +3. レビューするPRを選んだら、以下のことを行い、変更点について理解します。 + - PRの説明を読み、行われた変更について理解し、関連するissueがあればそれも読みます。 + - 他のレビュアのコメントがあれば読みます。 + - **Files changed**タブをクリックし、変更されたファイルと行を確認します。 + - **Conversation**タブの下にあるPRのbuild checkセクションまでスクロールし、**deploy/netlify**の行の**Details**リンクをクリックして、Netlifyのプレビュービルドで変更点をプレビューします。 + +4. **Files changed**タブに移動してレビューを始めます。 + 1. コメントしたい場合は行の横の`+`マークをクリックします。 + 2. その行に関するコメントを書き、**Add single comment**(1つのコメントだけを残したい場合)または**Start a review**(複数のコメントを行いたい場合)のいずれかをクリックします。 + 3. コメントをすべて書いたら、ページ上部の**Review changes**をクリックします。ここでは、レビューの要約を追加できます(コントリビューターにポジティブなコメントも書きましょう!)。必要に応じて、PRを承認したり、コメントしたり、変更をリクエストします。新しいコントリビューターの場合は**Comment**だけが行えます。 + +## レビューのチェックリスト + +レビューするときは、最初に以下の点を確認してみてください。 + +### 言語と文法 + +- 言語や文法に明らかな間違いはないですか? もっとよい言い方はないですか? +- もっと簡単な単語に置き換えられる複雑な単語や古い単語はありませんか? +- 使われている単語や専門用語や言い回しで差別的ではない別の言葉に置き換えられるものはありませんか? +- 言葉選びや大文字の使い方は[style guide](/docs/contribute/style/style-guide/)に従っていますか? +- もっと短くしたり単純な文に書き換えられる長い文はありませんか? +- 箇条書きやテーブルでもっとわかりやすく表現できる長いパラグラフはありませんか? + +### コンテンツ + +- 同様のコンテンツがKubernetesのサイト上のどこかに存在しませんか? +- コンテンツが外部サイト、特定のベンダー、オープンソースではないドキュメントなどに過剰にリンクを張っていませんか? + +### ウェブサイト + +- PRはページ名、slug/alias、アンカーリンクの変更や削除をしていますか? その場合、このPRの変更の結果、リンク切れは発生しませんか? ページ名を変更してslugはそのままにするなど、他の選択肢はありませんか? +- PRは新しいページを作成するものですか? その場合、次の点に注意してください。 + - ページは正しい[page content type](/docs/contribute/style/page-content-types/)と関係するHugoのshortcodeを使用していますか? + - セクションの横のナビゲーション(または全体)にページは正しく表示されますか? + - ページは[Docs Home](/docs/home/)に一覧されますか? +- Netlifyのプレビューで変更は確認できますか? 
特にリスト、コードブロック、テーブル、備考、画像などに注意してください。
+
+### その他
+
+PRに関して誤字や空白などの小さな問題を指摘する場合は、コメントの前に`nit:`と書いてください。こうすることで、PRの作者は問題が深刻なものではないことが分かります。
diff --git a/content/ja/docs/home/supported-doc-versions.md b/content/ja/docs/home/supported-doc-versions.md
index 0ca6fcee64738..7cfce77754d9a 100644
--- a/content/ja/docs/home/supported-doc-versions.md
+++ b/content/ja/docs/home/supported-doc-versions.md
@@ -1,28 +1,11 @@
 ---
-title: Kubernetesドキュメントがサポートしているバージョン
-content_type: concept
+title: 利用可能なドキュメントバージョン
+content_type: custom
+layout: supported-versions
 card:
   name: about
   weight: 10
-  title: ドキュメントがサポートしているバージョン
+  title: 利用可能なドキュメントバージョン
 ---
 
-
-本ウェブサイトには、現行版とその直前4バージョンのKubernetesドキュメントがあります。
-
-
-
-
-
-## 現行版
-
-現在のバージョンは[{{< param "version" >}}](/)です。
-
-## 以前のバージョン
-
-{{< versions-other >}}
-
-
-
-
diff --git a/content/ja/docs/reference/_index.md b/content/ja/docs/reference/_index.md
index 0496a730c441e..aca4c278b5a36 100644
--- a/content/ja/docs/reference/_index.md
+++ b/content/ja/docs/reference/_index.md
@@ -16,7 +16,7 @@ content_type: concept
 
 ## APIリファレンス
 
-* [Kubernetes API概要](/docs/reference/using-api/api-overview/) - Kubernetes APIの概要です。
+* [Kubernetes API概要](/docs/reference/using-api/) - Kubernetes APIの概要です。
 * [Kubernetes APIリファレンス {{< latest-version >}}](/docs/reference/generated/kubernetes-api/{{< latest-version >}}/)
 
 ## APIクライアントライブラリー
@@ -30,9 +30,9 @@ content_type: concept
 
 ## CLIリファレンス
 
-* [kubectl](/docs/reference/kubectl/overview/) - コマンドの実行やKubernetesクラスターの管理に使う主要なCLIツールです。
+* [kubectl](/ja/docs/reference/kubectl/overview/) - コマンドの実行やKubernetesクラスターの管理に使う主要なCLIツールです。
 * [JSONPath](/ja/docs/reference/kubectl/jsonpath/) - kubectlで[JSONPath記法](https://goessner.net/articles/JsonPath/)を使うための構文ガイドです。
-* [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) - セキュアなKubernetesクラスターを簡単にプロビジョニングするためのCLIツールです。
+* [kubeadm](/ja/docs/reference/setup-tools/kubeadm/) - セキュアなKubernetesクラスターを簡単にプロビジョニングするためのCLIツールです。
 
 ## コンポーネントリファレンス
 
diff --git a/content/ja/docs/reference/access-authn-authz/rbac.md b/content/ja/docs/reference/access-authn-authz/rbac.md
index fee25f323e95b..04409b3b8ea90 100644
--- a/content/ja/docs/reference/access-authn-authz/rbac.md
+++ b/content/ja/docs/reference/access-authn-authz/rbac.md
@@ -43,7 +43,7 @@ ClusterRolesにはいくつかの用途があります。ClusterRoleを利用し
 
 2. Namespaceに属するリソースに対する権限を定義し、すべてのNamespaceにわたって付与する
 3. 
クラスター単位でスコープされているリソースに対するアクセス許可を定義する
 
-NamespaceでRoleを定義する場合は、Roleを使用します。クラスター全体でRoleを定義する婆は、ClusterRoleを使用します
+NamespaceでRoleを定義する場合は、Roleを使用します。クラスター全体でRoleを定義する場合は、ClusterRoleを使用します。
 
 #### Roleの例
 
diff --git a/content/ja/docs/reference/command-line-tools-reference/kubelet-authentication-authorization.md b/content/ja/docs/reference/command-line-tools-reference/kubelet-authentication-authorization.md
new file mode 100644
index 0000000000000..fe7a1626fb617
--- /dev/null
+++ b/content/ja/docs/reference/command-line-tools-reference/kubelet-authentication-authorization.md
@@ -0,0 +1,83 @@
+---
+title: Kubelet 認証/認可
+---
+
+
+## 概要
+
+kubeletのHTTPSエンドポイントは、さまざまな感度のデータへのアクセスを提供するAPIを公開し、
+ノードとコンテナ内のさまざまなレベルの権限でタスクを実行できるようにします。
+
+このドキュメントでは、kubeletのHTTPSエンドポイントへのアクセスを認証および認可する方法について説明します。
+
+## Kubelet 認証
+
+デフォルトでは、他の構成済み認証方法によって拒否されないkubeletのHTTPSエンドポイントへのリクエストは
+匿名リクエストとして扱われ、ユーザー名は`system:anonymous`、
+グループは`system:unauthenticated`になります。
+
+匿名アクセスを無効にし、認証されていないリクエストに対して`401 Unauthorized`応答を送信するには:
+
+* `--anonymous-auth=false`フラグでkubeletを起動します。
+
+kubeletのHTTPSエンドポイントに対するX509クライアント証明書認証を有効にするには:
+
+* `--client-ca-file`フラグでkubeletを起動し、クライアント証明書を確認するためのCAバンドルを提供します。
+* `--kubelet-client-certificate`および`--kubelet-client-key`フラグを使用してapiserverを起動します。
+* 詳細については、[apiserver認証ドキュメント](/ja/docs/reference/access-authn-authz/authentication/#x509-client-certs)を参照してください。
+
+APIベアラートークン(サービスアカウントトークンを含む)を使用して、kubeletのHTTPSエンドポイントへの認証を行うには:
+
+* APIサーバーで`authentication.k8s.io/v1beta1`グループが有効になっていることを確認します。
+* `--authentication-token-webhook`および`--kubeconfig`フラグを使用してkubeletを起動します。
+* kubeletは、構成済みのAPIサーバーで`TokenReview` APIを呼び出して、ベアラートークンからユーザー情報を判別します。
+
+## Kubelet 認可
+
+認証に成功したリクエスト(匿名リクエストを含む)は、その後認可されます。デフォルトの認可モードは、すべてのリクエストを許可する`AlwaysAllow`です。
+
+kubelet APIへのアクセスを細分化する理由としては、次のようなものが考えられます:
+
+* 匿名認証は有効になっていますが、匿名ユーザーがkubeletのAPIを呼び出す機能は制限する必要があります。
+* ベアラートークン認証は有効になっていますが、kubeletのAPIを呼び出す任意のAPIユーザー(サービスアカウントなど)の機能を制限する必要があります。
+* クライアント証明書の認証は有効になっていますが、構成されたCAによって署名されたクライアント証明書の一部のみがkubeletのAPIの使用を許可されている必要があります。
+
+kubeletのAPIへのアクセスを細分化するには、APIサーバーに認可を委任します:
+
+* APIサーバーで`authorization.k8s.io/v1beta1` APIグループが有効になっていることを確認します。
+* `--authorization-mode=Webhook`と`--kubeconfig`フラグでkubeletを起動します。
+* kubeletは、構成されたAPIサーバーで`SubjectAccessReview` APIを呼び出して、各リクエストが認可されているかどうかを判断します。
+
+kubeletは、apiserverと同じ[リクエスト属性](/docs/reference/access-authn-authz/authorization/#review-your-request-attributes)アプローチを使用してAPIリクエストを認可します。
+
+動詞は、受け取ったリクエストのHTTP動詞から決定されます:
+
+HTTP動詞 | リクエスト動詞
+----------|---------------
+POST      | create
+GET, HEAD | get
+PUT       | update
+PATCH     | patch
+DELETE    | delete
+
+リソースとサブリソースは、受け取ったリクエストのパスから決定されます:
+
+Kubelet API  | リソース | サブリソース
+-------------|----------|------------
+/stats/\*    | nodes    | stats
+/metrics/\*  | nodes    | metrics
+/logs/\*     | nodes    | log
+/spec/\*     | nodes    | spec
+*all others* | nodes    | proxy
+
+名前空間とAPIグループの属性は常に空の文字列であり、
+リソース名は常にkubeletの`Node` APIオブジェクトの名前です。
+
+このモードで実行する場合は、apiserverに渡される`--kubelet-client-certificate`フラグと`--kubelet-client-key`
+フラグで識別されるユーザーが次の属性に対して許可されていることを確認します:
+
+* verb=\*, resource=nodes, subresource=proxy
+* verb=\*, resource=nodes, subresource=stats
+* verb=\*, resource=nodes, subresource=log
+* verb=\*, resource=nodes, subresource=spec
+* verb=\*, resource=nodes, subresource=metrics
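+
+参考までに、本ページで説明したフラグを1つのkubelet起動コマンドにまとめると、次のようなスケッチになります(あくまで一例であり、証明書やkubeconfigのファイルパスは環境に依存する仮の値です)。
+
+```shell
+# 匿名アクセスを無効化し、X509クライアント証明書認証と
+# Webhookによる認証/認可を有効にしてkubeletを起動する例(パスは仮の値)
+kubelet \
+  --anonymous-auth=false \
+  --client-ca-file=/etc/kubernetes/pki/ca.crt \
+  --authentication-token-webhook=true \
+  --authorization-mode=Webhook \
+  --kubeconfig=/etc/kubernetes/kubelet.conf
+```
diff --git a/content/ja/docs/reference/glossary/kube-apiserver.md b/content/ja/docs/reference/glossary/kube-apiserver.md
index 29885884fef03..c7a7cfec19896 100755
--- a/content/ja/docs/reference/glossary/kube-apiserver.md
+++ b/content/ja/docs/reference/glossary/kube-apiserver.md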
@@ -2,7 +2,7 @@ title: APIサーバー
 id: kube-apiserver
 date: 2018-04-12
-full_link: /docs/reference/generated/kube-apiserver/
+full_link: /docs/concepts/overview/components/#kube-apiserver
 short_description: >
   Kubernetes APIを提供するコントロールプレーンのコンポーネントです。
 
diff --git a/content/ja/docs/reference/kubectl/cheatsheet.md b/content/ja/docs/reference/kubectl/cheatsheet.md
index caf2fc783c272..d93d02551b12c 100644
--- a/content/ja/docs/reference/kubectl/cheatsheet.md
+++ b/content/ja/docs/reference/kubectl/cheatsheet.md
@@ -8,16 +8,10 @@ card:
 
 
-[Kubectl概要](/ja/docs/reference/kubectl/overview/)と[JsonPathガイド](/docs/reference/kubectl/jsonpath)も合わせてご覧ください。
-
-このページは`kubectl`コマンドの概要です。
-
-
+このページには、一般的によく使われる`kubectl`コマンドとフラグのリストが含まれています。
 
-# kubectl - チートシート
-
 ## Kubectlコマンドの補完
 
 ### BASH
@@ -76,7 +70,7 @@ kubectl config set-context gce --user=cluster-admin --namespace=foo \
   kubectl config unset users.foo                       # ユーザーfooを削除します
 ```
 
-## Apply
+## Kubectl Apply
 
 `apply`はKubernetesリソースを定義するファイルを通じてアプリケーションを管理します。`kubectl apply`を実行して、クラスター内のリソースを作成および更新します。これは、本番環境でKubernetesアプリケーションを管理する推奨方法です。
 詳しくは[Kubectl Book](https://kubectl.docs.kubernetes.io)をご覧ください。
 
@@ -372,6 +366,7 @@ kubectl get pods -A -o=custom-columns='DATA:spec.containers[?(@.image!="k8s.gcr.
 kubectl get pods -A -o=custom-columns='DATA:metadata.*'
 ```
 
+kubectlに関するより多くのサンプルは[カスタムカラムのリファレンス](/ja/docs/reference/kubectl/overview/#custom-columns)を参照してください。
+
 ### Kubectlのログレベルとデバッグ
 
 kubectlのログレベルは、レベルを表す整数が後に続く`-v`または`--v`フラグで制御されます。一般的なKubernetesのログ記録規則と関連するログレベルについて、[こちら](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)で説明します。
@@ -392,11 +387,10 @@ kubectlのログレベルは、レベルを表す整数が後に続く`-v`また
 
 ## {{% heading "whatsnext" %}}
 
-
-* kubectlについてより深く学びたい方は[kubectl概要](/ja/docs/reference/kubectl/overview/)をご覧ください。
+* kubectlについてより深く学びたい方は[kubectl概要](/ja/docs/reference/kubectl/overview/)や[JsonPath](/docs/reference/kubectl/jsonpath)をご覧ください。
 
 * オプションについては[kubectl](/docs/reference/kubectl/kubectl/) optionsをご覧ください。
-
+
 * また[kubectlの利用パターン](/docs/reference/kubectl/conventions/)では再利用可能なスクリプトでkubectlを利用する方法を学べます。
 
 * コミュニティ版[kubectlチートシート](https://github.com/dennyzhang/cheatsheet-kubernetes-A4)もご覧ください。
 
diff --git a/content/ja/docs/reference/tools.md b/content/ja/docs/reference/tools.md
new file mode 100644
index 0000000000000..0fedb1cf9d94e
--- /dev/null
+++ b/content/ja/docs/reference/tools.md
@@ -0,0 +1,46 @@
+---
+title: ツール
+content_type: concept
+---
+
+
+Kubernetesには、Kubernetesシステムの操作に役立ついくつかの組み込みツールが含まれています。
+
+
+## Kubectl
+[`kubectl`](/docs/tasks/tools/install-kubectl/)は、Kubernetesのためのコマンドラインツールです。このコマンドはKubernetes cluster managerを操作します。
+
+## Kubeadm
+[`kubeadm`](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/)は、物理サーバやクラウドサーバ、仮想マシン上にKubernetesクラスタを容易にプロビジョニングするためのコマンドラインツールです(現在はアルファ版です)。
+
+## Minikube
+[`minikube`](https://minikube.sigs.k8s.io/docs/)は、開発やテストのためにワークステーション上でシングルノードのKubernetesクラスタをローカルで実行するツールです。
+
+## Dashboard
+[`Dashboard`](/docs/tasks/access-application-cluster/web-ui-dashboard/)は、KubernetesのWebベースのユーザインタフェースで、コンテナ化されたアプリケーションをKubernetesクラスタにデプロイしたり、トラブルシューティングしたり、クラスタとそのリソース自体を管理したりすることが出来ます。
+
+## Helm
+[`Kubernetes Helm`](https://github.com/helm/helm)は、事前に設定されたKubernetesリソースのパッケージ、別名Kubernetes chartsを管理するためのツールです。
+
+Helmを用いて以下のことを行います。
+
+* Kubernetes chartsとしてパッケージ化された人気のあるソフトウェアの検索と利用
+
+* Kubernetes chartsとして所有するアプリケーションを共有すること
+
+* Kubernetesアプリケーションの再現性のあるビルドの作成
+
+* Kubernetesマニフェストファイルのインテリジェントな管理
+
+* Helmパッケージのリリース管理
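+
+たとえば、Helm 3の構文を想定すると、チャートの検索からインストールまでは次のような流れになります(リポジトリ名とチャート名は説明用の仮の値です)。
+
+```shell
+# チャートリポジトリを追加して、チャートを検索する(リポジトリ名・チャート名は一例)
+helm repo add bitnami https://charts.bitnami.com/bitnami
+helm search repo bitnami/nginx
+
+# チャートをリリースとしてインストールし、リリースの一覧を確認する
+helm install my-release bitnami/nginx
+helm list
+```
+
+## Kompose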
+[`Kompose`](https://github.com/kubernetes/kompose)は、Docker ComposeユーザがKubernetesに移行する手助けをするツールです。 + +Komposeを用いて以下のことを行います。 + +* Docker ComposeファイルのKubernetesオブジェクトへの変換 + +* ローカルのDocker開発からKubernetesを経由したアプリケーション管理への移行 + +* v1またはv2のDocker Compose用 `yaml` ファイルならびに[分散されたアプリケーションバンドル](https://docs.docker.com/compose/bundles/)の変換 diff --git a/content/ja/docs/setup/learning-environment/minikube.md b/content/ja/docs/setup/learning-environment/minikube.md index 54a0411132b3b..c197a03081636 100644 --- a/content/ja/docs/setup/learning-environment/minikube.md +++ b/content/ja/docs/setup/learning-environment/minikube.md @@ -26,7 +26,7 @@ MinikubeのサポートするKubernetesの機能: ## インストール -[Minikubeのインストール](/ja/docs/tasks/tools/install-minikube/)を参照してください。 +ツールのインストールについて知りたい場合は、公式の[Get Started!](https://minikube.sigs.k8s.io/docs/start/)のガイドに従ってください。 ## クイックスタート diff --git a/content/ja/docs/setup/production-environment/container-runtimes.md b/content/ja/docs/setup/production-environment/container-runtimes.md index 8629e2f10213e..0f48e26f64bc3 100644 --- a/content/ja/docs/setup/production-environment/container-runtimes.md +++ b/content/ja/docs/setup/production-environment/container-runtimes.md @@ -130,7 +130,7 @@ yum install -y yum-utils device-mapper-persistent-data lvm2 ``` ```shell -### Dockerリポジトリの追加 +## Dockerリポジトリの追加 yum-config-manager --add-repo \ https://download.docker.com/linux/centos/docker-ce.repo ``` @@ -215,73 +215,107 @@ sysctl --system {{< tabs name="tab-cri-cri-o-installation" >}} {{% tab name="Debian" %}} -```shell -# Debian Unstable/Sid -echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Unstable/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list -wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_Unstable/Release.key -O- | sudo apt-key add - -``` + CRI-Oを以下のOSにインストールするには、環境変数$OSを以下の表の適切なフィールドに設定します。 -```shell -# Debian Testing -echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Testing/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list -wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_Testing/Release.key -O- | sudo apt-key add - -``` -```shell -# Debian 10 -echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list -wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_10/Release.key -O- | sudo apt-key add - -``` +| Operating system | $OS | +| ---------------- | ----------------- | +| Debian Unstable | `Debian_Unstable` | +| Debian Testing | `Debian_Testing` | -```shell -# Raspbian 10 -echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Raspbian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list -wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Raspbian_10/Release.key -O- | sudo apt-key add - -``` +
    +そして、`$VERSION`にKubernetesのバージョンに合わせたCRI-Oのバージョンを設定します。例えば、CRI-O 1.18をインストールしたい場合は、`VERSION=1.18` を設定します。インストールを特定のリリースに固定することができます。バージョン 1.18.3をインストールするには、`VERSION=1.18:1.18.3` を設定します。 +
    -それでは、CRI-Oをインストールします: +以下を実行します。 ```shell -sudo apt-get install cri-o-1.17 +echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list +echo "deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.list + +curl -L https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/Release.key | apt-key add - +curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | apt-key add - + +apt-get update +apt-get install cri-o cri-o-runc ``` + {{% /tab %}} -{{% tab name="Ubuntu 18.04, 19.04 and 19.10" %}} +{{% tab name="Ubuntu" %}} -```shell -# パッケージレポジトリを設定する -. /etc/os-release -sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/x${NAME}_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" -wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/x${NAME}_${VERSION_ID}/Release.key -O- | sudo apt-key add - -sudo apt-get update -``` + CRI-Oを以下のOSにインストールするには、環境変数$OSを以下の表の適切なフィールドに設定します。 + +| Operating system | $OS | +| ---------------- | ----------------- | +| Ubuntu 20.04 | `xUbuntu_20.04` | +| Ubuntu 19.10 | `xUbuntu_19.10` | +| Ubuntu 19.04 | `xUbuntu_19.04` | +| Ubuntu 18.04 | `xUbuntu_18.04` | +
    +次に、`$VERSION`をKubernetesのバージョンと一致するCRI-Oのバージョンに設定します。例えば、CRI-O 1.18をインストールしたい場合は、`VERSION=1.18` を設定します。インストールを特定のリリースに固定することができます。バージョン 1.18.3 をインストールするには、`VERSION=1.18:1.18.3` を設定します。 +
    + +以下を実行します。 ```shell -# CRI-Oのインストール -sudo apt-get install cri-o-1.17 +echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list +echo "deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.list + +curl -L https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/Release.key | apt-key add - +curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | apt-key add - + +apt-get update +apt-get install cri-o cri-o-runc ``` {{% /tab %}} -{{% tab name="CentOS/RHEL 7.4+" %}} +{{% tab name="CentOS" %}} + + CRI-Oを以下のOSにインストールするには、環境変数$OSを以下の表の適切なフィールドに設定します。 + +| Operating system | $OS | +| ---------------- | ----------------- | +| Centos 8 | `CentOS_8` | +| Centos 8 Stream | `CentOS_8_Stream` | +| Centos 7 | `CentOS_7` | +
    +次に、`$VERSION`をKubernetesのバージョンと一致するCRI-Oのバージョンに設定します。例えば、CRI-O 1.18 をインストールしたい場合は、`VERSION=1.18` を設定します。インストールを特定のリリースに固定することができます。バージョン 1.18.3 をインストールするには、`VERSION=1.18:1.18.3` を設定します。 +
    + +以下を実行します。 ```shell -# 必要なパッケージのインストール -curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_7/devel:kubic:libcontainers:stable.repo -curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:{{< skew latestVersion >}}.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:{{< skew latestVersion >}}/CentOS_7/devel:kubic:libcontainers:stable:cri-o:{{< skew latestVersion >}}.repo +curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/devel:kubic:libcontainers:stable.repo +curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/devel:kubic:libcontainers:stable:cri-o:$VERSION.repo +yum install cri-o ``` +{{% /tab %}} + +{{% tab name="openSUSE Tumbleweed" %}} + ```shell -# CRI-Oのインストール -yum install -y cri-o + sudo zypper install cri-o ``` {{% /tab %}} +{{% tab name="Fedora" %}} -{{% tab name="openSUSE Tumbleweed" %}} +$VERSIONには、Kubernetesのバージョンと一致するCRI-Oのバージョンを設定します。例えば、CRI-O 1.18をインストールしたい場合は、$VERSION=1.18を設定します。 +以下のコマンドで、利用可能なバージョンを見つけることができます。 +```shell +dnf module list cri-o +``` +CRI-OはFedoraの特定のリリースにピン留めすることをサポートしていません。 +以下を実行します。 ```shell -sudo zypper install cri-o +dnf module enable cri-o:$VERSION +dnf install cri-o ``` + {{% /tab %}} {{< /tabs >}} + ### CRI-Oの起動 ```shell @@ -321,7 +355,7 @@ sysctl --system ### containerdのインストール {{< tabs name="tab-cri-containerd-installation" >}} -{{< tab name="Ubuntu 16.04" codelang="bash" >}} +{{% tab name="Ubuntu 16.04" %}} ```shell # (containerdのインストール) @@ -335,7 +369,7 @@ apt-get update && apt-get install -y apt-transport-https ca-certificates curl so curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - ``` -``` +```shell ## Dockerのaptリポジトリの追加 add-apt-repository \ "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ diff --git a/content/ja/docs/setup/production-environment/on-premises-vm/cloudstack.md b/content/ja/docs/setup/production-environment/on-premises-vm/cloudstack.md index 1177bcdd941dd..296e10568367f 100644 --- a/content/ja/docs/setup/production-environment/on-premises-vm/cloudstack.md +++ b/content/ja/docs/setup/production-environment/on-premises-vm/cloudstack.md @@ -7,7 +7,7 @@ content_type: concept [CloudStack](https://cloudstack.apache.org/) is a software to build public and private clouds based on hardware virtualization principles (traditional IaaS). To deploy Kubernetes on CloudStack there are several possibilities depending on the Cloud being used and what images are made available. CloudStack also has a vagrant plugin available, hence Vagrant could be used to deploy Kubernetes either using the existing shell provisioner or using new Salt based recipes. -[CoreOS](http://coreos.com) templates for CloudStack are built [nightly](http://stable.release.core-os.net/amd64-usr/current/). CloudStack operators need to [register](http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/templates.html) this template in their cloud before proceeding with these Kubernetes deployment instructions. +[CoreOS](https://coreos.com) templates for CloudStack are built [nightly](https://stable.release.core-os.net/amd64-usr/current/). 
CloudStack operators need to [register](https://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/templates.html) this template in their cloud before proceeding with these Kubernetes deployment instructions. This guide uses a single [Ansible playbook](https://github.com/apachecloudstack/k8s), which is completely automated and can deploy Kubernetes on a CloudStack based Cloud using CoreOS images. The playbook, creates an ssh key pair, creates a security group and associated rules and finally starts coreOS instances configured via cloud-init. diff --git a/content/ja/docs/setup/production-environment/tools/kops.md b/content/ja/docs/setup/production-environment/tools/kops.md index 92899a300a249..dfd2eec406577 100644 --- a/content/ja/docs/setup/production-environment/tools/kops.md +++ b/content/ja/docs/setup/production-environment/tools/kops.md @@ -27,7 +27,7 @@ kops is an automated provisioning system: * You must [install](https://github.com/kubernetes/kops#installing) `kops` on a 64-bit (AMD64 and Intel 64) device architecture. -* You must have an [AWS account](https://docs.aws.amazon.com/polly/latest/dg/setting-up.html), generate [IAM keys](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys) and [configure](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html#cli-quick-configuration) them. +* You must have an [AWS account](https://docs.aws.amazon.com/polly/latest/dg/setting-up.html), generate [IAM keys](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys) and [configure](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html#cli-quick-configuration) them. The IAM user will need [adequate permissions](https://github.com/kubernetes/kops/blob/master/docs/getting_started/aws.md#setup-iam-user). @@ -140,7 +140,7 @@ you choose for organization reasons (e.g. you are allowed to create records unde but not under `example.com`). Let's assume you're using `dev.example.com` as your hosted zone. You create that hosted zone using -the [normal process](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html), or +the [normal process](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html), or with a command such as `aws route53 create-hosted-zone --name dev.example.com --caller-reference 1`. You must then set up your NS records in the parent domain, so that records in the domain will resolve. Here, @@ -231,7 +231,7 @@ See the [list of add-ons](/docs/concepts/cluster-administration/addons/) to expl ## {{% heading "whatsnext" %}} -* Learn more about Kubernetes [concepts](/docs/concepts/) and [`kubectl`](/docs/user-guide/kubectl-overview/). +* Learn more about Kubernetes [concepts](/docs/concepts/) and [`kubectl`](/docs/reference/kubectl/overview/). * Learn more about `kops` [advanced usage](https://kops.sigs.k8s.io/) for tutorials, best practices and advanced configuration options. 
* Follow `kops` community discussions on Slack: [community discussions](https://github.com/kubernetes/kops#other-ways-to-communicate-with-the-contributors) * Contribute to `kops` by addressing or raising an issue [GitHub Issues](https://github.com/kubernetes/kops/issues) diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md b/content/ja/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md index b2b41f9128a6c..58cd1ca133888 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md @@ -75,7 +75,7 @@ kind: ClusterConfiguration kubernetesVersion: v1.16.0 scheduler: extraArgs: - address: 0.0.0.0 + bind-address: 0.0.0.0 config: /home/johndoe/schedconfig.yaml kubeconfig: /home/johndoe/kubeconfig.yaml ``` diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index 9c096a66983d5..9a47644bf6f05 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -1,12 +1,12 @@ --- -title: kubeadmを使用したシングルコントロールプレーンクラスターの作成 +title: kubeadmを使用したクラスターの作成 content_type: task weight: 30 --- -`kubeadm`ツールは、ベストプラクティスに準拠した実用最小限のKubernetesクラスターをブートストラップする手助けをします。実際、`kubeadm`を使用すれば、[Kubernetes Conformance tests](https://kubernetes.io/blog/2017/10/software-conformance-certification)に通るクラスターをセットアップすることができます。`kubeadm`は、[ブートストラップトークン](/docs/reference/access-authn-authz/bootstrap-tokens/)やクラスターのアップグレードなどのその他のクラスターのライフサイクルの機能もサポートします。 +ベストプラクティスに準拠した実用最小限のKubernetesクラスターを作成します。実際、`kubeadm`を使用すれば、[Kubernetes Conformance tests](https://kubernetes.io/blog/2017/10/software-conformance-certification)に通るクラスターをセットアップすることができます。`kubeadm`は、[ブートストラップトークン](/docs/reference/access-authn-authz/bootstrap-tokens/)やクラスターのアップグレードなどのその他のクラスターのライフサイクルの機能もサポートします。 `kubeadm`ツールは、次のようなときに適しています。 @@ -41,7 +41,7 @@ kubeadmツールの全体の機能の状態は、一般利用可能(GA)です。 ## 目的 -* シングルコントロールプレーンのKubernetesクラスターまたは[高可用性クラスター](/ja/docs/setup/production-environment/tools/kubeadm/high-availability/)をインストールする +* シングルコントロールプレーンのKubernetesクラスターをインストールする * クラスター上にPodネットワークをインストールして、Podがお互いに通信できるようにする ## 手順 @@ -76,7 +76,7 @@ kubeadm init `--apiserver-advertise-address`は、この特定のコントロールプレーンノードのAPIサーバーへのadvertise addressを設定するために使えますが、`--control-plane-endpoint`は、すべてのコントロールプレーンノード共有のエンドポイントを設定するために使えます。 -`--control-plane-endpoint`はIPアドレスを受け付けますが、IPアドレスへマッピングされるDNSネームも使用できます。利用可能なソリューションをそうしたマッピングの観点から評価するには、ネットワーク管理者に相談してください。 +`--control-plane-endpoint`はIPアドレスと、IPアドレスへマッピングできるDNS名を使用できます。利用可能なソリューションをそうしたマッピングの観点から評価するには、ネットワーク管理者に相談してください。 以下にマッピングの例を示します。 @@ -203,9 +203,14 @@ export KUBECONFIG=/etc/kubernetes/admin.conf {{< /caution >}} -CNIを使用するKubernetes Podネットワークを提供する外部のプロジェクトがいくつかあります。一部のプロジェクトでは、[ネットワークポリシー](/docs/concepts/services-networking/networkpolicies/)もサポートしています。 +{{< note >}} +現在、Calicoはkubeadmプロジェクトがe2eテストを実施している唯一のCNIプラグインです。 +もしCNIプラグインに関する問題を見つけた場合、kubeadmやkubernetesではなく、そのCNIプラグインの課題管理システムへ問題を報告してください。 +{{< /note >}} + +CNIを使用するKubernetes Podネットワークを提供する外部のプロジェクトがいくつかあります。一部のプロジェクトでは、[ネットワークポリシー](/ja/docs/concepts/services-networking/network-policies/)もサポートしています。 -利用できる[ネットワークアドオンとネットワークポリシーアドオン](/docs/concepts/cluster-administration/addons/#networking-and-network-policy)のリストを確認してください。 
+[Kubernetesのネットワークモデル](/ja/docs/concepts/cluster-administration/networking/#how-to-implement-the-kubernetes-networking-model)を実装したアドオンの一覧も確認してください。 Podネットワークアドオンをインストールするには、コントロールプレーンノード上またはkubeconfigクレデンシャルを持っているノード上で、次のコマンドを実行します。 @@ -213,91 +218,7 @@ Podネットワークアドオンをインストールするには、コント kubectl apply -f ``` -インストールできるPodネットワークは、クラスターごとに1つだけです。以下の手順で、いくつかのよく使われるPodネットワークプラグインをインストールできます。 - -{{< tabs name="tabs-pod-install" >}} - -{{% tab name="Calico" %}} -[Calico](https://docs.projectcalico.org/latest/introduction/)は、ネットワークとネットワークポリシーのプロバイダーです。Calicoは柔軟なさまざまなネットワークオプションをサポートするため、自分の状況に適した最も効果的なオプションを選択できます。たとえば、ネットワークのオーバーレイの有無や、BGPの有無が選べます。Calicoは、ホスト、Pod、(もしIstioとEnvoyを使っている場合)サービスメッシュレイヤー上のアプリケーションに対してネットワークポリシーを強制するために、同一のエンジンを使用しています。Calicoは、`amd64`、`arm64`、`ppc64le`を含む複数のアーキテクチャで動作します。 - -デフォルトでは、Calicoは`192.168.0.0/16`をPodネットワークのCIDRとして使いますが、このCIDRはcalico.yamlファイルで設定できます。Calicoを正しく動作させるためには、これと同じCIDRを`--pod-network-cidr=192.168.0.0/16`フラグまたはkubeadmの設定を使って、`kubeadm init`コマンドに渡す必要があります。 - -```shell -kubectl apply -f https://docs.projectcalico.org/v3.11/manifests/calico.yaml -``` - -{{% /tab %}} - -{{% tab name="Cilium" %}} -Ciliumを正しく動作させるためには、`kubeadm init`に `--pod-network-cidr=10.217.0.0/16`を渡さなければなりません。 - -Ciliumのデプロイは、次のコマンドを実行するだけでできます。 - -```shell -kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.6/install/kubernetes/quick-install.yaml -``` - -すべてのCilium Podが`READY`とマークされたら、クラスターを使い始められます。 - -```shell -kubectl get pods -n kube-system --selector=k8s-app=cilium -``` - -出力は次のようになります。 - -``` -NAME READY STATUS RESTARTS AGE -cilium-drxkl 1/1 Running 0 18m -``` - -Ciliumはkube-proxyの代わりに利用することもできます。詳しくは[Kubernetes without kube-proxy](https://docs.cilium.io/en/stable/gettingstarted/kubeproxy-free)を読んでください。 - -KubernetesでのCiliumの使い方に関するより詳しい情報は、[Kubernetes Install guide for Cilium](https://docs.cilium.io/en/stable/kubernetes/)を参照してください。 -{{% /tab %}} - -{{% tab name="Contiv-VPP" %}} -[Contiv-VPP](https://contivpp.io/)は、[FD.io VPP](https://fd.io/)をベースとするプログラマブルなCNF vSwitchを採用し、機能豊富で高性能なクラウドネイティブなネットワーキングとサービスを提供します。 - -Contiv-VPPは、k8sサービスとネットワークポリシーを(VPP上の)ユーザースペースで実装しています。 - -こちらのインストールガイドを参照してください: [Contiv-VPP Manual Installation](https://github.com/contiv/vpp/blob/master/docs/setup/MANUAL_INSTALL.md) -{{% /tab %}} - -{{% tab name="Flannel" %}} -`flannel`を正しく動作させるためには、`--pod-network-cidr=10.244.0.0/16`を`kubeadm init`に渡す必要があります。 - -オーバーレイネットワークに参加しているすべてのホスト上で、ファイアウォールのルールが、UDPポート8285と8472のトラフィックを許可するように設定されていることを確認してください。この設定に関するより詳しい情報は、Flannelのトラブルシューティングガイドの[Firewall](https://coreos.com/flannel/docs/latest/troubleshooting.html#firewalls)のセクションを参照してください。 - -Flannelは、Linux下の`amd64`、`arm`、`arm64`、`ppc64le`、`s390x`アーキテクチャ上で動作します。Windows(`amd64`)はv0.11.0でサポートされたとされていますが、使用方法はドキュメントに書かれていません。 - -```shell -kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/2140ac876ef134e0ed5af15c65e414cf26827915/Documentation/kube-flannel.yml -``` - -`flannel`に関するより詳しい情報は、[GitHub上のCoreOSのflannelリポジトリ](https://github.com/coreos/flannel)を参照してください。 -{{% /tab %}} - -{{% tab name="Kube-router" %}} -Kube-routerは、ノードへのPod CIDRの割り当てをkube-controller-managerに依存しています。そのため、`kubeadm init`時に`--pod-network-cidr`フラグを使用する必要があります。 - -Kube-routerは、Podネットワーク、ネットワークポリシー、および高性能なIP Virtual Server(IPVS)/Linux Virtual Server(LVS)ベースのサービスプロキシーを提供します。 - -Kube-routerを有効にしたKubernetesクラスターをセットアップするために`kubeadm`ツールを使用する方法については、公式の[セットアップガイド](https://github.com/cloudnativelabs/kube-router/blob/master/docs/kubeadm.md)を参照してください。 -{{% /tab %}} - -{{% tab name="Weave Net" %}} -Weave 
Netを使用してKubernetesクラスターをセットアップするより詳しい情報は、[アドオンを使用してKubernetesを統合する](https://www.weave.works/docs/net/latest/kube-addon/)を読んでください。 - -Weave Netは、`amd64`、`arm`、`arm64`、`ppc64le`プラットフォームで追加の操作なしで動作します。Weave Netはデフォルトでharipinモードをセットします。このモードでは、Pod同士はPodIPを知らなくても、Service IPアドレス経由でアクセスできます。 - -```shell -kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" -``` - -{{% /tab %}} - -{{< /tabs >}} - +インストールできるPodネットワークは、クラスターごとに1つだけです。 Podネットワークがインストールされたら、`kubectl get pods --all-namespaces`の出力結果でCoreDNS Podが`Running`状態であることをチェックすることで、ネットワークが動作していることを確認できます。そして、一度CoreDNS Podが動作すれば、続けてノードを追加できます。 @@ -375,7 +296,7 @@ openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outfor ``` {{< note >}} -IPv6タプルを`:`に指定するためには、IPv6アドレスをブラケットで囲みます。たとえば、`[fd00::101]:2073`のように書きます。 +IPv6タプルを`:`と指定するためには、IPv6アドレスを角括弧で囲みます。たとえば、`[fd00::101]:2073`のように書きます。 {{< /note >}} 出力は次のようになります。 @@ -407,7 +328,7 @@ kubectl --kubeconfig ./admin.conf get nodes {{< note >}} 上の例では、rootユーザーに対するSSH接続が有効であることを仮定しています。もしそうでない場合は、`admin.conf`ファイルを誰か他のユーザーからアクセスできるようにコピーした上で、代わりにそのユーザーを使って`scp`してください。 -`admin.conf`ファイルはユーザーにクラスターに対する _特権ユーザー_ の権限を与えます。そのため、このファイルを使うのは控えめにしなければなりません。通常のユーザーには、一部の権限をホワイトリストに加えたユニークなクレデンシャルを生成することを推奨します。これには、`kubeadm alpha kubeconfig user --client-name `コマンドが使えます。このコマンドを実行すると、KubeConfigファイルがSTDOUTに出力されるので、ファイルに保存してユーザーに配布します。その後、`kubectl create (cluster)rolebinding`コマンドを使って権限をホワイトリストに加えます。 +`admin.conf`ファイルはユーザーにクラスターに対する _特権ユーザー_ の権限を与えます。そのため、このファイルを使うのは控えめにしなければなりません。通常のユーザーには、明示的に許可した権限を持つユニークなクレデンシャルを生成することを推奨します。これには、`kubeadm alpha kubeconfig user --client-name `コマンドが使えます。このコマンドを実行すると、KubeConfigファイルがSTDOUTに出力されるので、ファイルに保存してユーザーに配布します。その後、`kubectl create (cluster)rolebinding`コマンドを使って権限を付与します。 {{< /note >}} ### (オプション)APIサーバーをlocalhostへプロキシする @@ -433,10 +354,9 @@ kubectl --kubeconfig ./admin.conf proxy ```bash kubectl drain --delete-local-data --force --ignore-daemonsets -kubectl delete node ``` -その後、ノードが削除されたら、`kubeadm`のインストール状態をすべてリセットします。 +ノードが削除される前に、`kubeadm`によってインストールされた状態をリセットします。 ```bash kubeadm reset @@ -454,6 +374,11 @@ IPVS tablesをリセットしたい場合は、次のコマンドを実行する ipvsadm -C ``` +ノードを削除します。 +```bash +kubectl delete node +``` + クラスターのセットアップを最初から始めたいときは、`kubeadm init`や`kubeadm join`を適切な引数を付けて実行すればいいだけです。 ### コントロールプレーンのクリーンアップ @@ -469,7 +394,7 @@ ipvsadm -C * [Sonobuoy](https://github.com/heptio/sonobuoy)を使用してクラスターが適切に動作しているか検証する。 * `kubeadm`を使用したクラスターをアップグレードする方法について、[kubeadmクラスターをアップグレードする](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)を読む。 * `kubeadm`の高度な利用方法について[kubeadmリファレンスドキュメント](/docs/reference/setup-tools/kubeadm/kubeadm)で学ぶ。 -* Kubernetesの[コンセプト](/ja/docs/concepts/)や[`kubectl`](/docs/user-guide/kubectl-overview/)についてもっと学ぶ。 +* Kubernetesの[コンセプト](/ja/docs/concepts/)や[`kubectl`](/ja/docs/reference/kubectl/overview/)についてもっと学ぶ。 * Podネットワークアドオンのより完全なリストを[クラスターのネットワーク](/docs/concepts/cluster-administration/networking/)で確認する。 * ロギング、モニタリング、ネットワークポリシー、仮想化、Kubernetesクラスターの制御のためのツールなど、その他のアドオンについて、[アドオンのリスト](/docs/concepts/cluster-administration/addons/)で確認する。 * クラスターイベントやPod内で実行中のアプリケーションから送られるログをクラスターがハンドリングする方法を設定する。関係する要素の概要を理解するために、[ロギングのアーキテクチャ](/docs/concepts/cluster-administration/logging/)を読んでください。 @@ -486,9 +411,9 @@ ipvsadm -C ## バージョン互換ポリシー {#version-skew-policy} -バージョンvX.Yの`kubeadm`ツールは、バージョンvX.YまたはvX.(Y-1)のコントロールプレーンを持つクラスターをデプロイできます。また、`kubeadm` vX.Yは、kubeadmで構築された既存のvX.(Y-1)のクラスターをアップグレートできます。 +バージョンv{{< skew latestVersion >}}の`kubeadm`ツールは、バージョンv{{< skew latestVersion >}}またはv{{< skew prevMinorVersion 
>}}のコントロールプレーンを持つクラスターをデプロイできます。また、バージョンv{{< skew latestVersion >}}の`kubeadm`は、バージョンv{{< skew prevMinorVersion >}}のkubeadmで構築されたクラスターをアップグレードできます。
 
-未来を見ることはできないため、kubeadm CLI vX.YはvX.(Y+1)をデプロイすることはできません。
+未来を見ることはできないため、kubeadm CLI v{{< skew latestVersion >}}はv{{< skew nextMinorVersion >}}をデプロイできないかもしれません。
 
 例: `kubeadm` v1.8は、v1.7とv1.8のクラスターをデプロイでき、v1.7のkubeadmで構築されたクラスターをv1.8にアップグレートできます。
 
@@ -507,7 +432,7 @@ kubeletとコントロールプレーンの間や、他のKubernetesコンポー
 
 * 定期的に[etcdをバックアップ](https://coreos.com/etcd/docs/latest/admin_guide.html)する。kubeadmが設定するetcdのデータディレクトリは、コントロールプレーンノードの`/var/lib/etcd`にあります。
 
-* 複数のコントロールプレーンノードを使用する。[高可用性トポロジーのオプション](/docs/setup/production-environment/tools/kubeadm/ha-topology/)では、より高い可用性を提供するクラスターのトポロジーの選択について説明してます。
+* 複数のコントロールプレーンノードを使用する。[高可用性トポロジーのオプション](/ja/docs/setup/production-environment/tools/kubeadm/ha-topology/)では、[より高い可用性](/ja/docs/setup/production-environment/tools/kubeadm/high-availability/)を提供するクラスターのトポロジーの選択について説明しています。
 
 ### プラットフォームの互換性 {#multi-platform}
 
@@ -520,4 +445,3 @@ kubeadmのdeb/rpmパッケージおよびバイナリは、[multi-platform propo
 
 ## トラブルシューティング {#troubleshooting}
 
 kubeadmに関する問題が起きたときは、[トラブルシューティングドキュメント](/ja/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/)を確認してください。
-
diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/ha-topology.md b/content/ja/docs/setup/production-environment/tools/kubeadm/ha-topology.md
index fa3468e1d1426..f06a8142a30d2 100644
--- a/content/ja/docs/setup/production-environment/tools/kubeadm/ha-topology.md
+++ b/content/ja/docs/setup/production-environment/tools/kubeadm/ha-topology.md
@@ -15,7 +15,10 @@ HAクラスターは次の方法で設定できます。
 
 HAクラスターをセットアップする前に、各トポロジーの利点と欠点について注意深く考慮する必要があります。
 
-
+{{< note >}}
+kubeadmは、etcdクラスターを静的にブートストラップします。
+詳細については、etcdの[クラスタリングガイド](https://github.com/etcd-io/etcd/blob/release-3.4/Documentation/op-guide/clustering.md#static)をご覧ください。
+{{< /note >}}
 
diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/high-availability.md b/content/ja/docs/setup/production-environment/tools/kubeadm/high-availability.md
index 6b7cb8b6103d3..1d5e3f9aafee9 100644
--- a/content/ja/docs/setup/production-environment/tools/kubeadm/high-availability.md
+++ b/content/ja/docs/setup/production-environment/tools/kubeadm/high-availability.md
@@ -57,10 +57,10 @@ weight: 60
 
 - ロードバランサーは、apiserverポートで、全てのコントロールプレーンノードと通信できなければなりません。また、リスニングポートに対する流入トラフィックも許可されていなければなりません。
 
-  - [HAProxy](http://www.haproxy.org/)をロードバランサーとして使用することができます。
-
   - ロードバランサーのアドレスは、常にkubeadmの`ControlPlaneEndpoint`のアドレスと一致することを確認してください。
 
+  - 詳細は[Options for Software Load Balancing](https://github.com/kubernetes/kubeadm/blob/master/docs/ha-considerations.md#options-for-software-load-balancing)をご覧ください。
+
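+    ソフトウェアロードバランサーの一例として、HAProxyでkube-apiserverへのTCP負荷分散を構成する場合の最小限のスケッチを以下に示します(IPアドレス、ポート、サーバー名はすべて説明用の仮の値です)。
+
+    ```shell
+    # haproxy.cfgにkube-apiserver向けのTCPフロントエンド/バックエンドを追記する例
+    # (アドレスとサーバー名は仮の値。実際の構成は上記のリンク先を参照)
+    cat <<'EOF' | sudo tee -a /etc/haproxy/haproxy.cfg
+    frontend kube-apiserver
+        bind *:6443
+        mode tcp
+        default_backend kube-apiserver-backend
+    backend kube-apiserver-backend
+        mode tcp
+        balance roundrobin
+        server control-plane-1 192.168.0.11:6443 check
+    EOF
+    sudo systemctl restart haproxy
+    ```
+
 1. 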
ロードバランサーに、最初のコントロールプレーンノードを追加し、接続をテストする: ```sh @@ -87,7 +87,7 @@ weight: 60 {{< note >}}`kubeadm init`の`--config`フラグと`--certificate-key`フラグは混在させることはできないため、[kubeadm configuration](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2)を使用する場合は`certificateKey`フィールドを適切な場所に追加する必要があります(`InitConfiguration`と`JoinConfiguration: controlPlane`の配下)。{{< /note >}} - {{< note >}}CalicoなどのいくつかのCNIネットワークプラグインは`192.168.0.0/16`のようなCIDRを必要としますが、Weaveなどは必要としません。[CNIネットワークドキュメント](/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#pod-network)を参照してください。PodにCIDRを設定するには、`ClusterConfiguration`の`networking`オブジェクトに`podSubnet: 192.168.0.0/16`フィールドを設定してください。{{< /note >}} + {{< note >}}いくつかのCNIネットワークプラグインはPodのIPのCIDRの指定など追加の設定を必要としますが、必要としないプラグインもあります。[CNIネットワークドキュメント](/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#pod-network)を参照してください。PodにCIDRを設定するには、`ClusterConfiguration`の`networking`オブジェクトに`podSubnet: 192.168.0.0/16`フィールドを設定してください。{{< /note >}} - このような出力がされます: diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md b/content/ja/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md index e061315381883..27dd20b1bf456 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md @@ -64,7 +64,7 @@ ComponentConfigの詳細については、[このセクション](#configure-kub ### `kubeadm init`実行時の流れ -`kubeadm init`を実行した場合、kubeletの設定は`/var/lib/kubelet/config.yaml`に格納され、クラスターのConfigMapにもアップロードされます。ConfigMapは`kubelet-config-1.X`という名前で、`.X`は初期化するKubernetesのマイナーバージョンを表します。またこの設定ファイルは、クラスタ内の全てのkubeletのために、クラスター全体設定の基準と共に`/etc/kubernetes/kubelet.conf`にも書き込まれます。この設定ファイルは、kubeletがAPIサーバと通信するためのクライアント証明書を指し示します。これは、[各kubeletにクラスターレベルの設定を配布](#propagating-cluster-level-configuration-to-each-kubelet)することの必要性を示しています。 +`kubeadm init`を実行した場合、kubeletの設定は`/var/lib/kubelet/config.yaml`に格納され、クラスターのConfigMapにもアップロードされます。ConfigMapは`kubelet-config-1.X`という名前で、`X`は初期化するKubernetesのマイナーバージョンを表します。またこの設定ファイルは、クラスタ内の全てのkubeletのために、クラスター全体設定の基準と共に`/etc/kubernetes/kubelet.conf`にも書き込まれます。この設定ファイルは、kubeletがAPIサーバと通信するためのクライアント証明書を指し示します。これは、[各kubeletにクラスターレベルの設定を配布](#propagating-cluster-level-configuration-to-each-kubelet)することの必要性を示しています。 二つ目のパターンである、[インスタンス固有の設定内容を適用](#providing-instance-specific-configuration-details)するために、kubeadmは環境ファイルを`/var/lib/kubelet/kubeadm-flags.env`へ書き出します。このファイルは以下のように、kubelet起動時に渡されるフラグのリストを含んでいます。 @@ -99,7 +99,7 @@ kubeletが新たな設定を読み込むと、kubeadmは、KubeConfigファイ `kubeadm`には、systemdがどのようにkubeletを実行するかを指定した設定ファイルが同梱されています。 kubeadm CLIコマンドは決してこのsystemdファイルには触れないことに注意してください。 -kubeadmの[DEBパッケージ](https://github.com/kubernetes/kubernetes/blob/master/build/debs/10-kubeadm.conf)または[RPMパッケージ](https://github.com/kubernetes/kubernetes/blob/master/build/rpms/10-kubeadm.conf)によってインストールされたこの設定ファイルは、`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf`に書き込まれ、systemdで使用されます。基本的な`kubelet.service`([RPM用](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/rpm/kubelet/kubelet.service)または、 [DEB用](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service))を拡張します。 
+kubeadmの[DEBパッケージ](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/deb/kubeadm/10-kubeadm.conf)または[RPMパッケージ](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/rpm/kubeadm/10-kubeadm.conf)によってインストールされたこの設定ファイルは、`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf`に書き込まれ、systemdで使用されます。基本的な`kubelet.service`([RPM用](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/rpm/kubelet/kubelet.service)または[DEB用](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service))を拡張します。
 
 ```none
 [Service]
@@ -134,6 +134,5 @@ Kubernetesに同梱されるDEB、RPMのパッケージは以下の通りです
 
 | `kubeadm` | `/usr/bin/kubeadm`CLIツールと、[kubelet用のsystemdファイル](#the-kubelet-drop-in-file-for-systemd)をインストールします。 |
 | `kubelet` | kubeletバイナリを`/usr/bin`に、CNIバイナリを`/opt/cni/bin`にインストールします。 |
 | `kubectl` | `/usr/bin/kubectl`バイナリをインストールします。 |
-| `kubernetes-cni` | 公式のCNIバイナリを`/opt/cni/bin`ディレクトリにインストールします。 |
 | `cri-tools` | `/usr/bin/crictl`バイナリを[cri-tools gitリポジトリ](https://github.com/kubernetes-incubator/cri-tools)からインストールします。 |
 
diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/self-hosting.md b/content/ja/docs/setup/production-environment/tools/kubeadm/self-hosting.md
index a7bee37727d9e..9a6ceccb252ff 100644
--- a/content/ja/docs/setup/production-environment/tools/kubeadm/self-hosting.md
+++ b/content/ja/docs/setup/production-environment/tools/kubeadm/self-hosting.md
@@ -1,68 +1,48 @@
 ---
-title: Configuring your kubernetes cluster to self-host the control plane
+title: コントロールプレーンをセルフホストするようにKubernetesクラスターを構成する
 content_type: concept
 weight: 100
 ---
 
 
 
-### Self-hosting the Kubernetes control plane {#self-hosting}
+### コントロールプレーンのセルフホスティング {#self-hosting}
 
-kubeadm allows you to experimentally create a _self-hosted_ Kubernetes control
-plane. This means that key components such as the API server, controller
-manager, and scheduler run as [DaemonSet pods](/ja/docs/concepts/workloads/controllers/daemonset/)
-configured via the Kubernetes API instead of [static pods](/docs/tasks/administer-cluster/static-pod/)
-configured in the kubelet via static files.
+kubeadmを使用すると、セルフホスト型のKubernetesコントロールプレーンを実験的に作成できます。これは、APIサーバー、コントローラーマネージャー、スケジューラーなどの主要コンポーネントが、静的ファイルを介してkubeletで構成された[static pods](/docs/tasks/configure-pod-container/static-pod/)ではなく、Kubernetes APIを介して構成された[DaemonSet pods](/ja/docs/concepts/workloads/controllers/daemonset/)として実行されることを意味します。
 
-To create a self-hosted cluster see the
-[kubeadm alpha selfhosting pivot](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-selfhosting) command.
+セルフホスト型クラスターを作成する場合は[kubeadm alpha selfhosting pivot](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-selfhosting)を参照してください。
 
-#### Caveats
+#### 警告
 
 {{< caution >}}
-This feature pivots your cluster into an unsupported state, rendering kubeadm unable
-to manage you cluster any longer. This includes `kubeadm upgrade`.
+この機能により、クラスターがサポートされていない状態になり、kubeadmがクラスターを管理できなくなります。これには`kubeadm upgrade`が含まれます。
 {{< /caution >}}
 
-1. Self-hosting in 1.8 and later has some important limitations. In particular, a
-   self-hosted cluster _cannot recover from a reboot of the control-plane node_
-   without manual intervention.
+1. 1.8以降のセルフホスティングには、いくつかの重要な制限があります。特に、セルフホスト型クラスターは、手動の介入なしにコントロールプレーンのNode再起動から回復することはできません。
 
-1. 
By default, self-hosted control plane Pods rely on credentials loaded from
-   [`hostPath`](/docs/concepts/storage/volumes/#hostpath)
-   volumes. Except for initial creation, these credentials are not managed by
-   kubeadm.
+1. デフォルトでは、セルフホスト型のコントロールプレーンのPodは、[`hostPath`](/docs/concepts/storage/volumes/#hostpath)ボリュームからロードされた資格情報に依存しています。最初の作成を除いて、これらの資格情報はkubeadmによって管理されません。
 
-1. The self-hosted portion of the control plane does not include etcd,
-   which still runs as a static Pod.
+1. コントロールプレーンのセルフホストされた部分にはetcdは含まれず、etcdは引き続き静的Podとして実行されます。
 
-#### Process
+#### プロセス
 
-The self-hosting bootstrap process is documented in the [kubeadm design
-document](https://github.com/kubernetes/kubeadm/blob/master/docs/design/design_v1.9.md#optional-self-hosting).
+セルフホスティングのブートストラッププロセスは、[kubeadm design
+document](https://github.com/kubernetes/kubeadm/blob/master/docs/design/design_v1.9.md#optional-self-hosting)に記載されています。
 
-In summary, `kubeadm alpha selfhosting` works as follows:
+要約すると、`kubeadm alpha selfhosting`は次のように機能します。
 
- 1. Waits for this bootstrap static control plane to be running and
-    healthy. This is identical to the `kubeadm init` process without self-hosting.
+  1. 静的コントロールプレーンのブートストラップが起動し、正常になるのを待ちます。これは`kubeadm init`のセルフホスティングを使用しないプロセスと同じです。
 
- 1. Uses the static control plane Pod manifests to construct a set of
-    DaemonSet manifests that will run the self-hosted control plane.
-    It also modifies these manifests where necessary, for example adding new volumes
-    for secrets.
+  1. 静的コントロールプレーンのPodのマニフェストを使用して、セルフホスト型コントロールプレーンを実行する一連のDaemonSetのマニフェストを構築します。また、必要に応じてこれらのマニフェストを変更します。たとえば、シークレット用の新しいボリュームを追加します。
 
- 1. Creates DaemonSets in the `kube-system` namespace and waits for the
-    resulting Pods to be running.
+  1. `kube-system`ネームスペースにDaemonSetを作成し、作成されたPodが起動するのを待ちます。
 
- 1. Once self-hosted Pods are operational, their associated static Pods are deleted
-    and kubeadm moves on to install the next component. This triggers kubelet to
-    stop those static Pods.
+  1. セルフホスト型のPodが動作可能になると、関連する静的Podが削除され、kubeadmは次のコンポーネントのインストールに進みます。これによりkubeletがトリガーされ、それらの静的Podが停止します。
 
- 1. When the original static control plane stops, the new self-hosted control
-    plane is able to bind to listening ports and become active.
 
diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md b/content/ja/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md
index 0d73dc2df6779..356101574d357 100644
--- a/content/ja/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md
+++ b/content/ja/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md
@@ -29,7 +29,8 @@ when using kubeadm to set up a kubernetes cluster.
 
 * Three hosts that can talk to each other over ports 2379 and 2380. This
   document assumes these default ports. However, they are configurable through
   the kubeadm config file.
-* Each host must [have docker, kubelet, and kubeadm installed][toolbox].
+* Each host must [have docker, kubelet, and kubeadm installed](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/).
+* Each host should have access to the Kubernetes container image registry (`k8s.gcr.io`) or list/pull the required etcd image using `kubeadm config images list/pull`. This guide will set up etcd instances as [static pods](/docs/tasks/configure-pod-container/static-pod/) managed by a kubelet.
 * Some infrastructure to copy files between hosts. 
For example `ssh` and `scp` can satisfy this requirement.
 
diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md b/content/ja/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md
index 8e9067a4eb4f8..b6911ba298503 100644
--- a/content/ja/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md
+++ b/content/ja/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md
@@ -6,68 +6,100 @@ weight: 20
 
 
 
-As with any program, you might run into an error installing or running kubeadm.
-This page lists some common failure scenarios and have provided steps that can help you understand and fix the problem.
+どのプログラムでもそうですが、kubeadmのインストールや実行でエラーが発生することがあります。このページでは、一般的な失敗例をいくつか挙げ、問題を理解して解決するための手順を示しています。
 
-If your problem is not listed below, please follow the following steps:
+本ページに問題が記載されていない場合は、以下の手順を行ってください:
 
-- If you think your problem is a bug with kubeadm:
-  - Go to [github.com/kubernetes/kubeadm](https://github.com/kubernetes/kubeadm/issues) and search for existing issues.
-  - If no issue exists, please [open one](https://github.com/kubernetes/kubeadm/issues/new) and follow the issue template.
+- 問題がkubeadmのバグによるものと思った場合:
+  - [github.com/kubernetes/kubeadm](https://github.com/kubernetes/kubeadm/issues)にアクセスして、既存のIssueを探してください。
+  - Issueがない場合は、テンプレートにしたがって[新しくIssueを立ててください](https://github.com/kubernetes/kubeadm/issues/new)。
 
-- If you are unsure about how kubeadm works, you can ask on [Slack](http://slack.k8s.io/) in #kubeadm, or open a question on [StackOverflow](https://stackoverflow.com/questions/tagged/kubernetes). Please include
-  relevant tags like `#kubernetes` and `#kubeadm` so folks can help you.
+- kubeadmがどのように動作するかわからない場合は、[Slack](http://slack.k8s.io/)の#kubeadmチャンネルで質問するか、[StackOverflow](https://stackoverflow.com/questions/tagged/kubernetes)で質問をあげてください。その際は、他の方が助けやすいように`#kubernetes`や`#kubeadm`といったタグをつけてください。
+
+## RBACがないため、v1.18ノードをv1.17クラスタに参加させられない
+
+v1.18では、同名のノードが既に存在する場合に、そのノードがクラスタに参加できないようにする機能が追加されました。これには、ブートストラップトークンユーザがNodeオブジェクトをGETできるようにRBACを追加する必要がありました。
-
+しかし、これによりv1.18の`kubeadm join`がkubeadm v1.17で作成したクラスタに参加できないという問題が発生します。
+
+この問題を回避するには、次の2つの方法があります。
+- kubeadm v1.18を用いて、コントロールプレーンノード上で`kubeadm init phase bootstrap-token`を実行します。
+これにより、ブートストラップトークンのその他のパーミッションも同様に有効になることに注意してください。
+
+- `kubectl apply -f ...`を使って以下のRBACを手動で適用します。
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: kubeadm:get-nodes
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: kubeadm:get-nodes
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kubeadm:get-nodes
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
+  name: system:bootstrappers:kubeadm:default-node-token
+```
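+
+たとえば、上記のマニフェストをファイルに保存して適用する場合は次のようになります(ファイル名は説明用の仮の名前です)。
+
+```shell
+# 上記のClusterRoleとClusterRoleBindingを保存したファイルを適用する例(ファイル名は任意)
+kubectl apply -f kubeadm-get-nodes.yaml
+```
 
 ## インストール中に`ebtables`もしくは他の似たような実行プログラムが見つからない
 
-If you see the following warnings while running `kubeadm init`
+`kubeadm init`の実行中に以下のような警告が表示された場合は、以降に記載する対処を行ってください。
 
 ```sh
 [preflight] WARNING: ebtables not found in system path
 [preflight] WARNING: ethtool not found in system path
 ```
 
-Then you may be missing `ebtables`, `ethtool` or a similar executable on your node. You can install them with the following commands:
+このような場合、ノード上に`ebtables`、`ethtool`などの実行ファイルがない可能性があります。これらをインストールするには、以下のコマンドを実行します。
 
-- For Ubuntu/Debian users, run `apt install ebtables ethtool`.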
-- For CentOS/Fedora users, run `yum install ebtables ethtool`.
+- Ubuntu/Debianユーザーは、`apt install ebtables ethtool`を実行してください。
+- CentOS/Fedoraユーザーは、`yum install ebtables ethtool`を実行してください。
 
 ## インストール中にkubeadmがコントロールプレーンを待ち続けて止まる
 
-If you notice that `kubeadm init` hangs after printing out the following line:
+以下の行を出力した後に`kubeadm init`が止まる場合は、以下の内容を確認してください:
 
 ```sh
 [apiclient] Created API client, waiting for the control plane to become ready
 ```
 
-This may be caused by a number of problems. The most common are:
+これはいくつかの問題が原因となっている可能性があります。最も一般的なのは:
 
-- network connection problems. Check that your machine has full network connectivity before continuing.
-- the default cgroup driver configuration for the kubelet differs from that used by Docker.
-  Check the system log file (e.g. `/var/log/message`) or examine the output from `journalctl -u kubelet`. If you see something like the following:
+- ネットワーク接続の問題が挙げられます。続行する前に、お使いのマシンがネットワークに完全に接続されていることを確認してください。
+- kubeletのデフォルトのcgroupドライバの設定がDockerで使用されているものとは異なっている場合も考えられます。
+  システムログファイル(例: `/var/log/message`)をチェックするか、`journalctl -u kubelet`の出力を調べてください:
 
 ```shell
 error: failed to run Kubelet: failed to create kubelet: misconfiguration: kubelet cgroup driver: "systemd" is different from docker cgroup driver: "cgroupfs"
 ```
 
-  There are two common ways to fix the cgroup driver problem:
+  以上のようなエラーが現れていた場合、cgroupドライバの問題を解決するには、以下の2つの方法があります:
 
-  1. Install Docker again following instructions
-  [here](/ja/docs/setup/independent/install-kubeadm/#installing-docker).
+  1. [ここ](/ja/docs/setup/independent/install-kubeadm/#installing-docker)の指示に従ってDockerを再度インストールします。
 
-  1. Change the kubelet config to match the Docker cgroup driver manually, you can refer to
-  [Configure cgroup driver used by kubelet on Master Node](/ja/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-master-node)
+  1. Dockerのcgroupドライバに合わせてkubeletの設定を手動で変更します。その際は、[マスターノード上でkubeletが使用するcgroupドライバを設定する](/ja/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-master-node)を参照してください。
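+
+  Dockerが現在使用しているcgroupドライバは、次のようにして確認できます(一例です)。
+
+  ```shell
+  # Dockerのcgroupドライバを表示する(cgroupfsまたはsystemdが出力される)
+  docker info --format '{{.CgroupDriver}}'
+  ```
 
-- control plane Docker containers are crashlooping or hanging. You can check this by running `docker ps` and investigating each container by running `docker logs`.
+- コントロールプレーンのDockerコンテナがクラッシュループしたり、ハングしたりしている場合も考えられます。これは`docker ps`を実行し、`docker logs`を実行して各コンテナを調査することで確認できます。
 
 ## 管理コンテナを削除する時にkubeadmが止まる
 
-The following could happen if Docker halts and does not remove any Kubernetes-managed containers:
+Dockerが停止して、Kubernetesで管理されているコンテナを削除しないと、以下のようなことが起こる可能性があります:
 
 ```bash
 sudo kubeadm reset
@@ -78,95 +110,70 @@ sudo kubeadm reset
 (block)
 ```
 
-A possible solution is to restart the Docker service and then re-run `kubeadm reset`:
+考えられる解決策は、Dockerサービスを再起動してから`kubeadm reset`を再実行することです:
 
 ```bash
 sudo systemctl restart docker.service
 sudo kubeadm reset
 ```
 
-Inspecting the logs for docker may also be useful:
+dockerのログを調べるのも有効な場合があります:
 
 ```sh
-journalctl -ul docker
+journalctl -u docker
 ```
 
 ## Podの状態が`RunContainerError`、`CrashLoopBackOff`、または`Error`となる
 
-Right after `kubeadm init` there should not be any pods in these states.
+`kubeadm init`の直後には、これらの状態ではPodは存在しないはずです。
 
-- If there are pods in one of these states _right after_ `kubeadm init`, please open an
-  issue in the kubeadm repo. `coredns` (or `kube-dns`) should be in the `Pending` state
-  until you have deployed the network solution. 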
-- If you see Pods in the `RunContainerError`, `CrashLoopBackOff` or `Error` state
-  after deploying the network solution and nothing happens to `coredns` (or `kube-dns`),
-  it's very likely that the Pod Network solution that you installed is somehow broken.
-  You might have to grant it more RBAC privileges or use a newer version. Please file
-  an issue in the Pod Network providers' issue tracker and get the issue triaged there.
-- If you install a version of Docker older than 1.12.1, remove the `MountFlags=slave` option
-  when booting `dockerd` with `systemd` and restart `docker`. You can see the MountFlags in `/usr/lib/systemd/system/docker.service`.
-  MountFlags can interfere with volumes mounted by Kubernetes, and put the Pods in `CrashLoopBackOff` state.
-  The error happens when Kubernetes does not find `var/run/secrets/kubernetes.io/serviceaccount` files.
+- `kubeadm init`の _直後_ にこれらの状態のいずれかにPodがある場合は、kubeadmのリポジトリにIssueを立ててください。ネットワークソリューションをデプロイするまでは`coredns`(または`kube-dns`)は`Pending`状態でなければなりません。
+- ネットワークソリューションをデプロイした後も`coredns`(または`kube-dns`)に変化がなく、Podが`RunContainerError`、`CrashLoopBackOff`、`Error`のいずれかの状態で表示される場合は、インストールしたPodネットワークソリューションが壊れている可能性が高いです。より多くのRBACの特権を付与するか、新しいバージョンを使用する必要があるかもしれません。PodネットワークプロバイダのイシュートラッカーにIssueを出して、そこで問題をトリアージしてください。
+- 1.12.1よりも古いバージョンのDockerをインストールした場合は、`systemd`で`dockerd`を起動する際に`MountFlags=slave`オプションを削除して`docker`を再起動してください。`MountFlags`は`/usr/lib/systemd/system/docker.service`で確認できます。`MountFlags`はKubernetesがマウントしたボリュームに干渉し、Podを`CrashLoopBackOff`状態にすることがあります。このエラーは、Kubernetesが`var/run/secrets/kubernetes.io/serviceaccount`ファイルを見つけられない場合に発生します。
 
 ## `coredns`(もしくは`kube-dns`)が`Pending`状態でスタックする
 
-This is **expected** and part of the design. kubeadm is network provider-agnostic, so the admin
-should [install the pod network solution](/docs/concepts/cluster-administration/addons/)
-of choice. You have to install a Pod Network
-before CoreDNS may be deployed fully. Hence the `Pending` state before the network is set up.
+これは**想定どおりの挙動**であり、設計の一部です。kubeadmはネットワークプロバイダに依存しないため、管理者は選択した[Podネットワークソリューションをインストール](/docs/concepts/cluster-administration/addons/)する必要があります。CoreDNSを完全にデプロイする前にPodネットワークをインストールする必要があります。したがって、ネットワークがセットアップされる前は`Pending`状態になります。
 
 ## `HostPort`サービスが動かない
 
-The `HostPort` and `HostIP` functionality is available depending on your Pod Network
-provider. Please contact the author of the Pod Network solution to find out whether
-`HostPort` and `HostIP` functionality are available.
+`HostPort`と`HostIP`の機能が利用できるかどうかは、ご使用のPodネットワークプロバイダに依存します。`HostPort`と`HostIP`の機能が利用可能かどうかは、Podネットワークソリューションの作者に問い合わせて確認してください。
 
-Calico, Canal, and Flannel CNI providers are verified to support HostPort.
+Calico、Canal、FlannelのCNIプロバイダは、HostPortをサポートしていることが確認されています。
 
-For more information, see the [CNI portmap documentation](https://github.com/containernetworking/plugins/blob/master/plugins/meta/portmap/README.md).
+詳細については、[CNI portmap documentation](https://github.com/containernetworking/plugins/blob/master/plugins/meta/portmap/README.md)を参照してください。
 
-If your network provider does not support the portmap CNI plugin, you may need to use the [NodePort feature of
-services](/ja/docs/concepts/services-networking/service/#nodeport) or use `HostNetwork=true`. 
+ネットワークプロバイダがportmap CNIプラグインをサポートしていない場合は、[NodePortサービス](/ja/docs/concepts/services-networking/service/#nodeport)を使用するか、`HostNetwork=true`を使用してください。
 
 ## サービスIP経由でPodにアクセスすることができない
 
-- Many network add-ons do not yet enable [hairpin mode](/docs/tasks/debug-application-cluster/debug-service/#a-pod-cannot-reach-itself-via-service-ip)
-  which allows pods to access themselves via their Service IP. This is an issue related to
-  [CNI](https://github.com/containernetworking/cni/issues/476). Please contact the network
-  add-on provider to get the latest status of their support for hairpin mode.
+- 多くのネットワークアドオンは、PodがサービスIPを介して自分自身にアクセスできるようにする[ヘアピンモード](/docs/tasks/debug-application-cluster/debug-service/#a-pod-cannot-reach-itself-via-service-ip)をまだ有効にしていません。これは[CNI](https://github.com/containernetworking/cni/issues/476)に関連する問題です。ヘアピンモードのサポート状況については、ネットワークアドオンプロバイダにお問い合わせください。
 
-- If you are using VirtualBox (directly or via Vagrant), you will need to
-  ensure that `hostname -i` returns a routable IP address. By default the first
-  interface is connected to a non-routable host-only network. A work around
-  is to modify `/etc/hosts`, see this [Vagrantfile](https://github.com/errordeveloper/k8s-playground/blob/22dd39dfc06111235620e6c4404a96ae146f26fd/Vagrantfile#L11)
-  for an example.
+- VirtualBoxを使用している場合(直接またはVagrant経由)は、`hostname -i`がルーティング可能なIPアドレスを返すことを確認する必要があります。デフォルトでは、最初のインターフェースはルーティング可能でないホスト専用のネットワークに接続されています。これを回避するには`/etc/hosts`を修正する必要があります。例としてはこの[Vagrantfile](https://github.com/errordeveloper/k8s-playground/blob/22dd39dfc06111235620e6c4404a96ae146f26fd/Vagrantfile#L11)を参照してください。
 
 ## TLS証明書のエラー
 
-The following error indicates a possible certificate mismatch.
+以下のエラーは、証明書の不一致の可能性を示しています。
 
 ```none
 # kubectl get pods
 Unable to connect to the server: x509: certificate signed by unknown authority (possibly because of "crypto/rsa: verification error" while trying to verify candidate authority certificate "kubernetes")
 ```
 
-- Verify that the `$HOME/.kube/config` file contains a valid certificate, and
-  regenerate a certificate if necessary. The certificates in a kubeconfig file
-  are base64 encoded. The `base64 --decode` command can be used to decode the certificate
-  and `openssl x509 -text -noout` can be used for viewing the certificate information.
+- `$HOME/.kube/config`ファイルに有効な証明書が含まれていることを確認し、必要に応じて証明書を再生成します。kubeconfigファイル内の証明書はbase64でエンコードされています。証明書をデコードするには`base64 --decode`コマンドを、証明書情報を表示するには`openssl x509 -text -noout`コマンドを用いてください。
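+
+  たとえば、kubeconfigに埋め込まれたクライアント証明書の内容は、次のようなワンライナーで確認できます(ファイルパスとキー名は一般的な構成を想定した一例です)。
+
+  ```shell
+  # kubeconfigからclient-certificate-dataを取り出してデコードし、証明書の内容を表示する
+  grep 'client-certificate-data' $HOME/.kube/config | awk '{print $2}' | base64 --decode | openssl x509 -text -noout
+  ```
+
-- Unset the `KUBECONFIG` environment variable using:
+- 環境変数`KUBECONFIG`の設定を解除するには以下のコマンドを実行するか:
 
 ```sh
 unset KUBECONFIG
 ```
 
-  Or set it to the default `KUBECONFIG` location:
+  設定をデフォルトの`KUBECONFIG`の場所に設定します:
 
 ```sh
 export KUBECONFIG=/etc/kubernetes/admin.conf
 ```
 
-- Another workaround is to overwrite the existing `kubeconfig` for the "admin" user:
+- もう一つの回避策は、既存の`kubeconfig`を"admin"ユーザーのもので上書きすることです:
 
 ```sh
 mv $HOME/.kube $HOME/.kube.bak
@@ -177,38 +184,38 @@
 
 ## Vagrant内でPodネットワークとしてflannelを使用する時のデフォルトNIC
 
-The following error might indicate that something was wrong in the pod network:
+以下のエラーは、Podネットワークに何らかの問題があった可能性を示しています:
 
 ```sh
 Error from server (NotFound): the server could not find the requested resource
 ```
 
-- If you're using flannel as the pod network inside Vagrant, then you will have to specify the default interface name for flannel. 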
## Vagrant内でPodネットワークとしてflannelを使用する時のデフォルトNIC

-The following error might indicate that something was wrong in the pod network:
+以下のエラーは、Podネットワークに問題があることを示している可能性があります:

```sh
Error from server (NotFound): the server could not find the requested resource
```

-- If you're using flannel as the pod network inside Vagrant, then you will have to specify the default interface name for flannel.
+- Vagrant内のPodネットワークとしてflannelを使用している場合は、flannelのデフォルトのインターフェース名を指定する必要があります。

- Vagrant typically assigns two interfaces to all VMs. The first, for which all hosts are assigned the IP address `10.0.2.15`, is for external traffic that gets NATed.
+ Vagrantは通常、2つのインターフェースを全てのVMに割り当てます。1つ目は全てのホストにIPアドレス`10.0.2.15`が割り当てられており、NATされる外部トラフィックのためのものです。

- This may lead to problems with flannel, which defaults to the first interface on a host. This leads to all hosts thinking they have the same public IP address. To prevent this, pass the `--iface eth1` flag to flannel so that the second interface is chosen.
+ これは、ホストの最初のインターフェイスをデフォルトとして使用するflannelで問題を引き起こす可能性があります。その結果、すべてのホストが自分は同じパブリックIPアドレスを持っていると認識してしまいます。これを防ぐには、2番目のインターフェイスが選択されるように`--iface eth1`フラグをflannelに渡してください。

## 公開されていないIPがコンテナに使われている

-In some situations `kubectl logs` and `kubectl run` commands may return with the following errors in an otherwise functional cluster:
+状況によっては、それ以外は正常に動作しているクラスタで、`kubectl logs`や`kubectl run`コマンドが以下のようなエラーを返すことがあります:

```sh
Error from server: Get https://10.19.0.41:10250/containerLogs/default/mysql-ddc65b868-glc5m/mysql: dial tcp 10.19.0.41:10250: getsockopt: no route to host
```

-- This may be due to Kubernetes using an IP that can not communicate with other IPs on the seemingly same subnet, possibly by policy of the machine provider.
-- DigitalOcean assigns a public IP to `eth0` as well as a private one to be used internally as anchor for their floating IP feature, yet `kubelet` will pick the latter as the node's `InternalIP` instead of the public one.
+- これは、おそらくマシンプロバイダのポリシーにより、一見同じサブネット上にある他のIPと通信できないIPをKubernetesが使用していることが原因である可能性があります。
+- DigitalOceanは`eth0`にパブリックIPを割り当てるとともに、フローティングIP機能のアンカーとして内部的に使用するプライベートIPも割り当てます。しかし、`kubelet`はパブリックIPではなく、後者をノードの`InternalIP`として選択してしまいます。

- Use `ip addr show` to check for this scenario instead of `ifconfig` because `ifconfig` will not display the offending alias IP address. Alternatively an API endpoint specific to DigitalOcean allows to query for the anchor IP from the droplet:
+ `ifconfig`では問題のエイリアスIPアドレスが表示されないため、`ifconfig`の代わりに`ip addr show`を使用してこのシナリオをチェックしてください。あるいは、DigitalOcean専用のAPIエンドポイントを使用して、ドロップレットからアンカーIPを取得することもできます:

 ```sh
 curl http://169.254.169.254/metadata/v1/interfaces/public/0/anchor_ipv4/address
 ```

- The workaround is to tell `kubelet` which IP to use using `--node-ip`. When using DigitalOcean, it can be the public one (assigned to `eth0`) or the private one (assigned to `eth1`) should you want to use the optional private network. The [`KubeletExtraArgs` section of the kubeadm `NodeRegistrationOptions` structure](https://github.com/kubernetes/kubernetes/blob/release-1.13/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go) can be used for this.
+ 回避策は、`--node-ip`を使ってどのIPを使うかを`kubelet`に伝えることです。DigitalOceanを使用する場合は、パブリックIP(`eth0`に割り当てられている)か、オプションのプライベートネットワークを使用したいのであればプライベートIP(`eth1`に割り当てられている)を指定します。これにはkubeadmの`NodeRegistrationOptions`構造体の[`KubeletExtraArgs`セクション](https://github.com/kubernetes/kubernetes/blob/release-1.13/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go)が利用できます。

- Then restart `kubelet`:
+ その後、`kubelet`を再起動してください:

 ```sh
 systemctl daemon-reload
@@ -217,13 +224,12 @@ Error from server: Get https://10.19.0.41:10250/containerLogs/default/mysql-ddc6
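As a sketch of the `--node-ip` workaround above: on kubeadm-provisioned Debian-like nodes, extra kubelet flags can be supplied through `/etc/default/kubelet` (RPM-based systems typically use `/etc/sysconfig/kubelet` instead). The address below is a documentation placeholder; substitute the IP the kubelet should advertise:

```bash
# Hypothetical node address (TEST-NET-3 documentation range).
NODE_IP="203.0.113.10"

# Have kubeadm's kubelet drop-in pass --node-ip on every start.
echo "KUBELET_EXTRA_ARGS=--node-ip=${NODE_IP}" | sudo tee /etc/default/kubelet

sudo systemctl daemon-reload
sudo systemctl restart kubelet
```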
## `coredns`のPodが`CrashLoopBackOff`もしくは`Error`状態になる

-If you have nodes that are running SELinux with an older version of Docker you might experience a scenario
-where the `coredns` pods are not starting. To solve that you can try one of the following options:
+SELinuxが有効なノードで古いバージョンのDockerを使用している場合、`coredns`のPodが起動しない事象が発生することがあります。この問題を解決するには、以下のオプションのいずれかを試してみてください:

-- Upgrade to a [newer version of Docker](/ja/docs/setup/independent/install-kubeadm/#installing-docker).
+- [新しいバージョンのDocker](/ja/docs/setup/independent/install-kubeadm/#installing-docker)にアップグレードする。

-- [Disable SELinux](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/security-enhanced_linux/sect-security-enhanced_linux-enabling_and_disabling_selinux-disabling_selinux).
-- Modify the `coredns` deployment to set `allowPrivilegeEscalation` to `true`:
+- [SELinuxを無効化する](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/security-enhanced_linux/sect-security-enhanced_linux-enabling_and_disabling_selinux-disabling_selinux)。
+- `coredns`のDeploymentを変更して、`allowPrivilegeEscalation`を`true`に設定する:

```bash
kubectl -n kube-system get deployment coredns -o yaml | \
@@ -231,108 +237,84 @@ kubectl -n kube-system get deployment coredns -o yaml | \
  kubectl apply -f -
```

-Another cause for CoreDNS to have `CrashLoopBackOff` is when a CoreDNS Pod deployed in Kubernetes detects a loop. [A number of workarounds](https://github.com/coredns/coredns/tree/master/plugin/loop#troubleshooting-loops-in-kubernetes-clusters)
-are available to avoid Kubernetes trying to restart the CoreDNS Pod every time CoreDNS detects the loop and exits.
+CoreDNSが`CrashLoopBackOff`になるもう一つの原因は、KubernetesにデプロイされたCoreDNS Podがループを検出した場合です。CoreDNSがループを検出して終了するたびにKubernetesがCoreDNS Podを再起動しようとするのを避けるための[いくつかの回避策](https://github.com/coredns/coredns/tree/master/plugin/loop#troubleshooting-loops-in-kubernetes-clusters)が用意されています。

{{< warning >}}
-Disabling SELinux or setting `allowPrivilegeEscalation` to `true` can compromise
-the security of your cluster.
+SELinuxを無効にするか`allowPrivilegeEscalation`を`true`に設定すると、クラスタのセキュリティが損なわれる可能性があります。
{{< /warning >}}

## etcdのpodが継続的に再起動する

-If you encounter the following error:
+以下のエラーが発生した場合は:

```
rpc error: code = 2 desc = oci runtime error: exec failed: container_linux.go:247: starting container process caused "process_linux.go:110: decoding init error from pipe caused \"read parent: connection reset by peer\""
```

-this issue appears if you run CentOS 7 with Docker 1.13.1.84.
-This version of Docker can prevent the kubelet from executing into the etcd container.
+この問題は、CentOS 7をDocker 1.13.1.84で実行した場合に発生します。このバージョンのDockerでは、kubeletがetcdコンテナ内でコマンドを実行(exec)できなくなることがあります。

-To work around the issue, choose one of these options:
+この問題を回避するには、以下のいずれかのオプションを選択します:

-- Roll back to an earlier version of Docker, such as 1.13.1-75
+- 1.13.1-75のような以前のバージョンのDockerにロールバックする

```
yum downgrade docker-1.13.1-75.git8633870.el7.centos.x86_64 docker-client-1.13.1-75.git8633870.el7.centos.x86_64 docker-common-1.13.1-75.git8633870.el7.centos.x86_64
```

-- Install one of the more recent recommended versions, such as 18.06:
+- 18.06のような最新の推奨バージョンをインストールする:

```bash
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install docker-ce-18.06.1.ce-3.el7.x86_64
```
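After rolling back or reinstalling, a quick sanity check (a sketch, not part of the original instructions) confirms that the node now runs the intended Docker version and that the daemon restarted cleanly:

```bash
# Print the running Docker server version to confirm the change took effect.
docker version --format '{{.Server.Version}}'

# Verify the daemon is active and healthy.
sudo systemctl status docker --no-pager
```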
-## Not possible to pass a comma separated list of values to arguments inside a `--component-extra-args` flag
-
-`kubeadm init` flags such as `--component-extra-args` allow you to pass custom arguments to a control-plane
-component like the kube-apiserver. However, this mechanism is limited due to the underlying type used for parsing
-the values (`mapStringString`).
+## コンマで区切られた値のリストを`--component-extra-args`フラグ内の引数に渡すことができない

-If you decide to pass an argument that supports multiple, comma-separated values such as
-`--apiserver-extra-args "enable-admission-plugins=LimitRanger,NamespaceExists"` this flag will fail with
-`flag: malformed pair, expect string=string`. This happens because the list of arguments for
-`--apiserver-extra-args` expects `key=value` pairs and in this case `NamespacesExists` is considered
-as a key that is missing a value.
+`--component-extra-args`のような`kubeadm init`のフラグを使うと、kube-apiserverのようなコントロールプレーンコンポーネントにカスタム引数を渡すことができます。しかし、このメカニズムは値の解析に使われる基礎的な型(`mapStringString`)のために制限されています。

-Alternatively, you can try separating the `key=value` pairs like so:
-`--apiserver-extra-args "enable-admission-plugins=LimitRanger,enable-admission-plugins=NamespaceExists"`
-but this will result in the key `enable-admission-plugins` only having the value of `NamespaceExists`.
+もし、`--apiserver-extra-args "enable-admission-plugins=LimitRanger,NamespaceExists"`のようにカンマで区切られた複数の値をサポートする引数を渡した場合、このフラグは`flag: malformed pair, expect string=string`で失敗します。これは`--apiserver-extra-args`の引数リストが`key=value`のペアを期待しており、この場合`NamespacesExists`は値を欠いたキーとみなされるためです。

-A known workaround is to use the kubeadm [configuration file](/ja/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#apiserver-flags).
+別の方法として、`key=value`のペアを以下のように分離してみることもできます:
+`--apiserver-extra-args "enable-admission-plugins=LimitRanger,enable-admission-plugins=NamespaceExists"`。しかし、この場合、キー`enable-admission-plugins`は`NamespaceExists`の値しか持ちません。既知の回避策としては、kubeadmの[設定ファイル](/ja/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#apiserver-flags)を使用することが挙げられます。

-## kube-proxy scheduled before node is initialized by cloud-controller-manager
+## cloud-controller-managerによってノードが初期化される前にkube-proxyがスケジューリングされる

-In cloud provider scenarios, kube-proxy can end up being scheduled on new worker nodes before
-the cloud-controller-manager has initialized the node addresses. This causes kube-proxy to fail
-to pick up the node's IP address properly and has knock-on effects to the proxy function managing
-load balancers.
+クラウドプロバイダのシナリオでは、cloud-controller-managerがノードアドレスを初期化する前に、kube-proxyが新しいワーカーノードにスケジューリングされてしまうことがあります。これにより、kube-proxyがノードのIPアドレスを正しく取得できず、ロードバランサを管理するプロキシ機能に悪影響を及ぼします。

-The following error can be seen in kube-proxy Pods:
+kube-proxyのPodでは以下のようなエラーが発生します:

```
server.go:610] Failed to retrieve node IP: host IP unknown; known addresses: []
proxier.go:340] invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP
```

-A known solution is to patch the kube-proxy DaemonSet to allow scheduling it on control-plane
-nodes regardless of their conditions, keeping it off of other nodes until their initial guarding
-conditions abate:
+既知の解決策は、kube-proxyのDaemonSetにパッチを当てて、他のノードには初期のガード条件が解消されるまでスケジューリングされないようにしつつ、コントロールプレーンノード上では条件に関係なくスケジューリングできるようにすることです:

```
kubectl -n kube-system patch ds kube-proxy -p='{ "spec": { "template": { "spec": { "tolerations": [ { "key": "CriticalAddonsOnly", "operator": "Exists" }, { "effect": "NoSchedule", "key": "node-role.kubernetes.io/master" } ] } } } }'
```
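To confirm the patch above landed, one can inspect the tolerations now present on the DaemonSet; this is a sketch using a JSONPath query, and the exact output formatting varies with the kubectl version:

```bash
# Show the tolerations on the kube-proxy DaemonSet after patching.
kubectl -n kube-system get ds kube-proxy \
  -o jsonpath='{.spec.template.spec.tolerations}'
```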
-The tracking issue for this problem is [here](https://github.com/kubernetes/kubeadm/issues/1027).
+この問題のトラッキングIssueは[こちら](https://github.com/kubernetes/kubeadm/issues/1027)です。

-## The NodeRegistration.Taints field is omitted when marshalling kubeadm configuration
+## kubeadmの設定をマーシャリングする際、NodeRegistration.Taintsフィールドが省略される

-*Note: This [issue](https://github.com/kubernetes/kubeadm/issues/1358) only applies to tools that marshal kubeadm types (e.g. to a YAML configuration file). It will be fixed in kubeadm API v1beta2.*
+*注意: この[Issue](https://github.com/kubernetes/kubeadm/issues/1358)は、kubeadmの型をマーシャルするツール(YAML設定ファイルへの出力など)にのみ当てはまります。これはkubeadm API v1beta2で修正される予定です。*

-By default, kubeadm applies the `node-role.kubernetes.io/master:NoSchedule` taint to control-plane nodes.
-If you prefer kubeadm to not taint the control-plane node, and set `InitConfiguration.NodeRegistration.Taints` to an empty slice,
-the field will be omitted when marshalling. When the field is omitted, kubeadm applies the default taint.
+デフォルトでは、kubeadmはコントロールプレーンノードに`node-role.kubernetes.io/master:NoSchedule`のテイントを適用します。kubeadmがコントロールプレーンノードにテイントを適用しないようにするために`InitConfiguration.NodeRegistration.Taints`を空のスライスに設定すると、マーシャリング時にこのフィールドは省略されます。フィールドが省略された場合、kubeadmはデフォルトのテイントを適用します。

-There are at least two workarounds:
+少なくとも2つの回避策があります:

-1. Use the `node-role.kubernetes.io/master:PreferNoSchedule` taint instead of an empty slice. [Pods will get scheduled on masters](/docs/concepts/configuration/taint-and-toleration/), unless other nodes have capacity.
+1. 空のスライスの代わりに`node-role.kubernetes.io/master:PreferNoSchedule`テイントを使用します。他のノードに空き容量がない限り、[Podはマスター上にスケジュールされます](/docs/concepts/scheduling-eviction/taint-and-toleration/)。

-2. Remove the taint after kubeadm init exits:
+2. kubeadm initの終了後にテイントを除去します:

```bash
kubectl taint nodes NODE_NAME node-role.kubernetes.io/master:NoSchedule-
```

-## `/usr` is mounted read-only on nodes {#usr-mounted-read-only}
+## ノードに`/usr`が読み取り専用でマウントされる {#usr-mounted-read-only}

-On Linux distributions such as Fedora CoreOS, the directory `/usr` is mounted as a read-only filesystem.
-For [flex-volume support](https://github.com/kubernetes/community/blob/ab55d85/contributors/devel/sig-storage/flexvolume.md),
-Kubernetes components like the kubelet and kube-controller-manager use the default path of
-`/usr/libexec/kubernetes/kubelet-plugins/volume/exec/`, yet the flex-volume directory _must be writeable_
-for the feature to work.
+Fedora CoreOSなどのLinuxディストリビューションでは、ディレクトリ`/usr`が読み取り専用のファイルシステムとしてマウントされます。[flex-volumeサポート](https://github.com/kubernetes/community/blob/ab55d85/contributors/devel/sig-storage/flexvolume.md)では、kubeletやkube-controller-managerのようなKubernetesコンポーネントはデフォルトで`/usr/libexec/kubernetes/kubelet-plugins/volume/exec/`のパスを使用しますが、この機能を動作させるためにはflex-volumeディレクトリが _書き込み可能_ でなければなりません。
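One quick way to check whether a node is affected (a sketch; `findmnt` ships with util-linux on most distributions):

```bash
# Print the mount options for /usr and flag a read-only mount.
findmnt -no OPTIONS /usr | grep -qw ro && echo "/usr is mounted read-only"
```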
-To workaround this issue you can configure the flex-volume directory using the kubeadm
-[configuration file](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2).
+この問題を回避するには、kubeadmの[設定ファイル](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2)を使用してflex-volumeディレクトリを設定します。

-On the primary control-plane Node (created using `kubeadm init`) pass the following
-file using `--config`:
+プライマリコントロールプレーンノード(`kubeadm init`で作成されたもの)上で、`--config`を使って以下のファイルを渡します:

```yaml
apiVersion: kubeadm.k8s.io/v1beta2
@@ -348,7 +330,7 @@ controllerManager:
    flex-volume-plugin-dir: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/"
```

-On joining Nodes:
+ジョインするノード上では、以下のファイルを渡します:

```yaml
apiVersion: kubeadm.k8s.io/v1beta2
@@ -358,5 +340,9 @@ nodeRegistration:
    volume-plugin-dir: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/"
```

-Alternatively, you can modify `/etc/fstab` to make the `/usr` mount writeable, but please
-be advised that this is modifying a design principle of the Linux distribution.
+あるいは、`/usr`マウントを書き込み可能にするために`/etc/fstab`を変更することもできますが、これはLinuxディストリビューションの設計原則を変更することになる点に注意してください。
+
+## `kubeadm upgrade plan`が`context deadline exceeded`エラーメッセージを表示する
+
+このエラーメッセージは、外部etcdを使用しているKubernetesクラスタを`kubeadm`でアップグレードする際に表示されます。これは致命的なバグではなく、古いバージョンのkubeadmが外部etcdクラスタのバージョンチェックを行うために発生します。そのまま`kubeadm upgrade apply ...`で先に進むことができます。
+
+この問題はバージョン1.19で修正されます。
\ No newline at end of file
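The failed etcd version check does not block the upgrade itself; a sketch of proceeding, where the target release string is a hypothetical placeholder to be replaced with the actual version:

```bash
# `kubeadm upgrade plan` may time out against external etcd on affected
# versions, but applying the upgrade still works. v1.x.y is a placeholder.
sudo kubeadm upgrade apply v1.x.y
```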
diff --git a/content/ja/docs/setup/production-environment/tools/kubespray.md b/content/ja/docs/setup/production-environment/tools/kubespray.md
index 6c02ca5374aa5..e8c49078fd45b 100644
--- a/content/ja/docs/setup/production-environment/tools/kubespray.md
+++ b/content/ja/docs/setup/production-environment/tools/kubespray.md
@@ -8,7 +8,7 @@ weight: 30

This quickstart helps to install a Kubernetes cluster hosted on GCE, Azure, OpenStack, AWS, vSphere, Packet (bare metal), Oracle Cloud Infrastructure (Experimental) or Baremetal with [Kubespray](https://github.com/kubernetes-sigs/kubespray).

-Kubespray is a composition of [Ansible](http://docs.ansible.com/) playbooks, [inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ansible.md), provisioning tools, and domain knowledge for generic OS/Kubernetes clusters configuration management tasks. Kubespray provides:
+Kubespray is a composition of [Ansible](https://docs.ansible.com/) playbooks, [inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ansible.md), provisioning tools, and domain knowledge for generic OS/Kubernetes clusters configuration management tasks. Kubespray provides:

* a highly available cluster
* composable attributes
@@ -21,7 +21,8 @@ Kubespray is a composition of [Ansible](http://docs.ansible.com/) playbooks, [in
* openSUSE Leap 15
* continuous integration tests

-To choose a tool which best fits your use case, read [this comparison](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/comparisons.md) to [kubeadm](/docs/admin/kubeadm/) and [kops](/docs/setup/production-environment/tools/kops/).
+To choose a tool which best fits your use case, read [this comparison](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/comparisons.md) to
+[kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) and [kops](/docs/setup/production-environment/tools/kops/).

@@ -50,7 +51,7 @@ Kubespray provides the following utilities to help provision your environment:

### (2/5) インベントリファイルの用意

-After you provision your servers, create an [inventory file for Ansible](http://docs.ansible.com/ansible/intro_inventory.html). You can do this manually or via a dynamic inventory script. For more information, see "[Building your own inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#building-your-own-inventory)".
+After you provision your servers, create an [inventory file for Ansible](https://docs.ansible.com/ansible/intro_inventory.html). You can do this manually or via a dynamic inventory script. For more information, see "[Building your own inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#building-your-own-inventory)".

### (3/5) クラスタ作成の計画

@@ -68,7 +69,7 @@ Kubespray provides the ability to customize many aspects of the deployment:
* {{< glossary_tooltip term_id="cri-o" >}}
* Certificate generation methods

-Kubespray customizations can be made to a [variable file](http://docs.ansible.com/ansible/playbooks_variables.html). If you are just getting started with Kubespray, consider using the Kubespray defaults to deploy your cluster and explore Kubernetes.
+Kubespray customizations can be made to a [variable file](https://docs.ansible.com/ansible/playbooks_variables.html). If you are just getting started with Kubespray, consider using the Kubespray defaults to deploy your cluster and explore Kubernetes.

### (4/5) クラスタのデプロイ

@@ -110,7 +111,7 @@ When running the reset playbook, be sure not to accidentally target your product

## フィードバック

-* Slack Channel: [#kubespray](https://kubernetes.slack.com/messages/kubespray/) (You can get your invite [here](http://slack.k8s.io/))
+* Slack Channel: [#kubespray](https://kubernetes.slack.com/messages/kubespray/) (You can get your invite [here](https://slack.k8s.io/))

* [GitHub Issues](https://github.com/kubernetes-sigs/kubespray/issues)

diff --git a/content/ja/docs/setup/production-environment/turnkey/aws.md b/content/ja/docs/setup/production-environment/turnkey/aws.md
index 1fc53a1f28d2e..03246c1b06e85 100644
--- a/content/ja/docs/setup/production-environment/turnkey/aws.md
+++ b/content/ja/docs/setup/production-environment/turnkey/aws.md
@@ -20,9 +20,7 @@ AWS上でKubernetesクラスターを作成するには、AWSからアクセス

* [Kubernetes Operations](https://github.com/kubernetes/kops) - プロダクショングレードなKubernetesのインストール、アップグレード、管理が可能です。AWS上のDebian、Ubuntu、CentOS、RHELをサポートしています。

-* [CoreOS Tectonic](https://coreos.com/tectonic/)はAWS上のContainer Linuxノードを含むKubernetesクラスターを作成できる、オープンソースの[Tectonic Installer](https://github.com/coreos/tectonic-installer)を含みます。
-
-* CoreOSから生まれ、Kubernetes IncubatorがメンテナンスしているCLIツール[kube-aws](https://github.com/kubernetes-incubator/kube-aws)は、[Container Linux](https://coreos.com/why/)ノードを使用したAWSツール(EC2、CloudFormation、Auto Scaling)によるKubernetesクラスターを作成および管理できます。
+* [kube-aws](https://github.com/kubernetes-incubator/kube-aws)は、EC2、CloudFormation、Auto Scalingを使用して、[Flatcar Linux](https://www.flatcar-linux.org/)ノードでKubernetesクラスターを作成および管理します。

* [KubeOne](https://github.com/kubermatic/kubeone)は可用性の高いKubernetesクラスターを作成、アップグレード、管理するための、オープンソースのライフサイクル管理ツールです。

@@ -46,10 +44,10 @@ export PATH=/platforms/darwin/amd64:$PATH
export PATH=/platforms/linux/amd64:$PATH
```

-ツールに関する最新のドキュメントページはこちらです: [kubectl manual](/docs/user-guide/kubectl/)
+ツールに関する最新のドキュメントページはこちらです: [kubectl manual](/docs/reference/kubectl/kubectl/)

デフォルトでは、`kubectl`はクラスターの起動中に生成された`kubeconfig`ファイルをAPIに対する認証に使用します。
-詳細な情報は、[kubeconfig files](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)を参照してください。
+詳細な情報は、[kubeconfig files](/ja/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)を参照してください。

### 例

@@ 
-61,7 +59,7 @@ export PATH=/platforms/linux/amd64:$PATH ## クラスターのスケーリング -`kubectl`を使用したノードの追加および削除はサポートしていません。インストール中に作成された[Auto Scaling Group](http://docs.aws.amazon.com/autoscaling/latest/userguide/as-manual-scaling.html)内の'Desired'および'Max'プロパティを手動で調整することで、ノード数をスケールさせることができます。 +`kubectl`を使用したノードの追加および削除はサポートしていません。インストール中に作成された[Auto Scaling Group](https://docs.aws.amazon.com/autoscaling/latest/userguide/as-manual-scaling.html)内の'Desired'および'Max'プロパティを手動で調整することで、ノード数をスケールさせることができます。 ## クラスターの解体 @@ -77,12 +75,8 @@ cluster/kube-down.sh IaaS プロバイダー | 構成管理 | OS | ネットワーク | ドキュメント | 適合 | サポートレベル -------------------- | ------------ | ------------- | ------------ | --------------------------------------------- | ---------| ---------------------------- AWS | kops | Debian | k8s (VPC) | [docs](https://github.com/kubernetes/kops) | | Community ([@justinsb](https://github.com/justinsb)) -AWS | CoreOS | CoreOS | flannel | [docs](/docs/getting-started-guides/aws) | | Community -AWS | Juju | Ubuntu | flannel, calico, canal | [docs](/docs/getting-started-guides/ubuntu) | 100% | Commercial, Community +AWS | CoreOS | CoreOS | flannel | - | | Community +AWS | Juju | Ubuntu | flannel, calico, canal | - | 100% | Commercial, Community AWS | KubeOne | Ubuntu, CoreOS, CentOS | canal, weavenet | [docs](https://github.com/kubermatic/kubeone) | 100% | Commercial, Community -## 参考文献 - -Kubernetesクラスターの利用と管理に関する詳細は、[Kubernetesドキュメント](/ja/docs/)を参照してください。 - diff --git a/content/ja/docs/setup/production-environment/turnkey/clc.md b/content/ja/docs/setup/production-environment/turnkey/clc.md deleted file mode 100644 index b700456b87d51..0000000000000 --- a/content/ja/docs/setup/production-environment/turnkey/clc.md +++ /dev/null @@ -1,340 +0,0 @@ ---- -title: CenturyLink Cloud上でKubernetesを動かす ---- - - -These scripts handle the creation, deletion and expansion of Kubernetes clusters on CenturyLink Cloud. - -You can accomplish all these tasks with a single command. We have made the Ansible playbooks used to perform these tasks available [here](https://github.com/CenturyLinkCloud/adm-kubernetes-on-clc/blob/master/ansible/README.md). - -## ヘルプの検索 - -If you run into any problems or want help with anything, we are here to help. Reach out to use via any of the following ways: - -- Submit a github issue -- Send an email to Kubernetes AT ctl DOT io -- Visit [http://info.ctl.io/kubernetes](http://info.ctl.io/kubernetes) - -## 仮想マシンもしくは物理サーバーのクラスター、その選択 - -- We support Kubernetes clusters on both Virtual Machines or Physical Servers. If you want to use physical servers for the worker nodes (minions), simple use the --minion_type=bareMetal flag. -- For more information on physical servers, visit: [https://www.ctl.io/bare-metal/](https://www.ctl.io/bare-metal/) -- Physical serves are only available in the VA1 and GB3 data centers. -- VMs are available in all 13 of our public cloud locations - -## 必要条件 - -The requirements to run this script are: - -- A linux administrative host (tested on ubuntu and macOS) -- python 2 (tested on 2.7.11) - - pip (installed with python as of 2.7.9) -- git -- A CenturyLink Cloud account with rights to create new hosts -- An active VPN connection to the CenturyLink Cloud from your linux host - -## スクリプトのインストール - -After you have all the requirements met, please follow these instructions to install this script. - -1) Clone this repository and cd into it. 
- -```shell -git clone https://github.com/CenturyLinkCloud/adm-kubernetes-on-clc -``` - -2) Install all requirements, including - - * Ansible - * CenturyLink Cloud SDK - * Ansible Modules - -```shell -sudo pip install -r ansible/requirements.txt -``` - -3) Create the credentials file from the template and use it to set your ENV variables - -```shell -cp ansible/credentials.sh.template ansible/credentials.sh -vi ansible/credentials.sh -source ansible/credentials.sh - -``` - -4) Grant your machine access to the CenturyLink Cloud network by using a VM inside the network or [ configuring a VPN connection to the CenturyLink Cloud network.](https://www.ctl.io/knowledge-base/network/how-to-configure-client-vpn/) - - -#### スクリプトのインストールの例: Ububtu 14の手順 - -If you use an ubuntu 14, for your convenience we have provided a step by step -guide to install the requirements and install the script. - -```shell -# system -apt-get update -apt-get install -y git python python-crypto -curl -O https://bootstrap.pypa.io/get-pip.py -python get-pip.py - -# installing this repository -mkdir -p ~home/k8s-on-clc -cd ~home/k8s-on-clc -git clone https://github.com/CenturyLinkCloud/adm-kubernetes-on-clc.git -cd adm-kubernetes-on-clc/ -pip install -r requirements.txt - -# getting started -cd ansible -cp credentials.sh.template credentials.sh; vi credentials.sh -source credentials.sh -``` - - - -## クラスターの作成 - -To create a new Kubernetes cluster, simply run the ```kube-up.sh``` script. A complete -list of script options and some examples are listed below. - -```shell -CLC_CLUSTER_NAME=[name of kubernetes cluster] -cd ./adm-kubernetes-on-clc -bash kube-up.sh -c="$CLC_CLUSTER_NAME" -``` - -It takes about 15 minutes to create the cluster. Once the script completes, it -will output some commands that will help you setup kubectl on your machine to -point to the new cluster. - -When the cluster creation is complete, the configuration files for it are stored -locally on your administrative host, in the following directory - -```shell -> CLC_CLUSTER_HOME=$HOME/.clc_kube/$CLC_CLUSTER_NAME/ -``` - - -#### クラスターの作成: スクリプトのオプション - -```shell -Usage: kube-up.sh [OPTIONS] -Create servers in the CenturyLinkCloud environment and initialize a Kubernetes cluster -Environment variables CLC_V2_API_USERNAME and CLC_V2_API_PASSWD must be set in -order to access the CenturyLinkCloud API - -All options (both short and long form) require arguments, and must include "=" -between option name and option value. - - -h (--help) display this help and exit - -c= (--clc_cluster_name=) set the name of the cluster, as used in CLC group names - -t= (--minion_type=) standard -> VM (default), bareMetal -> physical] - -d= (--datacenter=) VA1 (default) - -m= (--minion_count=) number of kubernetes minion nodes - -mem= (--vm_memory=) number of GB ram for each minion - -cpu= (--vm_cpu=) number of virtual cps for each minion node - -phyid= (--server_conf_id=) physical server configuration id, one of - physical_server_20_core_conf_id - physical_server_12_core_conf_id - physical_server_4_core_conf_id (default) - -etcd_separate_cluster=yes create a separate cluster of three etcd nodes, - otherwise run etcd on the master node -``` - -## クラスターの拡張 - -To expand an existing Kubernetes cluster, run the ```add-kube-node.sh``` -script. A complete list of script options and some examples are listed [below](#cluster-expansion-script-options). 
-This script must be run from the same host that created the cluster (or a host -that has the cluster artifact files stored in ```~/.clc_kube/$cluster_name```). - -```shell -cd ./adm-kubernetes-on-clc -bash add-kube-node.sh -c="name_of_kubernetes_cluster" -m=2 -``` - -#### クラスターの拡張: スクリプトのオプション - -```shell -Usage: add-kube-node.sh [OPTIONS] -Create servers in the CenturyLinkCloud environment and add to an -existing CLC kubernetes cluster - -Environment variables CLC_V2_API_USERNAME and CLC_V2_API_PASSWD must be set in -order to access the CenturyLinkCloud API - - -h (--help) display this help and exit - -c= (--clc_cluster_name=) set the name of the cluster, as used in CLC group names - -m= (--minion_count=) number of kubernetes minion nodes to add -``` - -## クラスターの削除 - -There are two ways to delete an existing cluster: - -1) Use our python script: - -```shell -python delete_cluster.py --cluster=clc_cluster_name --datacenter=DC1 -``` - -2) Use the CenturyLink Cloud UI. To delete a cluster, log into the CenturyLink -Cloud control portal and delete the parent server group that contains the -Kubernetes Cluster. We hope to add a scripted option to do this soon. - -## 例 - -Create a cluster with name of k8s_1, 1 master node and 3 worker minions (on physical machines), in VA1 - -```shell -bash kube-up.sh --clc_cluster_name=k8s_1 --minion_type=bareMetal --minion_count=3 --datacenter=VA1 -``` - -Create a cluster with name of k8s_2, an ha etcd cluster on 3 VMs and 6 worker minions (on VMs), in VA1 - -```shell -bash kube-up.sh --clc_cluster_name=k8s_2 --minion_type=standard --minion_count=6 --datacenter=VA1 --etcd_separate_cluster=yes -``` - -Create a cluster with name of k8s_3, 1 master node, and 10 worker minions (on VMs) with higher mem/cpu, in UC1: - -```shell -bash kube-up.sh --clc_cluster_name=k8s_3 --minion_type=standard --minion_count=10 --datacenter=VA1 -mem=6 -cpu=4 -``` - - - -## クラスターの機能とアーキテクチャ - -We configure the Kubernetes cluster with the following features: - -* KubeDNS: DNS resolution and service discovery -* Heapster/InfluxDB: For metric collection. Needed for Grafana and auto-scaling. -* Grafana: Kubernetes/Docker metric dashboard -* KubeUI: Simple web interface to view Kubernetes state -* Kube Dashboard: New web interface to interact with your cluster - -We use the following to create the Kubernetes cluster: - -* Kubernetes 1.1.7 -* Ubuntu 14.04 -* Flannel 0.5.4 -* Docker 1.9.1-0~trusty -* Etcd 2.2.2 - -## 任意のアドオン - -* Logging: We offer an integrated centralized logging ELK platform so that all - Kubernetes and docker logs get sent to the ELK stack. To install the ELK stack - and configure Kubernetes to send logs to it, follow [the log - aggregation documentation](https://github.com/CenturyLinkCloud/adm-kubernetes-on-clc/blob/master/log_aggregration.md). Note: We don't install this by default as - the footprint isn't trivial. - -## クラスターの管理 - -The most widely used tool for managing a Kubernetes cluster is the command-line -utility ```kubectl```. If you do not already have a copy of this binary on your -administrative machine, you may run the script ```install_kubectl.sh``` which will -download it and install it in ```/usr/bin/local```. - -The script requires that the environment variable ```CLC_CLUSTER_NAME``` be defined. ```install_kubectl.sh``` also writes a configuration file which will embed the necessary -authentication certificates for the particular cluster. 
The configuration file is -written to the ```${CLC_CLUSTER_HOME}/kube``` directory - - -```shell -export KUBECONFIG=${CLC_CLUSTER_HOME}/kube/config -kubectl version -kubectl cluster-info -``` - -### プログラムでクラスターへアクセス - -It's possible to use the locally stored client certificates to access the apiserver. For example, you may want to use any of the [Kubernetes API client libraries](/docs/reference/using-api/client-libraries/) to program against your Kubernetes cluster in the programming language of your choice. - -To demonstrate how to use these locally stored certificates, we provide the following example of using ```curl``` to communicate to the master apiserver via https: - -```shell -curl \ - --cacert ${CLC_CLUSTER_HOME}/pki/ca.crt \ - --key ${CLC_CLUSTER_HOME}/pki/kubecfg.key \ - --cert ${CLC_CLUSTER_HOME}/pki/kubecfg.crt https://${MASTER_IP}:6443 -``` - -But please note, this *does not* work out of the box with the ```curl``` binary -distributed with macOS. - -### ブラウザーを使ったクラスターへのアクセス - -We install [the kubernetes dashboard](/docs/tasks/web-ui-dashboard/). When you -create a cluster, the script should output URLs for these interfaces like this: - -kubernetes-dashboard is running at ```https://${MASTER_IP}:6443/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy```. - -Note on Authentication to the UIs: - -The cluster is set up to use basic authentication for the user _admin_. -Hitting the url at ```https://${MASTER_IP}:6443``` will -require accepting the self-signed certificate -from the apiserver, and then presenting the admin -password written to file at: ```> _${CLC_CLUSTER_HOME}/kube/admin_password.txt_``` - - -### 設定ファイル - -Various configuration files are written into the home directory *CLC_CLUSTER_HOME* under ```.clc_kube/${CLC_CLUSTER_NAME}``` in several subdirectories. You can use these files -to access the cluster from machines other than where you created the cluster from. - -* ```config/```: Ansible variable files containing parameters describing the master and minion hosts -* ```hosts/```: hosts files listing access information for the Ansible playbooks -* ```kube/```: ```kubectl``` configuration files, and the basic-authentication password for admin access to the Kubernetes API -* ```pki/```: public key infrastructure files enabling TLS communication in the cluster -* ```ssh/```: SSH keys for root access to the hosts - - -## ```kubectl``` usage examples - -There are a great many features of _kubectl_. Here are a few examples - -List existing nodes, pods, services and more, in all namespaces, or in just one: - -```shell -kubectl get nodes -kubectl get --all-namespaces pods -kubectl get --all-namespaces services -kubectl get --namespace=kube-system replicationcontrollers -``` - -The Kubernetes API server exposes services on web URLs, which are protected by requiring -client certificates. If you run a kubectl proxy locally, ```kubectl``` will provide -the necessary certificates and serve locally over http. - -```shell -kubectl proxy -p 8001 -``` - -Then, you can access urls like ```http://127.0.0.1:8001/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy/``` without the need for client certificates in your browser. - - -## どのKubernetesの機能がCenturyLink Cloud上で動かないのか - -These are the known items that don't work on CenturyLink cloud but do work on other cloud providers: - -- At this time, there is no support services of the type [LoadBalancer](/docs/tasks/access-application-cluster/create-external-load-balancer/). 
We are actively working on this and hope to publish the changes sometime around April 2016.

-- At this time, there is no support for persistent storage volumes provided by
-  CenturyLink Cloud. However, customers can bring their own persistent storage
-  offering. We ourselves use Gluster.
-
-
-## Ansibleのファイル
-
-If you want more information about our Ansible files, please [read this file](https://github.com/CenturyLinkCloud/adm-kubernetes-on-clc/blob/master/ansible/README.md)
-
-## 参考文献
-
-Please see the [Kubernetes docs](/ja/docs/) for more details on administering
-and using a Kubernetes cluster.
-
-
-
diff --git a/content/ja/docs/setup/production-environment/turnkey/gce.md b/content/ja/docs/setup/production-environment/turnkey/gce.md
index b00d34ade6185..dcd269446a2e8 100644
--- a/content/ja/docs/setup/production-environment/turnkey/gce.md
+++ b/content/ja/docs/setup/production-environment/turnkey/gce.md
@@ -67,7 +67,7 @@ cluster/kube-up.sh

If you want more than one cluster running in your project, want to use a different name, or want a
different number of worker nodes, see the `/cluster/gce/config-default.sh` file for more fine-grained configuration before you start up your cluster. If you run into trouble, please see the section on [troubleshooting](/ja/docs/setup/production-environment/turnkey/gce/#troubleshooting), post to the
-[Kubernetes Forum](https://discuss.kubernetes.io), or come ask questions on [Slack](/docs/troubleshooting/#slack).
+[Kubernetes Forum](https://discuss.kubernetes.io), or come ask questions on the `#gke` Slack channel.

The next few steps will show you:

@@ -80,7 +80,7 @@ The next few steps will show you:

The cluster startup script will leave you with a running cluster and a `kubernetes` directory on your workstation.

-The [kubectl](/docs/user-guide/kubectl/) tool controls the Kubernetes cluster
+The [kubectl](/docs/reference/kubectl/kubectl/) tool controls the Kubernetes cluster
manager. It lets you inspect your cluster resources, create, delete, and update components, and much more.
You will use it to look at your new cluster and bring up example apps.

@@ -93,7 +93,7 @@ gcloud components install kubectl

{{< note >}}
The kubectl version bundled with `gcloud` may be older than the one
-downloaded by the get.k8s.io install script. See [Installing kubectl](/docs/tasks/kubectl/install/)
+downloaded by the get.k8s.io install script. See the [Installing kubectl](/docs/tasks/tools/install-kubectl/)
document to see how you can set up the latest `kubectl` on your workstation.
{{< /note >}}

@@ -107,7 +107,7 @@ Once `kubectl` is in your path, you can use it to look at your cluster. E.g., ru

kubectl get --all-namespaces services
```

-should show a set of [services](/docs/user-guide/services) that look something like this:
+should show a set of [services](/docs/concepts/services-networking/service/) that look something like this:

```shell
NAMESPACE     NAME          TYPE        CLUSTER_IP    EXTERNAL_IP   PORT(S)       AGE
@@ -117,7 +117,7 @@ kube-system   kube-ui       ClusterIP   10.0.0.3      ...
```

-Similarly, you can take a look at the set of [pods](/docs/user-guide/pods) that were created during cluster startup.
+Similarly, you can take a look at the set of [pods](/ja/docs/concepts/workloads/pods/) that were created during cluster startup.
You can do this via the

```shell
@@ -144,7 +144,7 @@ Some of the pods may take a few seconds to start up (during this time they'll sh

### いくつかの例の実行

-Then, see [a simple nginx example](/docs/user-guide/simple-nginx) to try out your new cluster.
+Then, see [a simple nginx example](/ja/docs/tasks/run-application/run-stateless-application-deployment/) to try out your new cluster.

For more complete applications, please look in the [examples directory](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/). The [guestbook example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/) is a good "getting started" walkthrough.

@@ -215,10 +215,3 @@ IaaS Provider  | Config. Mgmt | OS     | Networking  | Docs
-------------------- | ------------ | ------ | ---------- | --------------------------------------------- | ---------| ----------------------------
GCE                  | Saltstack    | Debian | GCE        | [docs](/ja/docs/setup/production-environment/turnkey/gce/)                                    |          | Project
-
-## 参考文献
-
-Please see the [Kubernetes docs](/ja/docs/) for more details on administering
-and using a Kubernetes cluster.
-
-
diff --git a/content/ja/docs/setup/production-environment/turnkey/icp.md b/content/ja/docs/setup/production-environment/turnkey/icp.md
index 9d1a0a17b3047..1313f37ff0c84 100644
--- a/content/ja/docs/setup/production-environment/turnkey/icp.md
+++ b/content/ja/docs/setup/production-environment/turnkey/icp.md
@@ -25,13 +25,9 @@ The following modules are available where you can deploy IBM Cloud Private by us

## AWS上でのIBM Cloud Private

-You can deploy an IBM Cloud Private cluster on Amazon Web Services (AWS) by using either AWS CloudFormation or Terraform.
+You can deploy an IBM Cloud Private cluster on Amazon Web Services (AWS) using Terraform.

-IBM Cloud Private has a Quick Start that automatically deploys IBM Cloud Private into a new virtual private cloud (VPC) on the AWS Cloud. A regular deployment takes about 60 minutes, and a high availability (HA) deployment takes about 75 minutes to complete. The Quick Start includes AWS CloudFormation templates and a deployment guide.
-
-This Quick Start is for users who want to explore application modernization and want to accelerate meeting their digital transformation goals, by using IBM Cloud Private and IBM tooling. The Quick Start helps users rapidly deploy a high availability (HA), production-grade, IBM Cloud Private reference architecture on AWS. For all of the details and the deployment guide, see the [IBM Cloud Private on AWS Quick Start](https://aws.amazon.com/quickstart/architecture/ibm-cloud-private/).
-
-IBM Cloud Private can also run on the AWS cloud platform by using Terraform. To deploy IBM Cloud Private in an AWS EC2 environment, see [Installing IBM Cloud Private on AWS](https://github.com/ibm-cloud-architecture/refarch-privatecloud/blob/master/Installing_ICp_on_aws.md).
+To deploy IBM Cloud Private in an AWS EC2 environment, see [Installing IBM Cloud Private on AWS](https://github.com/ibm-cloud-architecture/terraform-icp-aws).

## Azure上でのIBM Cloud Private

@@ -64,4 +60,4 @@ You can install IBM Cloud Private on VMware with either Ubuntu or RHEL images. F

The IBM Cloud Private Hosted service automatically deploys IBM Cloud Private Hosted on your VMware vCenter Server instances. This service brings the power of microservices and containers to your VMware environment on IBM Cloud. With this service, you can extend the same familiar VMware and IBM Cloud Private operational model and tools from on-premises into the IBM Cloud.
-For more information, see [IBM Cloud Private Hosted service](https://cloud.ibm.com/docs/services/vmwaresolutions/vmonic?topic=vmware-solutions-prod_overview#ibm-cloud-private-hosted). +For more information, see [IBM Cloud Private Hosted service](https://cloud.ibm.com/docs/vmwaresolutions?topic=vmwaresolutions-icp_overview). diff --git a/content/ja/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md b/content/ja/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md index 676f7f8a48f55..fec66df50ae36 100644 --- a/content/ja/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md +++ b/content/ja/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md @@ -14,7 +14,7 @@ Windowsアプリケーションは、多くの組織で実行されているサ ## KubernetesのWindowsコンテナ -KubernetesでWindowsコンテナのオーケストレーションを有効にする方法は、既存のLinuxクラスターにWindowsノードを含めるだけです。Kubernetesの[Pod](/ja/docs/concepts/workloads/pods/pod-overview/)でWindowsコンテナをスケジュールすることは、Linuxベースのコンテナをスケジュールするのと同じくらいシンプルで簡単です。 +KubernetesでWindowsコンテナのオーケストレーションを有効にする方法は、既存のLinuxクラスターにWindowsノードを含めるだけです。Kubernetesの{{< glossary_tooltip text="Pod" term_id="pod" >}}でWindowsコンテナをスケジュールすることは、Linuxベースのコンテナをスケジュールするのと同じくらいシンプルで簡単です。 Windowsコンテナを実行するには、Kubernetesクラスターに複数のオペレーティングシステムを含める必要があります。コントロールプレーンノードはLinux、ワーカーノードはワークロードのニーズに応じてWindowsまたはLinuxで実行します。Windows Server 2019は、サポートされている唯一のWindowsオペレーティングシステムであり、Windows (kubelet、[コンテナランタイム](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/containerd)、kube-proxyを含む)で[Kubernetesノード](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node)を有効にします。Windowsディストリビューションチャンネルの詳細については、[Microsoftのドキュメント](https://docs.microsoft.com/en-us/windows-server/get-started-19/servicing-channels-19)を参照してください。 @@ -52,7 +52,7 @@ Windows Serverホストオペレーティングシステムには、[Windows Ser Kubernetesの主要な要素は、WindowsでもLinuxと同じように機能します。このセクションでは、主要なワークロードイネーブラーのいくつかと、それらがWindowsにどのようにマップされるかについて説明します。 -* [Pods](/ja/docs/concepts/workloads/pods/pod-overview/) +* [Pods](/ja/docs/concepts/workloads/pods/) Podは、Kubernetesにおける最も基本的な構成要素です。人間が作成またはデプロイするKubernetesオブジェクトモデルの中で最小かつ最もシンプルな単位です。WindowsとLinuxのコンテナを同じPodにデプロイすることはできません。Pod内のすべてのコンテナは、各ノードが特定のプラットフォームとアーキテクチャを表す単一のノードにスケジュールされます。次のPod機能、プロパティ、およびイベントがWindowsコンテナでサポートされています。: @@ -96,7 +96,27 @@ Pod、Controller、Serviceは、KubernetesでWindowsワークロードを管理 #### コンテナランタイム -KubernetesのWindows Server 2019/1809ノードでは、Docker EE-basic 18.09が必要です。これは、kubeletに含まれているdockershimコードで動作します。CRI-ContainerDなどの追加のランタイムは、Kubernetesの以降のバージョンでサポートされる可能性があります。 +##### Docker EE + +{{< feature-state for_k8s_version="v1.14" state="stable" >}} + +Docker EE-basic 18.09+は、Kubernetesを実行しているWindows Server 2019 / 1809ノードに推奨されるコンテナランタイムです。kubeletに含まれるdockershimコードで動作します。 + +##### CRI-ContainerD + +{{< feature-state for_k8s_version="v1.18" state="alpha" >}} + +ContainerDはLinux上のKubernetesで動作するOCI準拠のランタイムです。Kubernetes v1.18では、Windows上での{{< glossary_tooltip term_id="containerd" text="ContainerD" >}}のサポートが追加されています。Windows上でのContainerDの進捗状況は[enhancements#1001](https://github.com/kubernetes/enhancements/issues/1001)で確認できます。 + +{{< caution >}} + +Kubernetes v1.18におけるWindows上でのContainerDは以下の既知の欠点があります: + +* ContainerDは公式リリースではWindowsをサポートしていません。すなわち、Kubernetesでのすべての開発はアクティブなContainerD開発ブランチに対して行われています。本番環境へのデプロイは常に、完全にテストされセキュリティ修正をサポートした公式リリースを利用するべきです。 +* ContainerDを利用した場合、Group Managed Service 
Accountsは実装されていません。詳細は[containerd/cri#1276](https://github.com/containerd/cri/issues/1276)を参照してください。 + +{{< /caution >}} + #### 永続ストレージ @@ -404,7 +424,6 @@ Kubernetesクラスターのトラブルシューティングの主なヘルプ # kubelet.exeを登録 # マイクロソフトは、mcr.microsoft.com/k8s/core/pause:1.2.0としてポーズインフラストラクチャコンテナをリリース - # 詳細については、「KubernetesにWindowsノードを追加するためのガイド」で「pause」を検索してください nssm install kubelet C:\k\kubelet.exe nssm set kubelet AppParameters --hostname-override= --v=6 --pod-infra-container-image=mcr.microsoft.com/k8s/core/pause:1.2.0 --resolv-conf="" --allow-privileged=true --enable-debugging-handlers --cluster-dns= --cluster-domain=cluster.local --kubeconfig=c:\k\config --hairpin-mode=promiscuous-bridge --image-pull-progress-deadline=20m --cgroups-per-qos=false --log-dir= --logtostderr=false --enforce-node-allocatable="" --network-plugin=cni --cni-bin-dir=c:\k\cni --cni-conf-dir=c:\k\cni\config nssm set kubelet AppDirectory C:\k @@ -516,7 +535,7 @@ Kubernetesクラスターのトラブルシューティングの主なヘルプ PauseイメージがOSバージョンと互換性があることを確認してください。[説明](https://docs.microsoft.com/en-us/virtualization/windowscontainers/kubernetes/deploying-resources)では、OSとコンテナの両方がバージョン1803であると想定しています。それ以降のバージョンのWindowsを使用している場合は、Insiderビルドなどでは、それに応じてイメージを調整する必要があります。イメージについては、Microsoftの[Dockerレジストリ](https://hub.docker.com/u/microsoft/)を参照してください。いずれにしても、PauseイメージのDockerfileとサンプルサービスの両方で、イメージに:latestのタグが付けられていると想定しています。 - Kubernetes v1.14以降、MicrosoftはPauseインフラストラクチャコンテナを`mcr.microsoft.com/k8s/core/pause:1.2.0`でリリースしています。詳細については、[KubernetesにWindowsノードを追加するためのガイド](../user-guide-windows-nodes)で「Pause」を検索してください。 + Kubernetes v1.14以降、MicrosoftはPauseインフラストラクチャコンテナを`mcr.microsoft.com/k8s/core/pause:1.2.0`でリリースしています。 1. DNS名前解決が正しく機能していない @@ -568,18 +587,16 @@ Kubernetesクラスターのトラブルシューティングの主なヘルプ ロードマップには多くの機能があります。高レベルの簡略リストを以下に示しますが、[ロードマッププロジェクト](https://github.com/orgs/kubernetes/projects/8)を見て、[貢献すること](https://github.com/kubernetes/community/blob/master/sig-windows/)によってWindowsサポートを改善することをお勧めします。 -### CRI-ContainerD -{{< glossary_tooltip term_id="containerd" >}}は、最近{{< glossary_tooltip text="CNCF" term_id="cncf" >}}プロジェクトとして卒業した、もう1つのOCI準拠ランタイムです。現在Linuxでテストされていますが、1.3はWindowsとHyper-Vをサポートします。[[リファレンス](https://blog.docker.com/2019/02/containerd-graduates-within-the-cncf/)] +### Hyper-V分離 -CRI-ContainerDインターフェイスは、Hyper-Vに基づいてサンドボックスを管理できるようになります。これにより、RuntimeClassを次のような新しいユースケースに実装できる基盤が提供されます: +Hyper-V分離はKubernetesで以下のWindowsコンテナのユースケースを実現するために必要です。 * Pod間のハイパーバイザーベースの分離により、セキュリティを強化 * 下位互換性により、コンテナの再構築を必要とせずにノードで新しいWindows Serverバージョンを実行 * Podの特定のCPU/NUMA設定 * メモリの分離と予約 -### Hyper-V分離 既存のHyper-V分離サポートは、v1.10の試験的な機能であり、上記のCRI-ContainerD機能とRuntimeClass機能を優先して将来廃止される予定です。現在の機能を使用してHyper-V分離コンテナを作成するには、kubeletのフィーチャーゲートを`HyperVContainer=true`で開始し、Podにアノテーション`experimental.windows.kubernetes.io/isolation-type=hyperv`を含める必要があります。実験的リリースでは、この機能はPodごとに1つのコンテナに制限されています。 @@ -609,7 +626,7 @@ spec: ### kubeadmとクラスターAPIを使用したデプロイ -Kubeadmは、ユーザーがKubernetesクラスターをデプロイするための事実上の標準になりつつあります。kubeadmのWindowsノードのサポートは、将来のリリースで提供予定です。Windowsノードが適切にプロビジョニングされるように、クラスターAPIにも投資しています。 +Kubeadmは、ユーザーがKubernetesクラスターをデプロイするための事実上の標準になりつつあります。kubeadmのWindowsノードのサポートは進行中ですが、ガイドはすでに[ここ](/ja/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes/)で利用可能です。Windowsノードが適切にプロビジョニングされるように、クラスターAPIにも投資しています。 ### その他の主な機能 * グループ管理サービスアカウントのベータサポート diff --git a/content/ja/docs/setup/production-environment/windows/kubecluster.ps1-install.gif b/content/ja/docs/setup/production-environment/windows/kubecluster.ps1-install.gif deleted file mode 100644 index e3d94b9b54ac2..0000000000000 Binary 
files a/content/ja/docs/setup/production-environment/windows/kubecluster.ps1-install.gif and /dev/null differ diff --git a/content/ja/docs/setup/production-environment/windows/kubecluster.ps1-join.gif b/content/ja/docs/setup/production-environment/windows/kubecluster.ps1-join.gif deleted file mode 100644 index 828417d685c69..0000000000000 Binary files a/content/ja/docs/setup/production-environment/windows/kubecluster.ps1-join.gif and /dev/null differ diff --git a/content/ja/docs/setup/production-environment/windows/kubecluster.ps1-reset.gif b/content/ja/docs/setup/production-environment/windows/kubecluster.ps1-reset.gif deleted file mode 100644 index e71d40d6dfb09..0000000000000 Binary files a/content/ja/docs/setup/production-environment/windows/kubecluster.ps1-reset.gif and /dev/null differ diff --git a/content/ja/docs/setup/production-environment/windows/user-guide-windows-containers.md b/content/ja/docs/setup/production-environment/windows/user-guide-windows-containers.md index 9e218fae531bd..6f1ed4558ea2e 100644 --- a/content/ja/docs/setup/production-environment/windows/user-guide-windows-containers.md +++ b/content/ja/docs/setup/production-environment/windows/user-guide-windows-containers.md @@ -19,7 +19,7 @@ Windowsアプリケーションは、多くの組織で実行されるサービ ## 始める前に -* [Windows Serverを実行するマスターノードとワーカーノード](/ja/docs/setup/production-environment/windows/user-guide-windows-nodes/)を含むKubernetesクラスターを作成します +* [Windows Serverを実行するマスターノードとワーカーノード](/ja/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes)を含むKubernetesクラスターを作成します * Kubernetes上にServiceとワークロードを作成してデプロイすることは、LinuxコンテナとWindowsコンテナ共に、ほぼ同じように動作することに注意してください。クラスターとのインタフェースとなる[Kubectlコマンド](/docs/reference/kubectl/overview/)も同じです。Windowsコンテナをすぐに体験できる例を以下セクションに用意しています。 ## はじめに:Windowsコンテナのデプロイ @@ -96,7 +96,7 @@ spec: * ネットワークを介したノードとPod間通信、LinuxマスターからのPod IPのポート80に向けて`curl`して、ウェブサーバーの応答をチェックします * docker execまたはkubectl execを使用したPod間通信、Pod間(および複数のWindowsノードがある場合はホスト間)へのpingします * ServiceからPodへの通信、Linuxマスターおよび個々のPodからの仮想Service IP(`kubectl get services`で表示される)に`curl`します - * サービスディスカバリ、Kuberntesの[default DNS suffix](/ja/docs/concepts/services-networking/dns-pod-service/#services)と共にService名に`curl`します + * サービスディスカバリ、Kubernetesの[default DNS suffix](/ja/docs/concepts/services-networking/dns-pod-service/#services)と共にService名に`curl`します * Inbound connectivity, `curl` the NodePort from the Linux master or machines outside of the cluster * インバウンド接続、Linuxマスターまたはクラスター外のマシンからNodePortに`curl`します * アウトバウンド接続、kubectl execを使用したPod内からの外部IPに`curl`します diff --git a/content/ja/docs/setup/production-environment/windows/user-guide-windows-nodes.md b/content/ja/docs/setup/production-environment/windows/user-guide-windows-nodes.md deleted file mode 100644 index 9f54861a945fa..0000000000000 --- a/content/ja/docs/setup/production-environment/windows/user-guide-windows-nodes.md +++ /dev/null @@ -1,306 +0,0 @@ ---- -title: Guide for adding Windows Nodes in Kubernetes -content_type: concept -weight: 70 ---- - - - -The Kubernetes platform can now be used to run both Linux and Windows containers. One or more Windows nodes can be registered to a cluster. This guide shows how to: - -* Register a Windows node to the cluster -* Configure networking so pods on Linux and Windows can communicate - - - - - -## Before you begin - -* Obtain a [Windows Server license](https://www.microsoft.com/en-us/cloud-platform/windows-server-pricing) in order to configure the Windows node that hosts Windows containers. 
You can use your organization's licenses for the cluster, or acquire one from Microsoft, a reseller, or via the major cloud providers such as GCP, AWS, and Azure by provisioning a virtual machine running Windows Server through their marketplaces. A [time-limited trial](https://www.microsoft.com/en-us/cloud-platform/windows-server-trial) is also available. - -* Build a Linux-based Kubernetes cluster in which you have access to the control plane (some examples include [Creating a single control-plane cluster with kubeadm](/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/), [AKS Engine](/ja/docs/setup/production-environment/turnkey/azure/), [GCE](/ja/docs/setup/production-environment/turnkey/gce/), [AWS](/ja/docs/setup/production-environment/turnkey/aws/). - -## Getting Started: Adding a Windows Node to Your Cluster - -### Plan IP Addressing - -Kubernetes cluster management requires careful planning of your IP addresses so that you do not inadvertently cause network collision. This guide assumes that you are familiar with the [Kubernetes networking concepts](/docs/concepts/cluster-administration/networking/). - -In order to deploy your cluster you need the following address spaces: - -| Subnet / address range | Description | Default value | -| --- | --- | --- | -| Service Subnet | A non-routable, purely virtual subnet that is used by pods to uniformly access services without caring about the network topology. It is translated to/from routable address space by `kube-proxy` running on the nodes. | 10.96.0.0/12 | -| Cluster Subnet | This is a global subnet that is used by all pods in the cluster. Each node is assigned a smaller /24 subnet from this for their pods to use. It must be large enough to accommodate all pods used in your cluster. To calculate *minimumsubnet* size: `(number of nodes) + (number of nodes * maximum pods per node that you configure)`. Example: for a 5 node cluster for 100 pods per node: `(5) + (5 * 100) = 505.` | 10.244.0.0/16 | -| Kubernetes DNS Service IP | IP address of `kube-dns` service that is used for DNS resolution & cluster service discovery. | 10.96.0.10 | - -Review the networking options supported in 'Intro to Windows containers in Kubernetes: Supported Functionality: Networking' to determine how you need to allocate IP addresses for your cluster. - -### Components that run on Windows - -While the Kubernetes control plane runs on your Linux node(s), the following components are configured and run on your Windows node(s). - -1. kubelet -2. kube-proxy -3. kubectl (optional) -4. Container runtime - -Get the latest binaries from [https://github.com/kubernetes/kubernetes/releases](https://github.com/kubernetes/kubernetes/releases), starting with v1.14 or later. The Windows-amd64 binaries for kubeadm, kubectl, kubelet, and kube-proxy can be found under the CHANGELOG link. - -### Networking Configuration - -Once you have a Linux-based Kubernetes master node you are ready to choose a networking solution. This guide illustrates using Flannel in VXLAN mode for simplicity. - -#### Configuring Flannel in VXLAN mode on the Linux controller - -1. Prepare Kubernetes master for Flannel - - Some minor preparation is recommended on the Kubernetes master in our cluster. It is recommended to enable bridged IPv4 traffic to iptables chains when using Flannel. This can be done using the following command: - - ```bash - sudo sysctl net.bridge.bridge-nf-call-iptables=1 - ``` - -1. 
Download & configure Flannel - - Download the most recent Flannel manifest: - - ```bash - wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml - ``` - - There are two sections you should modify to enable the vxlan networking backend: - - After applying the steps below, the `net-conf.json` section of `kube-flannel.yml` should look as follows: - - ```json - net-conf.json: | - { - "Network": "10.244.0.0/16", - "Backend": { - "Type": "vxlan", - "VNI" : 4096, - "Port": 4789 - } - } - ``` - - {{< note >}}The VNI must be set to 4096 and port 4789 for Flannel on Linux to interoperate with Flannel on Windows. Support for other VNIs is coming soon. See the [VXLAN documentation](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan) - for an explanation of these fields.{{< /note >}} - -1. In the `net-conf.json` section of your `kube-flannel.yml`, double-check: - 1. The cluster subnet (e.g. "10.244.0.0/16") is set as per your IP plan. - * VNI 4096 is set in the backend - * Port 4789 is set in the backend - 1. In the `cni-conf.json` section of your `kube-flannel.yml`, change the network name to `vxlan0`. - - - Your `cni-conf.json` should look as follows: - - ```json - cni-conf.json: | - { - "name": "vxlan0", - "plugins": [ - { - "type": "flannel", - "delegate": { - "hairpinMode": true, - "isDefaultGateway": true - } - }, - { - "type": "portmap", - "capabilities": { - "portMappings": true - } - } - ] - } - ``` - -1. Apply the Flannel yaml and Validate - - Let's apply the Flannel configuration: - - ```bash - kubectl apply -f kube-flannel.yml - ``` - - After a few minutes, you should see all the pods as running if the Flannel pod network was deployed. - - ```bash - kubectl get pods --all-namespaces - ``` - - The output looks like as follows: - - ``` - NAMESPACE NAME READY STATUS RESTARTS AGE - kube-system etcd-flannel-master 1/1 Running 0 1m - kube-system kube-apiserver-flannel-master 1/1 Running 0 1m - kube-system kube-controller-manager-flannel-master 1/1 Running 0 1m - kube-system kube-dns-86f4d74b45-hcx8x 3/3 Running 0 12m - kube-system kube-flannel-ds-54954 1/1 Running 0 1m - kube-system kube-proxy-Zjlxz 1/1 Running 0 1m - kube-system kube-scheduler-flannel-master 1/1 Running 0 1m - ``` - - Verify that the Flannel DaemonSet has the NodeSelector applied. - - ```bash - kubectl get ds -n kube-system - ``` - - The output looks like as follows. The NodeSelector `beta.kubernetes.io/os=linux` is applied. - - ``` - NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE - kube-flannel-ds 2 2 2 2 2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux 21d - kube-proxy 2 2 2 2 2 beta.kubernetes.io/os=linux 26d - ``` - -#### Join Windows Worker - -In this section we'll cover configuring a Windows node from scratch to join a cluster on-prem. If your cluster is on a cloud you'll likely want to follow the cloud specific guides in the next section. - -#### Preparing a Windows Node - -{{< note >}} -All code snippets in Windows sections are to be run in a PowerShell environment with elevated permissions (Admin). -{{< /note >}} - -1. Install Docker (requires a system reboot) - - Kubernetes uses [Docker](https://www.docker.com/) as its container engine, so we need to install it. 
-
-#### Join Windows Worker
-
-In this section we'll cover configuring a Windows node from scratch to join a cluster on-prem. If your cluster is on a cloud, you'll likely want to follow the cloud-specific guides in the next section.
-
-#### Preparing a Windows Node
-
-{{< note >}}
-All code snippets in Windows sections are to be run in a PowerShell environment with elevated permissions (Admin).
-{{< /note >}}
-
-1. Install Docker (requires a system reboot)
-
-   Kubernetes uses [Docker](https://www.docker.com/) as its container engine, so we need to install it. You can follow the [official Docs instructions](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-docker/configure-docker-daemon#install-docker), the [Docker instructions](https://store.docker.com/editions/enterprise/docker-ee-server-windows), or try the following *recommended* steps:
-
-   ```PowerShell
-   Enable-WindowsOptionalFeature -FeatureName Containers
-   Restart-Computer -Force
-   Install-Module -Name DockerMsftProvider -Repository PSGallery -Force
-   Install-Package -Name Docker -ProviderName DockerMsftProvider
-   ```
-
-   If you are behind a proxy, the following PowerShell environment variables must be defined:
-
-   ```PowerShell
-   [Environment]::SetEnvironmentVariable("HTTP_PROXY", "http://proxy.example.com:80/", [EnvironmentVariableTarget]::Machine)
-   [Environment]::SetEnvironmentVariable("HTTPS_PROXY", "http://proxy.example.com:443/", [EnvironmentVariableTarget]::Machine)
-   ```
-
-   After the reboot, you can verify that the Docker service is ready with the command below.
-
-   ```PowerShell
-   docker version
-   ```
-
-   If you see an error message like the following, you need to start the Docker service manually.
-
-   ```
-   Client:
-        Version:      17.06.2-ee-11
-        API version:  1.30
-        Go version:   go1.8.7
-        Git commit:   06fc007
-        Built:        Thu May 17 06:14:39 2018
-        OS/Arch:      windows / amd64
-   error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.30/version: open //./pipe/docker_engine: The system cannot find the file specified. In the default daemon configuration on Windows, the docker client must be run elevated to connect. This error may also indicate that the docker daemon is not running.
-   ```
-
-   You can start the Docker service manually as follows:
-
-   ```PowerShell
-   Start-Service docker
-   ```
-
-   {{< note >}}
-   The "pause" (infrastructure) image is hosted on Microsoft Container Registry (MCR). You can access it using "docker pull mcr.microsoft.com/k8s/core/pause:1.2.0". The Dockerfile is available at https://github.com/kubernetes-sigs/windows-testing/blob/master/images/pause/Dockerfile.
-   {{< /note >}}
-
-1. Prepare a Windows directory for Kubernetes
-
-   Create a "Kubernetes for Windows" directory to store Kubernetes binaries as well as any deployment scripts and config files.
-
-   ```PowerShell
-   mkdir c:\k
-   ```
-
-1. Copy Kubernetes certificate
-
-   Copy the Kubernetes certificate file `$HOME/.kube/config` [from the Linux controller](https://docs.microsoft.com/en-us/virtualization/windowscontainers/kubernetes/creating-a-linux-master#collect-cluster-information) to this new `C:\k` directory on your Windows node.
-
-   Tip: You can use tools such as [xcopy](https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/xcopy), [WinSCP](https://winscp.net/eng/download.php), or this [PowerShell wrapper for WinSCP](https://www.powershellgallery.com/packages/WinSCP/5.13.2.0) to transfer the config file between nodes.
-
-1. Download Kubernetes binaries
-
-   To be able to run Kubernetes, you first need to download the `kubelet` and `kube-proxy` binaries. You download these from the Node Binaries links in the CHANGELOG.md file of the [latest releases](https://github.com/kubernetes/kubernetes/releases/), for example `kubernetes-node-windows-amd64.tar.gz`. You may also optionally download `kubectl` to run on Windows, which you can find under Client Binaries.
-
-   Use the [Expand-Archive](https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.archive/expand-archive?view=powershell-6) PowerShell command to extract the archive and place the binaries into `C:\k`.
-
-#### Join the Windows node to the Flannel cluster
-
-The Flannel overlay deployment scripts and documentation are available in [this repository](https://github.com/Microsoft/SDN/tree/master/Kubernetes/flannel/overlay). The following steps are a simple walkthrough of the more comprehensive instructions available there.
-
-Download the [Flannel start.ps1](https://github.com/Microsoft/SDN/blob/master/Kubernetes/flannel/start.ps1) script and save it to `C:\k`:
-
-```PowerShell
-cd c:\k
-[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
-wget https://raw.githubusercontent.com/Microsoft/SDN/master/Kubernetes/flannel/start.ps1 -OutFile c:\k\start.ps1
-```
-
-{{< note >}}
-[start.ps1](https://github.com/Microsoft/SDN/blob/master/Kubernetes/flannel/start.ps1) references [install.ps1](https://github.com/Microsoft/SDN/blob/master/Kubernetes/windows/install.ps1), which downloads additional files such as the `flanneld` executable and the [Dockerfile for the infrastructure pod](https://github.com/Microsoft/SDN/blob/master/Kubernetes/windows/Dockerfile) and installs them for you. For overlay networking mode, the [firewall](https://github.com/Microsoft/SDN/blob/master/Kubernetes/windows/helper.psm1#L111) is opened for local UDP port 4789. Multiple PowerShell windows may be opened and closed, and there may be a few seconds of network outage while the new external vSwitch for the pod network is created the first time. Run the script using the arguments as specified below:
-{{< /note >}}
-
-```PowerShell
-cd c:\k
-.\start.ps1 -ManagementIP <Windows Node IP> `
-   -NetworkMode overlay `
-   -ClusterCIDR <Cluster CIDR> `
-   -ServiceCIDR <Service CIDR> `
-   -KubeDnsServiceIP <Kube-dns Service IP> `
-   -LogDir <Log directory>
-```
-
-| Parameter | Default Value | Notes |
-| --- | --- | --- |
-| -ManagementIP | N/A (required) | The IP address assigned to the Windows node. You can use `ipconfig` to find this. |
-| -NetworkMode | l2bridge | We're using `overlay` here |
-| -ClusterCIDR | 10.244.0.0/16 | Refer to your cluster IP plan |
-| -ServiceCIDR | 10.96.0.0/12 | Refer to your cluster IP plan |
-| -KubeDnsServiceIP | 10.96.0.10 | |
-| -InterfaceName | Ethernet | The name of the network interface of the Windows host. You can use `ipconfig` to find this. |
-| -LogDir | C:\k | The directory where kubelet and kube-proxy logs are redirected into their respective output files. |
-
-Now you can view the Windows nodes in your cluster by running the following:
-
-```bash
-kubectl get nodes
-```
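-
-As a quick sanity check, `-o wide` adds the OS image and kubelet version columns, which makes the new Windows node easy to spot (a sketch; the exact output varies by cluster):
-
-```bash
-# List nodes with extra columns; the Windows node reports a Windows OS image
-kubectl get nodes -o wide
-```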
-
-{{< note >}}
-You may want to configure your Windows node components like kubelet and kube-proxy to run as services. View the services and background processes section under [troubleshooting](#troubleshooting) for additional instructions. Once you are running the node components as services, collecting logs becomes an important part of troubleshooting. View the [gathering logs](https://github.com/kubernetes/community/blob/master/sig-windows/CONTRIBUTING.md#gathering-logs) section of the contributing guide for further instructions.
-{{< /note >}}
-
-### Public Cloud Providers
-
-#### Azure
-
-AKS-Engine can deploy a complete, customizable Kubernetes cluster with both Linux & Windows nodes.
There is a step-by-step walkthrough available in the [docs on GitHub](https://github.com/Azure/aks-engine/blob/master/docs/topics/windows.md).
-
-#### GCP
-
-Users can easily deploy a complete Kubernetes cluster on GCE by following this step-by-step walkthrough on [GitHub](https://github.com/kubernetes/kubernetes/blob/master/cluster/gce/windows/README-GCE-Windows-kube-up.md).
-
-#### Deployment with kubeadm and cluster API
-
-Kubeadm is becoming the de facto standard for users to deploy a Kubernetes cluster. Windows node support in kubeadm will come in a future release. We are also making investments in cluster API to ensure Windows nodes are properly provisioned.
-
-### Next Steps
-
-Now that you've configured a Windows worker in your cluster to run Windows containers, you are ready to schedule Windows containers on your cluster. You may also want to add one or more Linux nodes to run Linux containers.
-
diff --git a/content/ja/docs/setup/release/version-skew-policy.md b/content/ja/docs/setup/release/version-skew-policy.md
index 5c1a18b8ee812..eb0764bcc3a4d 100644
--- a/content/ja/docs/setup/release/version-skew-policy.md
+++ b/content/ja/docs/setup/release/version-skew-policy.md
@@ -12,14 +12,16 @@ weight: 30

 ## サポートされるバージョン {#supported-versions}

-Kubernetesのバージョンは**x.y.z**の形式で表現され、**x**はメジャーバージョン、**y**はマイナーバージョン、**z**はパッチバージョンを指します。これは[セマンティック バージョニング](http://semver.org/)に従っています。詳細は、[Kubernetesのリリースバージョニング](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md#kubernetes-release-versioning)を参照してください。
+Kubernetesのバージョンは**x.y.z**の形式で表現され、**x**はメジャーバージョン、**y**はマイナーバージョン、**z**はパッチバージョンを指します。これは[セマンティック バージョニング](https://semver.org/)に従っています。詳細は、[Kubernetesのリリースバージョニング](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md#kubernetes-release-versioning)を参照してください。

-Kubernetesプロジェクトでは、最新の3つのマイナーリリースについてリリースブランチを管理しています。
+Kubernetesプロジェクトでは、最新の3つのマイナーリリースについてリリースブランチを管理しています ({{< skew latestVersion >}}, {{< skew prevMinorVersion >}}, {{< skew oldestMinorVersion >}})。
+
+セキュリティフィックスを含む適用可能な修正は、重大度や実行可能性によってはこれら3つのリリースブランチにバックポートされることもあります。パッチリリースは、これらのブランチから [定期的に](https://git.k8s.io/sig-release/releases/patch-releases.md#cadence) 切り出され、必要に応じて追加の緊急リリースも行われます。
+
+ [リリースマネージャー](https://git.k8s.io/sig-release/release-managers.md)グループがこれを決定しています。

-セキュリティフィックスを含む適用可能な修正は、重大度や実行可能性によってはこれら3つのリリースブランチにバックポートされることもあります。パッチリリースは、定期的または必要に応じてこれらのブランチから分岐されます。[パッチリリースチーム](https://github.com/kubernetes/sig-release/blob/master/release-engineering/role-handbooks/patch-release-team.md#release-timing)がこれを決定しています。パッチリリースチームは[リリースマネージャー](https://github.com/kubernetes/sig-release/blob/master/release-managers.md)の一部です。
 詳細は、[Kubernetesパッチリリース](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md)ページを参照してください。
-マイナーリリースは約3ヶ月ごとに行われるため、マイナーリリースのブランチはそれぞれ約9ヶ月保守されます。

 ## サポートされるバージョンの差異

@@ -29,8 +31,8 @@

 例:

-* 最新の`kube-apiserver`が**1.13**であるとします
-* ほかの`kube-apiserver`インスタンスは**1.13**および**1.12**がサポートされます
+* 最新の`kube-apiserver`が**{{< skew latestVersion >}}**であるとします
+* ほかの`kube-apiserver`インスタンスは**{{< skew latestVersion >}}**および**{{< skew prevMinorVersion >}}**がサポートされます

 ### kubelet

@@ -38,8 +40,8 @@

 例:

-* `kube-apiserver`が**1.13**であるとします
-* `kubelet`は**1.13**、**1.12**および**1.11**がサポートされます
+* `kube-apiserver`が**{{< skew latestVersion >}}**であるとします
+* `kubelet`は**{{< skew latestVersion >}}**、**{{< skew prevMinorVersion >}}**および**{{< skew oldestMinorVersion >}}**がサポートされます

 {{< note >}}
 HAクラスター内の`kube-apiserver`間にバージョンの差異がある場合、有効な`kubelet`のバージョンは少なくなります。
@@ -47,8 +49,8 @@ HAクラスター内の`kube-apiserver`間にバージョンの差異がある

 例:

-* `kube-apiserver`インスタンスが**1.13**および**1.12**であるとします
-* `kubelet`は**1.12**および**1.11**がサポートされます(**1.13**はバージョン**1.12**の`kube-apiserver`よりも新しくなるためサポートされません)
+* `kube-apiserver`インスタンスが**{{< skew latestVersion >}}**および**{{< skew prevMinorVersion >}}**であるとします
+* `kubelet`は**{{< skew prevMinorVersion >}}**および**{{< skew oldestMinorVersion >}}**がサポートされます(**{{< skew latestVersion >}}**はバージョン**{{< skew prevMinorVersion >}}**の`kube-apiserver`よりも新しくなるためサポートされません)

 ### kube-controller-manager、kube-scheduler、およびcloud-controller-manager

@@ -56,8 +58,8 @@

 例:

-* `kube-apiserver`が**1.13**であるとします
-* `kube-controller-manager`、`kube-scheduler`および`cloud-controller-manager`は**1.13**および**1.12**がサポートされます
+* `kube-apiserver`が**{{< skew latestVersion >}}**であるとします
+* `kube-controller-manager`、`kube-scheduler`および`cloud-controller-manager`は**{{< skew latestVersion >}}**および**{{< skew prevMinorVersion >}}**がサポートされます

 {{< note >}}
 HAクラスター内の`kube-apiserver`間にバージョンの差異があり、これらのコンポーネントがクラスター内のいずれかの`kube-apiserver`と通信する場合(たとえばロードバランサーを経由して)、コンポーネントの有効なバージョンは少なくなります。
@@ -65,8 +67,8 @@

 例:

-* `kube-apiserver`インスタンスが**1.13**および**1.12**であるとします
-* いずれかの`kube-apiserver`インスタンスへ配信するロードバランサーと通信する`kube-controller-manager`、`kube-scheduler`および`cloud-controller-manager`は**1.12**がサポートされます(**1.13**はバージョン**1.12**の`kube-apiserver`よりも新しくなるためサポートされません)
+* `kube-apiserver`インスタンスが**{{< skew latestVersion >}}**および**{{< skew prevMinorVersion >}}**であるとします
+* いずれかの`kube-apiserver`インスタンスへ配信するロードバランサーと通信する`kube-controller-manager`、`kube-scheduler`および`cloud-controller-manager`は**{{< skew prevMinorVersion >}}**がサポートされます(**{{< skew latestVersion >}}**はバージョン**{{< skew prevMinorVersion >}}**の`kube-apiserver`よりも新しくなるためサポートされません)

 ### kubectl

@@ -74,8 +76,8 @@

 例:

-* `kube-apiserver`が**1.13**であるとします
-* `kubectl`は**1.14**、**1.13**および**1.12**がサポートされます
+* `kube-apiserver`が**{{< skew latestVersion >}}**であるとします
+* `kubectl`は**{{< skew nextMinorVersion >}}**、**{{< skew latestVersion >}}**および**{{< skew prevMinorVersion >}}**がサポートされます

 {{< note >}}
 HAクラスター内の`kube-apiserver`間にバージョンの差異がある場合、有効な`kubectl`バージョンは少なくなります。
@@ -83,26 +85,26 @@

 例:

-* `kube-apiserver`インスタンスが**1.13**および**1.12**であるとします
-* `kubectl`は**1.13**および**1.12**がサポートされます(ほかのバージョンでは、ある`kube-apiserver`コンポーネントからマイナーバージョンが2つ以上離れる可能性があります)
+* `kube-apiserver`インスタンスが**{{< skew latestVersion >}}**および**{{< skew prevMinorVersion >}}**であるとします
+* `kubectl`は**{{< skew latestVersion >}}**および**{{< skew prevMinorVersion >}}**がサポートされます(ほかのバージョンでは、ある`kube-apiserver`コンポーネントからマイナーバージョンが2つ以上離れる可能性があります)
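+
+たとえば、`kubectl`クライアントと`kube-apiserver`のバージョン差異は次のように確認できます(説明用のスケッチです。出力は環境によって異なります):
+
+```shell
+# クライアントとサーバーそれぞれのバージョンを表示して差異を確認する
+kubectl version --short
+```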

 ## サポートされるコンポーネントのアップグレード順序

-コンポーネント間でサポートされるバージョンの差異は、コンポーネントをアップグレードする順序に影響されます。このセクションでは、既存のクラスターをバージョン**1.n**から**1.(n+1)** へ移行するために、コンポーネントをアップグレードする順序を説明します。
+コンポーネント間でサポートされるバージョンの差異は、コンポーネントをアップグレードする順序に影響します。このセクションでは、既存のクラスターをバージョン**{{< skew prevMinorVersion >}}**から**{{< skew latestVersion >}}** へ移行するために、コンポーネントをアップグレードする順序を説明します。

 ### kube-apiserver

 前提条件:

-* シングルインスタンスのクラスターにおいて、既存の`kube-apiserver`インスタンスは**1.n**とします
-* HAクラスターにおいて、既存の`kube-apiserver`は**1.n**または**1.(n+1)** とします(最新と最古の間で、最大で1つのマイナーバージョンの差異となります)
-* サーバーと通信する`kube-controller-manager`、`kube-scheduler`および`cloud-controller-manager`はバージョン**1.n**とします(必ず既存のAPIサーバーのバージョンよりも新しいものでなく、かつ新しいAPIサーバーのバージョンの1つ以内のマイナーバージョンとなります)
-* すべてのノードの`kubelet`インスタンスはバージョン**1.n**または**1.(n-1)** とします(必ず既存のAPIサーバーよりも新しいバージョンでなく、かつ新しいAPIサーバーのバージョンの2つ以内のマイナーバージョンとなります)
+* シングルインスタンスのクラスターにおいて、既存の`kube-apiserver`インスタンスは**{{< skew prevMinorVersion >}}**とします
+* HAクラスターにおいて、既存の`kube-apiserver`は**{{< skew prevMinorVersion >}}**または**{{< skew latestVersion >}}** とします(最新と最古の間で、最大で1つのマイナーバージョンの差異となります)
+* サーバーと通信する`kube-controller-manager`、`kube-scheduler`および`cloud-controller-manager`はバージョン**{{< skew prevMinorVersion >}}**とします(必ず既存のAPIサーバーのバージョンよりも新しいものでなく、かつ新しいAPIサーバーのバージョンの1つ以内のマイナーバージョンとなります)
+* すべてのノードの`kubelet`インスタンスはバージョン**{{< skew prevMinorVersion >}}**または**{{< skew oldestMinorVersion >}}** とします(必ず既存のAPIサーバーよりも新しいバージョンでなく、かつ新しいAPIサーバーのバージョンの2つ以内のマイナーバージョンとなります)
 * 登録されたAdmission webhookは、新しい`kube-apiserver`インスタンスが送信するこれらのデータを扱うことができます:
-  * `ValidatingWebhookConfiguration`および`MutatingWebhookConfiguration`オブジェクトは、**1.(n+1)** で追加されたRESTリソースの新しいバージョンを含んで更新されます(または、v1.15から利用可能な[`matchPolicy: Equivalent`オプション](/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy)を使用してください)
-  * Webhookは送信されたRESTリソースの新しいバージョン、および**1.(n+1)** のバージョンで追加された新しいフィールドを扱うことができます
+  * `ValidatingWebhookConfiguration`および`MutatingWebhookConfiguration`オブジェクトは、**{{< skew latestVersion >}}** で追加されたRESTリソースの新しいバージョンを含んで更新されます(または、v1.15から利用可能な[`matchPolicy: Equivalent`オプション](/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy)を使用してください)
+  * Webhookは送信されたRESTリソースの新しいバージョン、および**{{< skew latestVersion >}}** のバージョンで追加された新しいフィールドを扱うことができます

-`kube-apiserver`を**1.(n+1)** にアップグレードしてください。
+`kube-apiserver`を**{{< skew latestVersion >}}** にアップグレードしてください。

 {{< note >}}
 [非推奨API](/docs/reference/using-api/deprecation-policy/)および[APIの変更ガイドライン](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api_changes.md)のプロジェクトポリシーにおいては、シングルインスタンスの場合でも`kube-apiserver`のアップグレードの際にマイナーバージョンをスキップしてはなりません。
@@ -112,17 +114,17 @@

 前提条件:

-* これらのコンポーネントと通信する`kube-apiserver`インスタンスが**1.(n+1)** であること(これらのコントロールプレーンコンポーネントが、クラスター内の`kube-apiserver`インスタンスと通信できるHAクラスターでは、これらのコンポーネントをアップグレードする前にすべての`kube-apiserver`インスタンスをアップグレードしなければなりません)
+* これらのコンポーネントと通信する`kube-apiserver`インスタンスが**{{< skew latestVersion >}}** であること(これらのコントロールプレーンコンポーネントが、クラスター内の`kube-apiserver`インスタンスと通信できるHAクラスターでは、これらのコンポーネントをアップグレードする前にすべての`kube-apiserver`インスタンスをアップグレードしなければなりません)

-`kube-controller-manager`、`kube-scheduler`および`cloud-controller-manager`を**1.(n+1)** にアップグレードしてください。
+`kube-controller-manager`、`kube-scheduler`および`cloud-controller-manager`を**{{< skew latestVersion >}}** にアップグレードしてください。

 ### kubelet

 前提条件:

-* `kubelet`と通信する`kube-apiserver`が**1.(n+1)** であること
+* `kubelet`と通信する`kube-apiserver`が**{{< skew latestVersion >}}** であること

-必要に応じて、`kubelet`インスタンスを**1.(n+1)** にアップグレードしてください(**1.n**や**1.(n-1)** のままにすることもできます)。
+必要に応じて、`kubelet`インスタンスを**{{< skew latestVersion >}}** にアップグレードしてください(**{{< skew prevMinorVersion >}}**や**{{< skew oldestMinorVersion >}}** のままにすることもできます)。

 {{< warning >}}
 `kube-apiserver`と2つのマイナーバージョンの`kubelet`インスタンスを使用してクラスターを実行させることは推奨されません:

@@ -130,3 +132,18 @@ HAクラスター内の`kube-apiserver`間にバージョンの差異がある
 * コントロールプレーンをアップグレードする前に、インスタンスを`kube-apiserver`の1つのマイナーバージョン内にアップグレードさせる必要があります
 * メンテナンスされている3つのマイナーリリースよりも古いバージョンの`kubelet`を実行する可能性が高まります
 {{< /warning >}}
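+
+たとえば、各ノードで実際に動作している`kubelet`のバージョンは次のように一覧できます(説明用のスケッチです):
+
+```shell
+# ノードごとのkubeletバージョンを表示する
+kubectl get nodes -o custom-columns=NAME:.metadata.name,KUBELET:.status.nodeInfo.kubeletVersion
+```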
+
+
+### kube-proxy
+
+* `kube-proxy`のマイナーバージョンはノード上の`kubelet`と同じマイナーバージョンでなければなりません
+* `kube-proxy`は`kube-apiserver`よりも新しいものであってはなりません
+* `kube-proxy`のマイナーバージョンは`kube-apiserver`のマイナーバージョンよりも2つ以上古いものであってはなりません
+
+例:
+
+`kube-proxy`のバージョンが**{{< skew oldestMinorVersion >}}**の場合:
+
+* `kubelet`のバージョンは**{{< skew oldestMinorVersion >}}**でなければなりません
+* `kube-apiserver`のバージョンは**{{< skew oldestMinorVersion >}}**と**{{< skew latestVersion >}}**の間でなければなりません
diff --git a/content/ja/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md b/content/ja/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md
index b2ebc16d18907..d5f6b72296e5b 100644
--- a/content/ja/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md
+++ b/content/ja/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md
@@ -31,9 +31,9 @@ card:

 ## クラスター、ユーザー、コンテキストを設定する

-例として、開発用のクラスターが一つ、実験用のクラスターが一つ、計二つのクラスターが存在する場合を考えます。`development`と呼ばれる開発用のクラスター内では、フロントエンドの開発者は`frontend`というnamespace内で、ストレージの開発者は`storage`というnamespace内で作業をします。`scratch`と呼ばれる実験用のクラスター内では、開発者はデフォルトのnamespaceで作業をするか、状況に応じて追加のnamespaceを作成します。開発用のクラスターは証明書を通しての認証を必要とします。実験用のクラスターはユーザーネームとパスワードを通しての認証を必要とします。
+例として、開発用のクラスターが一つ、実験用のクラスターが一つ、計二つのクラスターが存在する場合を考えます。`development`と呼ばれる開発用のクラスター内では、フロントエンドの開発者は`frontend`というnamespace内で、ストレージの開発者は`storage`というnamespace内で作業をします。`scratch`と呼ばれる実験用のクラスター内では、開発者はデフォルトのnamespaceで作業をするか、状況に応じて追加のnamespaceを作成します。開発用のクラスターは証明書を通しての認証を必要とします。実験用のクラスターはユーザーネームとパスワードを通しての認証を必要とします。

-`config-exercise`というディレクトリを作成してください。`config-exercise`ディレクトリ内に、以下を含む`config-demo`というファイルを作成してください:
+`config-exercise`というディレクトリを作成してください。`config-exercise`ディレクトリ内に、以下を含む`config-demo`というファイルを作成してください:

 ```shell
 apiVersion: v1
@@ -61,7 +61,7 @@ contexts:

 設定ファイルには、クラスター、ユーザー、コンテキストの情報が含まれています。上記の`config-demo`設定ファイルには、二つのクラスター、二人のユーザー、三つのコンテキストの情報が含まれています。

-`config-exercise`ディレクトリに移動してください。クラスター情報を設定ファイルに追加するために、以下のコマンドを実行してください:
+`config-exercise`ディレクトリに移動してください。クラスター情報を設定ファイルに追加するために、以下のコマンドを実行してください:

 ```shell
 kubectl config --kubeconfig=config-demo set-cluster development --server=https://1.2.3.4 --certificate-authority=fake-ca-file
@@ -89,7 +89,7 @@ kubectl config --kubeconfig=config-demo set-context dev-storage --cluster=develo

 kubectl config --kubeconfig=config-demo set-context exp-scratch --cluster=scratch --namespace=default --user=experimenter
 ```

-追加した情報を確認するために、`config-demo`ファイルを開いてください。`config-demo`ファイルを開く代わりに、`config view`のコマンドを使うこともできます。
+追加した情報を確認するために、`config-demo`ファイルを開いてください。`config-demo`ファイルを開く代わりに、`config view`のコマンドを使うこともできます。

 ```shell
 kubectl config --kubeconfig=config-demo view
diff --git a/content/ja/docs/tasks/access-application-cluster/ingress-minikube.md b/content/ja/docs/tasks/access-application-cluster/ingress-minikube.md
index 563ce2478efe0..be267080996d9 100644
--- a/content/ja/docs/tasks/access-application-cluster/ingress-minikube.md
+++ b/content/ja/docs/tasks/access-application-cluster/ingress-minikube.md
@@ -134,28 +134,12 @@ weight: 100

 1. 以下の内容で`example-ingress.yaml`を作成します。

-    ```yaml
-    apiVersion: networking.k8s.io/v1beta1
-    kind: Ingress
-    metadata:
-      name: example-ingress
-      annotations:
-        nginx.ingress.kubernetes.io/rewrite-target: /$1
-    spec:
-      rules:
-      - host: hello-world.info
-        http:
-          paths:
-          - path: /
-            backend:
-              serviceName: web
-              servicePort: 8080
-    ```
+    {{< codenew file="service/networking/example-ingress.yaml" >}}

 1. 次のコマンドを実行して、Ingressリソースを作成します。

     ```shell
-    kubectl apply -f example-ingress.yaml
+    kubectl apply -f https://kubernetes.io/examples/service/networking/example-ingress.yaml
     ```

     出力は次のようになります。
@@ -175,8 +159,8 @@ weight: 100
     {{< /note >}}

     ```shell
-    NAME              HOSTS              ADDRESS       PORTS   AGE
-    example-ingress   hello-world.info   172.17.0.15   80      38s
+    NAME              CLASS    HOSTS              ADDRESS       PORTS   AGE
+    example-ingress   <none>   hello-world.info   172.17.0.15   80      38s
     ```

 1. 次の行を`/etc/hosts`ファイルの最後に書きます。
@@ -241,9 +225,12 @@ weight: 100

     ```yaml
     - path: /v2
+      pathType: Prefix
       backend:
-        serviceName: web2
-        servicePort: 8080
+        service:
+          name: web2
+          port:
+            number: 8080
     ```

 1. 次のコマンドで変更を適用します。
@@ -300,6 +287,3 @@ weight: 100
 * [Ingress](/ja/docs/concepts/services-networking/ingress/)についてさらに学ぶ。
 * [Ingressコントローラー](/ja/docs/concepts/services-networking/ingress-controllers/)についてさらに学ぶ。
 * [Service](/ja/docs/concepts/services-networking/service/)についてさらに学ぶ。
-
-
-
diff --git a/content/ja/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md b/content/ja/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md
new file mode 100644
index 0000000000000..1bc1b7951d46c
--- /dev/null
+++ b/content/ja/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md
@@ -0,0 +1,258 @@
+---
+title: Namespaceに対する最小および最大メモリー制約の構成
+
+content_type: task
+weight: 30
+---
+
+
+
+このページでは、Namespaceで実行されるコンテナが使用するメモリーの最小値と最大値を設定する方法を説明します。
+[LimitRange](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#limitrange-v1-core) で最小値と最大値のメモリー値を指定します。
+PodがLimitRangeによって課される制約を満たさない場合、そのNamespaceではPodを作成できません。
+
+
+## {{% heading "prerequisites" %}}
+
+
+{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
+
+クラスター内の各ノードには、少なくとも1GiBのメモリーが必要です。
+
+
+
+
+## Namespaceの作成
+
+この演習で作成したリソースがクラスターの他の部分から分離されるように、Namespaceを作成します。
+
+
+```shell
+kubectl create namespace constraints-mem-example
+```
+
+## LimitRangeとPodを作成
+
+LimitRangeの設定ファイルです。
+
+{{< codenew file="admin/resource/memory-constraints.yaml" >}}
+
+LimitRangeを作成します。
+
+```shell
+kubectl apply -f https://k8s.io/examples/admin/resource/memory-constraints.yaml --namespace=constraints-mem-example
+```
+
+LimitRangeの詳細情報を表示します。
+
+
+```shell
+kubectl get limitrange mem-min-max-demo-lr --namespace=constraints-mem-example --output=yaml
+```
+
+出力は、予想通り、メモリー制約の最小値と最大値を示しています。
+しかし、LimitRangeの設定ファイルでデフォルト値を指定していないにもかかわらず、
+デフォルト値が自動的に作成されていることに気づきます。
+
+
+```
+  limits:
+  - default:
+      memory: 1Gi
+    defaultRequest:
+      memory: 1Gi
+    max:
+      memory: 1Gi
+    min:
+      memory: 500Mi
+    type: Container
+```
+
+
+constraints-mem-example Namespaceにコンテナが作成されるたびに、
+Kubernetesは以下の手順を実行するようになっています。
+
+* コンテナが独自のメモリー要求と制限を指定しない場合は、デフォルトのメモリー要求と制限をコンテナに割り当てます。
+
+* コンテナに500MiB以上のメモリー要求があることを確認します。
+
+* コンテナのメモリー制限が1GiB以下であることを確認します。
+
+以下は、1つのコンテナを持つPodの設定ファイルです。設定ファイルのコンテナ(containers)では、600MiBのメモリー要求と800MiBのメモリー制限が指定されています。これらはLimitRangeによって課される最小と最大のメモリー制約を満たしています。
+
+
+{{< codenew file="admin/resource/memory-constraints-pod.yaml" >}}
+
+Podを作成します。
+
+```shell
+kubectl apply -f https://k8s.io/examples/admin/resource/memory-constraints-pod.yaml --namespace=constraints-mem-example
+```
+
+Podのコンテナが実行されていることを確認します。
+
+```shell
+kubectl get pod constraints-mem-demo --namespace=constraints-mem-example
+```
+
+Podの詳細情報を見ます。
+
+```shell
+kubectl get pod constraints-mem-demo --output=yaml --namespace=constraints-mem-example
+```
+
+出力は、コンテナが600MiBのメモリー要求と800MiBのメモリー制限になっていることを示しています。これらはLimitRangeによって課される制約を満たしています。
+
+
+```yaml
+resources:
+  limits:
+    memory: 800Mi
+  requests:
memory: 600Mi +``` + +Podを消します。 + +```shell +kubectl delete pod constraints-mem-demo --namespace=constraints-mem-example +``` + +## 最大メモリ制約を超えるPodの作成の試み + +これは、1つのコンテナを持つPodの設定ファイルです。コンテナは800MiBのメモリー要求と1.5GiBのメモリー制限を指定しています。 + + +{{< codenew file="admin/resource/memory-constraints-pod-2.yaml" >}} + +Podを作成してみます。 + +```shell +kubectl apply -f https://k8s.io/examples/admin/resource/memory-constraints-pod-2.yaml --namespace=constraints-mem-example +``` + +出力は、コンテナが大きすぎるメモリー制限を指定しているため、Podが作成されないことを示しています。 + + +``` +Error from server (Forbidden): error when creating "examples/admin/resource/memory-constraints-pod-2.yaml": +pods "constraints-mem-demo-2" is forbidden: maximum memory usage per Container is 1Gi, but limit is 1536Mi. +``` + +## 最低限のメモリ要求を満たさないPodの作成の試み + + +これは、1つのコンテナを持つPodの設定ファイルです。コンテナは100MiBのメモリー要求と800MiBのメモリー制限を指定しています。 + + +{{< codenew file="admin/resource/memory-constraints-pod-3.yaml" >}} + +Podを作成してみます。 + +```shell +kubectl apply -f https://k8s.io/examples/admin/resource/memory-constraints-pod-3.yaml --namespace=constraints-mem-example +``` + +出力は、コンテナが小さすぎるメモリー要求を指定しているため、Podが作成されないことを示しています。 + +``` +Error from server (Forbidden): error when creating "examples/admin/resource/memory-constraints-pod-3.yaml": +pods "constraints-mem-demo-3" is forbidden: minimum memory usage per Container is 500Mi, but request is 100Mi. +``` + +## メモリ要求や制限を指定しないPodの作成 + + +これは、1つのコンテナを持つPodの設定ファイルです。コンテナはメモリー要求を指定しておらず、メモリー制限も指定していません。 + +{{< codenew file="admin/resource/memory-constraints-pod-4.yaml" >}} + +Podを作成します。 + +```shell +kubectl apply -f https://k8s.io/examples/admin/resource/memory-constraints-pod-4.yaml --namespace=constraints-mem-example +``` + +Podの詳細情報を見ます + +``` +kubectl get pod constraints-mem-demo-4 --namespace=constraints-mem-example --output=yaml +``` + +出力を見ると、Podのコンテナのメモリ要求は1GiB、メモリー制限は1GiBであることがわかります。 +コンテナはどのようにしてこれらの値を取得したのでしょうか? 
+
+
+```
+resources:
+  limits:
+    memory: 1Gi
+  requests:
+    memory: 1Gi
+```
+
+コンテナが独自のメモリー要求と制限を指定していなかったため、LimitRangeから[デフォルトのメモリー要求と制限](/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/)が与えられたのです。
+
+この時点で、コンテナは起動しているかもしれませんし、起動していないかもしれません。このタスクの前提条件は、ノードが少なくとも1GiBのメモリーを持っていることであることを思い出してください。それぞれのノードが1GiBのメモリーしか持っていない場合、どのノードにも1GiBのメモリー要求に対応するのに十分な割り当て可能なメモリーがありません。たまたま2GiBのメモリーを持つノードを使用しているのであれば、おそらく1GiBのメモリーリクエストに対応するのに十分なスペースを持っていることになります。
+
+
+Podを削除します。
+
+```
+kubectl delete pod constraints-mem-demo-4 --namespace=constraints-mem-example
+```
+
+## 最小および最大メモリー制約の強制
+
+LimitRangeによってNamespaceに課される最大および最小のメモリー制約は、Podが作成または更新されたときにのみ適用されます。LimitRangeを変更しても、以前に作成されたPodには影響しません。
+
+
+## 最小・最大メモリー制約の動機
+
+
+クラスター管理者としては、Podが使用できるメモリー量に制限を課したいと思うかもしれません。
+
+
+例:
+
+* クラスター内の各ノードは2GBのメモリーを持っています。クラスター内のどのノードもその要求をサポートできないため、2GB以上のメモリーを要求するPodは受け入れたくありません。
+
+
+* クラスターは運用部門と開発部門で共有されています。 本番用のワークロードでは最大8GBのメモリーを消費しますが、開発用のワークロードでは512MBに制限したいとします。本番用と開発用に別々のNamespaceを作成し、それぞれのNamespaceにメモリー制限を適用します。
+
+## クリーンアップ
+
+Namespaceを削除します。
+
+```shell
+kubectl delete namespace constraints-mem-example
+```
+
+
+
+## {{% heading "whatsnext" %}}
+
+
+### クラスター管理者向け
+
+* [名前空間に対するデフォルトのメモリー要求と制限の構成](/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/)
+
+* [名前空間に対するデフォルトのCPU要求と制限の構成](/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace/)
+
+* [名前空間に対する最小および最大CPU制約の構成](/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace/)
+
+* [名前空間に対するメモリーとCPUのクォータの構成](/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace/)
+
+* [名前空間に対するPodクォータの設定](/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace/)
+
+* [APIオブジェクトのクォータの設定](/docs/tasks/administer-cluster/quota-api-object/)
+
+### アプリケーション開発者向け
+
+* [コンテナとPodへのメモリーリソースの割り当て](/docs/tasks/configure-pod-container/assign-memory-resource/)
+
+* [コンテナとPodへのCPUリソースの割り当て](/docs/tasks/configure-pod-container/assign-cpu-resource/)
+
+* [PodのQoS(サービス品質)を設定](/docs/tasks/configure-pod-container/quality-service-pod/)
diff --git a/content/ja/docs/tasks/configmap-secret/_index.md b/content/ja/docs/tasks/configmap-secret/_index.md
new file mode 100755
index 0000000000000..18a8018ce568f
--- /dev/null
+++ b/content/ja/docs/tasks/configmap-secret/_index.md
@@ -0,0 +1,6 @@
+---
+title: "Secretの管理"
+weight: 28
+description: Secretを使用した機密設定データの管理
+---
+
diff --git a/content/ja/docs/tasks/configmap-secret/managing-secret-using-config-file.md b/content/ja/docs/tasks/configmap-secret/managing-secret-using-config-file.md
new file mode 100644
index 0000000000000..f9572ca1f4551
--- /dev/null
+++ b/content/ja/docs/tasks/configmap-secret/managing-secret-using-config-file.md
@@ -0,0 +1,183 @@
+---
+title: 設定ファイルを使用してSecretを管理する
+content_type: task
+weight: 20
+description: リソース設定ファイルを使用してSecretを作成する
+---
+
+
+
+## {{% heading "prerequisites" %}}
+
+{{< include "task-tutorial-prereqs.md" >}}
+
+
+
+## 設定ファイルを作成する
+
+あらかじめYAMLまたはJSON形式でSecretのマニフェストを作成したうえで、オブジェクトを作成することができます。
+[Secret](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#secret-v1-core)リソースには、`data`と`stringData`の2つのマップが含まれています。
+`data`フィールドは任意のデータを格納するのに使用され、base64でエンコードされます。
+`stringData`フィールドは利便性のために用意されており、Secretデータをエンコードされていない文字列として提供することができます。
+`data`と`stringData`のキーは、英数字、`-`、`_`、`.`で構成されている必要があります。
+
+たとえば、`data`フィールドを使用して2つの文字列をSecretに格納するには、次のように文字列をbase64に変換します:
+
+```shell
+echo -n 'admin' | base64
+```
+
+出力は次のようになります:
+
+```
+YWRtaW4=
+```
+
+```shell
+echo -n '1f2d1e2e67df' | base64
+```
+
+出力は次のようになります:
+
+```
+MWYyZDFlMmU2N2Rm
+```
+
+以下のようなSecret設定ファイルを記述します:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: mysecret
+type: Opaque
+data:
+  username: YWRtaW4=
+  password: MWYyZDFlMmU2N2Rm
+```
+
+なお、Secretオブジェクトの名前は、有効な[DNSサブドメイン名](/ja/docs/concepts/overview/working-with-objects/names#dns-subdomain-names)である必要があります。
+
+{{< note >}}
+SecretデータのシリアライズされたJSONおよびYAMLの値は、base64文字列としてエンコードされます。
+文字列中の改行は不正で、含まれていてはなりません。
+Darwin/macOSで`base64`ユーティリティーを使用する場合、長い行を分割するために`-b`オプションを使用するのは避けるべきです。
+逆に、Linuxユーザーは、`base64`コマンドにオプション`-w 0`を追加するか、`-w`オプションが利用できない場合には、パイプライン`base64 | tr -d '\n'`を追加する*必要があります*。
+{{< /note >}}
+
+特定のシナリオでは、代わりに`stringData`フィールドを使用できます。
+このフィールドでは、base64エンコードされていない文字列を直接Secretに入れることができ、Secretの作成時や更新時には、その文字列がエンコードされます。
+
+たとえば、設定ファイルを保存するためにSecretを使用しているアプリケーションをデプロイする際に、デプロイプロセス中に設定ファイルの一部を入力したい場合などが考えられます。
+
+たとえば、次のような設定ファイルを使用しているアプリケーションの場合:
+
+```yaml
+apiUrl: "https://my.api.com/api/v1"
+username: ""
+password: ""
+```
+
+次のような定義でSecretに格納できます:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: mysecret
+type: Opaque
+stringData:
+  config.yaml: |
+    apiUrl: "https://my.api.com/api/v1"
+    username: {{username}}
+    password: {{password}}
+```
+
+## Secretを作成する
+
+[`kubectl apply`](/docs/reference/generated/kubectl/kubectl-commands#apply)でSecretを作成します:
+
+```shell
+kubectl apply -f ./secret.yaml
+```
+
+出力は次のようになります:
+
+```
+secret/mysecret created
+```
+
+## Secretを確認する
+
+`stringData`フィールドは、書き込み専用の便利なフィールドです。Secretを取得する際には決して出力されません。たとえば、次のようなコマンドを実行した場合:
+
+```shell
+kubectl get secret mysecret -o yaml
+```
+
+出力は次のようになります:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  creationTimestamp: 2018-11-15T20:40:59Z
+  name: mysecret
+  namespace: default
+  resourceVersion: "7225"
+  uid: c280ad2e-e916-11e8-98f2-025000000001
+type: Opaque
+data:
+  config.yaml: YXBpVXJsOiAiaHR0cHM6Ly9teS5hcGkuY29tL2FwaS92MSIKdXNlcm5hbWU6IHt7dXNlcm5hbWV9fQpwYXNzd29yZDoge3twYXNzd29yZH19
+```
+
+`kubectl get`と`kubectl describe`コマンドはデフォルトではSecretの内容を表示しません。
+これは、Secretが不用意に他人にさらされたり、ターミナルログに保存されたりしないようにするためです。
+エンコードされたデータの実際の内容を確認するには、[Secretのデコード](/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl/#decoding-secret)を参照してください。
+
+`username`などのフィールドが`data`と`stringData`の両方に指定されている場合は、`stringData`の値が使われます。
+たとえば、以下のようなSecretの定義の場合:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: mysecret
+type: Opaque
+data:
+  username: YWRtaW4=
+stringData:
+  username: administrator
+```
+
+結果は以下の通りです:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  creationTimestamp: 2018-11-15T20:46:46Z
+  name: mysecret
+  namespace: default
+  resourceVersion: "7579"
+  uid: 91460ecb-e917-11e8-98f2-025000000001
+type: Opaque
+data:
+  username: YWRtaW5pc3RyYXRvcg==
+```
+
+`YWRtaW5pc3RyYXRvcg==`をデコードすると`administrator`となります。
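+
+たとえば、次のように実際にデコードして確認できます(説明用のスケッチです):
+
+```shell
+# stringDataの値が優先されてエンコードされたことを確認する
+echo 'YWRtaW5pc3RyYXRvcg==' | base64 --decode
+```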
+
+## クリーンアップ
+
+作成したSecretを削除するには次のコマンドを実行します:
+
+```shell
+kubectl delete secret mysecret
+```
+
+## {{% heading "whatsnext" %}}
+
+- [Secretのコンセプト](/ja/docs/concepts/configuration/secret/)を読む
+- [kubectlを使用してSecretを管理する](/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl/)方法を知る
+- [kustomizeを使用してSecretを管理する](/docs/tasks/configmap-secret/managing-secret-using-kustomize/)方法を知る
diff --git a/content/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl.md b/content/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl.md
new file mode 100644
index 0000000000000..9e498de8acbba
--- /dev/null
+++ b/content/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl.md
@@ -0,0 +1,146 @@
+---
+title: kubectlを使用してSecretを管理する
+content_type: task
+weight: 10
+description: kubectlコマンドラインを使用してSecretを作成する
+---
+
+
+
+## {{% heading "prerequisites" %}}
+
+{{< include "task-tutorial-prereqs.md" >}}
+
+
+
+## Secretを作成する
+
+`Secret`はデータベースにアクセスするためにPodが必要とするユーザー資格情報を含めることができます。
+たとえば、データベース接続文字列はユーザー名とパスワードで構成されます。
+ユーザー名はローカルマシンの`./username.txt`に、パスワードは`./password.txt`に保存します。
+
+```shell
+echo -n 'admin' > ./username.txt
+echo -n '1f2d1e2e67df' > ./password.txt
+```
+
+上記の2つのコマンドの`-n`フラグは、生成されたファイルにテキスト末尾の余分な改行文字が含まれないようにします。
+`kubectl`がファイルを読み取り、内容をbase64文字列にエンコードすると、余分な改行文字もエンコードされるため、これは重要です。
+
+`kubectl create secret`コマンドはこれらのファイルをSecretにパッケージ化し、APIサーバー上にオブジェクトを作成します。
+
+```shell
+kubectl create secret generic db-user-pass \
+  --from-file=./username.txt \
+  --from-file=./password.txt
+```
+
+出力は次のようになります:
+
+```
+secret/db-user-pass created
+```
+
+ファイル名がデフォルトのキー名になります。オプションで`--from-file=[key=]source`を使用してキー名を設定できます。たとえば:
+
+```shell
+kubectl create secret generic db-user-pass \
+  --from-file=username=./username.txt \
+  --from-file=password=./password.txt
+```
+
+`--from-file`に指定したファイルに含まれるパスワードの特殊文字をエスケープする必要はありません。
+
+また、`--from-literal=<key>=<source>`タグを使用してSecretデータを提供することもできます。
+このタグは、複数のキーと値のペアを提供するために複数回指定することができます。
+`$`、`\`、`*`、`=`、`!`などの特殊文字は[シェル](https://en.wikipedia.org/wiki/Shell_(computing))によって解釈されるため、エスケープを必要とすることに注意してください。
+ほとんどのシェルでは、パスワードをエスケープする最も簡単な方法は、シングルクォート(`'`)で囲むことです。
+たとえば、実際のパスワードが`S!B\*d$zDsb=`の場合、次のようにコマンドを実行します:
+
+```shell
+kubectl create secret generic dev-db-secret \
+  --from-literal=username=devuser \
+  --from-literal=password='S!B\*d$zDsb='
+```
+
+## Secretを検証する
+
+Secretが作成されたことを確認できます:
+
+```shell
+kubectl get secrets
+```
+
+出力は次のようになります:
+
+```
+NAME           TYPE     DATA   AGE
+db-user-pass   Opaque   2      51s
+```
+
+`Secret`の説明を参照できます:
+
+```shell
+kubectl describe secrets/db-user-pass
+```
+
+出力は次のようになります:
+
+```
+Name:         db-user-pass
+Namespace:    default
+Labels:       <none>
+Annotations:  <none>
+
+Type:  Opaque
+
+Data
+====
+password:  12 bytes
+username:  5 bytes
+```
+
+`kubectl get`と`kubectl describe`コマンドはデフォルトでは`Secret`の内容を表示しません。
+これは、`Secret`が不用意に他人にさらされたり、ターミナルログに保存されたりしないようにするためです。
+
+## Secretをデコードする {#decoding-secret}
+
+先ほど作成したSecretの内容を見るには、以下のコマンドを実行します:
+
+```shell
+kubectl get secret db-user-pass -o jsonpath='{.data}'
+```
+
+出力は次のようになります:
+
+```json
+{"password.txt":"MWYyZDFlMmU2N2Rm","username.txt":"YWRtaW4="}
+```
+
+`password.txt`のデータをデコードします:
+
+```shell
+echo 'MWYyZDFlMmU2N2Rm' | base64 --decode
+```
+
+出力は次のようになります:
+
+```
+1f2d1e2e67df
+```
+
+## クリーンアップ
+
+作成したSecretを削除するには次のコマンドを実行します:
+
+```shell
+kubectl delete secret db-user-pass
+```
+
+
+
+## {{% heading "whatsnext" %}}
+
+- [Secretのコンセプト](/ja/docs/concepts/configuration/secret/)を読む
+- [設定ファイルを使用してSecretを管理する](/ja/docs/tasks/configmap-secret/managing-secret-using-config-file/)方法を知る
+- [kustomizeを使用してSecretを管理する](/docs/tasks/configmap-secret/managing-secret-using-kustomize/)方法を知る
diff --git a/content/ja/docs/tasks/debug-application-cluster/debug-service.md b/content/ja/docs/tasks/debug-application-cluster/debug-service.md
index 5daefb2f59f1a..fa8ce7109bbba 100644
--- a/content/ja/docs/tasks/debug-application-cluster/debug-service.md
+++ b/content/ja/docs/tasks/debug-application-cluster/debug-service.md
@@ -365,7 +365,7 @@ kubectl get service hostnames -o json
 kubectl get pods -l app=hostnames
 ```
 ```none
-NAME                        READY     STATUS    RESTARTS   AGE
+NAME                        READY     STATUS    RESTARTS   AGE
 hostnames-632524106-bbpiw   1/1       Running   0          1h
 hostnames-632524106-ly40y   1/1       Running   0          1h
 hostnames-632524106-tlaok   1/1       Running   0          1h
diff --git
a/content/ja/docs/tasks/job/_index.md b/content/ja/docs/tasks/job/_index.md new file mode 100644 index 0000000000000..bde073017bc07 --- /dev/null +++ b/content/ja/docs/tasks/job/_index.md @@ -0,0 +1,6 @@ +--- +title: "Jobの実行" +description: 並列処理を使用してJobを実行します。 +weight: 50 +--- + diff --git a/content/ja/docs/tasks/job/automated-tasks-with-cron-jobs.md b/content/ja/docs/tasks/job/automated-tasks-with-cron-jobs.md new file mode 100644 index 0000000000000..17855a66d6067 --- /dev/null +++ b/content/ja/docs/tasks/job/automated-tasks-with-cron-jobs.md @@ -0,0 +1,175 @@ +--- +title: CronJobを使用して自動化タスクを実行する +min-kubernetes-server-version: v1.21 +content_type: task +weight: 10 +--- + + + +CronJobは、Kubernetes v1.21で一般利用(GA)に昇格しました。古いバージョンのKubernetesを使用している場合、正確な情報を参照できるように、使用しているバージョンのKubernetesのドキュメントを参照してください。古いKubernetesのバージョンでは、`batch/v1` CronJob APIはサポートされていません。 + +{{< glossary_tooltip text="CronJob" term_id="cronjob" >}}を使用すると、{{< glossary_tooltip text="Job" term_id="job" >}}を時間ベースのスケジュールで実行できるようになります。この自動化されたJobは、LinuxまたはUNIXシステム上の[Cron](https://ja.wikipedia.org/wiki/Cron)のように実行されます。 + +CronJobは、バックアップやメールの送信など、定期的なタスクや繰り返しのタスクを作成する時に便利です。CronJobはそれぞれのタスクを、たとえばアクティビティが少ない期間など、特定の時間にスケジューリングすることもできます。 + +CronJobには制限と特性があります。たとえば、特定の状況下では、1つのCronJobが複数のJobを作成する可能性があるため、Jobは冪等性を持つようにしなければいけません。 + +制限に関する詳しい情報については、[CronJob](/ja/docs/concepts/workloads/controllers/cron-jobs/)を参照してください。 + +## {{% heading "prerequisites" %}} + +* {{< include "task-tutorial-prereqs.md" >}} + + + +## CronJobを作成する + +CronJobには設定ファイルが必要です。次の例のCronJobの`.spec`は、現在の時刻とhelloというメッセージを1分ごとに表示します。 + +{{< codenew file="application/job/cronjob.yaml" >}} + +次のコマンドで例のCronJobを実行します。 + +```shell +kubectl create -f https://k8s.io/examples/application/job/cronjob.yaml +``` + +出力は次のようになります。 + +``` +cronjob.batch/hello created +``` + +CronJobを作成したら、次のコマンドで状態を取得します。 + +```shell +kubectl get cronjob hello +``` + +出力は次のようになります。 + +``` +NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE +hello */1 * * * * False 0 10s +``` + +コマンドの結果からわかるように、CronJobはまだスケジュールされておらず、まだ何のJobも実行していません。約1分以内にJobが作成されるのを見てみましょう。 + +```shell +kubectl get jobs --watch +``` + +出力は次のようになります。 + +``` +NAME COMPLETIONS DURATION AGE +hello-4111706356 0/1 0s +hello-4111706356 0/1 0s 0s +hello-4111706356 1/1 5s 5s +``` + +"hello"CronJobによってスケジュールされたJobが1つ実行中になっていることがわかります。Jobを見るのをやめて、再度CronJobを表示して、Jobがスケジュールされたことを確認してみます。 + +```shell +kubectl get cronjob hello +``` + +出力は次のようになります。 + +``` +NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE +hello */1 * * * * False 0 50s 75s +``` + +CronJob`hello`が、`LAST SCHEDULE`で指定された時間にJobを正しくスケジュールしたことが確認できるはずです。現在、activeなJobの数は0です。つまり、Jobは完了または失敗したことがわかります。 + +それでは、最後にスケジュールされたJobの作成と、Podの1つの標準出力を表示してみましょう。 + +{{< note >}} +Jobの名前とPodの名前は異なります。 +{{< /note >}} + +```shell +# "hello-4111706356" の部分は、あなたのシステム上のJobの名前に置き換えてください。 +pods=$(kubectl get pods --selector=job-name=hello-4111706356 --output=jsonpath={.items[*].metadata.name}) +``` + +Podのログを表示します。 + +```shell +kubectl logs $pods +``` + +出力は次のようになります。 + +``` +Fri Feb 22 11:02:09 UTC 2019 +Hello from the Kubernetes cluster +``` + +## CronJobの削除 + +CronJobが必要なくなったときは、`kubectl delete cronjob `で削除します。 + +```shell +kubectl delete cronjob hello +``` + +CronJobを削除すると、すべてのJobと、そのJobが作成したPodが削除され、追加のJobの作成が停止されます。Jobの削除について詳しく知りたい場合は、[ガベージコレクション](/ja/docs/concepts/workloads/controllers/garbage-collection/)を読んでください。 + +## CronJobのspecを書く {#writing-a-cron-job-spec} + 
+すべてのKubernetesの設定と同じように、CronJobにも`apiVersion`、`kind`、`metadata`のフィールドが必要です。設定ファイルの扱い方についての一般的な情報については、[アプリケーションのデプロイ](/ja/docs/tasks/run-application/run-stateless-application-deployment/)と[kubectlを使用してリソースを管理する](/ja/docs/concepts/overview/working-with-objects/object-management/)を読んでください。 + +CronJobの設定には、[`.spec`セクション](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status)も必要です。 + +{{< note >}} +CronJobの特に`spec`へのすべての修正は、それ以降の実行にのみ適用されます。 +{{< /note >}} + +### Schedule + +`.spec.schedule`は、`.spec`には必須のフィールドです。`0 * * * *`や`@hourly`などの[Cron](https://ja.wikipedia.org/wiki/Cron)形式の文字列を取り、Jobの作成と実行のスケジュール時間を指定します。 + +フォーマットにはVixie cronのステップ値(step value)も指定できます。[FreeBSDのマニュアル](https://www.freebsd.org/cgi/man.cgi?crontab%285%29)では次のように説明されています。 + +> ステップ値は範囲指定と組み合わせて使用できます。範囲の後ろに`/`を付けると、範囲全体で指定したnumberの値ごとにスキップすることを意味します。たとえば、`0-23/2`をhoursフィールドに指定すると、2時間毎にコマンド実行を指定することになります(V7標準では代わりに`0,2,4,6,8,10,12,14,16,18,20,22`と指定する必要があります)。ステップはアスタリスクの後ろにつけることもできます。そのため、「2時間毎に実行」したい場合は、単純に`*/2`と指定できます。 + +{{< note >}} +スケジュール内の疑問符`?`はアスタリスク`*`と同じ意味を持ちます。つまり、与えられたフィールドには任意の値が使えるという意味になります。 +{{< /note >}} + +### Job Template + +`.spec.jobTemplate`はJobのテンプレートであり、必須です。[Job](/docs/concepts/workloads/controllers/job/)と完全に同一のスキーマを持ちますが、フィールドがネストされている点と、`apiVersion`と`kind`が存在しない点だけが異なります。Jobの`.spec`を書くための情報については、[JobのSpecを書く](/docs/concepts/workloads/controllers/job/#writing-a-job-spec)を参照してください。 + +### Starting Deadline + +`.spec.startingDeadlineSeconds`フィールドはオプションです。何かの理由でスケジュールに間に合わなかった場合に適用される、Jobの開始のデッドライン(締め切り)を秒数で指定します。デッドラインを過ぎると、CronJobはJobを開始しません。この場合にデッドラインに間に合わなかったJobは、失敗したJobとしてカウントされます。もしこのフィールドが指定されなかった場合、Jobはデッドラインを持ちません。 + +`.spec.startingDeadlineSeconds`フィールドがnull以外に設定された場合、CronJobコントローラーはJobの作成が期待される時間と現在時刻との間の時間を計測します。もしその差が制限よりも大きかった場合、その実行はスキップされます。 + +たとえば、この値が`200`に設定された場合、実際のスケジュールの最大200秒後までに作成されるJobだけが許可されます。 + +### Concurrency Policy + +`.spec.concurrencyPolicy`フィールドもオプションです。このフィールドは、このCronJobで作成されたJobの並列実行をどのように扱うかを指定します。specには以下のconcurrency policyのいずれかを指定します。 + +* `Allow` (デフォルト): CronJobがJobを並列に実行することを許可します。 +* `Forbid`: CronJobの並列実行を禁止します。もし新しいJobの実行時に過去のJobがまだ完了していなかった場合、CronJobは新しいJobの実行をスキップします。 +* `Replace`: もし新しいJobの実行の時間になっても過去のJobの実行が完了していなかった場合、CronJobは現在の実行中のJobを新しいJobで置換します。 + +concurrency policyは、同じCronJobが作成したJobにのみ適用されます。もし複数のCronJobがある場合、それぞれのJobの並列実行は常に許可されます。 + +### Suspend + +`.spec.suspend`フィールドもオプションです。このフィールドを`true`に設定すると、すべての後続の実行がサスペンド(一時停止)されます。この設定はすでに実行開始したJobには適用されません。デフォルトはfalseです。 + +{{< caution >}} +スケジュールされた時間中にサスペンドされた実行は、見逃されたJob(missed job)としてカウントされます。[starting deadline](#starting-deadline)が設定されていない既存のCronJob`.spec.suspend`が`true`から`false`に変更されると、見逃されたJobは即座にスケジュールされます。 +{{< /caution >}} + +### Job History Limit + +`.spec.successfulJobsHistoryLimit`と`.spec.failedJobsHistoryLimit`フィールドはオプションです。これらのフィールドには、完了したJobと失敗したJobをいくつ保持するかを指定します。デフォルトでは、それぞれ3と1に設定されます。リミットを`0`に設定すると、対応する種類のJobを実行完了後に何も保持しなくなります。 diff --git a/content/ja/docs/tasks/run-application/force-delete-stateful-set-pod.md b/content/ja/docs/tasks/run-application/force-delete-stateful-set-pod.md index 83f7b52c8573b..ac20713a8d745 100644 --- a/content/ja/docs/tasks/run-application/force-delete-stateful-set-pod.md +++ b/content/ja/docs/tasks/run-application/force-delete-stateful-set-pod.md @@ -33,7 +33,7 @@ kubectl delete pods 上記がグレースフルターミネーションにつながるためには、`pod.Spec.TerminationGracePeriodSeconds`に0を指定しては**いけません**。`pod.Spec.TerminationGracePeriodSeconds`を0秒に設定することは安全ではなく、StatefulSet 
Podには強くお勧めできません。グレースフル削除は安全で、kubeletがapiserverから名前を削除する前にPodが[適切にシャットダウンする](/ja/docs/concepts/workloads/pods/pod-lifecycle/#termination-of-pods)ことを保証します。

-Kubernetes(バージョン1.5以降)は、Nodeにアクセスできないという理由だけでPodを削除しません。到達不能なNodeで実行されているPodは、[タイムアウト](/docs/concepts/architecture/nodes/#node-condition)の後に`Terminating`または`Unknown`状態になります。到達不能なNode上のPodをユーザーが適切に削除しようとすると、Podはこれらの状態に入ることもあります。そのような状態のPodをapiserverから削除することができる唯一の方法は以下の通りです:
+Kubernetes(バージョン1.5以降)は、Nodeにアクセスできないという理由だけでPodを削除しません。到達不能なNodeで実行されているPodは、[タイムアウト](/ja/docs/concepts/architecture/nodes/#condition)の後に`Terminating`または`Unknown`状態になります。到達不能なNode上のPodをユーザーが適切に削除しようとすると、Podはこれらの状態に入ることもあります。そのような状態のPodをapiserverから削除することができる唯一の方法は以下の通りです:

 * (ユーザーまたは[Node Controller](/ja/docs/concepts/architecture/nodes/)によって)Nodeオブジェクトが削除されます。
 * 応答していないNodeのkubeletが応答を開始し、Podを終了してapiserverからエントリーを削除します。
@@ -76,4 +76,3 @@ StatefulSet Podの強制削除は、常に慎重に、関連するリスクを

 [StatefulSetのデバッグ](/docs/tasks/debug-application-cluster/debug-stateful-set/)の詳細

-
diff --git a/content/ja/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/content/ja/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md
new file mode 100644
index 0000000000000..a395a412ba59e
--- /dev/null
+++ b/content/ja/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md
@@ -0,0 +1,403 @@
+---
+title: Horizontal Pod Autoscalerウォークスルー
+content_type: task
+weight: 100
+---
+
+
+
+Horizontal Pod Autoscalerは、Deployment、ReplicaSetまたはStatefulSetといったレプリケーションコントローラ内のPodの数を、観測されたCPU使用率(もしくはベータサポートの、アプリケーションによって提供されるその他のメトリクス)に基づいて自動的にスケールさせます。
+
+このドキュメントはphp-apacheサーバーに対しHorizontal Pod Autoscalerを有効化するという例に沿ってウォークスルーで説明していきます。Horizontal Pod Autoscalerの動作についてのより詳細な情報を知りたい場合は、[Horizontal Pod Autoscalerユーザーガイド](/docs/tasks/run-application/horizontal-pod-autoscale/)をご覧ください。
+
+## {{% heading "prerequisites" %}}
+
+この例ではバージョン1.2以上の動作するKubernetesクラスターおよびkubectlが必要です。
+[Metrics API](https://github.com/kubernetes/metrics)を介してメトリクスを提供するために、[Metrics server](https://github.com/kubernetes-sigs/metrics-server)によるモニタリングがクラスター内にデプロイされている必要があります。
+Horizontal Pod Autoscalerはメトリクスを収集するためにこのAPIを利用します。metrics-serverをデプロイする方法を知りたい場合は[metrics-server ドキュメント](https://github.com/kubernetes-sigs/metrics-server#deployment)をご覧ください。
+
+Horizontal Pod Autoscalerで複数のリソースメトリクスを利用するためには、バージョン1.6以上のKubernetesクラスターおよびkubectlが必要です。カスタムメトリクスを使えるようにするためには、あなたのクラスターがカスタムメトリクスAPIを提供するAPIサーバーと通信できる必要があります。
+最後に、Kubernetesオブジェクトと関係のないメトリクスを使うにはバージョン1.10以上のKubernetesクラスターおよびkubectlが必要で、さらにあなたのクラスターが外部メトリクスAPIを提供するAPIサーバーと通信できる必要があります。
+詳細については[Horizontal Pod Autoscaler user guide](/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics)をご覧ください。
+
+
+
+## php-apacheの起動と公開
+
+Horizontal Pod Autoscalerのデモンストレーションのために、php-apacheイメージをもとにしたカスタムのDockerイメージを使います。
+このDockerfileは下記のようになっています。
+
+```dockerfile
+FROM php:5-apache
+COPY index.php /var/www/html/index.php
+RUN chmod a+rx index.php
+```
+
+これはCPU負荷の高い演算を行うindex.phpを定義しています。
+
+```php
+<?php
+  $x = 0.0001;
+  for ($i = 0; $i <= 1000000; $i++) {
+    $x += sqrt($x);
+  }
+  echo "OK!";
+?>
+```
+
+まず最初に、イメージを動かすDeploymentを起動し、Serviceとして公開しましょう。
+下記の設定を使います。
+
+{{< codenew file="application/php-apache.yaml" >}}
+
+以下のコマンドを実行してください。
+
+```shell
+kubectl apply -f https://k8s.io/examples/application/php-apache.yaml
+```
+
+```
+deployment.apps/php-apache created
+service/php-apache created
+```
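+
+先に進む前に、DeploymentとServiceが作成されたことを確認できます(説明用のスケッチです。出力は環境によって異なります):
+
+```shell
+# DeploymentとServiceの状態を確認する
+kubectl get deployment,service php-apache
+```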
+
+## Horizontal Pod Autoscalerを作成する
+
+サーバーが起動したら、[kubectl autoscale](/docs/reference/generated/kubectl/kubectl-commands#autoscale)を使ってautoscalerを作成しましょう。以下のコマンドで、最初のステップで作成したphp-apache deploymentによって制御されるPodレプリカ数を1から10の間に維持するHorizontal Pod Autoscalerを作成します。
+簡単に言うと、HPAは(Deploymentを通じて)レプリカ数を増減させ、すべてのPodにおける平均CPU使用率を50%(それぞれのPodは`kubectl run`で200 milli-coresを要求しているため、平均CPU使用率100 milli-coresを意味します)に保とうとします。
+このアルゴリズムについての詳細は[こちら](/docs/tasks/run-application/horizontal-pod-autoscale/#algorithm-details)をご覧ください。
+
+```shell
+kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
+```
+
+```
+horizontalpodautoscaler.autoscaling/php-apache autoscaled
+```
+
+以下を実行して現在のAutoscalerの状況を確認できます。
+
+```shell
+kubectl get hpa
+```
+
+```
+NAME         REFERENCE                     TARGET    MINPODS   MAXPODS   REPLICAS   AGE
+php-apache   Deployment/php-apache/scale   0% / 50%  1         10        1          18s
+```
+
+現在はサーバーにリクエストを送っていないため、CPU使用率が0%になっていることに注意してください(`TARGET`カラムは対応するDeploymentによって制御される全てのPodの平均値を示しています。)。
+
+## 負荷の増加
+
+Autoscalerがどのように負荷の増加に反応するか見てみましょう。
+コンテナを作成し、クエリの無限ループをphp-apacheサーバーに送ってみます(これは別のターミナルで実行してください)。
+
+```shell
+kubectl run -i --tty load-generator --rm --image=busybox --restart=Never -- /bin/sh -c "while sleep 0.01; do wget -q -O- http://php-apache; done"
+```
+
+数分以内に、下記を実行することでCPU負荷が高まっていることを確認できます。
+
+```shell
+kubectl get hpa
+```
+
+```
+NAME         REFERENCE                     TARGET      MINPODS   MAXPODS   REPLICAS   AGE
+php-apache   Deployment/php-apache/scale   305% / 50%  1         10        1          3m
+```
+
+ここでは、CPU使用率はrequestの305%にまで高まっています。
+結果として、Deploymentはレプリカ数7にリサイズされました。
+
+```shell
+kubectl get deployment php-apache
+```
+
+```
+NAME         READY   UP-TO-DATE   AVAILABLE   AGE
+php-apache   7/7     7            7           19m
+```
+
+{{< note >}}
+レプリカ数が安定するまでは数分かかることがあります。負荷量は何らかの方法で制御されているわけではないので、最終的なレプリカ数はこの例とは異なる場合があります。
+{{< /note >}}
+
+## 負荷の停止
+
+ユーザー負荷を止めてこの例を終わらせましょう。
+
+私たちが`busybox`イメージを使って作成したコンテナ内のターミナルで、`<Ctrl> + C`を入力して負荷生成を終了させます。
+
+そして結果の状態を確認します(数分後)。
+
+```shell
+kubectl get hpa
+```
+
+```
+NAME         REFERENCE                     TARGET    MINPODS   MAXPODS   REPLICAS   AGE
+php-apache   Deployment/php-apache/scale   0% / 50%  1         10        1          11m
+```
+
+```shell
+kubectl get deployment php-apache
+```
+
+```
+NAME         READY   UP-TO-DATE   AVAILABLE   AGE
+php-apache   1/1     1            1           27m
+```
+
+ここでCPU使用率は0に下がり、HPAによってオートスケールされたレプリカ数は1に戻ります。
+
+{{< note >}}
+レプリカのオートスケールには数分かかることがあります。
+{{< /note >}}
+
+
+
+## 複数のメトリクスやカスタムメトリクスを基にオートスケーリングする
+
+`autoscaling/v2beta2` APIバージョンを使うと、`php-apache` Deploymentをオートスケーリングする際に使う追加のメトリクスを導入することが出来ます。
+
+まず、`autoscaling/v2beta2`内のHorizontalPodAutoscalerのYAMLファイルを入手します。
+
+```shell
+kubectl get hpa.v2beta2.autoscaling -o yaml > /tmp/hpa-v2.yaml
+```
+
+`/tmp/hpa-v2.yaml`ファイルをエディタで開くと、以下のようなYAMLファイルが見えるはずです。
+
+```yaml
+apiVersion: autoscaling/v2beta2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: php-apache
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: php-apache
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 50
+status:
+  observedGeneration: 1
+  lastScaleTime: <unknown>
+  currentReplicas: 1
+  desiredReplicas: 1
+  currentMetrics:
+  - type: Resource
+    resource:
+      name: cpu
+      current:
+        averageUtilization: 0
+        averageValue: 0
+```
+
+`targetCPUUtilizationPercentage`フィールドは`metrics`と呼ばれる配列に置換されています。
+CPU使用率メトリクスは、Podコンテナで定められたリソースの割合として表されるため、*リソースメトリクス*です。CPU以外のリソースメトリクスを指定することもできます。デフォルトでは、他にメモリだけがリソースメトリクスとしてサポートされています。これらのリソースはクラスター間で名前が変わることはなく、そして`metrics.k8s.io` APIが利用可能である限り常に利用可能です。
+
+さらに`target.type`において`Utilization`の代わりに`AverageValue`を使い、`target.averageUtilization`フィールドの代わりに対応する`target.averageValue`フィールドを設定することで、リソースメトリクスをrequest値に対する割合に代わり、直接的な値に設定することも可能です。
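+
+たとえば、CPUメトリクスを使用率ではなく直接値(`AverageValue`)で指定する場合は次のようになります。これは最小限のスケッチで、目標値の`500m`は説明用の仮の値です:
+
+```shell
+# 説明用のスケッチ: resourceメトリクスのtargetにAverageValueを使うHPAを適用する
+cat <<EOF | kubectl apply -f -
+apiVersion: autoscaling/v2beta2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: php-apache
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: php-apache
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: AverageValue
+        averageValue: 500m
+EOF
+```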
+
+PodメトリクスとObjectメトリクスという2つの異なる種類のメトリクスが存在し、どちらも*カスタムメトリクス*とみなされます。これらのメトリクスはクラスター特有の名前を持ち、利用するにはより発展的なクラスター監視設定が必要となります。
+
+これらの代替メトリクスタイプのうち、最初のものが*Podメトリクス*です。これらのメトリクスはPodを説明し、Podを渡って平均され、レプリカ数を決定するためにターゲット値と比較されます。
+これらはほとんどリソースメトリクス同様に機能しますが、`target`の種類としては`AverageValue`*のみ*をサポートしている点が異なります。
+
+Podメトリクスはmetricブロックを使って以下のように指定されます。
+
+```yaml
+type: Pods
+pods:
+  metric:
+    name: packets-per-second
+  target:
+    type: AverageValue
+    averageValue: 1k
+```
+
+2つ目のメトリクスタイプは*Objectメトリクス*です。これらのメトリクスはPodを説明するかわりに、同一Namespace内の異なったオブジェクトを説明します。このメトリクスはオブジェクトから取得される必要はありません。単に説明するだけです。Objectメトリクスは`target`の種類として`Value`と`AverageValue`をサポートします。`Value`では、ターゲットはAPIから返ってきたメトリクスと直接比較されます。`AverageValue`では、カスタムメトリクスAPIから返ってきた値はターゲットと比較される前にPodの数で除算されます。以下の例は`requests-per-second`メトリクスのYAML表現です。
+
+```yaml
+type: Object
+object:
+  metric:
+    name: requests-per-second
+  describedObject:
+    apiVersion: networking.k8s.io/v1beta1
+    kind: Ingress
+    name: main-route
+  target:
+    type: Value
+    value: 2k
+```
+
+もしこのようなmetricブロックを複数提供した場合、HorizontalPodAutoscalerはこれらのメトリクスを順番に処理します。
+HorizontalPodAutoscalerはそれぞれのメトリクスについて推奨レプリカ数を算出し、その中で最も多いレプリカ数を採用します。
+
+例えば、もしあなたがネットワークトラフィックについてのメトリクスを収集する監視システムを持っているなら、`kubectl edit`を使って指定を次のように更新することができます。
+
+```yaml
+apiVersion: autoscaling/v2beta2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: php-apache
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: php-apache
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 50
+  - type: Pods
+    pods:
+      metric:
+        name: packets-per-second
+      target:
+        type: AverageValue
+        averageValue: 1k
+  - type: Object
+    object:
+      metric:
+        name: requests-per-second
+      describedObject:
+        apiVersion: networking.k8s.io/v1beta1
+        kind: Ingress
+        name: main-route
+      target:
+        type: Value
+        value: 10k
+status:
+  observedGeneration: 1
+  lastScaleTime: <unknown>
+  currentReplicas: 1
+  desiredReplicas: 1
+  currentMetrics:
+  - type: Resource
+    resource:
+      name: cpu
+      current:
+        averageUtilization: 0
+        averageValue: 0
+  - type: Object
+    object:
+      metric:
+        name: requests-per-second
+      describedObject:
+        apiVersion: networking.k8s.io/v1beta1
+        kind: Ingress
+        name: main-route
+      current:
+        value: 10k
+```
+
+この時、HorizontalPodAutoscalerはそれぞれのPodがCPU requestの50%を使い、1秒当たり1000パケットを送信し、そしてmain-route
+Ingressの裏にあるすべてのPodが合計で1秒当たり10000パケットを送信する状態を保持しようとします。
+
+### より詳細なメトリクスをもとにオートスケーリングする
+
+多くのメトリクスパイプラインは、名前もしくは _labels_ と呼ばれる追加の記述子の組み合わせによって説明することができます。全てのリソースメトリクス以外のメトリクスタイプ(Pod、Object、そして下で説明されている外部メトリクス)において、メトリクスパイプラインに渡す追加のラベルセレクターを指定することができます。例えば、もしあなたが`http_requests`メトリクスを`verb`ラベルとともに収集しているなら、下記のmetricブロックを指定してGETリクエストにのみ基づいてスケールさせることができます。
+
+```yaml
+type: Object
+object:
+  metric:
+    name: http_requests
+    selector: {matchLabels: {verb: GET}}
+```
+
+このセレクターは完全なKubernetesラベルセレクターと同じ文法を利用します。もし名前とセレクターが複数の系列に一致した場合、この監視パイプラインはどのようにして複数の系列を一つの値にまとめるかを決定します。このセレクターは付加的なもので、ターゲットオブジェクト(`Pods`タイプの場合は対象Pod、`Object`タイプの場合は説明されるオブジェクト)では**ない**オブジェクトを説明するメトリクスを選択することは出来ません。
+
+### Kubernetesオブジェクトと関係ないメトリクスに基づいたオートスケーリング
+
+Kubernetes上で動いているアプリケーションを、Kubernetes Namespaceと直接的な関係がないサービスを説明するメトリクスのような、Kubernetesクラスター内のオブジェクトと明確な関係が無いメトリクスを基にオートスケールする必要があるかもしれません。Kubernetes 1.10以降では、このようなユースケースを*外部メトリクス*によって解決できます。
+
+外部メトリクスを使うにはあなたの監視システムについての知識が必要となります。この設定はカスタムメトリクスを使うときのものに似ています。外部メトリクスを使うとあなたの監視システムのあらゆる利用可能なメトリクスに基づいてクラスターをオートスケールできるようになります。上記のように`metric`ブロックで`name`と`selector`を設定し、`Object`のかわりに`External`メトリクスタイプを使います。
+もし複数の時系列が`metricSelector`により一致した場合は、それらの値の合計がHorizontalPodAutoscalerに使われます。
+外部メトリクスは`Value`と`AverageValue`の両方のターゲットタイプをサポートしています。これらの機能は`Object`タイプを利用するときとまったく同じです。
+
+例えば、もしあなたのアプリケーションがホストされたキューサービスからのタスクを処理している場合、あなたは下記のセクションをHorizontalPodAutoscalerマニフェストに追記し、未処理のタスク30個あたり1つのワーカーを必要とすることを指定します。
+
+```yaml
+- type: External
+  external:
+    metric:
+      name: queue_messages_ready
+      selector: "queue=worker_tasks"
+    target:
+      type: AverageValue
+      averageValue: 30
+```
+
+可能なら、クラスター管理者がカスタムメトリクスAPIを保護することを簡単にするため、外部メトリクスのかわりにカスタムメトリクスを用いることが望ましいです。外部メトリクスAPIは潜在的に全てのメトリクスへのアクセスを許可するため、クラスター管理者はこれを公開する際には注意が必要です。
+
+## 付録: Horizontal Pod Autoscaler status conditions
+
+`autoscaling/v2beta2`形式のHorizontalPodAutoscalerを使っている場合は、KubernetesによるHorizontalPodAutoscaler上の*status conditions*セットを見ることができます。status conditionsはHorizontalPodAutoscalerがスケール可能かどうか、そして現時点でそれが何らかの方法で制限されているかどうかを示しています。
+
+このconditionsは`status.conditions`フィールドに現れます。HorizontalPodAutoscalerに影響しているconditionsを確認するために、`kubectl describe hpa`を利用できます。
+
+```shell
+kubectl describe hpa cm-test
+```
+
+```
+Name:                           cm-test
+Namespace:                      prom
+Labels:                         <none>
+Annotations:                    <none>
+CreationTimestamp:              Fri, 16 Jun 2017 18:09:22 +0000
+Reference:                      ReplicationController/cm-test
+Metrics:                        ( current / target )
+  "http_requests" on pods:      66m / 500m
+Min replicas:                   1
+Max replicas:                   4
+ReplicationController pods:     1 current / 1 desired
+Conditions:
+  Type                  Status  Reason                  Message
+  ----                  ------  ------                  -------
+  AbleToScale           True    ReadyForNewScale        the last scale time was sufficiently old as to warrant a new scale
+  ScalingActive         True    ValidMetricFound        the HPA was able to successfully calculate a replica count from pods metric http_requests
+  ScalingLimited        False   DesiredWithinRange      the desired replica count is within the acceptable range
+Events:
+```
+
+このHorizontalPodAutoscalerにおいて、いくつかの正常な状態のconditionsを見ることができます。まず最初に、`AbleToScale`は、HPAがスケール状況を取得し、更新させることが出来るかどうかだけでなく、何らかのbackoffに関連した状況がスケーリングを妨げていないかを示しています。2番目に、`ScalingActive`は、HPAが有効化されているかどうか(例えば、レプリカ数のターゲットがゼロでないこと)や、望ましいスケールを算出できるかどうかを示します。もしこれが`False`の場合、大体はメトリクスの取得において問題があることを示しています。最後に、一番最後の状況である`ScalingLimited`は、HorizontalPodAutoscalerの最大値や最小値によって望ましいスケールがキャップされていることを示しています。この指標を見てHorizontalPodAutoscaler上の最大・最小レプリカ数制限を増やす、もしくは減らす検討ができます。
+
+## 付録: 数量
+
+全てのHorizontalPodAutoscalerおよびメトリクスAPIにおけるメトリクスは{{< glossary_tooltip term_id="quantity" text="quantity">}}として知られる特殊な整数表記によって指定されます。例えば、`10500m`という数量は10進数表記で`10.5`と書くことができます。メトリクスAPIは可能であれば接尾辞を用いない整数を返し、そうでない場合は基本的にミリ単位での数量を返します。これはメトリクス値が`1`と`1500m`の間で、もしくは10進法表記で書かれた場合は`1`と`1.5`の間で変動するということを意味します。
+
+## 付録: その他の起きうるシナリオ
+
+### Autoscalerを宣言的に作成する
+
+`kubectl autoscale`コマンドを使って命令的にHorizontalPodAutoscalerを作るかわりに、下記のファイルを使って宣言的に作成することができます。
+
+{{< codenew file="application/hpa/php-apache.yaml" >}}
+
+下記のコマンドを実行してAutoscalerを作成します。
+
+```shell
+kubectl create -f https://k8s.io/examples/application/hpa/php-apache.yaml
+```
+
+```
+horizontalpodautoscaler.autoscaling/php-apache created
+```
diff --git a/content/ja/docs/tasks/run-application/run-stateless-application-deployment.md b/content/ja/docs/tasks/run-application/run-stateless-application-deployment.md
index 963df7852408a..da7c286f89b95 100644
--- a/content/ja/docs/tasks/run-application/run-stateless-application-deployment.md
+++ b/content/ja/docs/tasks/run-application/run-stateless-application-deployment.md
@@ -49,7 +49,6 @@ Kubernetes Deploymentオブジェクトを作成することでアプリケー

 出力はこのようになります:

-    user@computer:~/website$ kubectl describe deployment nginx-deployment
     Name:     nginx-deployment
     Namespace:    default
     CreationTimestamp:  Tue, 30 Aug
2016 18:11:37 -0700 diff --git a/content/ja/docs/tasks/tools/_index.md b/content/ja/docs/tasks/tools/_index.md index ac97313bb8455..72a8c1361fad9 100755 --- a/content/ja/docs/tasks/tools/_index.md +++ b/content/ja/docs/tasks/tools/_index.md @@ -17,7 +17,7 @@ Kubernetesのコマンドラインツール`kubectl`を使用すると、Kuberne [Minikube](https://minikube.sigs.k8s.io/)は、Kubernetesをローカルで実行するツールです。MinikubeはシングルノードのKubernetesクラスターをパーソナルコンピューター上(Windows、macOS、Linux PCを含む)で実行することで、Kubernetesを試したり、日常的な開発作業のために利用できます。 -ツールのインストールについて知りたい場合は、公式の[Get Started!](https://minikube.sigs.k8s.io/docs/start/)のガイドに従うか、[Minikubeのインストール](/ja/docs/tasks/tools/install-minikube/)を読んでください。 +ツールのインストールについて知りたい場合は、公式の[Get Started!](https://minikube.sigs.k8s.io/docs/start/)のガイドに従ってください。 Minikubeが起動したら、[サンプルアプリケーションの実行](/ja/docs/tutorials/hello-minikube/)を試すことができます。 diff --git a/content/ja/docs/tasks/tools/install-kubectl.md b/content/ja/docs/tasks/tools/install-kubectl.md index 2ab3ca50f747e..a33b4756719ed 100644 --- a/content/ja/docs/tasks/tools/install-kubectl.md +++ b/content/ja/docs/tasks/tools/install-kubectl.md @@ -502,7 +502,7 @@ compinit ## {{% heading "whatsnext" %}} -* [Minikubeをインストールする](/ja/docs/tasks/tools/install-minikube/) +* [Minikubeをインストールする](https://minikube.sigs.k8s.io/docs/start/) * クラスターの作成に関する詳細を[スタートガイド](/ja/docs/setup/)で確認する * [アプリケーションを起動して公開する方法を学ぶ](/ja/docs/tasks/access-application-cluster/service-access-application-cluster/) * あなたが作成していないクラスターにアクセスする必要がある場合は、[クラスターアクセスドキュメントの共有](/ja/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)を参照してください diff --git a/content/ja/docs/tasks/tools/install-minikube.md b/content/ja/docs/tasks/tools/install-minikube.md deleted file mode 100644 index 730145740c120..0000000000000 --- a/content/ja/docs/tasks/tools/install-minikube.md +++ /dev/null @@ -1,267 +0,0 @@ ---- -title: Minikubeのインストール -content_type: task -weight: 20 -card: - name: tasks - weight: 10 ---- - - - -このページでは[Minikube](/ja/docs/tutorials/hello-minikube)のインストール方法を説明し、コンピューターの仮想マシン上で単一ノードのKubernetesクラスターを実行します。 - - - -## {{% heading "prerequisites" %}} - - -{{< tabs name="minikube_before_you_begin" >}} -{{% tab name="Linux" %}} -Linuxで仮想化がサポートされているかどうかを確認するには、次のコマンドを実行して、出力が空でないことを確認します: -``` -grep -E --color 'vmx|svm' /proc/cpuinfo -``` -{{% /tab %}} - -{{% tab name="macOS" %}} -仮想化がmacOSでサポートされているかどうかを確認するには、ターミナルで次のコマンドを実行します。 -``` -sysctl -a | grep -E --color 'machdep.cpu.features|VMX' -``` -出力に`VMX`が表示されている場合(色付けされているはずです)、VT-x機能がマシンで有効になっています。 -{{% /tab %}} - -{{% tab name="Windows" %}} -Windows 8以降で仮想化がサポートされているかどうかを確認するには、Windowsターミナルまたはコマンドプロンプトで次のコマンドを実行します。 -``` -systeminfo -``` -次の出力が表示される場合、仮想化はWindowsでサポートされています。 -``` -Hyper-V Requirements: VM Monitor Mode Extensions: Yes - Virtualization Enabled In Firmware: Yes - Second Level Address Translation: Yes - Data Execution Prevention Available: Yes -``` - -次の出力が表示される場合、システムにはすでにHypervisorがインストールされており、次の手順をスキップできます。 -``` -Hyper-V Requirements: A hypervisor has been detected. Features required for Hyper-V will not be displayed. 
-``` - - -{{% /tab %}} -{{< /tabs >}} - - - - - -## minikubeのインストール - -{{< tabs name="tab_with_md" >}} -{{% tab name="Linux" %}} - -### kubectlのインストール - -kubectlがインストールされていることを確認してください。 -[kubectlのインストールとセットアップ](/ja/docs/tasks/tools/install-kubectl/#install-kubectl-on-linux)の指示に従ってkubectlをインストールできます。 - -### ハイパーバイザーのインストール - -ハイパーバイザーがまだインストールされていない場合は、これらのいずれかをインストールしてください: - -• [KVM](https://www.linux-kvm.org/)、ただしQEMUも使っているもの - -• [VirtualBox](https://www.virtualbox.org/wiki/Downloads) - -Minikubeは、VMではなくホストでKubernetesコンポーネントを実行する`--driver=none`オプションもサポートしています。 -このドライバーを使用するには、[Docker](https://www.docker.com/products/docker-desktop)とLinux環境が必要ですが、ハイパーバイザーは不要です。 - -Debian系のLinuxで`none`ドライバーを使用する場合は、snapパッケージではなく`.deb`パッケージを使用してDockerをインストールしてください。snapパッケージはMinikubeでは機能しません。 -[Docker](https://www.docker.com/products/docker-desktop)から`.deb`パッケージをダウンロードできます。 - -{{< caution >}} -`none` VMドライバーは、セキュリティとデータ損失の問題を引き起こす可能性があります。 -`--driver=none`を使用する前に、詳細について[このドキュメント](https://minikube.sigs.k8s.io/docs/reference/drivers/none/) を参照してください。 -{{< /caution >}} - -MinikubeはDockerドライバーと似たような`vm-driver=podman`もサポートしています。Podmanを特権ユーザー権限(root user)で実行することは、コンテナがシステム上の利用可能な機能へ完全にアクセスするための最もよい方法です。 - -{{< caution >}} -`podman` ドライバーは、rootでコンテナを実行する必要があります。これは、通常のユーザーアカウントが、コンテナの実行に必要とされるすべてのOS機能への完全なアクセスを持っていないためです。 -{{< /caution >}} - -### パッケージを利用したMinikubeのインストール - -Minikubeの*Experimental*パッケージが利用可能です。 -GitHubのMinikubeの[リリース](https://github.com/kubernetes/minikube/releases)ページからLinux(AMD64)パッケージを見つけることができます。 - -Linuxのディストリビューションのパッケージツールを使用して、適切なパッケージをインストールしてください。 - -### 直接ダウンロードによるMinikubeのインストール - -パッケージ経由でインストールしない場合は、スタンドアロンバイナリをダウンロードして使用できます。 - -```shell -curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 \ - && chmod +x minikube -``` - -Minikube実行可能バイナリをパスに追加する簡単な方法を次に示します: - -```shell -sudo mkdir -p /usr/local/bin/ -sudo install minikube /usr/local/bin/ -``` - -### Homebrewを利用したMinikubeのインストール - -別の選択肢として、Linux [Homebrew](https://docs.brew.sh/Homebrew-on-Linux)を利用してインストールできます。 - -```shell -brew install minikube -``` - -{{% /tab %}} -{{% tab name="macOS" %}} -### kubectlのインストール - -kubectlがインストールされていることを確認してください。 -[kubectlのインストールとセットアップ](/ja/docs/tasks/tools/install-kubectl/#install-kubectl-on-macos)の指示に従ってkubectlをインストールできます。 - -### ハイパーバイザーのインストール - -ハイパーバイザーがまだインストールされていない場合は、これらのいずれかをインストールしてください: - -• [HyperKit](https://github.com/moby/hyperkit) - -• [VirtualBox](https://www.virtualbox.org/wiki/Downloads) - -• [VMware Fusion](https://www.vmware.com/products/fusion) - -### Minikubeのインストール -[Homebrew](https://brew.sh)を使うことでmacOSにMinikubeを簡単にインストールできます: - -```shell -brew install minikube -``` - -スタンドアロンのバイナリをダウンロードして、macOSにインストールすることもできます: - -```shell -curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-amd64 \ - && chmod +x minikube -``` - -Minikube実行可能バイナリをパスに追加する簡単な方法を次に示します: - -```shell -sudo mv minikube /usr/local/bin -``` - -{{% /tab %}} -{{% tab name="Windows" %}} -### kubectlのインストール - -kubectlがインストールされていることを確認してください。 -[kubectlのインストールとセットアップ](/ja/docs/tasks/tools/install-kubectl/#install-kubectl-on-windows)の指示に従ってkubectlをインストールできます。 - -### ハイパーバイザーのインストール - -ハイパーバイザーがまだインストールされていない場合は、これらのいずれかをインストールしてください: - -• [Hyper-V](https://msdn.microsoft.com/en-us/virtualization/hyperv_on_windows/quick_start/walkthrough_install) - -• [VirtualBox](https://www.virtualbox.org/wiki/Downloads) - -{{< note >}} -Hyper-Vは、Windows 10 Enterprise、Windows 10 Professional、Windows 10 Educationの3つのバージョンのWindows 
10で実行できます。 -{{< /note >}} - -### Chocolateyを使用したMinikubeのインストール - -[Chocolatey](https://chocolatey.org/)を使うことでWindowsにMinikubeを簡単にインストールできます(管理者権限で実行する必要があります)。 - -```shell -choco install minikube -``` - -Minikubeのインストールが終わったら、現在のCLIのセッションを終了して再起動します。Minikubeは自動的にパスに追加されます。 - -### インストーラーを使用したMinikubeのインストール - -[Windowsインストーラー](https://docs.microsoft.com/en-us/windows/desktop/msi/windows-installer-portal)を使用してWindowsにMinikubeを手動でインストールするには、[`minikube-installer.exe`](https://github.com/kubernetes/minikube/releases/latest/download/minikube-installer.exe)をダウンロードしてインストーラーを実行します。 - -### 直接ダウンロードによるMinikubeのインストール - -WindowsにMinikubeを手動でインストールするには、[`minikube-windows-amd64`](https://github.com/kubernetes/minikube/releases/latest)をダウンロードし、名前を`minikube.exe`に変更して、パスに追加します。 - -{{% /tab %}} -{{< /tabs >}} - - - -## インストールの確認 - -ハイパーバイザーとMinikube両方のインストール成功を確認するため、以下のコマンドをローカルKubernetesクラスターを起動するために実行してください: - -{{< note >}} - -`minikube start`で`--driver`の設定をするため、次の``の部分では、インストールしたハイパーバイザーの名前を小文字で入力してください。`--driver`値のすべてのリストは、[specifying the VM driver documentation](/docs/setup/learning-environment/minikube/#specifying-the-vm-driver)で確認できます。 - -{{< /note >}} - -{{< caution >}} -KVMを使用する場合、Debianおよび他の一部のシステムでのlibvirtのデフォルトのQEMU URIは`qemu:///session`であるのに対し、MinikubeのデフォルトのQEMU URIは`qemu:///system`であることに注意してください。これがあなたのシステムに当てはまる場合、`--kvm-qemu-uri qemu:///session`を`minikube start`に渡す必要があります。 -{{< /caution >}} - -```shell -minikube start --driver= -``` - -`minikube start`が完了した場合、次のコマンドを実行してクラスターの状態を確認します。 - -```shell -minikube status -``` - -クラスターが起動していると、`minikube status`の出力はこのようになります。 - -``` -host: Running -kubelet: Running -apiserver: Running -kubeconfig: Configured -``` - -選択したハイパーバイザーでMinikubeが動作しているか確認した後は、そのままMinikubeを使い続けることもできます。また、クラスターを停止することもできます。クラスターを停止するためには、次を実行してください。 - -```shell -minikube stop -``` - -## ローカル状態のクリーンアップ {#cleanup-local-state} - -もし以前に Minikubeをインストールしていたら、以下のコマンドを実行します。 -```shell -minikube start -``` - -`minikube start`はエラーを返します。 -```shell -machine does not exist -``` - -minikubeのローカル状態をクリアする必要があります: -```shell -minikube delete -``` - - -## {{% heading "whatsnext" %}} - - -* [Minikubeを使ってローカルでKubernetesを実行する](/ja/docs/setup/learning-environment/minikube/) - diff --git a/content/ja/docs/tutorials/clusters/_index.md b/content/ja/docs/tutorials/clusters/_index.md new file mode 100755 index 0000000000000..0f3a08fdda0ab --- /dev/null +++ b/content/ja/docs/tutorials/clusters/_index.md @@ -0,0 +1,4 @@ +--- +title: "クラスター" +weight: 60 +--- diff --git a/content/ja/docs/tutorials/clusters/apparmor.md b/content/ja/docs/tutorials/clusters/apparmor.md new file mode 100644 index 0000000000000..5ee0201d71bcc --- /dev/null +++ b/content/ja/docs/tutorials/clusters/apparmor.md @@ -0,0 +1,375 @@ +--- +title: AppArmorを使用してコンテナのリソースへのアクセスを制限する +content_type: tutorial +weight: 10 +--- + + + +{{< feature-state for_k8s_version="v1.4" state="beta" >}} + +AppArmorは、Linux標準のユーザー・グループをベースとしたパーミッションを補完するLinuxカーネルのセキュリティモジュールであり、プログラムのアクセスを限定されたリソースセットに制限するために利用されます。AppArmorを設定することで、任意のアプリケーションの攻撃サーフェイスとなりうる面を減らしたり、より優れた多重の防御を提供できます。AppArmorは、たとえばLinuxのcapability、ネットワークアクセス、ファイルのパーミッションなど、特定のプログラムやコンテナに必要なアクセスを許可するようにチューニングされたプロファイルにより設定を行います。各プロファイルは、許可されなかったリソースへのアクセスをブロックする*enforcing*モードと、ルール違反を報告するだけの*complain*モードのいずれかで実行できます。 + +AppArmorを利用すれば、コンテナに許可することを制限したりシステムログを通してよりよい監査を提供することで、デプロイをよりセキュアにする助けになります。しかし、AppArmorは銀の弾丸ではなく、アプリケーションコードの悪用からの防御を強化できるだけであることを心に留めておくことが重要です。制限の強い優れたプロファイルを提供し、アプリケーションとクラスターを別の角度から強化することが重要です。 + +## {{% heading "objectives" %}} + +* 
プロファイルをノードに読み込む方法の例を見る
+* Pod上でプロファイルを強制する方法を学ぶ
+* プロファイルが読み込まれたかを確認する方法を学ぶ
+* プロファイルに違反した場合に何が起こるのかを見る
+* プロファイルが読み込めなかった場合に何が起こるのかを見る
+
+## {{% heading "prerequisites" %}}
+
+以下のことを確認してください。
+
+1. Kubernetesのバージョンがv1.4以上であること。KubernetesのAppArmorのサポートはv1.4で追加されました。v1.4より古いバージョンのKubernetesのコンポーネントは、新しいAppArmorのアノテーションを認識できないため、AppArmorの設定を与えたとしても**黙って無視されてしまいます**。Podが期待した保護を確実に受けられるようにするためには、次のようにノードのKubeletのバージョンを確認することが重要です。
+
+    ```shell
+    kubectl get nodes -o=jsonpath=$'{range .items[*]}{@.metadata.name}: {@.status.nodeInfo.kubeletVersion}\n{end}'
+    ```
+    ```
+    gke-test-default-pool-239f5d02-gyn2: v1.4.0
+    gke-test-default-pool-239f5d02-x1kf: v1.4.0
+    gke-test-default-pool-239f5d02-xwux: v1.4.0
+    ```
+
+2. AppArmorカーネルモジュールが有効であること。LinuxカーネルがAppArmorプロファイルを強制するためには、AppArmorカーネルモジュールのインストールと有効化が必須です。UbuntuやSUSEなどのディストリビューションではデフォルトで有効化されますが、他の多くのディストリビューションでのサポートはオプションです。モジュールが有効になっているかチェックするには、次のように`/sys/module/apparmor/parameters/enabled`ファイルを確認します。
+
+    ```shell
+    cat /sys/module/apparmor/parameters/enabled
+    Y
+    ```
+
+    KubeletがAppArmorをサポートしていれば(>= v1.4)、カーネルモジュールが有効になっていない場合にはAppArmorオプションが付いたPodを拒否します。
+
+    {{< note >}}
+    UbuntuはAppArmorに対して、アップストリームのLinuxにマージしていない多数のパッチを当てています。その中には、追加のフックや機能を加えるパッチも含まれます。Kubernetesはアップストリームのバージョンでのみテストされており、その他の機能に対するサポートを約束していません。
+    {{< /note >}}
+
+3. コンテナランタイムがAppArmorをサポートしていること。現在、Kubernetesがサポートするすべての一般的なコンテナランタイム、{{< glossary_tooltip term_id="docker">}}、{{< glossary_tooltip term_id="cri-o" >}}、{{< glossary_tooltip term_id="containerd" >}}などは、AppArmorをサポートしています。関連するランタイムのドキュメントを参照して、クラスターがAppArmorを利用するための要求を満たしているかどうかを検証してください。
+
+4. プロファイルが読み込まれていること。AppArmorがPodに適用されるのは、各コンテナが実行されるべきAppArmorプロファイルを指定したときです。もし指定されたプロファイルがまだカーネルに読み込まれていなければ、Kubelet(>= v1.4)はPodを拒否します。どのプロファイルがノードに読み込まれているのかを確かめるには、次のようなコマンドを実行して`/sys/kernel/security/apparmor/profiles`をチェックします。
+
+    ```shell
+    ssh gke-test-default-pool-239f5d02-gyn2 "sudo cat /sys/kernel/security/apparmor/profiles | sort"
+    ```
+    ```
+    apparmor-test-deny-write (enforce)
+    apparmor-test-audit-write (enforce)
+    docker-default (enforce)
+    k8s-nginx (enforce)
+    ```
+
+    ノード上でのプロファイルの読み込みの詳細については、[プロファイルを使用したノードのセットアップ](#setting-up-nodes-with-profiles)を参照してください。
+
+KubeletのバージョンがAppArmorサポートに対応しているもの(>= v1.4)である限り、Kubeletは必要条件を1つでも満たさないAppArmorオプションが付けられたPodをリジェクトします。また、ノード上のAppArmorのサポートは、次のようにready conditionのメッセージで確認することもできます(ただし、この機能は将来のリリースで削除される可能性があります)。
+
+```shell
+kubectl get nodes -o=jsonpath=$'{range .items[*]}{@.metadata.name}: {.status.conditions[?(@.reason=="KubeletReady")].message}\n{end}'
+```
+```
+gke-test-default-pool-239f5d02-gyn2: kubelet is posting ready status. AppArmor enabled
+gke-test-default-pool-239f5d02-x1kf: kubelet is posting ready status. AppArmor enabled
+gke-test-default-pool-239f5d02-xwux: kubelet is posting ready status. AppArmor enabled
+```
+
+
+
+## Podをセキュアにする
+
+{{< note >}}
+AppArmorは現在beta版であるため、オプションはアノテーションとして指定します。将来サポートが一般利用可能(GA)になれば、アノテーションは第1級のフィールドで置き換えられます(詳細については、[一般利用可能(General Availability)への更新パス](#upgrade-path-to-general-availability)を参照してください)。
+{{< /note >}}
+
+AppArmorのプロファイルは*各コンテナごとに*指定します。Podのコンテナで実行するAppArmorのプロファイルを指定するには、Podのメタデータに次のようなアノテーションを追加します。
+
+```yaml
+container.apparmor.security.beta.kubernetes.io/<container_name>: <profile_ref>
+```
+
+ここで、`<container_name>`はプロファイルを適用するコンテナの名前であり、`<profile_ref>`には適用するプロファイルを指定します。`profile_ref`は次の値のうち1つを指定します。
+
+* `runtime/default`: ランタイムのデフォルトのプロファイルを適用する
+* `localhost/<profile_name>`: `<profile_name>`という名前でホストにロードされたプロファイルを適用する
+* `unconfined`: いかなるプロファイルもロードされないことを示す
+
+アノテーションとプロファイルの名前のフォーマットの詳細については、[APIリファレンス](#api-reference)を参照してください。
+
+KubernetesのAppArmorの強制では、まずはじめにすべての前提条件が満たされているかどうかをチェックします。その後、強制を行うためにプロファイルの選択をコンテナランタイムに委ねます。前提条件が満たされなかった場合、Podはリジェクトされ、実行されません。
+
+プロファイルが適用されたかどうか確認するには、AppArmor securityオプションがコンテナ作成イベントに一覧されているかどうかを確認します。
+
+```shell
+kubectl get events | grep Created
+```
+```
+22s 22s 1 hello-apparmor Pod spec.containers{hello} Normal Created {kubelet e2e-test-stclair-node-pool-31nt} Created container with docker id 269a53b202d3; Security:[seccomp=unconfined apparmor=k8s-apparmor-example-deny-write]
+```
+
+proc attrを調べることで、コンテナのルートプロセスが正しいプロファイルで実行されているかどうかを直接確認することもできます。
+
+```shell
+kubectl exec <pod_name> cat /proc/1/attr/current
+```
+```
+k8s-apparmor-example-deny-write (enforce)
+```
+
+## 例 {#example}
+
+*この例は、クラスターがすでにAppArmorのサポート付きでセットアップ済みであることを前提としています。*
+
+まず、使用したいプロファイルをノード上に読み込む必要があります。このプロファイルは、すべてのファイル書き込みを拒否します。
+
+```shell
+#include <tunables/global>
+
+profile k8s-apparmor-example-deny-write flags=(attach_disconnected) {
+  #include <abstractions/base>
+
+  file,
+
+  # Deny all file writes.
+  deny /** w,
+}
+```
+
+Podがどのノードにスケジュールされるかは予測できないため、プロファイルはすべてのノードに読み込ませる必要があります。この例では、単純にSSHを使ってプロファイルをインストールしますが、[プロファイルを使用したノードのセットアップ](#setting-up-nodes-with-profiles)では、他のアプローチについて議論しています。
+
+```shell
+NODES=(
+    # SSHでアクセス可能なノードのドメイン名
+    gke-test-default-pool-239f5d02-gyn2.us-central1-a.my-k8s
+    gke-test-default-pool-239f5d02-x1kf.us-central1-a.my-k8s
+    gke-test-default-pool-239f5d02-xwux.us-central1-a.my-k8s)
+for NODE in ${NODES[*]}; do ssh $NODE 'sudo apparmor_parser -q <<EOF
+#include <tunables/global>
+
+profile k8s-apparmor-example-deny-write flags=(attach_disconnected) {
+  #include <abstractions/base>
+
+  file,
+
+  # Deny all file writes.
+  deny /** w,
+}
+EOF'
+done
+```
+
+次に、deny-writeプロファイルを使用した単純な "Hello AppArmor" Podを実行します。
+
+{{< codenew file="pods/security/hello-apparmor.yaml" >}}
+
+```shell
+kubectl create -f ./hello-apparmor.yaml
+```
+
+Podイベントを確認すると、PodコンテナがAppArmorプロファイル "k8s-apparmor-example-deny-write" を使用して作成されたことがわかります。
+
+```shell
+kubectl get events | grep hello-apparmor
+```
+```
+14s 14s 1 hello-apparmor Pod Normal Scheduled {default-scheduler } Successfully assigned hello-apparmor to gke-test-default-pool-239f5d02-gyn2
+14s 14s 1 hello-apparmor Pod spec.containers{hello} Normal Pulling {kubelet gke-test-default-pool-239f5d02-gyn2} pulling image "busybox"
+13s 13s 1 hello-apparmor Pod spec.containers{hello} Normal Pulled {kubelet gke-test-default-pool-239f5d02-gyn2} Successfully pulled image "busybox"
+13s 13s 1 hello-apparmor Pod spec.containers{hello} Normal Created {kubelet gke-test-default-pool-239f5d02-gyn2} Created container with docker id 06b6cd1c0989; Security:[seccomp=unconfined apparmor=k8s-apparmor-example-deny-write]
+13s 13s 1 hello-apparmor Pod spec.containers{hello} Normal Started {kubelet gke-test-default-pool-239f5d02-gyn2} Started container with docker id 06b6cd1c0989
+```
+
+コンテナがこのプロファイルで実際に実行されていることを確認するために、コンテナのproc attrをチェックします。
+
+```shell
+kubectl exec hello-apparmor cat /proc/1/attr/current
+```
+```
+k8s-apparmor-example-deny-write (enforce)
+```
+
+最後に、ファイルへの書き込みを行おうとすることで、プロファイルに違反すると何が起こるか見てみましょう。
+
+```shell
+kubectl exec hello-apparmor touch /tmp/test
+```
+```
+touch: /tmp/test: Permission denied
+error: error executing remote command: command terminated with non-zero exit code: Error executing in Docker Container: 1
+```
+
+まとめとして、読み込まれていないプロファイルを指定しようとするとどうなるのか見てみましょう。
+
+```shell
+kubectl create -f /dev/stdin <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: hello-apparmor-2
+  annotations:
+    container.apparmor.security.beta.kubernetes.io/hello: localhost/k8s-apparmor-example-allow-write
+spec:
+  containers:
+  - name: hello
+    image: busybox
+    command: [ "sh", "-c", "echo 'Hello AppArmor!' && sleep 1h" ]
+EOF
+pod/hello-apparmor-2 created
+```
+
+```shell
+kubectl describe pod hello-apparmor-2
+```
+```
+Name:          hello-apparmor-2
+Namespace:     default
+Annotations:   container.apparmor.security.beta.kubernetes.io/hello=localhost/k8s-apparmor-example-allow-write
+Status:        Pending
+Reason:        AppArmor
+Message:       Pod Cannot enforce AppArmor: profile "k8s-apparmor-example-allow-write" is not loaded
+IP:
+Controllers:   <none>
+Containers:
+  hello:
+    Container ID:
+    Image:         busybox
+    Image ID:
+    Port:
+    Command:
+      sh
+      -c
+      echo 'Hello AppArmor!' && sleep 1h
+    State:          Waiting
+      Reason:       Blocked
+    Ready:          False
+    Restart Count:  0
+    Environment:    <none>
+    Mounts:
+      /var/run/secrets/kubernetes.io/serviceaccount from default-token-dnz7v (ro)
+Conditions:
+  Type           Status
+  Initialized    True
+  Ready          False
+  PodScheduled   True
+Volumes:
+  default-token-dnz7v:
+    Type:        Secret (a volume populated by a Secret)
+    SecretName:  default-token-dnz7v
+    Optional:    false
+QoS Class:       BestEffort
+Node-Selectors:  <none>
+Tolerations:     <none>
+Events:
+  FirstSeen  LastSeen  Count  From  SubobjectPath  Type  Reason  Message
+  ---------  --------  -----  ----  -------------  ----  ------  -------
+  23s  23s  1  {default-scheduler }  Normal  Scheduled  Successfully assigned hello-apparmor-2 to e2e-test-stclair-node-pool-t1f5
+  23s  23s  1  {kubelet e2e-test-stclair-node-pool-t1f5}  Warning  AppArmor  Cannot enforce AppArmor: profile "k8s-apparmor-example-allow-write" is not loaded
+```
+
+PodのステータスはPendingとなり、`Pod Cannot enforce AppArmor: profile
+"k8s-apparmor-example-allow-write" is not loaded`(PodはAppArmorを強制できません: プロファイル "k8s-apparmor-example-allow-write" はロードされていません)という役に立つエラーメッセージが表示されています。同じメッセージのイベントも記録されています。
+
+## 管理
+
+### プロファイルを使用したノードのセットアップ {#setting-up-nodes-with-profiles}
+
+現在、KubernetesはAppArmorのプロファイルをノードに読み込むネイティブの仕組みは提供していません。しかし、プロファイルをセットアップする方法には、以下のように様々なものがあります。
+
+* 各ノード上に正しいプロファイルがロードされていることを保証するPodを実行する[DaemonSet](/ja/docs/concepts/workloads/controllers/daemonset/)を利用する方法。[ここ](https://git.k8s.io/kubernetes/test/images/apparmor-loader)に実装例があります。
+* ノードの初期化時に初期化スクリプト(例: Salt、Ansibleなど)や初期化イメージを使用する。
+* [例](#example)で示したような方法で、プロファイルを各ノードにコピーし、SSHで読み込む。
+
+スケジューラーはどのプロファイルがどのノードに読み込まれているのかがわからないため、すべてのプロファイルがすべてのノードに読み込まれていなければなりません。もう1つのアプローチとしては、各プロファイル(あるいはプロファイルのクラス)ごとにノードラベルを追加し、[node selector](/ja/docs/concepts/scheduling-eviction/assign-pod-node/)を用いてPodが必要なプロファイルを読み込んだノードで実行されるようにする方法もあります。
+
+### PodSecurityPolicyを使用したプロファイルの制限
+
+PodSecurityPolicy extensionが有効になっている場合、クラスター全体でAppArmorの制限が適用されます。PodSecurityPolicyを有効にするには、`apiserver`上で次のフラグを設定する必要があります。
+
+```
+--enable-admission-plugins=PodSecurityPolicy[,others...]
+```
+
+AppArmorのオプションはPodSecurityPolicy上でアノテーションとして指定します。
+
+```yaml
+apparmor.security.beta.kubernetes.io/defaultProfileName: <profile_ref>
+apparmor.security.beta.kubernetes.io/allowedProfileNames: <profile_ref>[,others...]
+```
+
+defaultProfileNameオプションには、何も指定されなかった場合にコンテナにデフォルトで適用されるプロファイルを指定します。allowedProfileNamesオプションには、Podコンテナの実行が許可されるプロファイルのリストを指定します。両方のオプションが指定された場合、デフォルトは許可されなければいけません。プロファイルはコンテナ上と同じフォーマットで指定します。完全な仕様については、[APIリファレンス](#api-reference)を参照してください。
+
+### AppArmorの無効化
+
+クラスター上でAppArmorを利用可能にしたくない場合、次のコマンドラインフラグで無効化できます。
+
+```
+--feature-gates=AppArmor=false
+```
+
+無効化すると、AppArmorプロファイルを含むPodは"Forbidden"エラーで検証に失敗します。ただし、デフォルトのdockerは非特権Pod上では"docker-default"というプロファイルを常に有効化し(AppArmorカーネルモジュールが有効である場合)、フィーチャーゲートで無効化したとしても有効化し続けることに注意してください。AppArmorを無効化するオプションは、AppArmorが一般利用可能(GA)になったときに削除される予定です。
+
+### AppArmorを使用するKubernetes v1.4にアップグレードする
+
+クラスターをv1.4にアップグレードするために、AppArmorに関する操作は必要ありません。ただし、既存のPodがAppArmorのアノテーションを持っている場合、検証(またはPodSecurityPolicy admission)は行われません。もしpermissiveなプロファイルがノードに読み込まれていた場合、悪意のあるユーザーがPodの権限を上述のdocker-defaultより昇格させるために、permissiveなプロファイルを再適用する恐れがあります。これが問題となる場合、`apparmor.security.beta.kubernetes.io`のアノテーションを含むすべてのPodをクラスターからクリーンアップすることを推奨します。
+
+### 一般利用可能(General Availability)への更新パス {#upgrade-path-to-general-availability}
+
+AppArmorが一般利用可能(GA)になったとき、現在アノテーションで指定しているオプションはフィールドに変換されます。移行中のすべてのアップグレードとダウングレードの経路をサポートするのは非常に微妙であるため、移行が必要になったときに詳細に説明する予定です。最低2リリースの間はフィールドとアノテーションの両方がサポートされるようにする予定です。最低2リリースの後は、アノテーションは明示的に拒否されるようになります。
+
+## Profilesの作成
+
+AppArmorのプロファイルを正しく指定するのはやっかいな作業です。幸い、その作業を補助するツールがいくつかあります。
+
+* `aa-genprof`および`aa-logprof`は、アプリケーションの動作とログを監視することによりプロファイルのルールを生成します。詳しい説明については、[AppArmor documentation](https://gitlab.com/apparmor/apparmor/wikis/Profiling_with_tools)を参照してください。
+* [bane](https://github.com/jfrazelle/bane)は、Docker向けのAppArmorのプロファイル・ジェネレータです。簡略化されたプロファイル言語を使用しています。
+
+プロファイルの生成には、アプリケーションを開発用ワークステーション上でDockerで実行することを推奨します。しかし、実際にPodが実行されるKubernetesノード上でツールを実行してはいけない理由はありません。
+
+AppArmorに関する問題をデバッグするには、システムログをチェックして、特に何が拒否されたのかを確認できます。AppArmorのログは`dmesg`にverboseメッセージを送り、エラーは通常システムログまたは`journalctl`で確認できます。詳しい情報は、[AppArmor failures](https://gitlab.com/apparmor/apparmor/wikis/AppArmor_Failures)で提供されています。
+
+## APIリファレンス {#api-reference}
+
+### Podアノテーション
+
+コンテナが実行するプロファイルを指定します。
+
+- **key**: `container.apparmor.security.beta.kubernetes.io/<container_name>`
+  ここで、`<container_name>`はPod内のコンテナの名前と一致させます。Pod内の各コンテナごとに別々のプロファイルを指定できます。
+- **value**: 下で説明するプロファイルのリファレンス
+
+### プロファイルのリファレンス
+
+- `runtime/default`: デフォルトのランタイムプロファイルを指します。
+  - (PodSecurityPolicyのデフォルトを設定せずに)プロファイルを指定しない場合と同等ですが、AppArmorを有効化する必要があります。
+  - Dockerの場合、非特権コンテナでは[`docker-default`](https://docs.docker.com/engine/security/apparmor/)プロファイルが選択され、特権コンテナではunconfined(プロファイルなし)が選択されます。
+- `localhost/<profile_name>`: 名前で指定されたノード(localhost)に読み込まれたプロファイルを指します。
+  - 利用できるプロファイル名の詳細は[core policy reference](https://gitlab.com/apparmor/apparmor/wikis/AppArmor_Core_Policy_Reference#profile-names-and-attachment-specifications)で説明されています。
+- `unconfined`: これは実質的にコンテナ上のAppArmorを無効化します。
+
+これ以外のプロファイルリファレンスはすべて無効です。
+
+### PodSecurityPolicyアノテーション
+
+何も指定されなかった場合にコンテナに適用するデフォルトのプロファイルは、以下のように指定します。
+
+* **key**: `apparmor.security.beta.kubernetes.io/defaultProfileName`
+* **value**: 上で説明したプロファイルのリファレンス
+
+Podコンテナが指定することを許可するプロファイルのリストは、以下のように指定します。
+
+* **key**: `apparmor.security.beta.kubernetes.io/allowedProfileNames`
+* **value**: カンマ区切りの上述のプロファイルリファレンスのリスト
+  - プロファイル名ではエスケープしたカンマは不正な文字ではありませんが、ここでは明示的に許可されません。
+
+## {{% heading "whatsnext" %}}
+
+追加のリソースとしては以下のものがあります。
+
+* [Quick guide to the AppArmor profile language](https://gitlab.com/apparmor/apparmor/wikis/QuickProfileLanguage)
+* [AppArmor core policy reference](https://gitlab.com/apparmor/apparmor/wikis/Policy_Layout)
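+なお、上記のPodSecurityPolicyアノテーションの使い方をイメージしやすくするために、最小限のスケッチを以下に示します(本文にはない補足例です)。ポリシー名`restricted-apparmor`や許可するプロファイルの一覧は説明用の仮の値であり、`spec`の各フィールドはクラスターの要件に合わせて調整する必要があります。
+
+```yaml
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: restricted-apparmor   # 説明用の仮の名前
+  annotations:
+    # 何も指定されなかった場合に適用されるデフォルトのプロファイル
+    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
+    # Podコンテナに指定を許可するプロファイルのカンマ区切りリスト
+    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default,localhost/k8s-apparmor-example-deny-write
+spec:
+  # 以下は最小限の例。実際には各フィールドを要件に合わせて設定すること。
+  privileged: false
+  seLinux:
+    rule: RunAsAny
+  runAsUser:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  fsGroup:
+    rule: RunAsAny
+  volumes:
+  - '*'
+```
diff --git 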
a/content/ja/docs/tutorials/hello-minikube.md b/content/ja/docs/tutorials/hello-minikube.md index 530e53ffd564d..35192a8ca9748 100644 --- a/content/ja/docs/tutorials/hello-minikube.md +++ b/content/ja/docs/tutorials/hello-minikube.md @@ -18,7 +18,7 @@ card: このチュートリアルでは、[Minikube](/ja/docs/setup/learning-environment/minikube)とKatacodaを使用して、Kubernetes上でサンプルアプリケーションを動かす方法を紹介します。Katacodaはブラウザで無償のKubernetes環境を提供します。 {{< note >}} -[Minikubeをローカルにインストール](/ja/docs/tasks/tools/install-minikube/)している場合もこのチュートリアルを進めることが可能です。 +[Minikubeをローカルにインストール](https://minikube.sigs.k8s.io/docs/start/)している場合もこのチュートリアルを進めることが可能です。 {{< /note >}} diff --git a/content/ja/docs/tutorials/kubernetes-basics/explore/explore-intro.html b/content/ja/docs/tutorials/kubernetes-basics/explore/explore-intro.html index 63b3d503b2029..47f9954a15ca4 100644 --- a/content/ja/docs/tutorials/kubernetes-basics/explore/explore-intro.html +++ b/content/ja/docs/tutorials/kubernetes-basics/explore/explore-intro.html @@ -29,7 +29,7 @@

    目標

    Kubernetes Pod

    - モジュール2でDeploymentを作成したときに、KubernetesはアプリケーションインスタンスをホストするためのPodを作成しました。Podは、1つ以上のアプリケーションコンテナ(Dockerやrktなど)のグループとそれらのコンテナの共有リソースを表すKubernetesの抽象概念です。 Podには以下のものが含まれます:
    + モジュール2でDeploymentを作成したときに、KubernetesはアプリケーションインスタンスをホストするためのPodを作成しました。Podは、1つ以上のアプリケーションコンテナ(Dockerなど)のグループとそれらのコンテナの共有リソースを表すKubernetesの抽象概念です。 Podには以下のものが含まれます:

    • 共有ストレージ(ボリューム)
    • ネットワーキング(クラスターに固有のIPアドレス)
    @@ -49,7 +49,7 @@

      まとめ:

    - Podは1つ以上のアプリケーションコンテナ(Dockerやrktなど)のグループであり、共有ストレージ(ボリューム)、IPアドレス、それらの実行方法に関する情報が含まれています。
    + Podは1つ以上のアプリケーションコンテナ(Dockerなど)のグループであり、共有ストレージ(ボリューム)、IPアドレス、それらの実行方法に関する情報が含まれています。
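    このまとめの内容を補足するための最小限のPodマニフェストの例を以下に示します(チュートリアル本文にはない補足で、名前やイメージは説明用の仮のものです)。1つのPod内の2つのコンテナが`emptyDir`ボリュームを共有しています。

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: shared-storage-pod   # 説明用の仮の名前
spec:
  volumes:
  - name: shared-data        # Pod内のコンテナ間で共有するボリューム
    emptyDir: {}
  containers:
  - name: app
    image: nginx
    volumeMounts:
    - name: shared-data
      mountPath: /usr/share/nginx/html
  - name: sidecar
    image: busybox
    command: ["sh", "-c", "echo hello > /pod-data/index.html && sleep 3600"]
    volumeMounts:
    - name: shared-data
      mountPath: /pod-data
```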

    @@ -77,7 +77,7 @@

    ノード

    すべてのKubernetesノードでは少なくとも以下のものが動作します。

    • Kubelet: Kubernetesマスターとノード間の通信を担当するプロセス。マシン上で実行されているPodとコンテナを管理します。
    - • レジストリからコンテナイメージを取得し、コンテナを解凍し、アプリケーションを実行することを担当する、Docker、rktのようなコンテナランタイム。
    + • レジストリからコンテナイメージを取得し、コンテナを解凍し、アプリケーションを実行することを担当する、Dockerのようなコンテナランタイム。
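    補足として、各ノードでどのコンテナランタイムが動作しているかは、次のようなコマンドで確認できます(チュートリアル本文にはない補足例で、出力の値はクラスターの環境によって異なります)。

```shell
# 各ノードの情報(CONTAINER-RUNTIME列を含む)を表示する
kubectl get nodes -o wide
```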
diff --git a/content/ja/docs/tutorials/services/source-ip.md b/content/ja/docs/tutorials/services/source-ip.md
index 05a54152a38cd..505ba8ef45ca9 100644
--- a/content/ja/docs/tutorials/services/source-ip.md
+++ b/content/ja/docs/tutorials/services/source-ip.md
@@ -272,7 +272,7 @@ graph TD;
 ## `Type=LoadBalancer`を使用したServiceでの送信元IP
 
-[`Type=LoadBalancer`](/ja/docs/concepts/services-networking/service/#loadbalancer)を使用したServiceに送られたパケットは、デフォルトでは送信元のNATは行われません。`Ready`状態にあるすべてのスケジュール可能なKubernetesのNodeは、ロードバランサーからのトラフィックを受付可能であるためです。そのため、エンドポイントが存在しないノードにパケットが到達した場合、システムはエンドポイントが*存在する*ノードにパケットをプロシキーします。このとき、(前のセクションで説明したように)パケットの送信元IPがノードのIPに置換されます。
+[`Type=LoadBalancer`](/ja/docs/concepts/services-networking/service/#loadbalancer)を使用したServiceに送られたパケットは、デフォルトで送信元のNATが行われます。`Ready`状態にあるすべてのスケジュール可能なKubernetesのNodeは、ロードバランサーからのトラフィックを受付可能であるためです。そのため、エンドポイントが存在しないノードにパケットが到達した場合、システムはエンドポイントが*存在する*ノードにパケットをプロキシーします。このとき、(前のセクションで説明したように)パケットの送信元IPがノードのIPに置換されます。
 
 ロードバランサー経由でsource-ip-appを公開することで、これをテストできます。
 
@@ -399,7 +399,7 @@ client_address=198.51.100.79
 Serviceを削除します。
 
 ```shell
-kubectl delete svc -l run=source-ip-app
+kubectl delete svc -l app=source-ip-app
 ```
 
 Deployment、ReplicaSet、Podを削除します。
diff --git a/content/ja/docs/tutorials/stateless-application/guestbook.md b/content/ja/docs/tutorials/stateless-application/guestbook.md
index e08abdb62cf3e..aff86a2289620 100644
--- a/content/ja/docs/tutorials/stateless-application/guestbook.md
+++ b/content/ja/docs/tutorials/stateless-application/guestbook.md
@@ -185,7 +185,7 @@ Deploymentはマニフェストファイル内に書かれた設定に基づい
 1. Podのリストを問い合わせて、3つのフロントエンドのレプリカが実行中になっていることを確認します。
 
    ```shell
-   kubectl get pods -l app=guestbook -l tier=frontend
+   kubectl get pods -l app.kubernetes.io/name=guestbook -l app.kubernetes.io/component=frontend
   ```
 
  結果は次のようになるはずです。
diff --git a/content/ja/examples/admin/cloud/ccm-example.yaml b/content/ja/examples/admin/cloud/ccm-example.yaml
index 4c98162a70db9..96b78331744c1 100644
--- a/content/ja/examples/admin/cloud/ccm-example.yaml
+++ b/content/ja/examples/admin/cloud/ccm-example.yaml
@@ -1,4 +1,4 @@
-# This is an example of how to setup cloud-controller-manger as a Daemonset in your cluster.
+# This is an example of how to setup cloud-controller-manager as a Daemonset in your cluster.
 # It assumes that your masters can run pods and has the role node-role.kubernetes.io/master
 # Note that this Daemonset will not work straight out of the box for your cloud, this is
 # meant to be a guideline.
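上のsource-ip.mdの修正に関連する補足として、`Type=LoadBalancer`のServiceでクライアントの送信元IPを保持したい場合は、`service.spec.externalTrafficPolicy`を`Local`に設定できます。以下は説明用の最小限のスケッチで、Service名やセレクターのラベルは仮のものです。

```yaml
apiVersion: v1
kind: Service
metadata:
  name: loadbalancer            # 説明用の仮の名前
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local  # エンドポイントが存在するノードにのみ転送し、送信元IPを保持する
  selector:
    app: source-ip-app
  ports:
  - port: 80
    targetPort: 8080
```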
diff --git a/content/ja/examples/application/job/cronjob.yaml b/content/ja/examples/application/job/cronjob.yaml index c9d38930278c3..2ce31233c3cd4 100644 --- a/content/ja/examples/application/job/cronjob.yaml +++ b/content/ja/examples/application/job/cronjob.yaml @@ -11,7 +11,7 @@ spec: containers: - name: hello image: busybox - args: + command: - /bin/sh - -c - date; echo Hello from the Kubernetes cluster diff --git a/content/ja/examples/application/php-apache.yaml b/content/ja/examples/application/php-apache.yaml new file mode 100644 index 0000000000000..e8e1b5aeb43e2 --- /dev/null +++ b/content/ja/examples/application/php-apache.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: php-apache +spec: + selector: + matchLabels: + run: php-apache + replicas: 1 + template: + metadata: + labels: + run: php-apache + spec: + containers: + - name: php-apache + image: k8s.gcr.io/hpa-example + ports: + - containerPort: 80 + resources: + limits: + cpu: 500m + requests: + cpu: 200m +--- +apiVersion: v1 +kind: Service +metadata: + name: php-apache + labels: + run: php-apache +spec: + ports: + - port: 80 + selector: + run: php-apache diff --git a/content/ja/examples/service/networking/example-ingress.yaml b/content/ja/examples/service/networking/example-ingress.yaml new file mode 100644 index 0000000000000..b309d13275105 --- /dev/null +++ b/content/ja/examples/service/networking/example-ingress.yaml @@ -0,0 +1,18 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: example-ingress + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$1 +spec: + rules: + - host: hello-world.info + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: web + port: + number: 8080 \ No newline at end of file diff --git a/content/ko/blog/_posts/2020-12-02-dont-panic-kubernetes-and-docker.md b/content/ko/blog/_posts/2020-12-02-dont-panic-kubernetes-and-docker.md index b7550a3d83fb4..d5405651871f4 100644 --- a/content/ko/blog/_posts/2020-12-02-dont-panic-kubernetes-and-docker.md +++ b/content/ko/blog/_posts/2020-12-02-dont-panic-kubernetes-and-docker.md @@ -5,7 +5,9 @@ date: 2020-12-02 slug: dont-panic-kubernetes-and-docker --- -**작성자:** Jorge Castro, Duffie Cooley, Kat Cosgrove, Justin Garrison, Noah Kantrowitz, Bob Killen, Rey Lejano, Dan “POP” Papandrea, Jeffrey Sica, Davanum “Dims” Srinivas +**저자:** Jorge Castro, Duffie Cooley, Kat Cosgrove, Justin Garrison, Noah Kantrowitz, Bob Killen, Rey Lejano, Dan “POP” Papandrea, Jeffrey Sica, Davanum “Dims” Srinivas + +**번역:** 박재화(삼성SDS), 손석호(한국전자통신연구원) 쿠버네티스는 v1.20 이후 컨테이너 런타임으로서 [도커를 diff --git a/content/ko/community/_index.html b/content/ko/community/_index.html index 39d17396ffc96..40bea7b523a36 100644 --- a/content/ko/community/_index.html +++ b/content/ko/community/_index.html @@ -19,6 +19,7 @@
+커뮤니티 가치
 행동 강령
 비디오
 토론
@@ -41,10 +42,28 @@
 쿠버네티스 컨퍼런스 갤러리
 쿠버네티스 컨퍼런스 갤러리
+커뮤니티 가치
+쿠버네티스 커뮤니티가 추구하는 가치는 프로젝트의 지속적인 성공의 핵심입니다.
+이러한 원칙은 쿠버네티스 프로젝트의 모든 측면을 이끌어 갑니다.
+더 읽어 보기
    diff --git a/content/ko/docs/concepts/architecture/cloud-controller.md b/content/ko/docs/concepts/architecture/cloud-controller.md index f7666ef0c3c6b..fe7fda364a4e6 100644 --- a/content/ko/docs/concepts/architecture/cloud-controller.md +++ b/content/ko/docs/concepts/architecture/cloud-controller.md @@ -206,6 +206,8 @@ rules: [클라우드 컨트롤러 매니저 관리](/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager)에는 클라우드 컨트롤러 매니저의 실행과 관리에 대한 지침이 있다. +클라우드 컨트롤러 매니저를 사용하기 위해 HA 컨트롤 플레인을 업그레이드하려면, [클라우드 컨트롤러 매니저를 사용하기 위해 복제된 컨트롤 플레인 마이그레이션 하기](/docs/tasks/administer-cluster/controller-manager-leader-migration/)를 참고한다. + 자체 클라우드 컨트롤러 매니저를 구현하거나 기존 프로젝트를 확장하는 방법을 알고 싶은가? 클라우드 컨트롤러 매니저는 Go 인터페이스를 사용해서 모든 클라우드 플러그인을 구현할 수 있다. 구체적으로, [kubernetes/cloud-provider](https://github.com/kubernetes/cloud-provider)의 [`cloud.go`](https://github.com/kubernetes/cloud-provider/blob/release-1.17/cloud.go#L42-L62)에 정의된 `CloudProvider` 인터페이스를 사용한다. diff --git a/content/ko/docs/concepts/architecture/control-plane-node-communication.md b/content/ko/docs/concepts/architecture/control-plane-node-communication.md index 36ece0d6d2249..52fa728043b2d 100644 --- a/content/ko/docs/concepts/architecture/control-plane-node-communication.md +++ b/content/ko/docs/concepts/architecture/control-plane-node-communication.md @@ -8,20 +8,21 @@ aliases: -이 문서는 컨트롤 플레인(실제로는 API 서버)과 쿠버네티스 클러스터 사이에 대한 통신 경로의 목록을 작성한다. 이는 사용자가 신뢰할 수 없는 네트워크(또는 클라우드 공급자의 완전한 퍼블릭 IP)에서 클러스터를 실행할 수 있도록 네트워크 구성을 강화하기 위한 맞춤 설치를 할 수 있도록 한다. +이 문서는 컨트롤 플레인(API 서버)과 쿠버네티스 클러스터 사이에 대한 통신 경로의 목록을 작성한다. 이는 사용자가 신뢰할 수 없는 네트워크(또는 클라우드 공급자의 완전한 퍼블릭 IP)에서 클러스터를 실행할 수 있도록 네트워크 구성을 강화하기 위한 맞춤 설치를 할 수 있도록 한다. ## 노드에서 컨트롤 플레인으로의 통신 -쿠버네티스에는 "허브 앤 스포크(hub-and-spoke)" API 패턴을 가지고 있다. 노드(또는 노드에서 실행되는 파드들)의 모든 API 사용은 API 서버에서 종료된다(다른 컨트롤 플레인 컴포넌트 중 어느 것도 원격 서비스를 노출하도록 설계되지 않았다). API 서버는 하나 이상의 클라이언트 [인증](/docs/reference/access-authn-authz/authentication/) 형식이 활성화된 보안 HTTPS 포트(일반적으로 443)에서 원격 연결을 수신하도록 구성된다. +쿠버네티스에는 "허브 앤 스포크(hub-and-spoke)" API 패턴을 가지고 있다. 노드(또는 노드에서 실행되는 파드들)의 모든 API 사용은 API 서버에서 종료된다. 다른 컨트롤 플레인 컴포넌트 중 어느 것도 원격 서비스를 노출하도록 설계되지 않았다. API 서버는 하나 이상의 클라이언트 [인증](/docs/reference/access-authn-authz/authentication/) 형식이 활성화된 보안 HTTPS 포트(일반적으로 443)에서 원격 연결을 수신하도록 구성된다. + 특히 [익명의 요청](/docs/reference/access-authn-authz/authentication/#anonymous-requests) 또는 [서비스 어카운트 토큰](/docs/reference/access-authn-authz/authentication/#service-account-tokens)이 허용되는 경우, 하나 이상의 [권한 부여](/ko/docs/reference/access-authn-authz/authorization/) 형식을 사용해야 한다. 노드는 유효한 클라이언트 자격 증명과 함께 API 서버에 안전하게 연결할 수 있도록 클러스터에 대한 공개 루트 인증서로 프로비전해야 한다. 예를 들어, 기본 GKE 배포에서, kubelet에 제공되는 클라이언트 자격 증명은 클라이언트 인증서 형식이다. kubelet 클라이언트 인증서의 자동 프로비저닝은 [kubelet TLS 부트스트랩](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/)을 참고한다. API 서버에 연결하려는 파드는 쿠버네티스가 공개 루트 인증서와 유효한 베어러 토큰(bearer token)을 파드가 인스턴스화될 때 파드에 자동으로 주입하도록 서비스 어카운트를 활용하여 안전하게 연결할 수 있다. -`kubernetes` 서비스(모든 네임스페이스의)는 API 서버의 HTTPS 엔드포인트로 리디렉션되는 가상 IP 주소(kube-proxy를 통해)로 구성되어 있다. +`kubernetes` 서비스(`default` 네임스페이스의)는 API 서버의 HTTPS 엔드포인트로 리디렉션되는 가상 IP 주소(kube-proxy를 통해)로 구성되어 있다. 컨트롤 플레인 컴포넌트는 보안 포트를 통해 클러스터 API 서버와도 통신한다. @@ -50,20 +51,20 @@ API 서버에서 kubelet으로의 연결은 다음의 용도로 사용된다. ### API 서버에서 노드, 파드 및 서비스로의 통신 -API 서버에서 노드, 파드 또는 서비스로의 연결은 기본적으로 일반 HTTP 연결로 연결되므로 인증되거나 암호화되지 않는다. API URL에서 노드, 파드 또는 서비스 이름을 접두어 `https:` 로 사용하여 보안 HTTPS 연결을 통해 실행될 수 있지만, HTTPS 엔드포인트가 제공한 인증서의 유효성을 검증하지 않거나 클라이언트 자격 증명을 제공하지 않으므로 연결이 암호화되는 동안 무결성을 보장하지 않는다. 이러한 연결은 신뢰할 수 없는 네트워크 및/또는 공용 네트워크에서 실행하기에 **현재는 안전하지 않다** . 
+API 서버에서 노드, 파드 또는 서비스로의 연결은 기본적으로 일반 HTTP 연결로 연결되므로 인증되거나 암호화되지 않는다. API URL에서 노드, 파드 또는 서비스 이름을 접두어 `https:` 로 사용하여 보안 HTTPS 연결을 통해 실행될 수 있지만, HTTPS 엔드포인트가 제공한 인증서의 유효성을 검증하지 않거나 클라이언트 자격 증명을 제공하지 않는다. 그래서 연결이 암호화되는 동안 무결성을 보장하지 않는다. 이러한 연결은 신뢰할 수 없는 네트워크 및/또는 공용 네트워크에서 실행하기에 **현재는 안전하지 않다** . ### SSH 터널 쿠버네티스는 SSH 터널을 지원하여 컨트롤 플레인에서 노드로의 통신 경로를 보호한다. 이 구성에서, API 서버는 클러스터의 각 노드에 SSH 터널을 시작하고(포트 22에서 수신 대기하는 ssh 서버에 연결) 터널을 통해 kubelet, 노드, 파드 또는 서비스로 향하는 모든 트래픽을 전달한다. 이 터널은 트래픽이 노드가 실행 중인 네트워크 외부에 노출되지 않도록 한다. -SSH 터널은 현재 더 이상 사용되지 않으므로 수행 중인 작업이 어떤 것인지 모른다면 사용하면 안된다. Konnectivity 서비스는 이 통신 채널을 대체한다. +SSH 터널은 현재 더 이상 사용되지 않으므로, 수행 중인 작업이 어떤 것인지 모른다면 사용하면 안된다. Konnectivity 서비스는 이 통신 채널을 대체한다. ### Konnectivity 서비스 {{< feature-state for_k8s_version="v1.18" state="beta" >}} -SSH 터널을 대체하는 Konnectivity 서비스는 컨트롤 플레인에서 클러스터 통신에 TCP 레벨 프록시를 제공한다. Konnectivity 서비스는 컨트롤 플레인 네트워크와 노드 네트워크에서 각각 실행되는 Konnectivity 서버와 Konnectivity 에이전트의 두 부분으로 구성된다. Konnectivity 에이전트는 Konnectivity 서버에 대한 연결을 시작하고 네트워크 연결을 유지한다. +SSH 터널을 대체하는 Konnectivity 서비스는 컨트롤 플레인에서 클러스터 통신에 TCP 레벨 프록시를 제공한다. Konnectivity 서비스는 컨트롤 플레인 네트워크의 Konnectivity 서버와 노드 네트워크의 Konnectivity 에이전트, 두 부분으로 구성된다. Konnectivity 에이전트는 Konnectivity 서버에 대한 연결을 시작하고 네트워크 연결을 유지한다. Konnectivity 서비스를 활성화한 후, 모든 컨트롤 플레인에서 노드로의 트래픽은 이 연결을 통과한다. [Konnectivity 서비스 태스크](/docs/tasks/extend-kubernetes/setup-konnectivity/)에 따라 클러스터에서 Konnectivity 서비스를 설정한다. diff --git a/content/ko/docs/concepts/architecture/controller.md b/content/ko/docs/concepts/architecture/controller.md index 784ad2ac58307..e516dd9cc5351 100644 --- a/content/ko/docs/concepts/architecture/controller.md +++ b/content/ko/docs/concepts/architecture/controller.md @@ -102,7 +102,7 @@ weight: 30 온도 조절기 예에서 방이 매우 추우면 다른 컨트롤러가 서리 방지 히터를 켤 수도 있다. 쿠버네티스 클러스터에서는 [쿠버네티스 확장](/ko/docs/concepts/extend-kubernetes/)을 통해 -IP 주소 관리 도구, 스토리지 서비스, 클라우드 제공자 APIS 및 +IP 주소 관리 도구, 스토리지 서비스, 클라우드 제공자의 API 및 기타 서비스 등과 간접적으로 연동하여 이를 구현한다. ## 의도한 상태와 현재 상태 {#desired-vs-current} diff --git a/content/ko/docs/concepts/architecture/nodes.md b/content/ko/docs/concepts/architecture/nodes.md index 6872096814788..291b7e82ce6b2 100644 --- a/content/ko/docs/concepts/architecture/nodes.md +++ b/content/ko/docs/concepts/architecture/nodes.md @@ -7,10 +7,10 @@ weight: 10 쿠버네티스는 컨테이너를 파드내에 배치하고 _노드_ 에서 실행함으로 워크로드를 구동한다. -노드는 클러스터에 따라 가상 또는 물리적 머신일 수 있다. 각 노드에는 -{{< glossary_tooltip text="컨트롤 플레인" term_id="control-plane" >}}이라는 +노드는 클러스터에 따라 가상 또는 물리적 머신일 수 있다. 각 노드는 +{{< glossary_tooltip text="컨트롤 플레인" term_id="control-plane" >}}에 의해 관리되며 {{< glossary_tooltip text="파드" term_id="pod" >}}를 -실행하는데 필요한 서비스가 포함되어 있다. +실행하는 데 필요한 서비스를 포함한다. 일반적으로 클러스터에는 여러 개의 노드가 있으며, 학습 또는 리소스가 제한되는 환경에서는 하나만 있을 수도 있다. @@ -27,7 +27,7 @@ weight: 10 {{< glossary_tooltip text="API 서버" term_id="kube-apiserver" >}}에 노드를 추가하는 두가지 주요 방법이 있다. 1. 노드의 kubelet으로 컨트롤 플레인에 자체 등록 -2. 사용자 또는 다른 사용자가 노드 오브젝트를 수동으로 추가 +2. 사용자(또는 다른 사용자)가 노드 오브젝트를 수동으로 추가 노드 오브젝트 또는 노드의 kubelet으로 자체 등록한 후 컨트롤 플레인은 새 노드 오브젝트가 유효한지 확인한다. @@ -48,8 +48,8 @@ weight: 10 쿠버네티스는 내부적으로 노드 오브젝트를 생성한다(표시한다). 쿠버네티스는 kubelet이 노드의 `metadata.name` 필드와 일치하는 API 서버에 등록이 되어있는지 확인한다. -노드가 정상이면(필요한 모든 서비스가 실행중인 경우) 파드를 실행할 수 있게 된다. -그렇지 않으면, 해당 노드는 정상이 될때까지 모든 클러스터 활동에 +노드가 정상이면(예를 들어 필요한 모든 서비스가 실행중인 경우) 파드를 실행할 수 있게 된다. +그렇지 않으면, 해당 노드는 정상이 될 때까지 모든 클러스터 활동에 대해 무시된다. {{< note >}} @@ -57,12 +57,22 @@ kubelet이 노드의 `metadata.name` 필드와 일치하는 API 서버에 등록 정상적인지 확인한다. 상태 확인을 중지하려면 사용자 또는 {{< glossary_tooltip term_id="controller" text="컨트롤러">}}에서 -노드 오브젝트를 명시적으로 삭제해야한다. +노드 오브젝트를 명시적으로 삭제해야 한다. 
{{< /note >}} 노드 오브젝트의 이름은 유효한 [DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. +### 노드 이름 고유성 + +[이름](/ko/docs/concepts/overview/working-with-objects/names#names)은 노드를 식별한다. 두 노드는 +동시에 같은 이름을 가질 수 없다. 쿠버네티스는 또한 같은 이름의 리소스가 +동일한 객체라고 가정한다. 노드의 경우, 동일한 이름을 사용하는 인스턴스가 동일한 +상태(예: 네트워크 설정, 루트 디스크 내용)를 갖는다고 암시적으로 가정한다. 인스턴스가 +이름을 변경하지 않고 수정된 경우 이로 인해 불일치가 발생할 수 있다. 노드를 대폭 교체하거나 +업데이트해야 하는 경우, 기존 노드 오브젝트를 먼저 API 서버에서 제거하고 +업데이트 후 다시 추가해야 한다. + ### 노드에 대한 자체-등록 kubelet 플래그 `--register-node`는 참(기본값)일 경우, kubelet 은 API 서버에 @@ -226,13 +236,15 @@ apiserver로부터 삭제되어 그 이름을 사용할 수 있는 결과를 낳 노드 컨트롤러는 노드 리스트로부터 그 노드를 삭제한다. 세 번째는 노드의 동작 상태를 모니터링 하는 것이다. 노드 컨트롤러는 -노드가 접근 불가할 경우 (즉 노드 컨트롤러가 어떠한 사유로 하트비트 -수신을 중지하는 경우, 예를 들어 노드 다운과 같은 경우이다.) -NodeStatus의 NodeReady 컨디션을 ConditionUnknown으로 업데이트 하는 책임을 지고, -노드가 계속 접근 불가할 경우 나중에 노드로부터 (정상적인 종료를 이용하여) 모든 파드를 축출시킨다. -(ConditionUnknown을 알리기 시작하는 기본 타임아웃 값은 40초 이고, -파드를 축출하기 시작하는 값은 5분이다.) 노드 컨트롤러는 -매 `--node-monitor-period` 초 마다 각 노드의 상태를 체크한다. +다음을 담당한다. +- 노드 다운과 같은 어떤 이유로 노드 컨트롤러가 + 하트비트 수신이 중단되는 경우 NodeStatus의 NodeReady + 컨디션을 ConditionUnknown으로 업데이트 한다. +- 노드가 계속 접근 불가할 경우 나중에 노드로부터 정상적인 종료를 이용해서 모든 파드를 축출 한다. + ConditionUnknown을 알리기 시작하는 기본 타임아웃 값은 40초 이고, + 파드를 축출하기 시작하는 값은 5분이다. + +노드 컨트롤러는 매 `--node-monitor-period` 초 마다 각 노드의 상태를 체크한다. #### 하트비트 @@ -250,11 +262,12 @@ kubelet은 `NodeStatus` 와 리스 오브젝트를 생성하고 업데이트 할 - kubelet은 상태가 변경되거나 구성된 상태에 대한 업데이트가 없는 경우, `NodeStatus` 를 업데이트 한다. `NodeStatus` 의 기본 업데이트 - 주기는 5분이다(연결할 수 없는 노드의 시간 제한인 40초 - 보다 훨씬 길다). + 주기는 5분으로, 연결할 수 없는 노드의 시간 제한인 40초 + 보다 훨씬 길다. - kubelet은 10초마다 리스 오브젝트를 생성하고 업데이트 한다(기본 업데이트 주기). 리스 업데이트는 `NodeStatus` 업데이트와는 - 독립적으로 발생한다. 리스 업데이트가 실패하면 kubelet에 의해 재시도하며 7초로 제한된 지수 백오프를 200 밀리초에서 부터 시작한다. + 독립적으로 발생한다. 리스 업데이트가 실패하면 kubelet에 의해 재시도하며 + 7초로 제한된 지수 백오프를 200 밀리초에서 부터 시작한다. #### 안정성 @@ -264,13 +277,14 @@ kubelet은 `NodeStatus` 와 리스 오브젝트를 생성하고 업데이트 할 노드 축출 행위는 주어진 가용성 영역 내 하나의 노드가 상태가 불량할 경우 변화한다. 노드 컨트롤러는 영역 내 동시에 상태가 불량한 노드의 퍼센티지가 얼마나 되는지 -체크한다(NodeReady 컨디션은 ConditionUnknown 또는 ConditionFalse 다.). -상태가 불량한 노드의 일부가 최소 -`--unhealthy-zone-threshold` 기본값 0.55) 가 -되면 축출 비율은 감소한다. 클러스터가 작으면 (즉 -`--large-cluster-size-threshold` 노드 이하면 - 기본값 50) 축출은 중지되고, -그렇지 않으면 축출 비율은 초당 -`--secondary-node-eviction-rate`(기본값 0.01)로 감소된다. +체크한다(NodeReady 컨디션은 ConditionUnknown 또는 +ConditionFalse 다.). +- 상태가 불량한 노드의 일부가 최소 `--unhealthy-zone-threshold` + (기본값 0.55)가 되면 축출 비율은 감소한다. +- 클러스터가 작으면 (즉 `--large-cluster-size-threshold` + 노드 이하면 - 기본값 50) 축출은 중지되고, 그렇지 않으면 축출 비율은 초당 + `--secondary-node-eviction-rate`(기본값 0.01)로 감소된다. + 이 정책들이 가용성 영역 단위로 실행되어지는 이유는 나머지가 연결되어 있는 동안 하나의 가용성 영역이 마스터로부터 분할되어 질 수도 있기 때문이다. 만약 클러스터가 여러 클라우드 제공사업자의 가용성 영역에 걸쳐 있지 않으면, @@ -299,8 +313,8 @@ kubelet은 `NodeStatus` 와 리스 오브젝트를 생성하고 업데이트 할 ### 노드 용량 -노드 오브젝트는 노드 리소스 용량에 대한 정보(예: 사용 가능한 메모리의 -양과 CPU의 수)를 추적한다. +노드 오브젝트는 노드 리소스 용량에 대한 정보: 예를 들어, 사용 가능한 메모리의 +양과 CPU의 수를 추적한다. 노드의 [자체 등록](#노드에-대한-자체-등록)은 등록하는 중에 용량을 보고한다. [수동](#수동-노드-관리)으로 노드를 추가하는 경우 추가할 때 노드의 용량 정보를 설정해야 한다. @@ -327,14 +341,27 @@ kubelet은 `NodeStatus` 와 리스 오브젝트를 생성하고 업데이트 할 자세한 내용은 [노드의 컨트롤 토폴로지 관리 정책](/docs/tasks/administer-cluster/topology-manager/)을 본다. -## 그레이스풀(Graceful) 노드 셧다운 +## 그레이스풀(Graceful) 노드 셧다운 {#graceful-node-shutdown} -{{< feature-state state="alpha" for_k8s_version="v1.20" >}} +{{< feature-state state="beta" for_k8s_version="v1.21" >}} + +kubelet은 노드 시스템 셧다운을 감지하고 노드에서 실행 중인 파드를 종료하려고 시도한다. 
-`GracefulNodeShutdown` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화한 경우 kubelet은 노드 시스템 종료를 감지하고 노드에서 실행 중인 파드를 종료한다. Kubelet은 노드가 종료되는 동안 파드가 일반 [파드 종료 프로세스](/ko/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination)를 따르도록 한다. -`GracefulNodeShutdown` 기능 게이트가 활성화되면 kubelet은 [systemd inhibitor locks](https://www.freedesktop.org/wiki/Software/systemd/inhibit/)를 사용하여 주어진 기간 동안 노드 종료를 지연시킨다. 종료 중에 kubelet은 두 단계로 파드를 종료시킨다. +그레이스풀 노드 셧다운 기능은 +[systemd inhibitor locks](https://www.freedesktop.org/wiki/Software/systemd/inhibit/)를 +사용하여 주어진 기간 동안 노드 종료를 지연시키므로 systemd에 의존한다. + +그레이스풀 노드 셧다운은 1.21에서 기본적으로 활성화된 `GracefulNodeShutdown` +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)로 제어된다. + +기본적으로, 아래 설명된 두 구성 옵션, +`ShutdownGracePeriod` 및 `ShutdownGracePeriodCriticalPods` 는 모두 0으로 설정되어 있으므로, +그레이스풀 노드 셧다운 기능이 활성화되지 않는다. +기능을 활성화하려면, 두 개의 kubelet 구성 설정을 적절하게 구성하고 0이 아닌 값으로 설정해야 한다. + +그레이스풀 셧다운 중에 kubelet은 다음의 두 단계로 파드를 종료한다. 1. 노드에서 실행 중인 일반 파드를 종료시킨다. 2. 노드에서 실행 중인 [중요(critical) 파드](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)를 종료시킨다. @@ -343,9 +370,13 @@ Kubelet은 노드가 종료되는 동안 파드가 일반 [파드 종료 프로 * `ShutdownGracePeriod`: * 노드가 종료를 지연해야 하는 총 기간을 지정한다. 이것은 모든 일반 및 [중요 파드](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)의 파드 종료에 필요한 총 유예 기간에 해당한다. * `ShutdownGracePeriodCriticalPods`: - * 노드 종료 중에 [중요 파드](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)를 종료하는 데 사용되는 기간을 지정한다. 이는 `ShutdownGracePeriod`보다 작아야 한다. + * 노드 종료 중에 [중요 파드](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)를 종료하는 데 사용되는 기간을 지정한다. 이 값은 `ShutdownGracePeriod` 보다 작아야 한다. -예를 들어 `ShutdownGracePeriod=30s`, `ShutdownGracePeriodCriticalPods=10s` 인 경우 kubelet은 노드 종료를 30 초까지 지연시킨다. 종료하는 동안 처음 20(30-10) 초는 일반 파드의 유예 종료에 할당되고, 마지막 10 초는 [중요 파드](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)의 종료에 할당된다. +예를 들어, `ShutdownGracePeriod=30s`, +`ShutdownGracePeriodCriticalPods=10s` 인 경우, kubelet은 노드 종료를 30초까지 +지연시킨다. 종료하는 동안 처음 20(30-10)초는 일반 파드의 +유예 종료에 할당되고, 마지막 10초는 +[중요 파드](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)의 종료에 할당된다. ## {{% heading "whatsnext" %}} diff --git a/content/ko/docs/concepts/cluster-administration/_index.md b/content/ko/docs/concepts/cluster-administration/_index.md index 1442a5da01475..987070459600d 100755 --- a/content/ko/docs/concepts/cluster-administration/_index.md +++ b/content/ko/docs/concepts/cluster-administration/_index.md @@ -1,5 +1,8 @@ --- title: 클러스터 관리 + + + weight: 100 content_type: concept description: > @@ -11,6 +14,7 @@ no_list: true 클러스터 관리 개요는 쿠버네티스 클러스터를 생성하거나 관리하는 모든 사람들을 위한 것이다. 핵심 쿠버네티스 [개념](/ko/docs/concepts/)에 어느 정도 익숙하다고 가정한다. + ## 클러스터 계획 @@ -22,12 +26,12 @@ no_list: true 가이드를 선택하기 전에 고려해야 할 사항은 다음과 같다. - - 컴퓨터에서 쿠버네티스를 그냥 한번 사용해보고 싶은가? 아니면, 고가용 멀티 노드 클러스터를 만들고 싶은가? 사용자의 필요에 따라 가장 적합한 배포판을 선택한다. + - 컴퓨터에서 쿠버네티스를 한번 사용해보고 싶은가? 아니면, 고가용 멀티 노드 클러스터를 만들고 싶은가? 사용자의 필요에 따라 가장 적합한 배포판을 선택한다. - [구글 쿠버네티스 엔진(Google Kubernetes Engine)](https://cloud.google.com/kubernetes-engine/)과 같은 클라우드 제공자의 **쿠버네티스 클러스터 호스팅** 을 사용할 것인가? 아니면, **자체 클러스터를 호스팅** 할 것인가? - 클러스터가 **온-프레미스 환경** 에 있나? 아니면, **클라우드(IaaS)** 에 있나? 쿠버네티스는 하이브리드 클러스터를 직접 지원하지는 않는다. 대신 여러 클러스터를 설정할 수 있다. 
- **온-프레미스 환경에 쿠버네티스** 를 구성하는 경우, 어떤 [네트워킹 모델](/ko/docs/concepts/cluster-administration/networking/)이 가장 적합한 지 고려한다. - 쿠버네티스를 **"베어 메탈" 하드웨어** 에서 실행할 것인가? 아니면, **가상 머신(VM)** 에서 실행할 것인가? - - **단지 클러스터만 실행할 것인가?** 아니면, **쿠버네티스 프로젝트 코드를 적극적으로 개발** 하는 것을 기대하는가? 만약 + - **클러스터만 실행할 것인가?** 아니면, **쿠버네티스 프로젝트 코드를 적극적으로 개발** 하는 것을 기대하는가? 만약 후자라면, 활발하게 개발이 진행되고 있는 배포판을 선택한다. 일부 배포판은 바이너리 릴리스만 사용하지만, 더 다양한 선택을 제공한다. - 클러스터를 실행하는 데 필요한 [컴포넌트](/ko/docs/concepts/overview/components/)에 익숙해지자. @@ -41,7 +45,7 @@ no_list: true ## 클러스터 보안 -* [인증서](/ko/docs/concepts/cluster-administration/certificates/)는 다른 툴 체인을 사용하여 인증서를 생성하는 단계를 설명한다. +* [인증서 생성](/ko/docs/tasks/administer-cluster/certificates/)는 다른 툴 체인을 사용하여 인증서를 생성하는 단계를 설명한다. * [쿠버네티스 컨테이너 환경](/ko/docs/concepts/containers/container-environment/)은 쿠버네티스 노드에서 Kubelet으로 관리하는 컨테이너에 대한 환경을 설명한다. diff --git a/content/ko/docs/concepts/cluster-administration/addons.md b/content/ko/docs/concepts/cluster-administration/addons.md index 811f77362632d..7e67dd604f769 100644 --- a/content/ko/docs/concepts/cluster-administration/addons.md +++ b/content/ko/docs/concepts/cluster-administration/addons.md @@ -16,6 +16,7 @@ content_type: concept ## 네트워킹과 네트워크 폴리시 * [ACI](https://www.github.com/noironetworks/aci-containers)는 Cisco ACI로 통합 컨테이너 네트워킹 및 네트워크 보안을 제공한다. +* [Antrea](https://antrea.io/)는 레이어 3/4에서 작동하여 쿠버네티스를 위한 네트워킹 및 보안 서비스를 제공하며, Open vSwitch를 네트워킹 데이터 플레인으로 활용한다. * [Calico](https://docs.projectcalico.org/latest/introduction/)는 네트워킹 및 네트워크 폴리시 제공자이다. Calico는 유연한 네트워킹 옵션을 지원하므로 BGP 유무에 관계없이 비-오버레이 및 오버레이 네트워크를 포함하여 가장 상황에 맞는 옵션을 선택할 수 있다. Calico는 동일한 엔진을 사용하여 서비스 메시 계층(service mesh layer)에서 호스트, 파드 및 (이스티오(istio)와 Envoy를 사용하는 경우) 애플리케이션에 대한 네트워크 폴리시를 적용한다. * [Canal](https://github.com/tigera/canal/tree/master/k8s-install)은 Flannel과 Calico를 통합하여 네트워킹 및 네트워크 폴리시를 제공한다. * [Cilium](https://github.com/cilium/cilium)은 L3 네트워크 및 네트워크 폴리시 플러그인으로 HTTP/API/L7 폴리시를 투명하게 시행할 수 있다. 라우팅 및 오버레이/캡슐화 모드를 모두 지원하며, 다른 CNI 플러그인 위에서 작동할 수 있다. diff --git a/content/ko/docs/concepts/cluster-administration/certificates.md b/content/ko/docs/concepts/cluster-administration/certificates.md index 7b71b9c3444b9..5acb75ea80675 100644 --- a/content/ko/docs/concepts/cluster-administration/certificates.md +++ b/content/ko/docs/concepts/cluster-administration/certificates.md @@ -4,247 +4,6 @@ content_type: concept weight: 20 --- - -클라이언트 인증서로 인증을 사용하는 경우 `easyrsa`, `openssl` 또는 `cfssl` -을 통해 인증서를 수동으로 생성할 수 있다. - - - - - - -### easyrsa - -**easyrsa** 는 클러스터 인증서를 수동으로 생성할 수 있다. - -1. easyrsa3의 패치 버전을 다운로드하여 압축을 풀고, 초기화한다. - - curl -LO https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz - tar xzf easy-rsa.tar.gz - cd easy-rsa-master/easyrsa3 - ./easyrsa init-pki -1. 새로운 인증 기관(CA)을 생성한다. `--batch` 는 자동 모드를 설정한다. - `--req-cn` 는 CA의 새 루트 인증서에 대한 일반 이름(Common Name (CN))을 지정한다. - - ./easyrsa --batch "--req-cn=${MASTER_IP}@`date +%s`" build-ca nopass -1. 서버 인증서와 키를 생성한다. - `--subject-alt-name` 인수는 API 서버에 접근이 가능한 IP와 DNS - 이름을 설정한다. `MASTER_CLUSTER_IP` 는 일반적으로 API 서버와 - 컨트롤러 관리자 컴포넌트에 대해 `--service-cluster-ip-range` 인수로 - 지정된 서비스 CIDR의 첫 번째 IP이다. `--days` 인수는 인증서가 만료되는 - 일 수를 설정하는데 사용된다. - 또한, 아래 샘플은 기본 DNS 이름으로 `cluster.local` 을 - 사용한다고 가정한다. - - ./easyrsa --subject-alt-name="IP:${MASTER_IP},"\ - "IP:${MASTER_CLUSTER_IP},"\ - "DNS:kubernetes,"\ - "DNS:kubernetes.default,"\ - "DNS:kubernetes.default.svc,"\ - "DNS:kubernetes.default.svc.cluster,"\ - "DNS:kubernetes.default.svc.cluster.local" \ - --days=10000 \ - build-server-full server nopass -1. 
`pki/ca.crt`, `pki/issued/server.crt` 그리고 `pki/private/server.key` 를 디렉터리에 복사한다. -1. API 서버 시작 파라미터에 다음 파라미터를 채우고 추가한다. - - --client-ca-file=/yourdirectory/ca.crt - --tls-cert-file=/yourdirectory/server.crt - --tls-private-key-file=/yourdirectory/server.key - -### openssl - -**openssl** 은 클러스터 인증서를 수동으로 생성할 수 있다. - -1. ca.key를 2048bit로 생성한다. - - openssl genrsa -out ca.key 2048 -1. ca.key에 따라 ca.crt를 생성한다(인증서 유효 기간을 사용하려면 -days를 사용한다). - - openssl req -x509 -new -nodes -key ca.key -subj "/CN=${MASTER_IP}" -days 10000 -out ca.crt -1. server.key를 2048bit로 생성한다. - - openssl genrsa -out server.key 2048 -1. 인증서 서명 요청(Certificate Signing Request (CSR))을 생성하기 위한 설정 파일을 생성한다. - 파일에 저장하기 전에 꺾쇠 괄호(예: ``)로 - 표시된 값을 실제 값으로 대체한다(예: `csr.conf`). - `MASTER_CLUSTER_IP` 의 값은 이전 하위 섹션에서 - 설명한 대로 API 서버의 서비스 클러스터 IP이다. - 또한, 아래 샘플에서는 `cluster.local` 을 기본 DNS 도메인 - 이름으로 사용하고 있다고 가정한다. - - [ req ] - default_bits = 2048 - prompt = no - default_md = sha256 - req_extensions = req_ext - distinguished_name = dn - - [ dn ] - C = <국가(country)> - ST = <도(state)> - L = <시(city)> - O = <조직(organization)> - OU = <조직 단위(organization unit)> - CN = - - [ req_ext ] - subjectAltName = @alt_names - - [ alt_names ] - DNS.1 = kubernetes - DNS.2 = kubernetes.default - DNS.3 = kubernetes.default.svc - DNS.4 = kubernetes.default.svc.cluster - DNS.5 = kubernetes.default.svc.cluster.local - IP.1 = - IP.2 = - - [ v3_ext ] - authorityKeyIdentifier=keyid,issuer:always - basicConstraints=CA:FALSE - keyUsage=keyEncipherment,dataEncipherment - extendedKeyUsage=serverAuth,clientAuth - subjectAltName=@alt_names -1. 설정 파일을 기반으로 인증서 서명 요청을 생성한다. - - openssl req -new -key server.key -out server.csr -config csr.conf -1. ca.key, ca.crt 그리고 server.csr을 사용해서 서버 인증서를 생성한다. - - openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key \ - -CAcreateserial -out server.crt -days 10000 \ - -extensions v3_ext -extfile csr.conf -1. 인증서를 본다. - - openssl x509 -noout -text -in ./server.crt - -마지막으로, API 서버 시작 파라미터에 동일한 파라미터를 추가한다. - -### cfssl - -**cfssl** 은 인증서 생성을 위한 또 다른 도구이다. - -1. 아래에 표시된 대로 커맨드 라인 도구를 다운로드하여 압축을 풀고 준비한다. - 사용 중인 하드웨어 아키텍처 및 cfssl 버전에 따라 샘플 - 명령을 조정해야 할 수도 있다. - - curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssl_1.5.0_linux_amd64 -o cfssl - chmod +x cfssl - curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssljson_1.5.0_linux_amd64 -o cfssljson - chmod +x cfssljson - curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssl-certinfo_1.5.0_linux_amd64 -o cfssl-certinfo - chmod +x cfssl-certinfo -1. 아티팩트(artifact)를 보유할 디렉터리를 생성하고 cfssl을 초기화한다. - - mkdir cert - cd cert - ../cfssl print-defaults config > config.json - ../cfssl print-defaults csr > csr.json -1. CA 파일을 생성하기 위한 JSON 설정 파일을 `ca-config.json` 예시와 같이 생성한다. - - { - "signing": { - "default": { - "expiry": "8760h" - }, - "profiles": { - "kubernetes": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "8760h" - } - } - } - } -1. CA 인증서 서명 요청(CSR)을 위한 JSON 설정 파일을 - `ca-csr.json` 예시와 같이 생성한다. 꺾쇠 괄호로 표시된 - 값을 사용하려는 실제 값으로 변경한다. - - { - "CN": "kubernetes", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names":[{ - "C": "<국가(country)>", - "ST": "<도(state)>", - "L": "<시(city)>", - "O": "<조직(organization)>", - "OU": "<조직 단위(organization unit)>" - }] - } -1. CA 키(`ca-key.pem`)와 인증서(`ca.pem`)을 생성한다. - - ../cfssl gencert -initca ca-csr.json | ../cfssljson -bare ca -1. API 서버의 키와 인증서를 생성하기 위한 JSON 구성파일을 - `server-csr.json` 예시와 같이 생성한다. 
꺾쇠 괄호 안의 값을 - 사용하려는 실제 값으로 변경한다. `MASTER_CLUSTER_IP` 는 - 이전 하위 섹션에서 설명한 API 서버의 클러스터 IP이다. - 아래 샘플은 기본 DNS 도메인 이름으로 `cluster.local` 을 - 사용한다고 가정한다. - - { - "CN": "kubernetes", - "hosts": [ - "127.0.0.1", - "", - "", - "kubernetes", - "kubernetes.default", - "kubernetes.default.svc", - "kubernetes.default.svc.cluster", - "kubernetes.default.svc.cluster.local" - ], - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [{ - "C": "<국가(country)>", - "ST": "<도(state)>", - "L": "<시(city)>", - "O": "<조직(organization)>", - "OU": "<조직 단위(organization unit)>" - }] - } -1. API 서버 키와 인증서를 생성하면, 기본적으로 - `server-key.pem` 과 `server.pem` 파일에 각각 저장된다. - - ../cfssl gencert -ca=ca.pem -ca-key=ca-key.pem \ - --config=ca-config.json -profile=kubernetes \ - server-csr.json | ../cfssljson -bare server - - -## 자체 서명된 CA 인증서의 배포 - -클라이언트 노드는 자체 서명된 CA 인증서를 유효한 것으로 인식하지 않을 수 있다. -비-프로덕션 디플로이먼트 또는 회사 방화벽 뒤에서 실행되는 -디플로이먼트의 경우, 자체 서명된 CA 인증서를 모든 클라이언트에 -배포하고 유효한 인증서의 로컬 목록을 새로 고칠 수 있다. - -각 클라이언트에서, 다음 작업을 수행한다. - -```bash -sudo cp ca.crt /usr/local/share/ca-certificates/kubernetes.crt -sudo update-ca-certificates -``` - -``` -Updating certificates in /etc/ssl/certs... -1 added, 0 removed; done. -Running hooks in /etc/ca-certificates/update.d.... -done. -``` - -## 인증서 API - -`certificates.k8s.io` API를 사용해서 -[여기](/docs/tasks/tls/managing-tls-in-a-cluster)에 -설명된 대로 인증에 사용할 x509 인증서를 프로비전 할 수 있다. +클러스터를 위한 인증서를 생성하기 위해서는, [인증서](/ko/docs/tasks/administer-cluster/certificates/)를 참고한다. diff --git a/content/ko/docs/concepts/cluster-administration/logging.md b/content/ko/docs/concepts/cluster-administration/logging.md index 48044b046080a..85f3e4efde166 100644 --- a/content/ko/docs/concepts/cluster-administration/logging.md +++ b/content/ko/docs/concepts/cluster-administration/logging.md @@ -1,4 +1,7 @@ --- + + + title: 로깅 아키텍처 content_type: concept weight: 60 @@ -6,23 +9,22 @@ weight: 60 -애플리케이션 로그는 애플리케이션 내부에서 발생하는 상황을 이해하는 데 도움이 된다. 로그는 문제를 디버깅하고 클러스터 활동을 모니터링하는 데 특히 유용하다. 대부분의 최신 애플리케이션에는 일종의 로깅 메커니즘이 있다. 따라서, 대부분의 컨테이너 엔진은 일종의 로깅을 지원하도록 설계되었다. 컨테이너화된 애플리케이션에 가장 쉽고 가장 널리 사용되는 로깅 방법은 표준 출력과 표준 에러 스트림에 작성하는 것이다. +애플리케이션 로그는 애플리케이션 내부에서 발생하는 상황을 이해하는 데 도움이 된다. 로그는 문제를 디버깅하고 클러스터 활동을 모니터링하는 데 특히 유용하다. 대부분의 최신 애플리케이션에는 일종의 로깅 메커니즘이 있다. 마찬가지로, 컨테이너 엔진들도 로깅을 지원하도록 설계되었다. 컨테이너화된 애플리케이션에 가장 쉽고 가장 널리 사용되는 로깅 방법은 표준 출력과 표준 에러 스트림에 작성하는 것이다. -그러나, 일반적으로 컨테이너 엔진이나 런타임에서 제공하는 기본 기능은 완전한 로깅 솔루션으로 충분하지 않다. 예를 들어, 컨테이너가 크래시되거나, 파드가 축출되거나, 노드가 종료된 경우에도 여전히 애플리케이션의 로그에 접근하려고 한다. 따라서, 로그는 노드, 파드 또는 컨테이너와는 독립적으로 별도의 스토리지와 라이프사이클을 가져야 한다. 이 개념을 _클러스터-레벨-로깅_ 이라고 한다. 클러스터-레벨 로깅은 로그를 저장하고, 분석하고, 쿼리하기 위해 별도의 백엔드가 필요하다. 쿠버네티스는 로그 데이터를 위한 네이티브 스토리지 솔루션을 제공하지 않지만, 기존의 많은 로깅 솔루션을 쿠버네티스 클러스터에 통합할 수 있다. +그러나, 일반적으로 컨테이너 엔진이나 런타임에서 제공하는 기본 기능은 완전한 로깅 솔루션으로 충분하지 않다. +예를 들어, 컨테이너가 크래시되거나, 파드가 축출되거나, 노드가 종료된 경우에도 애플리케이션의 로그에 접근하고 싶을 것이다. +클러스터에서 로그는 노드, 파드 또는 컨테이너와는 독립적으로 별도의 스토리지와 라이프사이클을 가져야 한다. 이 개념을 _클러스터-레벨-로깅_ 이라고 한다. -클러스터-레벨 로깅 아키텍처는 로깅 백엔드가 -클러스터 내부 또는 외부에 존재한다고 가정하여 설명한다. 클러스터-레벨 -로깅에 관심이 없는 경우에도, 노드에서 로그를 저장하고 -처리하는 방법에 대한 설명이 여전히 유용할 수 있다. +클러스터-레벨 로깅은 로그를 저장하고, 분석하고, 쿼리하기 위해 별도의 백엔드가 필요하다. 쿠버네티스는 +로그 데이터를 위한 네이티브 스토리지 솔루션을 제공하지 않지만, +쿠버네티스에 통합될 수 있는 기존의 로깅 솔루션이 많이 있다. ## 쿠버네티스의 기본 로깅 -이 섹션에서는, 쿠버네티스에서 표준 출력 스트림으로 데이터를 -출력하는 기본 로깅의 예시를 볼 수 있다. 이 데모에서는 -일부 텍스트를 초당 한 번씩 표준 출력에 쓰는 컨테이너와 함께 -파드 명세를 사용한다. +이 예시는 텍스트를 초당 한 번씩 표준 출력에 쓰는 +컨테이너에 대한 `Pod` 명세를 사용한다. {{< codenew file="debug/counter-pod.yaml" >}} @@ -31,8 +33,10 @@ weight: 60 ```shell kubectl apply -f https://k8s.io/examples/debug/counter-pod.yaml ``` + 출력은 다음과 같다. 
-``` + +```console pod/counter created ``` @@ -41,69 +45,72 @@ pod/counter created ```shell kubectl logs counter ``` + 출력은 다음과 같다. -``` + +```console 0: Mon Jan 1 00:00:00 UTC 2001 1: Mon Jan 1 00:00:01 UTC 2001 2: Mon Jan 1 00:00:02 UTC 2001 ... ``` -컨테이너가 크래시된 경우, `kubectl logs` 의 `--previous` 플래그를 사용해서 컨테이너의 이전 인스턴스에 대한 로그를 검색할 수 있다. 파드에 여러 컨테이너가 있는 경우, 명령에 컨테이너 이름을 추가하여 접근하려는 컨테이너 로그를 지정해야 한다. 자세한 내용은 [`kubectl logs` 문서](/docs/reference/generated/kubectl/kubectl-commands#logs)를 참조한다. +`kubectl logs --previous` 를 사용해서 컨테이너의 이전 인스턴스에 대한 로그를 검색할 수 있다. 파드에 여러 컨테이너가 있는 경우, 명령에 컨테이너 이름을 추가하여 접근하려는 컨테이너 로그를 지정해야 한다. 자세한 내용은 [`kubectl logs` 문서](/docs/reference/generated/kubectl/kubectl-commands#logs)를 참조한다. ## 노드 레벨에서의 로깅 ![노드 레벨 로깅](/images/docs/user-guide/logging/logging-node-level.png) -컨테이너화된 애플리케이션이 `stdout(표준 출력)` 및 `stderr(표준 에러)` 에 쓰는 모든 것은 컨테이너 엔진에 의해 어딘가에서 처리와 리디렉션 된다. 예를 들어, 도커 컨테이너 엔진은 이 두 스트림을 [로깅 드라이버](https://docs.docker.com/engine/admin/logging/overview)로 리디렉션 한다. 이 드라이버는 쿠버네티스에서 json 형식의 파일에 작성하도록 구성된다. +컨테이너화된 애플리케이션의 `stdout(표준 출력)` 및 `stderr(표준 에러)` 스트림에 의해 생성된 모든 출력은 컨테이너 엔진이 처리 및 리디렉션 한다. +예를 들어, 도커 컨테이너 엔진은 이 두 스트림을 [로깅 드라이버](https://docs.docker.com/engine/admin/logging/overview)로 리디렉션 한다. 이 드라이버는 쿠버네티스에서 JSON 형식의 파일에 작성하도록 구성된다. {{< note >}} -도커 json 로깅 드라이버는 각 라인을 별도의 메시지로 취급한다. 도커 로깅 드라이버를 사용하는 경우, 멀티-라인 메시지를 직접 지원하지 않는다. 로깅 에이전트 레벨 이상에서 멀티-라인 메시지를 처리해야 한다. +도커 JSON 로깅 드라이버는 각 라인을 별도의 메시지로 취급한다. 도커 로깅 드라이버를 사용하는 경우, 멀티-라인 메시지를 직접 지원하지 않는다. 로깅 에이전트 레벨 이상에서 멀티-라인 메시지를 처리해야 한다. {{< /note >}} 기본적으로, 컨테이너가 다시 시작되면, kubelet은 종료된 컨테이너 하나를 로그와 함께 유지한다. 파드가 노드에서 축출되면, 해당하는 모든 컨테이너도 로그와 함께 축출된다. 노드-레벨 로깅에서 중요한 고려 사항은 로그 로테이션을 구현하여, 로그가 노드에서 사용 가능한 모든 스토리지를 사용하지 않도록 하는 것이다. 쿠버네티스는 -현재 로그 로테이션에 대한 의무는 없지만, 디플로이먼트 도구로 +로그 로테이션에 대한 의무는 없지만, 디플로이먼트 도구로 이를 해결하기 위한 솔루션을 설정해야 한다. 예를 들어, `kube-up.sh` 스크립트에 의해 배포된 쿠버네티스 클러스터에는, 매시간 실행되도록 구성된 [`logrotate`](https://linux.die.net/man/8/logrotate) -도구가 있다. 예를 들어, 도커의 `log-opt` 를 사용하여 애플리케이션의 로그를 -자동으로 로테이션을 하도록 컨테이너 런타임을 설정할 수도 있다. -`kube-up.sh` 스크립트에서, 후자의 접근 방식은 GCP의 COS 이미지에 사용되며, -전자의 접근 방식은 다른 환경에서 사용된다. 두 경우 모두, -기본적으로 로그 파일이 10MB를 초과하면 로테이션이 되도록 구성된다. +도구가 있다. 애플리케이션의 로그를 자동으로 +로테이션하도록 컨테이너 런타임을 설정할 수도 있다. + +예를 들어, `kube-up.sh` 가 GCP의 COS 이미지 로깅을 설정하는 방법은 +[`configure-helper` 스크립트](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh)를 통해 +자세히 알 수 있다. -예를 들어, `kube-up.sh` 가 해당 -[스크립트](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh)에서 -GCP의 COS 이미지 로깅을 설정하는 방법에 대한 자세한 정보를 찾을 수 있다. +**CRI 컨테이너 런타임** 을 사용할 때, kubelet은 로그를 로테이션하고 로깅 디렉터리 구조를 관리한다. kubelet은 +이 정보를 CRI 컨테이너 런타임에 전송하고 런타임은 컨테이너 로그를 지정된 위치에 기록한다. 두 개의 kubelet 플래그 `container-log-max-size` 및 `container-log-max-files` 를 사용하여 각 로그 파일의 최대 크기와 각 컨테이너에 허용되는 최대 파일 수를 각각 구성할 수 있다. 기본 로깅 예제에서와 같이 [`kubectl logs`](/docs/reference/generated/kubectl/kubectl-commands#logs)를 실행하면, 노드의 kubelet이 요청을 처리하고 -로그 파일에서 직접 읽은 다음, 응답의 내용을 반환한다. +로그 파일에서 직접 읽는다. kubelet은 로그 파일의 내용을 반환한다. {{< note >}} -현재, 일부 외부 시스템에서 로테이션을 수행한 경우, +만약, 일부 외부 시스템이 로테이션을 수행했거나 CRI 컨테이너 런타임이 사용된 경우, `kubectl logs` 를 통해 최신 로그 파일의 내용만 사용할 수 있다. 예를 들어, 10MB 파일이 있으면, `logrotate` 가 -로테이션을 수행하고 두 개의 파일이 생긴다(크기가 10MB인 파일 하나와 비어있는 파일). -그 후 `kubectl logs` 는 빈 응답을 반환한다. +로테이션을 수행하고 두 개의 파일이 생긴다. (크기가 10MB인 파일 하나와 비어있는 파일) +`kubectl logs` 는 이 예시에서는 빈 응답에 해당하는 최신 로그 파일을 반환한다. 
{{< /note >}} -[cosConfigureHelper]: https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh ### 시스템 컴포넌트 로그 시스템 컴포넌트에는 컨테이너에서 실행되는 것과 컨테이너에서 실행되지 않는 두 가지 유형이 있다. 예를 들면 다음과 같다. * 쿠버네티스 스케줄러와 kube-proxy는 컨테이너에서 실행된다. -* Kubelet과 컨테이너 런타임(예: 도커)은 컨테이너에서 실행되지 않는다. +* Kubelet과 컨테이너 런타임은 컨테이너에서 실행되지 않는다. -systemd를 사용하는 시스템에서, kubelet과 컨테이너 런타임은 journald에 작성한다. -systemd를 사용하지 않으면, `/var/log` 디렉터리의 `.log` 파일에 작성한다. -컨테이너 내부의 시스템 컴포넌트는 기본 로깅 메커니즘을 무시하고, -항상 `/var/log` 디렉터리에 기록한다. 그것은 [klog](https://github.com/kubernetes/klog) +systemd를 사용하는 시스템에서는, kubelet과 컨테이너 런타임은 journald에 작성한다. +systemd를 사용하지 않으면, kubelet과 컨테이너 런타임은 `/var/log` 디렉터리의 +`.log` 파일에 작성한다. 컨테이너 내부의 시스템 컴포넌트는 기본 로깅 메커니즘을 무시하고, +항상 `/var/log` 디렉터리에 기록한다. +시스템 컴포넌트는 [klog](https://github.com/kubernetes/klog) 로깅 라이브러리를 사용한다. [로깅에 대한 개발 문서](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)에서 해당 컴포넌트의 로깅 심각도(severity)에 대한 규칙을 찾을 수 있다. @@ -126,13 +133,14 @@ systemd를 사용하지 않으면, `/var/log` 디렉터리의 `.log` 파일에 각 노드에 _노드-레벨 로깅 에이전트_ 를 포함시켜 클러스터-레벨 로깅을 구현할 수 있다. 로깅 에이전트는 로그를 노출하거나 로그를 백엔드로 푸시하는 전용 도구이다. 일반적으로, 로깅 에이전트는 해당 노드의 모든 애플리케이션 컨테이너에서 로그 파일이 있는 디렉터리에 접근할 수 있는 컨테이너이다. -로깅 에이전트는 모든 노드에서 실행해야 하므로, 이를 데몬셋 레플리카, 매니페스트 파드 또는 노드의 전용 네이티브 프로세스로 구현하는 것이 일반적이다. 그러나 후자의 두 가지 접근법은 더 이상 사용되지 않으며 절대 권장하지 않는다. +로깅 에이전트는 모든 노드에서 실행해야 하므로, 에이전트는 +`DaemonSet` 으로 동작시키는 것을 추천한다. -쿠버네티스 클러스터는 노드-레벨 로깅 에이전트를 사용하는 것이 가장 일반적이며 권장되는 방법으로, 이는 노드별 하나의 에이전트만 생성하며, 노드에서 실행되는 애플리케이션을 변경할 필요가 없기 때문이다. 그러나, 노드-레벨 로깅은 _애플리케이션의 표준 출력과 표준 에러에 대해서만 작동한다_ . +노드-레벨 로깅은 노드별 하나의 에이전트만 생성하며, 노드에서 실행되는 애플리케이션에 대한 변경은 필요로 하지 않는다. -쿠버네티스는 로깅 에이전트를 지정하지 않지만, 쿠버네티스 릴리스에는 두 가지 선택적인 로깅 에이전트(Google 클라우드 플랫폼과 함께 사용하기 위한 [스택드라이버(Stackdriver) 로깅](/docs/tasks/debug-application-cluster/logging-stackdriver/)과 [엘라스틱서치(Elasticsearch)](/ko/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana/))가 패키지로 함께 제공된다. 전용 문서에서 자세한 정보와 지침을 찾을 수 있다. 두 가지 다 사용자 정의 구성이 된 [fluentd](http://www.fluentd.org/)를 에이전트로써 노드에서 사용한다. +컨테이너는 stdout과 stderr를 동의되지 않은 포맷으로 작성한다. 노드-레벨 에이전트는 이러한 로그를 수집하고 취합을 위해 전달한다. -### 로깅 에이전트와 함께 사이드카 컨테이너 사용 +### 로깅 에이전트와 함께 사이드카 컨테이너 사용 {#sidecar-container-with-logging-agent} 다음 중 한 가지 방법으로 사이드카 컨테이너를 사용할 수 있다. @@ -143,28 +151,27 @@ systemd를 사용하지 않으면, `/var/log` 디렉터리의 `.log` 파일에 ![스트리밍 컨테이너가 있는 사이드카 컨테이너](/images/docs/user-guide/logging/logging-with-streaming-sidecar.png) -사이드카 컨테이너를 자체 `stdout` 및 `stderr` 스트림으로 -스트리밍하면, 각 노드에서 이미 실행 중인 kubelet과 로깅 에이전트를 -활용할 수 있다. 사이드카 컨테이너는 파일, 소켓 -또는 journald에서 로그를 읽는다. 각 개별 사이드카 컨테이너는 자체 `stdout` -또는 `stderr` 스트림에 로그를 출력한다. +사이드카 컨테이너가 자체 `stdout` 및 `stderr` 스트림으로 +쓰도록 하면, 각 노드에서 이미 실행 중인 kubelet과 로깅 에이전트를 +활용할 수 있다. 사이드카 컨테이너는 파일, 소켓 또는 journald에서 로그를 읽는다. +각 사이드카 컨테이너는 자체 `stdout` 또는 `stderr` 스트림에 로그를 출력한다. 이 방법을 사용하면 애플리케이션의 다른 부분에서 여러 로그 스트림을 분리할 수 ​​있고, 이 중 일부는 `stdout` 또는 `stderr` 에 작성하기 위한 지원이 부족할 수 있다. 로그를 리디렉션하는 로직은 -미미하기 때문에, 큰 오버헤드가 거의 없다. 또한, +최소화되어 있기 때문에, 심각한 오버헤드가 아니다. 또한, `stdout` 및 `stderr` 가 kubelet에서 처리되므로, `kubectl logs` 와 같은 빌트인 도구를 사용할 수 있다. -다음의 예를 고려해보자. 파드는 단일 컨테이너를 실행하고, 컨테이너는 -서로 다른 두 가지 형식을 사용하여, 서로 다른 두 개의 로그 파일에 기록한다. 파드에 대한 +예를 들어, 파드는 단일 컨테이너를 실행하고, 컨테이너는 +서로 다른 두 가지 형식을 사용하여 서로 다른 두 개의 로그 파일에 기록한다. 파드에 대한 구성 파일은 다음과 같다. {{< codenew file="admin/logging/two-files-counter-pod.yaml" >}} 두 컴포넌트를 컨테이너의 `stdout` 스트림으로 리디렉션한 경우에도, 동일한 로그 -스트림에 서로 다른 형식의 로그 항목을 갖는 것은 -알아보기 힘들다. 대신, 두 개의 사이드카 컨테이너를 도입할 수 있다. 각 사이드카 +스트림에 서로 다른 형식의 로그 항목을 작성하는 것은 +추천하지 않는다. 
대신, 두 개의 사이드카 컨테이너를 생성할 수 있다. 각 사이드카 컨테이너는 공유 볼륨에서 특정 로그 파일을 테일(tail)한 다음 로그를 자체 `stdout` 스트림으로 리디렉션할 수 있다. @@ -178,7 +185,10 @@ systemd를 사용하지 않으면, `/var/log` 디렉터리의 `.log` 파일에 ```shell kubectl logs counter count-log-1 ``` -``` + +출력은 다음과 같다. + +```console 0: Mon Jan 1 00:00:00 UTC 2001 1: Mon Jan 1 00:00:01 UTC 2001 2: Mon Jan 1 00:00:02 UTC 2001 @@ -188,7 +198,10 @@ kubectl logs counter count-log-1 ```shell kubectl logs counter count-log-2 ``` -``` + +출력은 다음과 같다. + +```console Mon Jan 1 00:00:00 UTC 2001 INFO 0 Mon Jan 1 00:00:01 UTC 2001 INFO 1 Mon Jan 1 00:00:02 UTC 2001 INFO 2 @@ -204,11 +217,10 @@ Mon Jan 1 00:00:02 UTC 2001 INFO 2 `stdout` 으로 스트리밍하면 디스크 사용량은 두 배가 될 수 있다. 단일 파일에 쓰는 애플리케이션이 있는 경우, 일반적으로 스트리밍 사이드카 컨테이너 방식을 구현하는 대신 `/dev/stdout` 을 대상으로 -설정하는 것이 더 낫다. +설정하는 것을 추천한다. 사이드카 컨테이너를 사용하여 애플리케이션 자체에서 로테이션할 수 없는 -로그 파일을 로테이션할 수도 있다. 이 방법의 예로는 -정기적으로 logrotate를 실행하는 작은 컨테이너를 두는 것이다. +로그 파일을 로테이션할 수도 있다. 이 방법의 예시는 정기적으로 `logrotate` 를 실행하는 작은 컨테이너를 두는 것이다. 그러나, `stdout` 및 `stderr` 을 직접 사용하고 로테이션과 유지 정책을 kubelet에 두는 것이 권장된다. @@ -223,21 +235,17 @@ Mon Jan 1 00:00:02 UTC 2001 INFO 2 {{< note >}} 사이드카 컨테이너에서 로깅 에이전트를 사용하면 상당한 리소스 소비로 이어질 수 있다. 게다가, kubelet에 의해 -제어되지 않기 때문에, `kubectl logs` 명령을 사용하여 해당 로그에 +제어되지 않기 때문에, `kubectl logs` 를 사용하여 해당 로그에 접근할 수 없다. {{< /note >}} -예를 들어, 로깅 에이전트로 fluentd를 사용하는 [스택드라이버](/docs/tasks/debug-application-cluster/logging-stackdriver/)를 -사용할 수 있다. 여기에 이 방법을 구현하는 데 사용할 수 있는 -두 가지 구성 파일이 있다. 첫 번째 파일에는 -fluentd를 구성하기 위한 [컨피그맵](/docs/tasks/configure-pod-container/configure-pod-configmap/)이 포함되어 있다. +여기에 로깅 에이전트가 포함된 사이드카 컨테이너를 구현하는 데 사용할 수 있는 두 가지 구성 파일이 있다. 첫 번째 파일에는 +fluentd를 구성하기 위한 [`ConfigMap`](/docs/tasks/configure-pod-container/configure-pod-configmap/)이 포함되어 있다. {{< codenew file="admin/logging/fluentd-sidecar-config.yaml" >}} {{< note >}} -fluentd의 구성은 이 문서의 범위를 벗어난다. -fluentd를 구성하는 것에 대한 자세한 내용은, -[공식 fluentd 문서](https://docs.fluentd.org/)를 참고한다. +fluentd를 구성하는 것에 대한 자세한 내용은, [fluentd 문서](https://docs.fluentd.org/)를 참고한다. {{< /note >}} 두 번째 파일은 fluentd가 실행되는 사이드카 컨테이너가 있는 파드를 설명한다. @@ -245,16 +253,10 @@ fluentd를 구성하는 것에 대한 자세한 내용은, {{< codenew file="admin/logging/two-files-counter-pod-agent-sidecar.yaml" >}} -얼마 후 스택드라이버 인터페이스에서 로그 메시지를 찾을 수 있다. - -이것은 단지 예시일 뿐이며 실제로 애플리케이션 컨테이너 내의 -모든 소스에서 읽은 fluentd를 로깅 에이전트로 대체할 수 있다는 것을 -기억한다. +이 예시 구성에서, 사용자는 애플리케이션 컨테이너 내의 모든 소스을 읽는 fluentd를 다른 로깅 에이전트로 대체할 수 있다. ### 애플리케이션에서 직접 로그 노출 ![애플리케이션에서 직접 로그 노출](/images/docs/user-guide/logging/logging-from-application.png) -모든 애플리케이션에서 직접 로그를 노출하거나 푸시하여 클러스터-레벨 로깅을 -구현할 수 있다. 그러나, 이러한 로깅 메커니즘의 구현은 -쿠버네티스의 범위를 벗어난다. +모든 애플리케이션에서 직접 로그를 노출하거나 푸시하는 클러스터-로깅은 쿠버네티스의 범위를 벗어난다. diff --git a/content/ko/docs/concepts/cluster-administration/manage-deployment.md b/content/ko/docs/concepts/cluster-administration/manage-deployment.md index be5befdbd38d7..abcc4c2cd58e0 100644 --- a/content/ko/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/ko/docs/concepts/cluster-administration/manage-deployment.md @@ -1,4 +1,6 @@ --- + + title: 리소스 관리 content_type: concept weight: 40 @@ -43,9 +45,9 @@ kubectl apply -f https://k8s.io/examples/application/nginx/ `kubectl` 은 접미사가 `.yaml`, `.yml` 또는 `.json` 인 파일을 읽는다. -동일한 마이크로서비스 또는 애플리케이션 티어(tier)와 관련된 리소스를 동일한 파일에 배치하고, 애플리케이션과 연관된 모든 파일을 동일한 디렉터리에 그룹화하는 것이 좋다. 애플리케이션의 티어가 DNS를 사용하여 서로 바인딩되면, 스택의 모든 컴포넌트를 일괄로 배포할 수 있다. +동일한 마이크로서비스 또는 애플리케이션 티어(tier)와 관련된 리소스를 동일한 파일에 배치하고, 애플리케이션과 연관된 모든 파일을 동일한 디렉터리에 그룹화하는 것이 좋다. 애플리케이션의 티어가 DNS를 사용하여 서로 바인딩되면, 스택의 모든 컴포넌트를 함께 배포할 수 있다. -URL을 구성 소스로 지정할 수도 있다. 
이는 github에 체크인된 구성 파일에서 직접 배포하는 데 편리하다. +URL을 구성 소스로 지정할 수도 있다. 이는 GitHub에 체크인된 구성 파일에서 직접 배포하는 데 편리하다. ```shell kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/application/nginx/nginx-deployment.yaml @@ -68,7 +70,7 @@ deployment.apps "my-nginx" deleted service "my-nginx-svc" deleted ``` -두 개의 리소스만 있는 경우, 리소스/이름 구문을 사용하여 커맨드 라인에서 둘다 모두 쉽게 지정할 수도 있다. +두 개의 리소스가 있는 경우, 리소스/이름 구문을 사용하여 커맨드 라인에서 둘다 모두 지정할 수도 있다. ```shell kubectl delete deployments/my-nginx services/my-nginx-svc @@ -85,10 +87,11 @@ deployment.apps "my-nginx" deleted service "my-nginx-svc" deleted ``` -`kubectl` 은 입력을 받아들이는 것과 동일한 구문으로 리소스 이름을 출력하므로, `$()` 또는 `xargs` 를 사용하여 작업을 쉽게 연결할 수 있다. +`kubectl` 은 입력을 받아들이는 것과 동일한 구문으로 리소스 이름을 출력하므로, `$()` 또는 `xargs` 를 사용하여 작업을 연결할 수 있다. ```shell kubectl get $(kubectl create -f docs/concepts/cluster-administration/nginx/ -o name | grep service) +kubectl create -f docs/concepts/cluster-administration/nginx/ -o name | grep service | xargs -i kubectl get {} ``` ```shell @@ -262,7 +265,7 @@ guestbook-redis-slave-qgazl 1/1 Running 0 3m ## 레이블 업데이트 새로운 리소스를 만들기 전에 기존 파드 및 기타 리소스의 레이블을 다시 지정해야 하는 경우가 있다. 이것은 `kubectl label` 로 수행할 수 있다. -예를 들어, 모든 nginx 파드에 프론트엔드 티어로 레이블을 지정하려면, 간단히 다음과 같이 실행한다. +예를 들어, 모든 nginx 파드에 프론트엔드 티어로 레이블을 지정하려면, 다음과 같이 실행한다. ```shell kubectl label pods -l app=nginx tier=fe @@ -275,7 +278,7 @@ pod/my-nginx-2035384211-u3t6x labeled ``` 먼저 "app=nginx" 레이블이 있는 모든 파드를 필터링한 다음, "tier=fe" 레이블을 지정한다. -방금 레이블을 지정한 파드를 보려면, 다음을 실행한다. +레이블을 지정한 파드를 보려면, 다음을 실행한다. ```shell kubectl get pods -l app=nginx -L tier @@ -299,6 +302,7 @@ my-nginx-2035384211-u3t6x 1/1 Running 0 23m fe kubectl annotate pods my-nginx-v4-9gw19 description='my frontend running nginx' kubectl get pods my-nginx-v4-9gw19 -o yaml ``` + ```shell apiVersion: v1 kind: pod @@ -312,11 +316,12 @@ metadata: ## 애플리케이션 스케일링 -애플리케이션의 로드가 증가하거나 축소되면, `kubectl` 을 사용하여 쉽게 스케일링할 수 있다. 예를 들어, nginx 레플리카 수를 3에서 1로 줄이려면, 다음을 수행한다. +애플리케이션의 로드가 증가하거나 축소되면, `kubectl` 을 사용하여 애플리케이션을 스케일링한다. 예를 들어, nginx 레플리카 수를 3에서 1로 줄이려면, 다음을 수행한다. ```shell kubectl scale deployment/my-nginx --replicas=1 ``` + ```shell deployment.apps/my-nginx scaled ``` @@ -326,6 +331,7 @@ deployment.apps/my-nginx scaled ```shell kubectl get pods -l app=nginx ``` + ```shell NAME READY STATUS RESTARTS AGE my-nginx-2035384211-j5fhi 1/1 Running 0 30m @@ -336,6 +342,7 @@ my-nginx-2035384211-j5fhi 1/1 Running 0 30m ```shell kubectl autoscale deployment/my-nginx --min=1 --max=3 ``` + ```shell horizontalpodautoscaler.autoscaling/my-nginx autoscaled ``` @@ -404,11 +411,12 @@ JSON 병합 패치 그리고 전략적 병합 패치를 지원한다. ## 파괴적(disruptive) 업데이트 -경우에 따라, 한 번 초기화하면 업데이트할 수 없는 리소스 필드를 업데이트해야 하거나, 디플로이먼트에서 생성된 손상된 파드를 고치는 등의 재귀적 변경을 즉시 원할 수도 있다. 이러한 필드를 변경하려면, `replace --force` 를 사용하여 리소스를 삭제하고 다시 만든다. 이 경우, 원래 구성 파일을 간단히 수정할 수 있다. +경우에 따라, 한 번 초기화하면 업데이트할 수 없는 리소스 필드를 업데이트해야 하거나, 디플로이먼트에서 생성된 손상된 파드를 고치는 등의 재귀적 변경을 즉시 원할 수도 있다. 이러한 필드를 변경하려면, `replace --force` 를 사용하여 리소스를 삭제하고 다시 만든다. 이 경우, 원래 구성 파일을 수정할 수 있다. ```shell kubectl replace -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml --force ``` + ```shell deployment.apps/my-nginx deleted deployment.apps/my-nginx replaced @@ -425,19 +433,22 @@ nginx 1.14.2 버전을 실행한다고 가정해 보겠다. ```shell kubectl create deployment my-nginx --image=nginx:1.14.2 ``` + ```shell deployment.apps/my-nginx created ``` 3개의 레플리카를 포함한다(이전과 새 개정판이 공존할 수 있음). 
+
```shell
kubectl scale deployment my-nginx --current-replicas=1 --replicas=3
```
+
```
deployment.apps/my-nginx scaled
```

-1.16.1 버전으로 업데이트하려면, 위에서 배운 kubectl 명령을 사용하여 `.spec.template.spec.containers[0].image` 를 `nginx:1.14.2` 에서 `nginx:1.16.1` 로 간단히 변경한다.
+1.16.1 버전으로 업데이트하려면, 위에서 배운 kubectl 명령을 사용하여 `.spec.template.spec.containers[0].image` 를 `nginx:1.14.2` 에서 `nginx:1.16.1` 로 변경한다.

```shell
kubectl edit deployment/my-nginx
```
@@ -452,5 +463,3 @@ kubectl edit deployment/my-nginx

- [애플리케이션 검사 및 디버깅에 `kubectl` 을 사용하는 방법](/docs/tasks/debug-application-cluster/debug-application-introspection/)에 대해 알아본다.
- [구성 모범 사례 및 팁](/ko/docs/concepts/configuration/overview/)을 참고한다.
-
-
diff --git a/content/ko/docs/concepts/cluster-administration/proxies.md b/content/ko/docs/concepts/cluster-administration/proxies.md
index df431575785a1..ab1f6611fd770 100644
--- a/content/ko/docs/concepts/cluster-administration/proxies.md
+++ b/content/ko/docs/concepts/cluster-administration/proxies.md
@@ -39,7 +39,7 @@ weight: 90
 - UDP, TCP, SCTP를 이용하여 프락시 한다.
 - HTTP는 이해하지 못한다.
 - 로드 밸런싱을 제공한다.
-  - 단지 서비스에 도달하는데 사용한다.
+  - 서비스에 도달하는데만 사용한다.

1. API 서버 앞단의 프락시/로드밸런서

@@ -61,7 +61,3 @@ weight: 90
## 요청을 리다이렉트하기

프락시는 리다이렉트 기능을 대체했다. 리다이렉트는 더 이상 사용하지 않는다.
-
-
-
-
diff --git a/content/ko/docs/concepts/cluster-administration/system-metrics.md b/content/ko/docs/concepts/cluster-administration/system-metrics.md
index 03eb904ee3889..737c1ded252bc 100644
--- a/content/ko/docs/concepts/cluster-administration/system-metrics.md
+++ b/content/ko/docs/concepts/cluster-administration/system-metrics.md
@@ -1,9 +1,5 @@
---
-title: 쿠버네티스 컨트롤 플레인에 대한 메트릭
-
-
-
-
+title: 쿠버네티스 시스템 컴포넌트에 대한 메트릭
content_type: concept
weight: 60
---
@@ -12,7 +8,7 @@ weight: 60

시스템 컴포넌트 메트릭으로 내부에서 발생하는 상황을 더 잘 파악할 수 있다. 메트릭은 대시보드와 경고를 만드는 데 특히 유용하다.

-쿠버네티스 컨트롤 플레인의 메트릭은 [프로메테우스 형식](https://prometheus.io/docs/instrumenting/exposition_formats/)으로 출력된다.
+쿠버네티스 컴포넌트의 메트릭은 [프로메테우스 형식](https://prometheus.io/docs/instrumenting/exposition_formats/)으로 출력된다.
이 형식은 구조화된 평문으로 디자인되어 있으므로
사람과 기계 모두가 쉽게 읽을 수 있다.

@@ -36,7 +32,7 @@ weight: 60

클러스터가 {{< glossary_tooltip term_id="rbac" text="RBAC" >}}을 사용하는 경우, 메트릭을 읽으려면 `/metrics` 에 접근을 허용하는 클러스터롤(ClusterRole)을 가지는 사용자, 그룹 또는 서비스어카운트(ServiceAccount)를 통한 권한이 필요하다.
예를 들면, 다음과 같다.

-```
+```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
@@ -134,7 +130,7 @@ cloudprovider_gce_api_request_duration_seconds { request = "list_disk"}

### kube-scheduler 메트릭

-{{< feature-state for_k8s_version="v1.20" state="alpha" >}}
+{{< feature-state for_k8s_version="v1.21" state="beta" >}}

스케줄러는 실행 중인 모든 파드의 요청(request)된 리소스와 요구되는 제한(limit)을 보고하는 선택적 메트릭을 노출한다. 이러한 메트릭은 용량 계획(capacity planning) 대시보드를 구축하고, 현재 또는 과거 스케줄링 제한을 평가하고, 리소스 부족으로 스케줄할 수 없는 워크로드를 빠르게 식별하고, 실제 사용량을 파드의 요청과 비교하는 데 사용할 수 있다.

@@ -152,9 +148,26 @@ kube-scheduler는 각 파드에 대해 구성된 리소스 [요청과 제한](/k
메트릭은 HTTP 엔드포인트 `/metrics/resources`에 노출되며
스케줄러의 `/metrics` 엔드포인트 와 동일한 인증이 필요하다. 이러한 알파 수준의 메트릭을 노출시키려면 `--show-hidden-metrics-for-version=1.20` 플래그를 사용해야 한다.

+## 메트릭 비활성화
+
+커맨드 라인 플래그 `--disabled-metrics` 를 통해 메트릭을 명시적으로 끌 수 있다. 이 방법이 필요한 예로는 메트릭이 성능 문제를 일으키는 경우를 들 수 있다. 입력값은 비활성화되는 메트릭 목록이다(예: `--disabled-metrics=metric1,metric2`).
+
+## 메트릭 카디널리티(cardinality) 적용
+
+제한되지 않은 차원의 메트릭은 계측하는 컴포넌트에서 메모리 문제를 일으킬 수 있다. 리소스 사용을 제한하려면, `--allow-label-value` 커맨드 라인 옵션을 사용하여 메트릭 항목에 대한 레이블 값의 허용 목록(allow-list)을 동적으로 구성한다.
+
+알파 단계에서, 플래그는 메트릭 레이블 허용 목록으로 일련의 매핑만 가져올 수 있다.
+각 매핑은 `<metric_name>,<label_name>=<allowed_labels>` 형식이다. 여기서
+`<allowed_labels>` 는 허용되는 레이블 이름의 쉼표로 구분된 목록이다.
+
+전체 형식은 다음과 같다.
+`--allow-label-value <metric_name>,<label_name>='<allowed_value1>, <allowed_value2>...', <metric_name2>,<label_name>='<allowed_value1>, <allowed_value2>...', ...`.
+
+예시는 다음과 같다.
+`--allow-label-value number_count_metric,odd_number='1,3,5', number_count_metric,even_number='2,4,6', date_gauge_metric,weekend='Saturday,Sunday'`
+
## {{% heading "whatsnext" %}}

* 메트릭에 대한 [프로메테우스 텍스트 형식](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format)에 대해 읽어본다
-* [안정적인 쿠버네티스 메트릭](https://github.com/kubernetes/kubernetes/blob/master/test/instrumentation/testdata/stable-metrics-list.yaml) 목록을 참고한다
* [쿠버네티스 사용 중단 정책](/docs/reference/using-api/deprecation-policy/#deprecating-a-feature-or-behavior)에 대해 읽어본다
diff --git a/content/ko/docs/concepts/configuration/configmap.md b/content/ko/docs/concepts/configuration/configmap.md
index 73fcf62b982bf..841feb8fb34fe 100644
--- a/content/ko/docs/concepts/configuration/configmap.md
+++ b/content/ko/docs/concepts/configuration/configmap.md
@@ -42,8 +42,8 @@ API [오브젝트](/ko/docs/concepts/overview/working-with-objects/kubernetes-ob
`spec` 이 있는 대부분의 쿠버네티스 오브젝트와 달리, 컨피그맵에는 `data` 및 `binaryData`
필드가 있다. 이러한 필드는 키-값 쌍을 값으로 허용한다. `data` 필드와
`binaryData` 는 모두 선택 사항이다. `data` 필드는
-UTF-8 바이트 시퀀스를 포함하도록 설계되었으며 `binaryData` 필드는 바이너리 데이터를
-포함하도록 설계되었다.
+UTF-8 바이트 시퀀스를 포함하도록 설계되었으며 `binaryData` 필드는 바이너리
+데이터를 base64로 인코딩된 문자열로 포함하도록 설계되었다.

컨피그맵의 이름은 유효한
[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다.

@@ -223,7 +223,7 @@ spec:
현재 볼륨에서 사용된 컨피그맵이 업데이트되면, 프로젝션된 키도 마찬가지로 업데이트된다.
kubelet은 모든 주기적인 동기화에서 마운트된 컨피그맵이 최신 상태인지 확인한다.
그러나, kubelet은 로컬 캐시를 사용해서 컨피그맵의 현재 값을 가져온다.
-캐시 유형은 [KubeletConfiguration 구조체](https://github.com/kubernetes/kubernetes/blob/{{< param "docsbranch" >}}/staging/src/k8s.io/kubelet/config/v1beta1/types.go)의
+캐시 유형은 [KubeletConfiguration 구조체](/docs/reference/config-api/kubelet-config.v1beta1/)의
`ConfigMapAndSecretChangeDetectionStrategy` 필드를 사용해서 구성할 수 있다.
컨피그맵은 watch(기본값), ttl 기반 또는 API 서버로 직접
모든 요청을 리디렉션할 수 있다.
@@ -233,11 +233,12 @@ kubelet은 모든 주기적인 동기화에서 마운트된 컨피그맵이 최
지연을 지켜보거나, 캐시의 ttl 또는 0에 상응함).

환경 변수로 사용되는 컨피그맵은 자동으로 업데이트되지 않으며 파드를 다시 시작해야 한다.
+
## 변경할 수 없는(immutable) 컨피그맵 {#configmap-immutable}

-{{< feature-state for_k8s_version="v1.19" state="beta" >}}
+{{< feature-state for_k8s_version="v1.21" state="stable" >}}

-쿠버네티스 베타 기능인 _변경할 수 없는 시크릿과 컨피그맵_ 은 개별 시크릿과
+쿠버네티스 기능인 _변경할 수 없는 시크릿과 컨피그맵_ 은 개별 시크릿과
컨피그맵을 변경할 수 없는 것으로 설정하는 옵션을 제공한다. 컨피그맵을
광범위하게 사용하는 클러스터(최소 수만 개의 고유한 컨피그맵이 파드에 마운트)의 경우
데이터 변경을 방지하면 다음과 같은 이점이 있다.
diff --git a/content/ko/docs/concepts/configuration/manage-resources-containers.md b/content/ko/docs/concepts/configuration/manage-resources-containers.md
index 8f499fbc6a12b..ccd3ee929060b 100644
--- a/content/ko/docs/concepts/configuration/manage-resources-containers.md
+++ b/content/ko/docs/concepts/configuration/manage-resources-containers.md
@@ -21,9 +21,6 @@ feature:
    컨테이너가 사용할 수 있도록 해당 시스템 리소스의 최소 _요청_ 량을
    예약한다.
-
-
-

## 요청 및 제한

@@ -72,7 +69,7 @@ Huge page는 노드 커널이 기본 페이지 크기보다 훨씬 큰 메모리
이것은 `memory` 및 `cpu` 리소스와는 다르다.
{{< /note >}}

-CPU와 메모리를 통칭하여 *컴퓨트 리소스* 또는 그냥 *리소스* 라고 한다. 컴퓨트
+CPU와 메모리를 통칭하여 *컴퓨트 리소스* 또는 *리소스* 라고 한다. 컴퓨트
리소스는 요청, 할당 및 소비될 수 있는 측정 가능한
수량이다. 이것은
[API 리소스](/ko/docs/concepts/overview/kubernetes-api/)와는 다르다. 파드 및
@@ -441,7 +438,9 @@ kubelet은 각 `emptyDir` 볼륨,
컨테이너 로그 디렉터리 및 쓰기

프로젝트 쿼터를 사용하려면, 다음을 수행해야 한다.
-* kubelet 구성에서 `LocalStorageCapacityIsolationFSQuotaMonitoring=true` +* [kubelet 구성](/docs/reference/config-api/kubelet-config.v1beta1/)의 + `featureGates` 필드 또는 `--feature-gates` 커맨드 라인 플래그를 사용하여 + `LocalStorageCapacityIsolationFSQuotaMonitoring=true` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화한다. @@ -449,6 +448,7 @@ kubelet은 각 `emptyDir` 볼륨, 컨테이너 로그 디렉터리 및 쓰기 프로젝트 쿼터가 활성화되어 있는지 확인한다. 모든 XFS 파일시스템은 프로젝트 쿼터를 지원한다. ext4 파일시스템의 경우, 파일시스템이 마운트되지 않은 상태에서 프로젝트 쿼터 추적 기능을 활성화해야 한다. + ```bash # ext4인 /dev/block-device가 마운트되지 않은 경우 sudo tune2fs -O project -Q prjquota /dev/block-device @@ -518,9 +518,8 @@ JSON-Pointer로 해석된다. 더 자세한 내용은, 클러스터-레벨의 확장된 리소스는 노드에 연결되지 않는다. 이들은 일반적으로 리소스 소비와 리소스 쿼터를 처리하는 스케줄러 익스텐더(extender)에 의해 관리된다. -[스케줄러 정책 구성](https://github.com/kubernetes/kubernetes/blob/release-1.10/pkg/scheduler/api/v1/types.go#L31)에서 -스케줄러 익스텐더가 -처리하는 확장된 리소스를 지정할 수 있다. +[스케줄러 정책 구성](/docs/reference/config-api/kube-scheduler-policy-config.v1/)에서 +스케줄러 익스텐더가 처리하는 확장된 리소스를 지정할 수 있다. **예제:** @@ -743,23 +742,13 @@ LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-0 컨테이너가 `reason:OOM Killed`(`OOM` 은 메모리 부족(Out Of Memory)의 약자) 때문에 종료된 것을 알 수 있다. - - - - - ## {{% heading "whatsnext" %}} - * [컨테이너와 파드에 메모리 리소스를 할당](/ko/docs/tasks/configure-pod-container/assign-memory-resource/)하는 핸즈온 경험을 해보자. - * [컨테이너와 파드에 CPU 리소스를 할당](/docs/tasks/configure-pod-container/assign-cpu-resource/)하는 핸즈온 경험을 해보자. - * 요청과 제한의 차이점에 대한 자세한 내용은, [리소스 QoS](https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md)를 참조한다. - * [컨테이너](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core) API 레퍼런스 읽어보기 - * [ResourceRequirements](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcerequirements-v1-core) API 레퍼런스 읽어보기 - * XFS의 [프로젝트 쿼터](https://xfs.org/docs/xfsdocs-xml-dev/XFS_User_Guide/tmp/en-US/html/xfs-quotas.html)에 대해 읽어보기 +* [kube-scheduler 정책 레퍼런스 (v1)](/docs/reference/config-api/kube-scheduler-policy-config.v1/)에 대해 더 읽어보기 diff --git a/content/ko/docs/concepts/configuration/overview.md b/content/ko/docs/concepts/configuration/overview.md index 6bcec1d6c0d38..17fd61c6a667d 100644 --- a/content/ko/docs/concepts/configuration/overview.md +++ b/content/ko/docs/concepts/configuration/overview.md @@ -59,13 +59,13 @@ DNS 서버는 새로운 `서비스`를 위한 쿠버네티스 API를 Watch하며 - `hostPort`와 같은 이유로, `hostNetwork`를 사용하는 것을 피한다. -- `kube-proxy` 로드 밸런싱이 필요하지 않을 때, 쉬운 서비스 발견을 위해 [헤드리스 서비스](/ko/docs/concepts/services-networking/service/#헤드리스-headless-서비스)(`ClusterIP`의 값을 `None`으로 가지는)를 사용한다. +- `kube-proxy` 로드 밸런싱이 필요하지 않을 때, 서비스 발견을 위해 [헤드리스 서비스](/ko/docs/concepts/services-networking/service/#헤드리스-headless-서비스)(`ClusterIP`의 값을 `None`으로 가지는)를 사용한다. ## 레이블 사용하기 - `{ app: myapp, tier: frontend, phase: test, deployment: v3 }`처럼 애플리케이션이나 디플로이먼트의 __속성에 대한 의미__ 를 식별하는 [레이블](/ko/docs/concepts/overview/working-with-objects/labels/)을 정의해 사용한다. 다른 리소스를 위해 적절한 파드를 선택하는 용도로 이러한 레이블을 이용할 수 있다. 예를 들어, 모든 `tier: frontend` 파드를 선택하거나, `app: myapp`의 모든 `phase: test` 컴포넌트를 선택하는 서비스를 생각해 볼 수 있다. 이 접근 방법의 예시는 [방명록](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/) 앱을 참고한다. -릴리스에 특정되는 레이블을 서비스의 셀렉터에서 생략함으로써 여러 개의 디플로이먼트에 걸치는 서비스를 생성할 수 있다. [디플로이먼트](/ko/docs/concepts/workloads/controllers/deployment/)는 생성되어 있는 서비스를 다운타임 없이 수정하기 쉽도록 만든다. +릴리스에 특정되는 레이블을 서비스의 셀렉터에서 생략함으로써 여러 개의 디플로이먼트에 걸치는 서비스를 생성할 수 있다. 동작 중인 서비스를 다운타임 없이 갱신하려면, [디플로이먼트](/ko/docs/concepts/workloads/controllers/deployment/)를 사용한다. 
오브젝트의 의도한 상태는 디플로이먼트에 의해 기술되며, 만약 그 스펙에 대한 변화가 _적용될_ 경우, 디플로이먼트 컨트롤러는 일정한 비율로 실제 상태를 의도한 상태로 변화시킨다. @@ -81,9 +81,9 @@ DNS 서버는 새로운 `서비스`를 위한 쿠버네티스 API를 Watch하며 - `imagePullPolicy: Always`: kubelet이 컨테이너를 시작할 때마다, kubelet은 컨테이너 이미지 레지스트리를 쿼리해서 이름을 이미지 다이제스트(digest)로 확인한다. kubelet에 정확한 다이제스트가 저장된 컨테이너 이미지가 로컬로 캐시된 경우, kubelet은 캐시된 이미지를 사용한다. 그렇지 않으면, kubelet은 확인한 다이제스트를 사용해서 이미지를 다운로드(pull)하고, 해당 이미지를 사용해서 컨테이너를 시작한다. -- `imagePullPolicy`가 생략되어 있고, 이미지 태그가 `:latest` 이거나 생략되어 있다면 `Always`가 적용된다. +- `imagePullPolicy`가 생략되어 있고, 이미지 태그가 `:latest` 이거나 생략되어 있다면 `imagePullPolicy`는 자동으로 `Always`가 적용된다. 태그 값을 변경하더라도 이 값은 `IfNotPresent`로 업데이트 되지 _않는다_. -- `imagePullPolicy`가 생략되어 있고, 이미지 태그가 존재하지만 `:latest`가 아니라면 `IfNotPresent`가 적용된다. +- `imagePullPolicy`가 생략되어 있고, 이미지 태그가 존재하지만 `:latest`가 아니라면 `imagePullPolicy`는 자동으로 `IfNotPresent`가 적용된다. 태그가 나중에 제거되거나 `:latest`로 변경되더라도 `Always`로 업데이트 되지 _않는다_. - `imagePullPolicy: Never`: 이미지가 로컬에 존재한다고 가정한다. 이미지를 풀(Pull) 하기 위해 시도하지 않는다. @@ -96,7 +96,7 @@ DNS 서버는 새로운 `서비스`를 위한 쿠버네티스 API를 Watch하며 {{< /note >}} {{< note >}} -기반이 되는 이미지 제공자의 캐시 방법은 `imagePullPolicy: Always`를 효율적으로 만든다. 예를 들어, 도커에서는 이미지가 이미 존재한다면 풀(Pull) 시도는 빠르게 진행되는데, 이는 모든 이미지 레이어가 캐시되어 있으며 이미지 다운로드가 필요하지 않기 때문이다. +레지스트리가 안정적으로 동작하는 상황에서는, `imagePullPolicy: Always`로 설정되어 있더라도 기반 이미지 관리 도구의 캐싱 정책을 통해 이미지 풀(pull) 작업의 효율성을 높일 수 있다. 예를 들어, 도커를 사용하는 경우 이미지가 이미 존재한다면 풀(Pull) 시도는 빠르게 진행되는데, 이는 모든 이미지 레이어가 캐시되어 있으며 이미지 다운로드가 필요하지 않기 때문이다. {{< /note >}} ## kubectl 사용하기 diff --git a/content/ko/docs/concepts/configuration/secret.md b/content/ko/docs/concepts/configuration/secret.md index e5466b8dec2ce..a4544397d7a0f 100644 --- a/content/ko/docs/concepts/configuration/secret.md +++ b/content/ko/docs/concepts/configuration/secret.md @@ -1,4 +1,6 @@ --- + + title: 시크릿(Secret) content_type: concept feature: @@ -22,6 +24,16 @@ weight: 30 명세나 이미지에 포함될 수 있다. 사용자는 시크릿을 만들 수 있고 시스템도 일부 시크릿을 만들 수 있다. +{{< caution >}} +쿠버네티스 시크릿은 기본적으로 암호화되지 않은 base64 인코딩 문자열로 저장된다. +기본적으로 API 액세스 권한이 있는 모든 사용자 또는 쿠버네티스의 기본 데이터 저장소 etcd에 +액세스할 수 있는 모든 사용자가 일반 텍스트로 검색할 수 있다. +시크릿을 안전하게 사용하려면 (최소한) 다음과 같이 하는 것이 좋다. + +1. 시크릿에 대한 [암호화 활성화](/docs/tasks/administer-cluster/encrypt-data/). +2. 시크릿 읽기 및 쓰기를 제한하는 [RBAC 규칙 활성화 또는 구성](/docs/reference/access-authn-authz/authorization/). 파드를 만들 권한이 있는 모든 사용자는 시크릿을 암묵적으로 얻을 수 있다. +{{< /caution >}} + ## 시크릿 개요 @@ -97,7 +109,7 @@ empty-secret Opaque 0 2m6s ``` 해당 `DATA` 열은 시크릿에 저장된 데이터 아이템의 수를 보여준다. -이 경우, `0` 은 비어 있는 시크릿을 방금 하나 생성하였다는 것을 의미한다. +이 경우, `0` 은 비어 있는 시크릿을 하나 생성하였다는 것을 의미한다. ### 서비스 어카운트 토큰 시크릿 @@ -269,6 +281,13 @@ SSH 인증 시크릿 타입은 사용자 편의만을 위해서 제공된다. API 서버는 요구되는 키가 시크릿 구성에서 제공되고 있는지 검증도 한다. +{{< caution >}} +SSH 개인 키는 자체적으로 SSH 클라이언트와 호스트 서버 간에 신뢰할 수 있는 통신을 +설정하지 않는다. 컨피그맵(ConfigMap)에 추가된 `known_hosts` 파일과 같은 +"중간자(man in the middle)" 공격을 완화하려면 신뢰를 설정하는 +2차 수단이 필요하다. +{{< /caution >}} + ### TLS 시크릿 쿠버네티스는 보통 TLS를 위해 사용되는 인증서와 관련된 키를 저장하기 위해서 @@ -648,9 +667,9 @@ cat /etc/foo/password 볼륨에서 현재 사용되는 시크릿이 업데이트되면, 투영된 키도 결국 업데이트된다. kubelet은 마운트된 시크릿이 모든 주기적인 동기화에서 최신 상태인지 여부를 확인한다. 그러나, kubelet은 시크릿의 현재 값을 가져 오기 위해 로컬 캐시를 사용한다. -캐시의 유형은 [KubeletConfiguration 구조체](https://github.com/kubernetes/kubernetes/blob/{{< param "docsbranch" >}}/staging/src/k8s.io/kubelet/config/v1beta1/types.go)의 +캐시의 유형은 [KubeletConfiguration 구조체](/docs/reference/config-api/kubelet-config.v1beta1/)의 `ConfigMapAndSecretChangeDetectionStrategy` 필드를 사용하여 구성할 수 있다. -시크릿은 watch(기본값), ttl 기반 또는 단순히 API 서버로 모든 요청을 직접 +시크릿은 watch(기본값), ttl 기반 또는 API 서버로 모든 요청을 직접 리디렉션하여 전파할 수 있다. 
결과적으로, 시크릿이 업데이트된 순간부터 새로운 키가 파드에 투영되는 순간까지의 총 지연 시간은 kubelet 동기화 시간 + 캐시 @@ -730,9 +749,9 @@ echo $SECRET_PASSWORD ## 변경할 수 없는(immutable) 시크릿 {#secret-immutable} -{{< feature-state for_k8s_version="v1.19" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} -쿠버네티스 베타 기능인 _변경할 수 없는 시크릿과 컨피그맵_ 은 +쿠버네티스 기능인 _변경할 수 없는 시크릿과 컨피그맵_ 은 개별 시크릿과 컨피그맵을 변경할 수 없는 것으로 설정하는 옵션을 제공한다. 시크릿을 광범위하게 사용하는 클러스터(최소 수만 개의 고유한 시크릿이 파드에 마운트)의 경우, 데이터 변경을 방지하면 다음과 같은 이점이 있다. @@ -741,8 +760,8 @@ echo $SECRET_PASSWORD - immutable로 표시된 시크릿에 대한 감시를 중단하여, kube-apiserver의 부하를 크게 줄임으로써 클러스터의 성능을 향상시킴 -이 기능은 v1.19부터 기본적으로 활성화된 `ImmutableEphemeralVolumes` [기능 -게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)에 +이 기능은 v1.19부터 기본적으로 활성화된 `ImmutableEphemeralVolumes` +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)에 의해 제어된다. `immutable` 필드를 `true` 로 설정하여 변경할 수 없는 시크릿을 생성할 수 있다. 다음은 예시이다. ```yaml @@ -782,12 +801,6 @@ immutable: true 해당 프로세스에 대한 자세한 설명은 [서비스 어카운트에 ImagePullSecrets 추가하기](/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account)를 참고한다. -### 수동으로 생성된 시크릿의 자동 마운트 - -수동으로 생성된 시크릿(예: GitHub 계정에 접근하기 위한 토큰이 포함된 시크릿)은 -시크릿의 서비스 어카운트를 기반한 파드에 자동으로 연결될 수 있다. -해당 프로세스에 대한 자세한 설명은 [파드프리셋(PodPreset)을 사용하여 파드에 정보 주입하기](/docs/tasks/inject-data-application/podpreset/)를 참고한다. - ## 상세 내용 ### 제약 사항 @@ -852,6 +865,7 @@ LASTSEEN FIRSTSEEN COUNT NAME KIND SUBOBJECT ### 사용 사례: 컨테이너 환경 변수로 사용하기 시크릿 정의를 작성한다. + ```yaml apiVersion: v1 kind: Secret @@ -864,6 +878,7 @@ data: ``` 시크릿을 생성한다. + ```shell kubectl apply -f mysecret.yaml ``` @@ -1160,14 +1175,12 @@ HTTP 요청을 처리하고, 복잡한 비즈니스 로직을 수행한 다음, 시크릿 API에 접근해야 하는 애플리케이션은 필요한 시크릿에 대한 `get` 요청을 수행해야 한다. 이를 통해 관리자는 앱에 필요한 -[개별 인스턴스에 대한 접근을 허용 목록에 추가]( -/docs/reference/access-authn-authz/rbac/#referring-to-resources)하면서 모든 시크릿에 대한 접근을 +[개별 인스턴스에 대한 접근을 허용 목록에 추가](/docs/reference/access-authn-authz/rbac/#referring-to-resources)하면서 모든 시크릿에 대한 접근을 제한할 수 있다. `get` 반복을 통한 성능 향상을 위해, 클라이언트는 시크릿을 참조한 다음 리소스를 감시(`watch`)하고, 참조가 변경되면 시크릿을 다시 요청하는 리소스를 -설계할 수 있다. 덧붙여, 클라이언트에게 개별 리소스를 감시(`watch`)하도록 하는 ["대량 감시" API]( -https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/bulk_watch.md)도 +설계할 수 있다. 덧붙여, 클라이언트에게 개별 리소스를 감시(`watch`)하도록 하는 ["대량 감시" API](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/bulk_watch.md)도 제안되었으며, 쿠버네티스의 후속 릴리스에서 사용할 수 있을 것이다. diff --git a/content/ko/docs/concepts/containers/container-environment.md b/content/ko/docs/concepts/containers/container-environment.md index c6cb09965aca6..58c106fdceb4e 100644 --- a/content/ko/docs/concepts/containers/container-environment.md +++ b/content/ko/docs/concepts/containers/container-environment.md @@ -1,4 +1,7 @@ --- + + + title: 컨테이너 환경 변수 content_type: concept weight: 20 @@ -24,11 +27,11 @@ weight: 20 ### 컨테이너 정보 컨테이너의 *호스트네임* 은 컨테이너가 동작 중인 파드의 이름과 같다. -그것은 `hostname` 커맨드 또는 libc의 -[`gethostname`](https://man7.org/linux/man-pages/man2/gethostname.2.html) +그것은 `hostname` 커맨드 또는 libc의 +[`gethostname`](https://man7.org/linux/man-pages/man2/gethostname.2.html) 함수 호출을 통해서 구할 수 있다. -파드 이름과 네임스페이스는 +파드 이름과 네임스페이스는 [다운워드(Downward) API](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/)를 통해 환경 변수로 구할 수 있다. 
Docker 이미지에 정적으로 명시된 환경 변수와 마찬가지로, @@ -36,11 +39,12 @@ Docker 이미지에 정적으로 명시된 환경 변수와 마찬가지로, ### 클러스터 정보 -컨테이너가 생성될 때 실행 중이던 모든 서비스의 목록은 환경 변수로 해당 컨테이너에서 사용할 수 +컨테이너가 생성될 때 실행 중이던 모든 서비스의 목록은 환경 변수로 해당 컨테이너에서 사용할 수 있다. +이 목록은 새로운 컨테이너의 파드 및 쿠버네티스 컨트롤 플레인 서비스와 동일한 네임스페이스 내에 있는 서비스로 한정된다. 이러한 환경 변수는 Docker 링크 구문과 일치한다. -*bar* 라는 이름의 컨테이너에 매핑되는 *foo* 라는 이름의 서비스에 대해서는, +*bar* 라는 이름의 컨테이너에 매핑되는 *foo* 라는 이름의 서비스에 대해서는, 다음의 형태로 변수가 정의된다. ```shell @@ -58,5 +62,3 @@ FOO_SERVICE_PORT=<서비스가 동작 중인 포트> * [컨테이너 라이프사이클 훅(hooks)](/ko/docs/concepts/containers/container-lifecycle-hooks/)에 대해 더 배워 보기. * [컨테이너 라이프사이클 이벤트에 핸들러 부착](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/) 실제 경험 얻기. - - diff --git a/content/ko/docs/concepts/containers/container-lifecycle-hooks.md b/content/ko/docs/concepts/containers/container-lifecycle-hooks.md index 662ac71522d05..f2ef1f10a9bf7 100644 --- a/content/ko/docs/concepts/containers/container-lifecycle-hooks.md +++ b/content/ko/docs/concepts/containers/container-lifecycle-hooks.md @@ -1,4 +1,7 @@ --- + + + title: 컨테이너 라이프사이클 훅(Hook) content_type: concept weight: 30 @@ -33,10 +36,13 @@ Angular와 같이, 컴포넌트 라이프사이클 훅을 가진 많은 프로 `PreStop` -이 훅은 API 요청이나 활성 프로브(liveness probe) 실패, 선점, 자원 경합 등의 관리 이벤트로 인해 컨테이너가 종료되기 직전에 호출된다. 컨테이너가 이미 terminated 또는 completed 상태인 경우에는 preStop 훅 요청이 실패한다. -그것은 동기적인 동작을 의미하는, 차단(blocking)을 수행하고 있으므로, -컨테이너를 중지하기 위한 신호가 전송되기 전에 완료되어야 한다. -파라미터는 핸들러에 전달되지 않는다. +이 훅은 API 요청이나 활성 프로브(liveness probe) 실패, 선점, 자원 경합 +등의 관리 이벤트로 인해 컨테이너가 종료되기 직전에 호출된다. 컨테이너가 이미 +terminated 또는 completed 상태인 경우에는 `PreStop` 훅 요청이 실패하며, +훅은 컨테이너를 중지하기 위한 TERM 신호가 보내지기 이전에 완료되어야 한다. 파드의 그레이스 종료 +기간(termination grace period)의 초읽기는 `PreStop` 훅이 실행되기 전에 시작되어, +핸들러의 결과에 상관없이 컨테이너가 파드의 그레이스 종료 기간 내에 결국 종료되도록 한다. +어떠한 파라미터도 핸들러에게 전달되지 않는다. 종료 동작에 더 자세한 대한 설명은 [파드의 종료](/ko/docs/concepts/workloads/pods/pod-lifecycle/#파드의-종료)에서 찾을 수 있다. @@ -44,17 +50,18 @@ Angular와 같이, 컴포넌트 라이프사이클 훅을 가진 많은 프로 ### 훅 핸들러 구현 컨테이너는 훅의 핸들러를 구현하고 등록함으로써 해당 훅에 접근할 수 있다. -구현될 수 있는 컨테이너의 훅 핸들러에는 두 가지 유형이 있다. +구현될 수 있는 컨테이너의 훅 핸들러에는 세 가지 유형이 있다. * Exec - 컨테이너의 cgroups와 네임스페이스 안에서, `pre-stop.sh`와 같은, 특정 커맨드를 실행. 커맨드에 의해 소비된 리소스는 해당 컨테이너에 대해 계산된다. +* TCP - 컨테이너의 특정 포트에 대한 TCP 연결을 연다. * HTTP - 컨테이너의 특정 엔드포인트에 대해서 HTTP 요청을 실행. ### 훅 핸들러 실행 컨테이너 라이프사이클 관리 훅이 호출되면, 쿠버네티스 관리 시스템은 훅 동작에 따라 핸들러를 실행하고, -`exec` 와 `tcpSocket` 은 컨테이너에서 실행되고, `httpGet` 은 kubelet 프로세스에 의해 실행된다. +`httpGet` 와 `tcpSocket` 은 kubelet 프로세스에 의해 실행되고, `exec` 은 컨테이너에서 실행된다. 훅 핸들러 호출은 해당 컨테이너를 포함하고 있는 파드의 컨텍스트와 동기적으로 동작한다. 이것은 `PostStart` 훅에 대해서, @@ -62,17 +69,13 @@ Angular와 같이, 컴포넌트 라이프사이클 훅을 가진 많은 프로 그러나, 만약 해당 훅이 너무 오래 동작하거나 어딘가에 걸려 있다면, 컨테이너는 `running` 상태에 이르지 못한다. -`PreStop` 훅은 컨테이너 중지 신호에서 비동기적으로 -실행되지 않는다. 훅은 신호를 보내기 전에 실행을 -완료해야 한다. -실행 중에 `PreStop` 훅이 중단되면, +`PreStop` 훅은 컨테이너 중지 신호에서 비동기적으로 실행되지 않는다. 훅은 +TERM 신호를 보내기 전에 실행을 완료해야 한다. 실행 중에 `PreStop` 훅이 중단되면, 파드의 단계는 `Terminating` 이며 `terminationGracePeriodSeconds` 가 -만료된 후 파드가 종료될 때까지 남아 있다. -이 유예 기간은 `PreStop` 훅이 실행되고 컨테이너가 -정상적으로 중지되는 데 걸리는 총 시간에 적용된다. -예를 들어, `terminationGracePeriodSeconds` 가 60이고, 훅이 -완료되는 데 55초가 걸리고, 컨테이너가 신호를 수신한 후 -정상적으로 중지하는 데 10초가 걸리면, `terminationGracePeriodSeconds` 이후 +만료된 후 파드가 종료될 때까지 남아 있다. 이 유예 기간은 `PreStop` 훅이 +실행되고 컨테이너가 정상적으로 중지되는 데 걸리는 총 시간에 적용된다. 예를 들어, +`terminationGracePeriodSeconds` 가 60이고, 훅이 완료되는 데 55초가 걸리고, +컨테이너가 신호를 수신한 후 정상적으로 중지하는 데 10초가 걸리면, `terminationGracePeriodSeconds` 이후 컨테이너가 정상적으로 중지되기 전에 종료된다. 이 두 가지 일이 발생하는 데 걸리는 총 시간(55+10)보다 적다. 
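To make the hook semantics above concrete, here is a minimal sketch of a Pod that declares both handlers as `exec` actions (the commands and the 60-second grace period are illustrative assumptions, not values from this page). With these settings, the `PreStop` handler and the container's own shutdown must together finish within `terminationGracePeriodSeconds`, as described above:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-demo   # illustrative name
spec:
  terminationGracePeriodSeconds: 60
  containers:
  - name: app
    image: nginx
    lifecycle:
      # Runs immediately after the container is created; if it fails,
      # the container is killed and handled by its restart policy.
      postStart:
        exec:
          command: ["/bin/sh", "-c", "echo started > /tmp/started"]
      # Runs before the TERM signal is sent to the container; the grace
      # period countdown is already running while this executes.
      preStop:
        exec:
          command: ["/bin/sh", "-c", "nginx -s quit; sleep 5"]
```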
diff --git a/content/ko/docs/concepts/containers/images.md b/content/ko/docs/concepts/containers/images.md index 2f1ae4a27cba8..fe7aca59aa5fe 100644 --- a/content/ko/docs/concepts/containers/images.md +++ b/content/ko/docs/concepts/containers/images.md @@ -49,16 +49,32 @@ weight: 10 ## 이미지 업데이트 -기본 풀(pull) 정책은 `IfNotPresent`이며, 이것은 -{{< glossary_tooltip text="kubelet" term_id="kubelet" >}}이 이미 -존재하는 이미지에 대한 풀을 생략하게 한다. 만약 항상 풀을 강제하고 싶다면, -다음 중 하나를 수행하면 된다. +{{< glossary_tooltip text="디플로이먼트" term_id="deployment" >}}, +{{< glossary_tooltip text="스테이트풀셋" term_id="statefulset" >}}, 파드 또는 파드 +템플릿은 포함하는 다른 오브젝트를 처음 만들 때 특별히 명시하지 않은 경우 +기본적으로 해당 파드에 있는 모든 컨테이너의 풀(pull) +정책은 `IfNotPresent`로 설정된다. 이 정책은 +{{< glossary_tooltip text="kubelet" term_id="kubelet" >}}이 이미 존재하는 +이미지에 대한 풀을 생략하게 한다. + +만약 항상 풀을 강제하고 싶다면, 다음 중 하나를 수행하면 된다. - 컨테이너의 `imagePullPolicy`를 `Always`로 설정. -- `imagePullPolicy`를 생략하고 `:latest`를 사용할 이미지의 태그로 사용. +- `imagePullPolicy`를 생략하고 `:latest`를 사용할 이미지의 태그로 사용, + 쿠버네티스는 정책을 `Always`로 설정한다. - `imagePullPolicy`와 사용할 이미지의 태그를 생략. - [AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) 어드미션 컨트롤러를 활성화. +{{< note >}} +컨테이너의 `imagePullPolicy` 값은 오브젝트가 처음 _created_ 일 때 항상 +설정되고 나중에 이미지 태그가 변경되더라도 업데이트되지 않는다. + +예를 들어, 태그가 `:latest`가 아닌 이미지로 디플로이먼트를 생성하고, +나중에 해당 디플로이먼트의 이미지를 `:latest` 태그로 업데이트하면 +`imagePullPolicy` 필드가 `Always` 로 변경되지 않는다. 오브젝트를 +처음 생성 한 후 모든 오브젝트의 풀 정책을 수동으로 변경해야 한다. +{{< /note >}} + `imagePullPolicy` 가 특정값 없이 정의되면, `Always` 로 설정된다. ## 이미지 인덱스가 있는 다중 아키텍처 이미지 diff --git a/content/ko/docs/concepts/containers/runtime-class.md b/content/ko/docs/concepts/containers/runtime-class.md index b31cabe88862f..3d7c89b65c370 100644 --- a/content/ko/docs/concepts/containers/runtime-class.md +++ b/content/ko/docs/concepts/containers/runtime-class.md @@ -1,7 +1,4 @@ --- - - - title: 런타임클래스(RuntimeClass) content_type: concept weight: 20 @@ -35,10 +32,6 @@ weight: 20 ## 셋업 -런타임클래스 기능 게이트가 활성화(기본값)된 것을 확인한다. -기능 게이트 활성화에 대한 설명은 [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 -참고한다. `RuntimeClass` 기능 게이트는 API 서버 _및_ kubelets에서 활성화되어야 한다. - 1. CRI 구현(implementation)을 노드에 설정(런타임에 따라서). 2. 상응하는 런타임클래스 리소스 생성. @@ -144,11 +137,9 @@ https://github.com/containerd/cri/blob/master/docs/config.md {{< feature-state for_k8s_version="v1.16" state="beta" >}} -쿠버네티스 v1.16 부터, 런타임 클래스는 `scheduling` 필드를 통해 이종의 클러스터 -지원을 포함한다. 이 필드를 사용하면, 이 런타임 클래스를 갖는 파드가 이를 지원하는 -노드로 스케줄된다는 것을 보장할 수 있다. 이 스케줄링 기능을 사용하려면, -[런타임 클래스 어드미션(admission) 컨트롤러](/docs/reference/access-authn-authz/admission-controllers/#runtimeclass)를 -활성화(1.16 부터 기본값)해야 한다. +RuntimeClass에 `scheduling` 필드를 지정하면, 이 RuntimeClass로 실행되는 파드가 +이를 지원하는 노드로 예약되도록 제약 조건을 설정할 수 있다. +`scheduling`이 설정되지 않은 경우 이 RuntimeClass는 모든 노드에서 지원되는 것으로 간주된다. 파드가 지정된 런타임클래스를 지원하는 노드에 안착한다는 것을 보장하려면, 해당 노드들은 `runtimeClass.scheduling.nodeSelector` 필드에서 선택되는 공통 레이블을 가져야한다. diff --git a/content/ko/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md b/content/ko/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md index 7e8a10e8b19ea..a2326c71ddd7e 100644 --- a/content/ko/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md +++ b/content/ko/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md @@ -1,5 +1,9 @@ --- title: 애그리게이션 레이어(aggregation layer)로 쿠버네티스 API 확장하기 + + + + content_type: concept weight: 10 --- @@ -25,8 +29,6 @@ Extension-apiserver는 kube-apiserver로 오가는 연결의 레이턴시가 낮 kube-apiserver로 부터의 디스커버리 요청은 왕복 레이턴시가 5초 이내여야 한다. 
extention API server가 레이턴시 요구 사항을 달성할 수 없는 경우 이를 충족할 수 있도록 변경하는 것을 고려한다. -`EnableAggregatedDiscoveryTimeout=false` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 설정해서 타임아웃 -제한을 비활성화 할 수 있다. 이 사용 중단(deprecated)된 기능 게이트는 향후 릴리스에서 제거될 예정이다. ## {{% heading "whatsnext" %}} diff --git a/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index 68e529549b975..b543addee6f3f 100644 --- a/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -1,5 +1,8 @@ --- title: 커스텀 리소스 + + + content_type: concept weight: 10 --- @@ -41,7 +44,7 @@ _선언_ 하거나 지정할 수 있게 해주며 쿠버네티스 오브젝트 클러스터 라이프사이클과 관계없이 실행 중인 클러스터에 커스텀 컨트롤러를 배포하고 업데이트할 수 있다. 커스텀 컨트롤러는 모든 종류의 리소스와 함께 작동할 수 있지만 커스텀 리소스와 결합할 때 특히 효과적이다. -[오퍼레이터 패턴](https://coreos.com/blog/introducing-operators.html)은 사용자 정의 +[오퍼레이터 패턴](/ko/docs/concepts/extend-kubernetes/operator/)은 사용자 정의 리소스와 커스텀 컨트롤러를 결합한다. 커스텀 컨트롤러를 사용하여 특정 애플리케이션에 대한 도메인 지식을 쿠버네티스 API의 익스텐션으로 인코딩할 수 있다. @@ -117,7 +120,7 @@ _선언_ 하거나 지정할 수 있게 해주며 쿠버네티스 오브젝트 쿠버네티스는 다양한 사용자의 요구를 충족시키기 위해 이 두 가지 옵션을 제공하므로 사용의 용이성이나 유연성이 저하되지 않는다. -애그리게이트 API는 기본 API 서버 뒤에 있는 하위 API 서버이며 프록시 역할을 한다. 이 배치를 [API 애그리게이션](/ko/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)(AA)이라고 한다. 사용자에게는 쿠버네티스 API가 확장된 것과 같다. +애그리게이트 API는 기본 API 서버 뒤에 있는 하위 API 서버이며 프록시 역할을 한다. 이 배치를 [API 애그리게이션](/ko/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)(AA)이라고 한다. 사용자에게는 쿠버네티스 API가 확장된 것으로 나타난다. CRD를 사용하면 다른 API 서버를 추가하지 않고도 새로운 타입의 리소스를 생성할 수 있다. CRD를 사용하기 위해 API 애그리게이션을 이해할 필요는 없다. diff --git a/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md index 5f58604cd71c4..3596c9f72e411 100644 --- a/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md +++ b/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md @@ -192,10 +192,69 @@ kubelet은 gRPC 서비스를 제공하여 사용 중인 장치를 검색하고, // PodResourcesLister는 kubelet에서 제공하는 서비스로, 노드의 포드 및 컨테이너가 // 사용한 노드 리소스에 대한 정보를 제공한다. service PodResourcesLister { - rpc List(ListPodResourcesRequest) returns (ListPodResourcesResponse) {} + rpc GetAllocatableResources(AllocatableResourcesRequest) returns (AllocatableResourcesResponse) {} } ``` +`List` 엔드포인트는 독점적으로 할당된 CPU의 ID, 장치 플러그인에 의해 보고된 장치 ID, +이러한 장치가 할당된 NUMA 노드의 ID와 같은 세부 정보와 함께 +실행 중인 파드의 리소스에 대한 정보를 제공한다. + +```gRPC +// ListPodResourcesResponse는 List 함수가 반환하는 응답이다 +message ListPodResourcesResponse { + repeated PodResources pod_resources = 1; +} + +// PodResources에는 파드에 할당된 노드 리소스에 대한 정보가 포함된다 +message PodResources { + string name = 1; + string namespace = 2; + repeated ContainerResources containers = 3; +} + +// ContainerResources는 컨테이너에 할당된 리소스에 대한 정보를 포함한다 +message ContainerResources { + string name = 1; + repeated ContainerDevices devices = 2; + repeated int64 cpu_ids = 3; +} + +// 토폴로지는 리소스의 하드웨어 토폴로지를 설명한다 +message TopologyInfo { + repeated NUMANode nodes = 1; +} + +// NUMA 노드의 NUMA 표현 +message NUMANode { + int64 ID = 1; +} + +// ContainerDevices는 컨테이너에 할당된 장치에 대한 정보를 포함한다 +message ContainerDevices { + string resource_name = 1; + repeated string device_ids = 2; + TopologyInfo topology = 3; +} +``` + +GetAllocatableResources는 워커 노드에서 처음 사용할 수 있는 리소스에 대한 정보를 제공한다. +kubelet이 APIServer로 내보내는 것보다 더 많은 정보를 제공한다. 
+ +```gRPC +// AllocatableResourcesResponses에는 kubelet이 알고 있는 모든 장치에 대한 정보가 포함된다. +message AllocatableResourcesResponse { + repeated ContainerDevices devices = 1; + repeated int64 cpu_ids = 2; +} + +``` + +`ContainerDevices` 는 장치가 어떤 NUMA 셀과 연관되는지를 선언하는 토폴로지 정보를 노출한다. +NUMA 셀은 불분명한(opaque) 정수 ID를 사용하여 식별되며, 이 값은 +[kubelet에 등록할 때](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#device-plugin-integration-with-the-topology-manager) 장치 플러그인이 보고하는 것과 일치한다. + + gRPC 서비스는 `/var/lib/kubelet/pod-resources/kubelet.sock` 의 유닉스 소켓을 통해 제공된다. 장치 플러그인 리소스에 대한 모니터링 에이전트는 데몬 또는 데몬셋으로 배포할 수 있다. 표준 디렉터리 `/var/lib/kubelet/pod-resources` 에는 특권을 가진 접근이 필요하므로, 모니터링 @@ -204,7 +263,7 @@ gRPC 서비스는 `/var/lib/kubelet/pod-resources/kubelet.sock` 의 유닉스 `/var/lib/kubelet/pod-resources` 를 {{< glossary_tooltip text="볼륨" term_id="volume" >}}으로 마운트해야 한다. -"PodResources 서비스"를 지원하려면 `KubeletPodResources` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화해야 한다. +`PodResourcesLister service` 를 지원하려면 `KubeletPodResources` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화해야 한다. ## 토폴로지 관리자와 장치 플러그인 통합 diff --git a/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md b/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md index 7542ac0add29f..359284b357854 100644 --- a/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md +++ b/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md @@ -1,4 +1,8 @@ --- + + + + title: 네트워크 플러그인 content_type: concept weight: 10 @@ -20,7 +24,7 @@ weight: 10 kubelet에는 단일 기본 네트워크 플러그인과 전체 클러스터에 공통된 기본 네트워크가 있다. 플러그인은 시작할 때 플러그인을 검색하고, 찾은 것을 기억하며, 파드 라이프사이클에서 적절한 시간에 선택한 플러그인을 실행한다(CRI는 자체 CNI 플러그인을 관리하므로 도커에만 해당됨). 플러그인 사용 시 명심해야 할 두 가지 Kubelet 커맨드라인 파라미터가 있다. * `cni-bin-dir`: Kubelet은 시작할 때 플러그인에 대해 이 디렉터리를 검사한다. -* `network-plugin`: `cni-bin-dir` 에서 사용할 네트워크 플러그인. 플러그인 디렉터리에서 검색한 플러그인이 보고된 이름과 일치해야 한다. CNI 플러그인의 경우, 이는 단순히 "cni"이다. +* `network-plugin`: `cni-bin-dir` 에서 사용할 네트워크 플러그인. 플러그인 디렉터리에서 검색한 플러그인이 보고된 이름과 일치해야 한다. CNI 플러그인의 경우, 이는 "cni"이다. ## 네트워크 플러그인 요구 사항 diff --git a/content/ko/docs/concepts/extend-kubernetes/extend-cluster.md b/content/ko/docs/concepts/extend-kubernetes/extend-cluster.md index d1eecd6fdce45..ee9763a769a42 100644 --- a/content/ko/docs/concepts/extend-kubernetes/extend-cluster.md +++ b/content/ko/docs/concepts/extend-kubernetes/extend-cluster.md @@ -69,7 +69,7 @@ weight: 10 웹훅 모델에서 쿠버네티스는 원격 서비스에 네트워크 요청을 한다. *바이너리 플러그인* 모델에서 쿠버네티스는 바이너리(프로그램)를 실행한다. 바이너리 플러그인은 kubelet(예: -[Flex Volume 플러그인](/ko/docs/concepts/storage/volumes/#flexvolume)과 +[Flex 볼륨 플러그인](/ko/docs/concepts/storage/volumes/#flexvolume)과 [네트워크 플러그인](/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/))과 kubectl에서 사용한다. @@ -157,7 +157,7 @@ API를 추가해도 기존 API(예: 파드)의 동작에 직접 영향을 미치 ### 스토리지 플러그인 -[Flex Volumes](/ko/docs/concepts/storage/volumes/#flexvolume)을 사용하면 +[Flex 볼륨](/ko/docs/concepts/storage/volumes/#flexvolume)을 사용하면 Kubelet이 바이너리 플러그인을 호출하여 볼륨을 마운트하도록 함으로써 빌트인 지원 없이 볼륨 유형을 마운트 할 수 있다. 
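As an illustration of that binary-plugin model, a Pod references a FlexVolume driver by name in its volume definition. Everything below is a hypothetical sketch: the `example.com/sample-driver` name and the `options` keys are invented, and it assumes the matching driver binary is already installed on every node.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: flexvolume-demo   # illustrative name
spec:
  containers:
  - name: app
    image: busybox
    command: ["sleep", "3600"]
    volumeMounts:
    - name: data
      mountPath: /data
  volumes:
  - name: data
    flexVolume:
      # The kubelet execs the binary registered under this vendor/driver
      # name from its volume plugin directory to mount the volume.
      driver: "example.com/sample-driver"
      fsType: "ext4"
      options:
        volumeID: "vol-0123"   # driver-specific option, illustrative
```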
diff --git a/content/ko/docs/concepts/extend-kubernetes/operator.md b/content/ko/docs/concepts/extend-kubernetes/operator.md index f6c80d80679ce..21b3183b99fc9 100644 --- a/content/ko/docs/concepts/extend-kubernetes/operator.md +++ b/content/ko/docs/concepts/extend-kubernetes/operator.md @@ -102,27 +102,28 @@ kubectl edit SampleDB/example-database # 일부 설정을 수동으로 변경하 ## 자신만의 오퍼레이터 작성 {#writing-operator} -에코시스템에 원하는 동작을 구현하는 오퍼레이터가 없다면 직접 코딩할 수 있다. -[다음 내용](#다음-내용)에서는 클라우드 네이티브 오퍼레이터를 작성하는 데 -사용할 수 있는 라이브러리 및 도구에 대한 몇 가지 링크를 -찾을 수 있다. +에코시스템에 원하는 동작을 구현하는 오퍼레이터가 없다면 +직접 코딩할 수 있다. 또한 [쿠버네티스 API의 클라이언트](/ko/docs/reference/using-api/client-libraries/) 역할을 할 수 있는 모든 언어 / 런타임을 사용하여 오퍼레이터(즉, 컨트롤러)를 구현한다. +다음은 클라우드 네이티브 오퍼레이터를 작성하는 데 사용할 수 있는 +몇 가지 라이브러리와 도구들이다. +{{% thirdparty-content %}} + +* [kubebuilder](https://book.kubebuilder.io/) 사용하기 +* [KUDO](https://kudo.dev/) (Kubernetes Universal Declarative Operator) +* 웹훅(WebHook)과 함께 [Metacontroller](https://metacontroller.app/)를 + 사용하여 직접 구현하기 +* [오퍼레이터 프레임워크](https://operatorframework.io) ## {{% heading "whatsnext" %}} * [사용자 정의 리소스](/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources/)에 대해 더 알아보기 * [OperatorHub.io](https://operatorhub.io/)에서 유스케이스에 맞는 이미 만들어진 오퍼레이터 찾기 -* 기존 도구를 사용하여 자신만의 오퍼레이터를 작성해보자. 다음은 예시이다. - * [KUDO](https://kudo.dev/) (Kubernetes Universal Declarative Operator) 사용하기 - * [kubebuilder](https://book.kubebuilder.io/) 사용하기 - * 웹훅(WebHook)과 함께 [Metacontroller](https://metacontroller.app/)를 - 사용하여 직접 구현하기 - * [오퍼레이터 프레임워크](https://operatorframework.io) 사용하기 * 다른 사람들이 사용할 수 있도록 자신의 오퍼레이터를 [게시](https://operatorhub.io/)하기 -* 오퍼레이터 패턴을 소개한 [CoreOS 원본 기사](https://coreos.com/blog/introducing-operators.html) 읽기 +* 오퍼레이터 패턴을 소개한 [CoreOS 원본 글](https://web.archive.org/web/20170129131616/https://coreos.com/blog/introducing-operators.html) 읽기 (이 링크는 원본 글에 대한 보관 버전임) * 오퍼레이터 구축을 위한 모범 사례에 대한 구글 클라우드(Google Cloud)의 [기사](https://cloud.google.com/blog/products/containers-kubernetes/best-practices-for-building-kubernetes-operators-and-stateful-apps) 읽기 diff --git a/content/ko/docs/concepts/extend-kubernetes/service-catalog.md b/content/ko/docs/concepts/extend-kubernetes/service-catalog.md index 3ac6a2df7d1bc..cc387c2f0223e 100644 --- a/content/ko/docs/concepts/extend-kubernetes/service-catalog.md +++ b/content/ko/docs/concepts/extend-kubernetes/service-catalog.md @@ -1,5 +1,7 @@ --- title: 서비스 카탈로그 + + content_type: concept weight: 40 --- @@ -24,7 +26,7 @@ weight: 40 클러스터 운영자는 서비스 카탈로그를 설정하고 이를 이용하여 클라우드 공급자의 서비스 브로커와 통신하여 메시지 큐 서비스의 인스턴스를 프로비저닝하고 쿠버네티스 클러스터 내의 애플리케이션에서 사용할 수 있게 한다. 따라서 애플리케이션 개발자는 메시지 큐의 세부 구현 또는 관리에 신경 쓸 필요가 없다. -애플리케이션은 그것을 서비스로 간단하게 사용할 수 있다. +애플리케이션은 메시지 큐에 서비스로 접속할 수 있다. ## 아키텍처 @@ -229,8 +231,3 @@ spec: * [샘플 서비스 브로커](https://github.com/openservicebrokerapi/servicebroker/blob/master/gettingStarted.md#sample-service-brokers) 살펴보기 * [kubernetes-sigs/service-catalog](https://github.com/kubernetes-sigs/service-catalog) 프로젝트 탐색 * [svc-cat.io](https://svc-cat.io/docs/) 살펴보기 - - - - - diff --git a/content/ko/docs/concepts/overview/components.md b/content/ko/docs/concepts/overview/components.md index d4b77e9319080..b4c6079213217 100644 --- a/content/ko/docs/concepts/overview/components.md +++ b/content/ko/docs/concepts/overview/components.md @@ -30,8 +30,9 @@ card: 컨트롤 플레인 컴포넌트는 클러스터 내 어떠한 머신에서든지 동작할 수 있다. 그러나 간결성을 위하여, 구성 스크립트는 보통 동일 머신 상에 모든 컨트롤 플레인 컴포넌트를 구동시키고, -사용자 컨테이너는 해당 머신 상에 동작시키지 않는다. 다중-마스터-VM 설치 예제를 보려면 -[고가용성 클러스터 구성하기](/docs/admin/high-availability/)를 확인해본다. +사용자 컨테이너는 해당 머신 상에 동작시키지 않는다. 
여러 VM에서 +실행되는 컨트롤 플레인 설정의 예제를 보려면 +[kubeadm을 사용하여 고가용성 클러스터 만들기](/docs/setup/production-environment/tools/kubeadm/high-availability/)를 확인해본다. ### kube-apiserver diff --git a/content/ko/docs/concepts/overview/what-is-kubernetes.md b/content/ko/docs/concepts/overview/what-is-kubernetes.md index 449cae4393b73..344c266d1e175 100644 --- a/content/ko/docs/concepts/overview/what-is-kubernetes.md +++ b/content/ko/docs/concepts/overview/what-is-kubernetes.md @@ -1,4 +1,7 @@ --- + + + title: 쿠버네티스란 무엇인가? description: > 쿠버네티스는 컨테이너화된 워크로드와 서비스를 관리하기 위한 이식할 수 있고, 확장 가능한 오픈소스 플랫폼으로, 선언적 구성과 자동화를 모두 지원한다. 쿠버네티스는 크고 빠르게 성장하는 생태계를 가지고 있다. 쿠버네티스 서비스, 지원 그리고 도구들은 광범위하게 제공된다. @@ -40,7 +43,7 @@ sitemap: 컨테이너는 다음과 같은 추가적인 혜택을 제공하기 때문에 인기가 있다. * 기민한 애플리케이션 생성과 배포: VM 이미지를 사용하는 것에 비해 컨테이너 이미지 생성이 보다 쉽고 효율적임. -* 지속적인 개발, 통합 및 배포: 안정적이고 주기적으로 컨테이너 이미지를 빌드해서 배포할 수 있고 (이미지의 불변성 덕에) 빠르고 쉽게 롤백할 수 있다. +* 지속적인 개발, 통합 및 배포: 안정적이고 주기적으로 컨테이너 이미지를 빌드해서 배포할 수 있고 (이미지의 불변성 덕에) 빠르고 효율적으로 롤백할 수 있다. * 개발과 운영의 관심사 분리: 배포 시점이 아닌 빌드/릴리스 시점에 애플리케이션 컨테이너 이미지를 만들기 때문에, 애플리케이션이 인프라스트럭처에서 분리된다. * 가시성은 OS 수준의 정보와 메트릭에 머무르지 않고, 애플리케이션의 헬스와 그 밖의 시그널을 볼 수 있다. * 개발, 테스팅 및 운영 환경에 걸친 일관성: 랩탑에서도 클라우드에서와 동일하게 구동된다. @@ -52,7 +55,7 @@ sitemap: ## 쿠버네티스가 왜 필요하고 무엇을 할 수 있나 {#why-you-need-kubernetes-and-what-can-it-do} -컨테이너는 애플리케이션을 포장하고 실행하는 좋은 방법이다. 프로덕션 환경에서는 애플리케이션을 실행하는 컨테이너를 관리하고 가동 중지 시간이 없는지 확인해야한다. 예를 들어 컨테이너가 다운되면 다른 컨테이너를 다시 시작해야한다. 이 문제를 시스템에 의해 처리한다면 더 쉽지 않을까? +컨테이너는 애플리케이션을 포장하고 실행하는 좋은 방법이다. 프로덕션 환경에서는 애플리케이션을 실행하는 컨테이너를 관리하고 가동 중지 시간이 없는지 확인해야 한다. 예를 들어 컨테이너가 다운되면 다른 컨테이너를 다시 시작해야 한다. 이 문제를 시스템에 의해 처리한다면 더 쉽지 않을까? 그것이 쿠버네티스가 필요한 이유이다! 쿠버네티스는 분산 시스템을 탄력적으로 실행하기 위한 프레임 워크를 제공한다. 애플리케이션의 확장과 장애 조치를 처리하고, 배포 패턴 등을 제공한다. 예를 들어, 쿠버네티스는 시스템의 카나리아 배포를 쉽게 관리 할 수 있다. diff --git a/content/ko/docs/concepts/overview/working-with-objects/annotations.md b/content/ko/docs/concepts/overview/working-with-objects/annotations.md index 944d7340ba1ed..245da33db3389 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/annotations.md +++ b/content/ko/docs/concepts/overview/working-with-objects/annotations.md @@ -37,7 +37,7 @@ weight: 50 자동 생성된 필드, 그리고 오토사이징 또는 오토스케일링 시스템에 의해 설정된 필드와 구분된다. -* 빌드, 릴리스, 또는 타임 스탬프, 릴리즈 ID, git 브랜치, +* 빌드, 릴리스, 또는 타임 스탬프, 릴리스 ID, git 브랜치, PR 번호, 이미지 해시 및 레지스트리 주소와 같은 이미지 정보. * 로깅, 모니터링, 분석 또는 감사 리포지터리에 대한 포인터. diff --git a/content/ko/docs/concepts/overview/working-with-objects/labels.md b/content/ko/docs/concepts/overview/working-with-objects/labels.md index 1e0d86a97b02d..da3cff2a89c97 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/labels.md +++ b/content/ko/docs/concepts/overview/working-with-objects/labels.md @@ -1,4 +1,6 @@ --- + + title: 레이블과 셀렉터 content_type: concept weight: 40 @@ -30,7 +32,7 @@ _레이블_ 은 파드와 같은 오브젝트에 첨부된 키와 값의 쌍이 레이블을 이용하면 사용자가 느슨하게 결합한 방식으로 조직 구조와 시스템 오브젝트를 매핑할 수 있으며, 클라이언트에 매핑 정보를 저장할 필요가 없다. -서비스 배포와 배치 프로세싱 파이프라인은 흔히 다차원의 엔티티들이다(예: 다중 파티션 또는 배포, 다중 릴리즈 트랙, 다중 계층, 계층 속 여러 마이크로 서비스들). 관리에는 크로스-커팅 작업이 필요한 경우가 많은데 이 작업은 사용자보다는 인프라에 의해 결정된 엄격한 계층 표현인 캡슐화를 깨트린다. +서비스 배포와 배치 프로세싱 파이프라인은 흔히 다차원의 엔티티들이다(예: 다중 파티션 또는 배포, 다중 릴리스 트랙, 다중 계층, 계층 속 여러 마이크로 서비스들). 관리에는 크로스-커팅 작업이 필요한 경우가 많은데 이 작업은 사용자보다는 인프라에 의해 결정된 엄격한 계층 표현인 캡슐화를 깨트린다. 레이블 예시: @@ -40,7 +42,7 @@ _레이블_ 은 파드와 같은 오브젝트에 첨부된 키와 값의 쌍이 * `"partition" : "customerA"`, `"partition" : "customerB"` * `"track" : "daily"`, `"track" : "weekly"` -레이블 예시는 일반적으로 사용하는 상황에 해당한다. 당신의 규약에 따라 자유롭게 개발할 수 있다. 오브젝트에 붙여진 레이블 키는 고유해야 한다는 것을 기억해야 한다. +이 예시는 일반적으로 사용하는 레이블이며, 사용자는 자신만의 규칙(convention)에 따라 자유롭게 개발할 수 있다. 
오브젝트에 붙여진 레이블 키는 고유해야 한다는 것을 기억해야 한다. ## 구문과 캐릭터 셋 @@ -50,6 +52,11 @@ _레이블_ 은 키와 값의 쌍이다. 유효한 레이블 키에는 슬래시 `kubernetes.io/`와 `k8s.io/` 접두사는 쿠버네티스의 핵심 컴포넌트로 예약되어있다. +유효한 레이블 값은 다음과 같다. +* 63 자 이하 여야 하고(공백이면 안 됨), +* 시작과 끝은 알파벳과 숫자(`[a-z0-9A-Z]`)이며, +* 알파벳과 숫자, 대시(`-`), 밑줄(`_`), 점(`.`)를 중간에 포함할 수 있다. + 유효한 레이블 값은 63자 미만 또는 공백이며 시작과 끝은 알파벳과 숫자(`[a-z0-9A-Z]`)이며, 대시(`-`), 밑줄(`_`), 점(`.`)과 함께 사용할 수 있다. 다음의 예시는 파드에 `environment: production` 과 `app: nginx` 2개의 레이블이 있는 구성 파일이다. @@ -90,14 +97,13 @@ API는 현재 _일치성 기준_ 과 _집합성 기준_ 이라는 두 종류의 {{< /note >}} {{< caution >}} -일치성 기준과 집합성 기준 조건 모두에 대해 논리적인 _OR_ (`||`) 연산자가 없다. -필터 구문이 적절히 구성되어있는지 확인해야 한다. +일치성 기준과 집합성 기준 조건 모두에 대해 논리적인 _OR_ (`||`) 연산자가 없다. 필터 구문이 적절히 구성되어있는지 확인해야 한다. {{< /caution >}} ### _일치성 기준_ 요건 _일치성 기준_ 또는 _불일치 기준_ 의 요구사항으로 레이블의 키와 값의 필터링을 허용한다. 일치하는 오브젝트는 추가 레이블을 가질 수 있지만, 레이블의 명시된 제약 조건을 모두 만족해야 한다. -`=`,`==`,`!=` 이 3가지 연산자만 허용한다. 처음 두 개의 연산자의 _일치성_(그리고 단순히 동의어일 뿐임), 나머지는 _불일치_ 를 의미한다. 예를 들면, +`=`,`==`,`!=` 이 세 가지 연산자만 허용한다. 처음 두 개의 연산자의 _일치성_(그리고 동의어), 나머지는 _불일치_ 를 의미한다. 예를 들면, ``` environment = production @@ -108,8 +114,9 @@ tier != frontend 후자는 `tier`를 키로 가지고, 값을 `frontend`를 가지는 리소스를 제외한 모든 리소스를 선택하고, `tier`를 키로 가지며, 값을 공백으로 가지는 모든 리소스를 선택한다. `environment=production,tier!=frontend` 처럼 쉼표를 통해 한 문장으로 `frontend`를 제외한 `production`을 필터링할 수 있다. -균등-기반 레이블의 요건에 대한 하나의 이용 시나리오는 파드가 노드를 선택하는 기준을 지정하는 것이다. -예를 들어, 아래 샘플 파드는 "`accelerator=nvidia-tesla-p100`" 레이블을 가진 노드를 선택한다. +일치성 기준 레이블 요건에 대한 하나의 이용 시나리오는 파드가 노드를 선택하는 기준을 지정하는 것이다. +예를 들어, 아래 샘플 파드는 "`accelerator=nvidia-tesla-p100`" +레이블을 가진 노드를 선택한다. ```yaml apiVersion: v1 @@ -148,16 +155,17 @@ _집합성 기준_ 레이블 셀렉터는 일반적으로 `environment=productio _집합성 기준_ 요건은 _일치성 기준_ 요건과 조합해서 사용할 수 있다. 예를 들어 `partition in (customerA, customerB),environment!=qa` + ## API ### LIST와 WATCH 필터링 -LIST와 WATCH 작업은 쿼리 파라미터를 사용해서 반환되는 오브젝트 집합을 필터링하기 위해 레이블 셀렉터를 지정할 수 있다. 다음의 2가지 요건 모두 허용된다(URL 쿼리 문자열을 그대로 표기함). +LIST와 WATCH 작업은 쿼리 파라미터를 사용해서 반환되는 오브젝트 집합을 필터링하기 위해 레이블 셀렉터를 지정할 수 있다. 다음의 두 가지 요건 모두 허용된다(URL 쿼리 문자열을 그대로 표기함). - * _불일치 기준_ 요건: `?labelSelector=environment%3Dproduction,tier%3Dfrontend` + * _일치성 기준_ 요건: `?labelSelector=environment%3Dproduction,tier%3Dfrontend` * _집합성 기준_ 요건: `?labelSelector=environment+in+%28production%2Cqa%29%2Ctier+in+%28frontend%29` -두 가지 레이블 셀렉터 스타일은 모두 REST 클라이언트를 통해 선택된 리소스를 확인하거나 목록을 볼 수 있다. 예를 들어, `kubectl`로 `apiserver`를 대상으로 _불일치 기준_ 으로 하는 셀렉터를 다음과 같이 이용할 수 있다. +두 가지 레이블 셀렉터 스타일은 모두 REST 클라이언트를 통해 선택된 리소스를 확인하거나 목록을 볼 수 있다. 예를 들어, `kubectl`로 `apiserver`를 대상으로 _일치성 기준_ 으로 하는 셀렉터를 다음과 같이 이용할 수 있다. ```shell kubectl get pods -l environment=production,tier=frontend @@ -192,7 +200,7 @@ kubectl get pods -l 'environment,environment notin (frontend)' `services`에서 지정하는 파드 집합은 레이블 셀렉터로 정의한다. 마찬가지로 `replicationcontrollers`가 관리하는 파드의 오브젝트 그룹도 레이블 셀렉터로 정의한다. -서비스와 레플리케이션 컨트롤러의 레이블 셀렉터는 `json` 또는 `yaml` 파일에 매핑된 _균등-기반_ 요구사항의 셀렉터만 지원한다. +서비스와 레플리케이션 컨트롤러의 레이블 셀렉터는 `json` 또는 `yaml` 파일에 매핑된 _일치성 기준_ 요구사항의 셀렉터만 지원한다. ```json "selector": { @@ -208,7 +216,6 @@ selector: `json` 또는 `yaml` 서식에서 셀렉터는 `component=redis` 또는 `component in (redis)` 모두 같은 것이다. - #### 세트-기반 요건을 지원하는 리소스 [`Job`](/ko/docs/concepts/workloads/controllers/job/), @@ -232,4 +239,3 @@ selector: 레이블을 통해 선택하는 사용 사례 중 하나는 파드를 스케줄 할 수 있는 노드 셋을 제한하는 것이다. 자세한 내용은 [노드 선택](/ko/docs/concepts/scheduling-eviction/assign-pod-node/) 문서를 참조한다. 
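Tying together the selector forms discussed in this section, the sketch below (names and label values are illustrative, not from this page) shows a Deployment whose selector combines `matchLabels` with set-based `matchExpressions`; note that the pod template's labels must satisfy every clause of the selector:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend-nginx   # illustrative name
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
    matchExpressions:
    - {key: tier, operator: In, values: [frontend]}
    - {key: environment, operator: NotIn, values: [qa]}
  template:
    metadata:
      # These labels satisfy both the matchLabels and matchExpressions clauses.
      labels:
        app: nginx
        tier: frontend
        environment: production
    spec:
      containers:
      - name: nginx
        image: nginx
```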
- diff --git a/content/ko/docs/concepts/overview/working-with-objects/names.md b/content/ko/docs/concepts/overview/working-with-objects/names.md index 891ad4d07a21b..78b7addd43c02 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/names.md +++ b/content/ko/docs/concepts/overview/working-with-objects/names.md @@ -21,11 +21,15 @@ weight: 20 {{< glossary_definition term_id="name" length="all" >}} +{{< note >}} +물리적 호스트를 나타내는 노드와 같이 오브젝트가 물리적 엔티티를 나타내는 경우, 노드를 삭제한 후 다시 생성하지 않은 채 동일한 이름으로 호스트를 다시 생성하면, 쿠버네티스는 새 호스트를 불일치로 이어질 수 있는 이전 호스트로 취급한다. +{{< /note >}} + 다음은 리소스에 일반적으로 사용되는 세 가지 유형의 이름 제한 조건이다. ### DNS 서브도메인 이름 -대부분의 리소스 유형에는 [RFC 1123](https://tools.ietf.org/html/rfc1123)에 정의된 대로 +대부분의 리소스 유형에는 [RFC 1123](https://tools.ietf.org/html/rfc1123)에 정의된 대로 DNS 서브도메인 이름으로 사용할 수 있는 이름이 필요하다. 이것은 이름이 다음을 충족해야 한다는 것을 의미한다. @@ -83,4 +87,3 @@ UUID는 ISO/IEC 9834-8 과 ITU-T X.667 로 표준화 되어 있다. * 쿠버네티스의 [레이블](/ko/docs/concepts/overview/working-with-objects/labels/)에 대해 읽기. * [쿠버네티스의 식별자와 이름](https://git.k8s.io/community/contributors/design-proposals/architecture/identifiers.md) 디자인 문서 읽기. - diff --git a/content/ko/docs/concepts/overview/working-with-objects/namespaces.md b/content/ko/docs/concepts/overview/working-with-objects/namespaces.md index 905375bdc5628..ef75f4f0819b2 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/namespaces.md +++ b/content/ko/docs/concepts/overview/working-with-objects/namespaces.md @@ -26,7 +26,7 @@ weight: 30 동일한 소프트웨어의 다른 버전과 같이 약간 다른 리소스를 분리하기 위해 여러 네임스페이스를 사용할 필요는 없다. 동일한 네임스페이스 내에서 리소스를 -구별하기 위해 [레이블](/ko/docs/concepts/overview/working-with-objects/labels/)을 +구별하기 위해 {{< glossary_tooltip text="레이블" term_id="label" >}}을 사용한다. ## 네임스페이스 다루기 @@ -109,6 +109,16 @@ kubectl api-resources --namespaced=true kubectl api-resources --namespaced=false ``` +## 자동 레이블링 + +{{< feature-state state="beta" for_k8s_version="1.21" >}} + +쿠버네티스 컨트롤 플레인은 `NamespaceDefaultLabelName` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)가 +활성화된 경우 모든 네임스페이스에 변경할 수 없는(immutable) {{< glossary_tooltip text="레이블" term_id="label" >}} +`kubernetes.io / metadata.name` 을 설정한다. +레이블 값은 네임스페이스 이름이다. + + ## {{% heading "whatsnext" %}} * [신규 네임스페이스 생성](/docs/tasks/administer-cluster/namespaces/#creating-a-new-namespace)에 대해 더 배우기. diff --git a/content/ko/docs/concepts/overview/working-with-objects/object-management.md b/content/ko/docs/concepts/overview/working-with-objects/object-management.md index 4c1570c458461..575def2256b65 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/object-management.md +++ b/content/ko/docs/concepts/overview/working-with-objects/object-management.md @@ -32,7 +32,7 @@ weight: 15 지정한다. 이것은 클러스터에서 일회성 작업을 개시시키거나 동작시키기 위한 -가장 단순한 방법이다. 이 기법은 활성 오브젝트를 대상으로 직접적인 +추천 방법이다. 이 기법은 활성 오브젝트를 대상으로 직접적인 영향을 미치기 때문에, 이전 구성에 대한 이력을 제공해 주지 않는다. ### 예시 @@ -47,7 +47,7 @@ kubectl create deployment nginx --image nginx 오브젝트 구성에 비해 장점은 다음과 같다. -- 커맨드는 간단해서 배우기 쉽고, 기억하기 쉽다. +- 커맨드는 하나의 동작을 나타내는 단어로 표현된다. - 커맨드는 클러스터를 수정하기 위해 단 하나의 단계만을 필요로 한다. 오브젝트 구성에 비해 단점은 다음과 같다. @@ -125,7 +125,7 @@ kubectl replace -f nginx.yaml 선언형 오브젝트 구성을 사용할 경우, 사용자는 로컬에 보관된 오브젝트 구성 파일을 대상으로 작동시키지만, 사용자는 파일에서 수행 할 작업을 정의하지 않는다. 생성, 업데이트, 그리고 삭제 작업은 -`kubectl`에 의해 오브젝트 마다 자동으로 감지된다. 이를 통해 다른 오브젝트에 대해 +`kubectl`에 의해 오브젝트마다 자동으로 감지된다. 이를 통해 다른 오브젝트에 대해 다른 조작이 필요할 수 있는 디렉터리에서 작업할 수 있다. 
{{< note >}} diff --git a/content/ko/docs/concepts/policy/pod-security-policy.md b/content/ko/docs/concepts/policy/pod-security-policy.md index e3c67a4ff9348..8afee5760b112 100644 --- a/content/ko/docs/concepts/policy/pod-security-policy.md +++ b/content/ko/docs/concepts/policy/pod-security-policy.md @@ -9,7 +9,9 @@ weight: 30 -{{< feature-state state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="deprecated" >}} + +파드시큐리티폴리시(PodSecurityPolicy)는 쿠버네티스 v1.21부터 더이상 사용되지 않으며, v1.25에서 제거된다. 파드 시큐리티 폴리시를 사용하면 파드 생성 및 업데이트에 대한 세분화된 권한을 부여할 수 있다. diff --git a/content/ko/docs/concepts/policy/resource-quotas.md b/content/ko/docs/concepts/policy/resource-quotas.md index a6e6208d76994..8e1d918ef423d 100644 --- a/content/ko/docs/concepts/policy/resource-quotas.md +++ b/content/ko/docs/concepts/policy/resource-quotas.md @@ -1,4 +1,6 @@ --- + + title: 리소스 쿼터 content_type: concept weight: 20 @@ -56,7 +58,7 @@ weight: 20 ## 리소스 쿼터 활성화 많은 쿠버네티스 배포판에 기본적으로 리소스 쿼터 지원이 활성화되어 있다. -API 서버 `--enable-admission-plugins=` 플래그의 인수 중 하나로 +{{< glossary_tooltip text="API 서버" term_id="kube-apiserver" >}} `--enable-admission-plugins=` 플래그의 인수 중 하나로 `ResourceQuota`가 있는 경우 활성화된다. 해당 네임스페이스에 리소스쿼터가 있는 경우 특정 네임스페이스에 @@ -122,6 +124,10 @@ GPU 리소스를 다음과 같이 쿼터를 정의할 수 있다. | `limits.ephemeral-storage` | 네임스페이스의 모든 파드에서 로컬 임시 스토리지 제한의 합은 이 값을 초과할 수 없음. | | `ephemeral-storage` | `requests.ephemeral-storage` 와 같음. | +{{< note >}} +CRI 컨테이너 런타임을 사용할 때, 컨테이너 로그는 임시 스토리지 쿼터에 포함된다. 이로 인해 스토리지 쿼터를 소진한 파드가 예기치 않게 축출될 수 있다. 자세한 내용은 [로깅 아키텍처](/ko/docs/concepts/cluster-administration/logging/)를 참조한다. +{{< /note >}} + ## 오브젝트 수 쿼터 다음 구문을 사용하여 모든 표준 네임스페이스 처리된(namespaced) 리소스 유형에 대한 @@ -186,7 +192,8 @@ GPU 리소스를 다음과 같이 쿼터를 정의할 수 있다. | `NotTerminating` | `.spec.activeDeadlineSeconds is nil`에 일치하는 파드 | | `BestEffort` | 최상의 서비스 품질을 제공하는 파드 | | `NotBestEffort` | 서비스 품질이 나쁜 파드 | -| `PriorityClass` | 지정된 [프라이올리티 클래스](/ko/docs/concepts/configuration/pod-priority-preemption)를 참조하여 일치하는 파드. | +| `PriorityClass` | 지정된 [프라이어리티 클래스](/ko/docs/concepts/configuration/pod-priority-preemption)를 참조하여 일치하는 파드. | +| `CrossNamespacePodAffinity` | 크로스-네임스페이스 파드 [(안티)어피니티 용어]가 있는 파드 | `BestEffort` 범위는 다음의 리소스를 추적하도록 쿼터를 제한한다. @@ -427,6 +434,63 @@ memory 0 20Gi pods 0 10 ``` +### 네임스페이스 간 파드 어피니티 쿼터 + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +오퍼레이터는 네임스페이스를 교차하는 어피니티가 있는 파드를 가질 수 있는 네임스페이스를 +제한하기 위해 `CrossNamespacePodAffinity` 쿼터 범위를 사용할 수 있다. 특히, 파드 어피니티 용어의 +`namespaces` 또는 `namespaceSelector` 필드를 설정할 수 있는 파드를 제어한다. + +안티-어피니티 제약 조건이 있는 파드는 장애 도메인에서 다른 모든 네임스페이스의 파드가 예약되지 않도록 +차단할 수 있으므로 사용자가 네임스페이스 간 어피니티 용어를 +사용하지 못하도록 하는 것이 바람직할 수 있다. + +이 범위 오퍼레이터를 사용하면 `CrossNamespaceAffinity` 범위와 하드(hard) 제한이 0인 +네임스페이스에 리소스 쿼터 오브젝트를 생성하여 특정 네임스페이스(아래 예에서 `foo-ns`)가 네임스페이스 간 파드 어피니티를 +사용하는 파드를 사용하지 못하도록 방지할 수 있다. + +```yaml +apiVersion: v1 +kind: ResourceQuota +metadata: + name: disable-cross-namespace-affinity + namespace: foo-ns +spec: + hard: + pods: "0" + scopeSelector: + matchExpressions: + - scopeName: CrossNamespaceAffinity +``` + +오퍼레이터가 기본적으로 `namespaces` 및 `namespaceSelector` 사용을 허용하지 않고, +특정 네임스페이스에만 허용하려는 경우, kube-apiserver 플래그 --admission-control-config-file를 +다음의 구성 파일의 경로로 설정하여 `CrossNamespaceAffinity` 를 +제한된 리소스로 구성할 수 있다. 
+ +```yaml +apiVersion: apiserver.config.k8s.io/v1 +kind: AdmissionConfiguration +plugins: +- name: "ResourceQuota" + configuration: + apiVersion: apiserver.config.k8s.io/v1 + kind: ResourceQuotaConfiguration + limitedResources: + - resource: pods + matchScopes: + - scopeName: CrossNamespaceAffinity +``` + +위의 구성을 사용하면, 파드는 생성된 네임스페이스에 `CrossNamespaceAffinity` 범위가 있는 리소스 쿼터 오브젝트가 있고, +해당 필드를 사용하는 파드 수보다 크거나 같은 하드 제한이 있는 경우에만 +파드 어피니티에서 `namespaces` 및 `namespaceSelector` 를 사용할 수 있다. + +이 기능은 알파이며 기본적으로 비활성화되어 있다. kube-apiserver 및 kube-scheduler 모두에서 +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/) +`PodAffinityNamespaceSelector` 를 설정하여 활성화할 수 있다. + ## 요청과 제한의 비교 {#requests-vs-limits} 컴퓨트 리소스를 할당할 때 각 컨테이너는 CPU 또는 메모리에 대한 요청과 제한값을 지정할 수 있다. @@ -608,17 +672,28 @@ plugins: values: ["cluster-services"] ``` -이제 "cluster-services" 파드는 `scopeSelector`와 일치하는 쿼터 오브젝트가 있는 네임스페이스에서만 허용된다. -예를 들면 다음과 같다. +그리고, `kube-system` 네임스페이스에 리소스 쿼터 오브젝트를 생성한다. -```yaml - scopeSelector: - matchExpressions: - - scopeName: PriorityClass - operator: In - values: ["cluster-services"] +{{< codenew file="policy/priority-class-resourcequota.yaml" >}} + +```shell +$ kubectl apply -f https://k8s.io/examples/policy/priority-class-resourcequota.yaml -n kube-system ``` +``` +resourcequota/pods-cluster-services created +``` + +이 경우, 파드 생성은 다음의 조건을 만족해야 허용될 것이다. + +1. 파드의 `priorityClassName` 가 명시되지 않음. +1. 파드의 `priorityClassName` 가 `cluster-services` 이외의 다른 값으로 명시됨. +1. 파드의 `priorityClassName` 가 `cluster-services` 로 설정되고, 파드가 `kube-system` + 네임스페이스에 생성되었으며 리소스 쿼터 검증을 통과함. + +파드 생성 요청은 `priorityClassName` 가 `cluster-services` 로 명시되고 +`kube-system` 이외의 다른 네임스페이스에 생성되는 경우, 거절된다. + ## {{% heading "whatsnext" %}} - 자세한 내용은 [리소스쿼터 디자인 문서](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_resource_quota.md)를 참고한다. diff --git a/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md b/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md index ebbc00f91e02a..8c095e4a27459 100644 --- a/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md +++ b/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md @@ -5,24 +5,23 @@ title: 노드에 파드 할당하기 content_type: concept -weight: 50 +weight: 20 --- -{{< glossary_tooltip text="파드" term_id="pod" >}}를 특정한 {{< glossary_tooltip text="노드(들)" term_id="node" >}}에서만 동작하도록 하거나, -특정 노드들을 선호하도록 제한할 수 있다. -이를 수행하는 방법에는 여러 가지가 있으며, 권장되는 접근 방식은 모두 -[레이블 셀렉터](/ko/docs/concepts/overview/working-with-objects/labels/)를 사용하여 선택한다. -보통 스케줄러가 자동으로 합리적인 배치(예: 노드들에 걸쳐 파드를 분배하거나, -자원이 부족한 노드에 파드를 배치하지 않는 등)를 수행하기에 이런 제약 조건은 필요하지 않지만 -간혹 파드가 배치되는 노드에 대해 더 많은 제어를 원할 수 있는 상황이 있다. +특정한 {{< glossary_tooltip text="노드(들)" term_id="node" >}} 집합에서만 동작하도록 +{{< glossary_tooltip text="파드" term_id="pod" >}}를 제한할 수 있다. +이를 수행하는 방법에는 여러 가지가 있으며 권장되는 접근 방식은 모두 +[레이블 셀렉터](/ko/docs/concepts/overview/working-with-objects/labels/)를 사용하여 선택을 용이하게 한다. +보통 스케줄러가 자동으로 합리적인 배치(예: 자원이 부족한 노드에 파드를 배치하지 않도록 +노드 간에 파드를 분배하는 등)를 수행하기에 이러한 제약 조건은 필요하지 않지만 +간혹 파드가 배포할 노드를 제어해야 하는 경우가 있다. 예를 들어 SSD가 장착된 머신에 파드가 연결되도록 하거나 또는 동일한 가용성 영역(availability zone)에서 많은 것을 통신하는 두 개의 서로 다른 서비스의 파드를 같이 배치할 수 있다. - ## 노드 셀렉터(nodeSelector) @@ -120,13 +119,13 @@ spec: 여기에 현재 `requiredDuringSchedulingIgnoredDuringExecution` 와 `preferredDuringSchedulingIgnoredDuringExecution` 로 부르는 두 가지 종류의 노드 어피니티가 있다. 
전자는 파드가 노드에 스케줄되도록 *반드시* -규칙을 만족해야 하는 것(`nodeSelector` 와 같으나 보다 표현적인 구문을 사용해서)을 지정하고, +규칙을 만족해야 하는 것(`nodeSelector` 와 비슷하나 보다 표현적인 구문을 사용해서)을 지정하고, 후자는 스케줄러가 시도하려고는 하지만, 보증하지 않는 *선호(preferences)* 를 지정한다는 점에서 이를 각각 "엄격함(hard)" 과 "유연함(soft)" 으로 생각할 수 있다. 이름의 "IgnoredDuringExecution" 부분은 `nodeSelector` 작동 방식과 유사하게 노드의 -레이블이 런타임 중에 변경되어 파드의 어피니티 규칙이 더 이상 충족되지 않으면 파드가 여전히 그 노드에서 +레이블이 런타임 중에 변경되어 파드의 어피니티 규칙이 더 이상 충족되지 않으면 파드가 그 노드에서 동작한다는 의미이다. 향후에는 파드의 노드 어피니티 요구 사항을 충족하지 않는 노드에서 파드를 제거한다는 -점을 제외하고는 `preferredDuringSchedulingIgnoredDuringExecution` 와 같은 `requiredDuringSchedulingIgnoredDuringExecution` 를 제공할 계획이다. +점을 제외하고는 `preferredDuringSchedulingIgnoredDuringExecution` 와 동일한 `requiredDuringSchedulingIgnoredDuringExecution` 를 제공할 계획이다. 따라서 `requiredDuringSchedulingIgnoredDuringExecution` 의 예로는 "인텔 CPU가 있는 노드에서만 파드 실행"이 될 수 있고, `preferredDuringSchedulingIgnoredDuringExecution` 의 예로는 "장애 조치 영역 XYZ에 파드 집합을 실행하려고 @@ -261,7 +260,7 @@ PodSpec에 지정된 NodeAffinity도 적용된다. `topologyKey` 의 빈 값을 허용하지 않는다. 2. 파드 안티-어피니티에서도 `requiredDuringSchedulingIgnoredDuringExecution` 와 `preferredDuringSchedulingIgnoredDuringExecution` 는 `topologyKey` 의 빈 값을 허용하지 않는다. -3. `requiredDuringSchedulingIgnoredDuringExecution` 파드 안티-어피니티에서 `topologyKey` 를 `kubernetes.io/hostname` 로 제한하기 위해 어드미션 컨트롤러 `LimitPodHardAntiAffinityTopology` 가 도입되었다. 사용자 지정 토폴로지를 사용할 수 있도록 하려면, 어드미션 컨트롤러를 수정하거나 아니면 간단히 이를 비활성화해야 한다. +3. `requiredDuringSchedulingIgnoredDuringExecution` 파드 안티-어피니티에서 `topologyKey` 를 `kubernetes.io/hostname` 로 제한하기 위해 어드미션 컨트롤러 `LimitPodHardAntiAffinityTopology` 가 도입되었다. 사용자 지정 토폴로지를 사용할 수 있도록 하려면, 어드미션 컨트롤러를 수정하거나 아니면 이를 비활성화해야 한다. 4. 위의 경우를 제외하고, `topologyKey` 는 적법한 어느 레이블-키도 가능하다. `labelSelector` 와 `topologyKey` 외에도 `labelSelector` 와 일치해야 하는 네임스페이스 목록 `namespaces` 를 @@ -271,6 +270,18 @@ PodSpec에 지정된 NodeAffinity도 적용된다. 파드를 노드에 스케줄하려면 `requiredDuringSchedulingIgnoredDuringExecution` 어피니티와 안티-어피니티와 연관된 `matchExpressions` 가 모두 충족되어야 한다. +#### 네임스페이스 셀렉터 +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +사용자는 네임스페이스 집합에 대한 레이블 쿼리인 `namespaceSelector` 를 사용하여 일치하는 네임스페이스를 선택할 수도 있다. +어피니티 용어는 `namespaceSelector` 에서 선택한 네임스페이스와 `namespaces` 필드에 나열된 네임스페이스의 결합에 적용된다. +빈 `namespaceSelector` ({})는 모든 네임스페이스와 일치하는 반면, null 또는 빈 `namespaces` 목록과 +null `namespaceSelector` 는 "이 파드의 네임스페이스"를 의미한다. + +이 기능은 알파이며 기본적으로 비활성화되어 있다. kube-apiserver 및 kube-scheduler 모두에서 +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/) +`PodAffinityNamespaceSelector` 를 설정하여 활성화할 수 있다. 
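A hedged sketch of the alpha `namespaceSelector` field described above; the pod name, labels, topology key, and image are assumptions, and the `PodAffinityNamespaceSelector` feature gate must be enabled on both kube-apiserver and kube-scheduler as noted:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: with-namespace-selector   # hypothetical name
spec:
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            app: web
        # match peer pods in every namespace labelled team=frontend,
        # in addition to any namespaces listed under `namespaces`
        namespaceSelector:
          matchLabels:
            team: frontend
        topologyKey: topology.kubernetes.io/zone
  containers:
  - name: app
    image: k8s.gcr.io/pause:3.2
```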
+ #### 더 실용적인 유스케이스 파드간 어피니티와 안티-어피니티는 레플리카셋, 스테이트풀셋, 디플로이먼트 등과 같은 diff --git a/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md b/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md index 83059ba9315a3..86e67978c2137 100644 --- a/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md +++ b/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md @@ -86,6 +86,7 @@ _스코어링_ 단계에서 스케줄러는 목록에 남아있는 노드의 순 * [스케줄러 성능 튜닝](/ko/docs/concepts/scheduling-eviction/scheduler-perf-tuning/)에 대해 읽기 * [파드 토폴로지 분배 제약 조건](/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints/)에 대해 읽기 * kube-scheduler의 [레퍼런스 문서](/docs/reference/command-line-tools-reference/kube-scheduler/) 읽기 +* [kube-scheduler 구성(v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) 레퍼런스 읽기 * [멀티 스케줄러 구성하기](/docs/tasks/extend-kubernetes/configure-multiple-schedulers/)에 대해 배우기 * [토폴로지 관리 정책](/docs/tasks/administer-cluster/topology-manager/)에 대해 배우기 * [파드 오버헤드](/ko/docs/concepts/scheduling-eviction/pod-overhead/)에 대해 배우기 diff --git a/content/ko/docs/concepts/scheduling-eviction/resource-bin-packing.md b/content/ko/docs/concepts/scheduling-eviction/resource-bin-packing.md index f23396d936aa1..d11b7fe2ae5b8 100644 --- a/content/ko/docs/concepts/scheduling-eviction/resource-bin-packing.md +++ b/content/ko/docs/concepts/scheduling-eviction/resource-bin-packing.md @@ -5,7 +5,7 @@ title: 확장된 리소스를 위한 리소스 빈 패킹(bin packing) content_type: concept -weight: 50 +weight: 30 --- diff --git a/content/ko/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md b/content/ko/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md index 4ed5c58a6324d..9e049cd348165 100644 --- a/content/ko/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md +++ b/content/ko/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md @@ -1,4 +1,6 @@ --- + + title: 스케줄러 성능 튜닝 content_type: concept weight: 80 @@ -22,8 +24,6 @@ API 서버에 해당 결정을 통지한다. 본 페이지에서는 상대적으로 큰 규모의 쿠버네티스 클러스터에 대한 성능 튜닝 최적화에 대해 설명한다. - - 큰 규모의 클러스터에서는 스케줄러의 동작을 튜닝하여 응답 시간 @@ -42,8 +42,10 @@ kube-scheduler 의 `percentageOfNodesToScore` 설정을 통해 `percentageOfNodesToScore` 를 100 보다 높게 설정해도 kube-scheduler는 마치 100을 설정한 것처럼 작동한다. -값을 변경하려면, kube-scheduler 구성 파일(이 파일은 `/etc/kubernetes/config/kube-scheduler.yaml` -일 수 있다)을 편집한 다음 스케줄러를 재시작 한다. +값을 변경하려면, +[kube-scheduler 구성 파일](/docs/reference/config-api/kube-scheduler-config.v1beta1/)을 +편집한 다음 스케줄러를 재시작한다. +대부분의 경우, 구성 파일은 `/etc/kubernetes/config/kube-scheduler.yaml` 에서 찾을 수 있다. 이를 변경한 후에 다음을 실행해서 @@ -97,7 +99,6 @@ algorithmSource: percentageOfNodesToScore: 50 ``` - ### percentageOfNodesToScore 튜닝 `percentageOfNodesToScore`는 1과 100 사이의 값이어야 하며 @@ -157,3 +158,7 @@ percentageOfNodesToScore: 50 ``` 모든 노드를 검토한 후, 노드 1로 돌아간다. + +## {{% heading "whatsnext" %}} + +* [kube-scheduler 구성 레퍼런스(v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) 확인 diff --git a/content/ko/docs/concepts/security/controlling-access.md b/content/ko/docs/concepts/security/controlling-access.md index 0b4bb6e2cce90..9612159eb4807 100644 --- a/content/ko/docs/concepts/security/controlling-access.md +++ b/content/ko/docs/concepts/security/controlling-access.md @@ -38,7 +38,7 @@ API 서버가 하나 이상의 인증기 모듈을 실행하도록 구성한다. 인증기는 [여기](/docs/reference/access-authn-authz/authentication/)에서 더 자세히 서술한다. 인증 단계로 들어가는 것은 온전한 HTTP 요청이지만 -일반적으로 헤더 그리고/또는 클라이언트 인증서만 검사한다. +일반적으로 헤더 그리고/또는 클라이언트 인증서를 검사한다. 인증 모듈은 클라이언트 인증서, 암호 및 일반 토큰, 부트스트랩 토큰, JWT 토큰(서비스 어카운트에 사용됨)을 포함한다. 
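Tying together the scheduler-tuning hunks above, a minimal sketch of the kube-scheduler configuration file they reference; the kubeconfig path is an assumption, and the scheduler must be restarted after editing, as described there:

```yaml
# a sketch of /etc/kubernetes/config/kube-scheduler.yaml
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: /etc/kubernetes/scheduler.conf   # assumed path
# stop searching for feasible nodes once 50% of the cluster has been covered
percentageOfNodesToScore: 50
```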
@@ -132,7 +132,7 @@ Bob이 `projectCaribou` 네임스페이스에 있는 오브젝트에 쓰기(`cre 이전의 논의는 (일반적인 경우) API 서버의 보안 포트로 전송되는 요청에 적용된다. API 서버는 실제로 다음과 같이 2개의 포트에서 서비스할 수 있다. -기본적으로 쿠버네티스 API 서버는 2개의 포트에서 HTTP 서비스를 한다. +기본적으로, 쿠버네티스 API 서버는 2개의 포트에서 HTTP 서비스를 한다. 1. `로컬호스트 포트`: diff --git a/content/ko/docs/concepts/security/overview.md b/content/ko/docs/concepts/security/overview.md index 86240a41162f6..9cd48a172cd0b 100644 --- a/content/ko/docs/concepts/security/overview.md +++ b/content/ko/docs/concepts/security/overview.md @@ -119,6 +119,7 @@ RBAC 인증(쿠버네티스 API에 대한 접근) | https://kubernetes.io/docs/r 컨테이너 취약점 스캔 및 OS에 종속적인 보안 | 이미지 빌드 단계의 일부로 컨테이너에 알려진 취약점이 있는지 검사해야 한다. 이미지 서명 및 시행 | 컨테이너 이미지에 서명하여 컨테이너의 내용에 대한 신뢰 시스템을 유지한다. 권한있는 사용자의 비허용 | 컨테이너를 구성할 때 컨테이너의 목적을 수행하는데 필요한 최소 권한을 가진 사용자를 컨테이너 내에 만드는 방법에 대해서는 설명서를 참조한다. +더 강력한 격리로 컨테이너 런타임 사용 | 더 강력한 격리를 제공하는 [컨테이너 런타임 클래스](/ko/docs/concepts/containers/runtime-class/)를 선택한다. ## 코드 @@ -151,3 +152,4 @@ TLS를 통한 접근 | 코드가 TCP를 통해 통신해야 한다면, 미리 * 컨트롤 플레인을 위한 [전송 데이터 암호화](/docs/tasks/tls/managing-tls-in-a-cluster/) * [Rest에서 데이터 암호화](/docs/tasks/administer-cluster/encrypt-data/) * [쿠버네티스 시크릿](/ko/docs/concepts/configuration/secret/) +* [런타임 클래스](/ko/docs/concepts/containers/runtime-class) diff --git a/content/ko/docs/concepts/services-networking/connect-applications-service.md b/content/ko/docs/concepts/services-networking/connect-applications-service.md index 9002778ede3e1..0848c357725ff 100644 --- a/content/ko/docs/concepts/services-networking/connect-applications-service.md +++ b/content/ko/docs/concepts/services-networking/connect-applications-service.md @@ -383,7 +383,7 @@ $ curl https://: -k

    <h1>Welcome to nginx!</h1>

    ``` -이제 클라우드 로드 밸런서를 사용하도록 서비스를 재생성하고, `my-nginx` 서비스의 `Type` 을 `NodePort` 에서 `LoadBalancer` 로 변경한다. +이제 클라우드 로드 밸런서를 사용하도록 서비스를 재생성한다. `my-nginx` 서비스의 `Type` 을 `NodePort` 에서 `LoadBalancer` 로 변경한다. ```shell kubectl edit svc my-nginx diff --git a/content/ko/docs/concepts/services-networking/dns-pod-service.md b/content/ko/docs/concepts/services-networking/dns-pod-service.md index fc1074a86c069..006ffba99c20a 100644 --- a/content/ko/docs/concepts/services-networking/dns-pod-service.md +++ b/content/ko/docs/concepts/services-networking/dns-pod-service.md @@ -1,11 +1,14 @@ --- + + + title: 서비스 및 파드용 DNS content_type: concept weight: 20 --- -이 페이지는 쿠버네티스의 DNS 지원에 대한 개요를 설명한다. - +쿠버네티스는 파드와 서비스를 위한 DNS 레코드를 생성한다. 사용자는 IP 주소 대신에 +일관된 DNS 네임을 통해서 서비스에 접속할 수 있다. @@ -15,23 +18,51 @@ weight: 20 개별 컨테이너들이 DNS 네임을 해석할 때 DNS 서비스의 IP를 사용하도록 kubelets를 구성한다. -### DNS 네임이 할당되는 것들 - 클러스터 내의 모든 서비스(DNS 서버 자신도 포함하여)에는 DNS 네임이 할당된다. 기본적으로 클라이언트 파드의 DNS 검색 리스트는 파드 자체의 네임스페이스와 클러스터의 기본 도메인을 포함한다. -이 예시는 다음과 같다. - -쿠버네티스 네임스페이스 `bar`에 `foo`라는 서비스가 있다. 네임스페이스 `bar`에서 running 상태인 파드는 -단순하게 `foo`를 조회하는 DNS 쿼리를 통해서 서비스 `foo`를 찾을 수 있다. -네임스페이스 `quux`에서 실행 중인 파드는 -`foo.bar`를 조회하는 DNS 쿼리를 통해서 이 서비스를 찾을 수 있다. - -다음 절에서는 쿠버네티스 DNS에서 지원하는 레코드 유형과 레이아웃을 자세히 설명한다. -이 외에 동작하는 레이아웃, 네임 또는 쿼리는 구현 세부 정보로 간주하며 -경고 없이 변경될 수 있다. -최신 업데이트에 대한 자세한 설명은 다음 링크를 통해 참조할 수 있다. -[쿠버네티스 DNS 기반 서비스 디스커버리](https://github.com/kubernetes/dns/blob/master/docs/specification.md). + +### 서비스의 네임스페이스 + +DNS 쿼리는 그것을 생성하는 파드의 네임스페이스에 따라 다른 결과를 반환할 수 +있다. 네임스페이스를 지정하지 않은 DNS 쿼리는 파드의 네임스페이스에 +국한된다. DNS 쿼리에 네임스페이스를 명시하여 다른 네임스페이스에 있는 서비스에 접속한다. + +예를 들어, `test` 네임스페이스에 있는 파드를 생각해보자. `data` 서비스는 +`prod` 네임스페이스에 있다. + +이 경우, `data` 에 대한 쿼리는 파드의 `test` 네임스페이스를 사용하기 때문에 결과를 반환하지 않을 것이다. + +`data.prod` 로 쿼리하면 의도한 결과를 반환할 것이다. 왜냐하면 +네임스페이스가 명시되어 있기 때문이다. + +DNS 쿼리는 파드의 `/etc/resolv.conf` 를 사용하여 확장될 수 있을 것이다. Kubelet은 +각 파드에 대해서 파일을 설정한다. 예를 들어, `data` 만을 위한 쿼리는 +`data.test.cluster.local` 로 확장된다. `search` 옵션의 값은 +쿼리를 확장하기 위해서 사용된다. DNS 쿼리에 대해 더 자세히 알고 싶은 경우, +[`resolv.conf` 설명 페이지.](https://www.man7.org/linux/man-pages/man5/resolv.conf.5.html)를 참고한다. + +``` +nameserver 10.32.0.10 +search .svc.cluster.local svc.cluster.local cluster.local +options ndots:5 +``` + +요약하면, _test_ 네임스페이스에 있는 파드는 `data.prod` 또는 +`data.prod.cluster.local` 중 하나를 통해 성공적으로 해석될 수 있다. + +### DNS 레코드 + +어떤 오브젝트가 DNS 레코드를 가지는가? + +1. 서비스 +2. 파드 + +다음 섹션은 지원되는 DNS 레코드의 종류 및 레이아웃에 대한 상세 +내용이다. 혹시 동작시킬 필요가 있는 다른 레이아웃, 네임, 또는 쿼리는 +구현 세부 사항으로 간주되며 경고 없이 변경될 수 있다. +최신 명세 확인을 위해서는, +[쿠버네티스 DNS-기반 서비스 디스커버리](https://github.com/kubernetes/dns/blob/master/docs/specification.md)를 본다. ## 서비스 diff --git a/content/ko/docs/concepts/services-networking/dual-stack.md b/content/ko/docs/concepts/services-networking/dual-stack.md index cae986bb5d7b4..821ca34989e68 100644 --- a/content/ko/docs/concepts/services-networking/dual-stack.md +++ b/content/ko/docs/concepts/services-networking/dual-stack.md @@ -11,11 +11,11 @@ weight: 70 -{{< feature-state for_k8s_version="v1.16" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} - IPv4/IPv6 이중 스택을 사용하면 {{< glossary_tooltip text="파드" term_id="pod" >}} 와 {{< glossary_tooltip text="서비스" term_id="service" >}} 에 IPv4와 IPv6 주소를 모두 할당 할 수 있다. +IPv4/IPv6 이중 스택 네트워킹을 사용하면 {{< glossary_tooltip text="파드" term_id="pod" >}}와 {{< glossary_tooltip text="서비스" term_id="service" >}}에 IPv4와 IPv6 주소를 모두 할당할 수 있다. -만약 쿠버네티스 클러스터에서 IPv4/IPv6 이중 스택 네트워킹을 활성화하면, 클러스터는 IPv4와 IPv6 주소의 동시 할당을 지원하게 된다. 
+IPv4/IPv6 이중 스택 네트워킹은 1.21부터 쿠버네티스 클러스터에 기본적으로 활성화되어 있고, IPv4 및 IPv6 주소를 동시에 할당할 수 있다. @@ -23,7 +23,7 @@ weight: 70 ## 지원되는 기능 -쿠버네티스 클러스터에서 IPv4/IPv6 이중 스택을 활성화하면 다음의 기능을 제공한다. +쿠버네티스 클러스터의 IPv4/IPv6 이중 스택은 다음의 기능을 제공한다. * 이중 스택 파드 네트워킹(파드 당 단일 IPv4와 IPv6 주소 할당) * IPv4와 IPv6 지원 서비스 @@ -35,52 +35,50 @@ IPv4/IPv6 이중 스택 쿠버네티스 클러스터를 활용하려면 다음 * 쿠버네티스 1.20 이상 이전 버전과 함께 이중 스택 서비스를 사용하는 방법에 대한 정보 - 쿠버네티스 버전, 쿠버네티스 해당 버전에 대한 + 쿠버네티스 버전, 쿠버네티스 해당 버전에 대한 문서 참조 * 이중 스택 네트워킹을 위한 공급자의 지원(클라우드 공급자 또는 다른 방식으로 쿠버네티스 노드에 라우팅 가능한 IPv4/IPv6 네트워크 인터페이스를 제공할 수 있어야 한다.) * 이중 스택(예: Kubenet 또는 Calico)을 지원하는 네트워크 플러그인 -## IPv4/IPv6 이중 스택 활성화 +## IPv4/IPv6 이중 스택 구성 -IPv4/IPv6 이중 스택을 활성화 하려면, 클러스터의 관련 구성요소에 대해 `IPv6DualStack` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/) 를 활성화 하고, 이중 스택 클러스터 네트워크 할당을 설정한다. +IPv4/IPv6 이중 스택을 사용하려면, 클러스터의 관련 구성 요소에 대해 `IPv6DualStack` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화한다. (1.21부터 IPv4/IPv6 이중 스택이 기본적으로 활성화된다.) + +IPv4/IPv6 이중 스택을 구성하려면, 이중 스택 클러스터 네트워크 할당을 설정한다. * kube-apiserver: - * `--feature-gates="IPv6DualStack=true"` * `--service-cluster-ip-range=,` * kube-controller-manager: - * `--feature-gates="IPv6DualStack=true"` * `--cluster-cidr=,` * `--service-cluster-ip-range=,` * `--node-cidr-mask-size-ipv4|--node-cidr-mask-size-ipv6` IPv4의 기본값은 /24 이고 IPv6의 기본값은 /64 이다. - * kubelet: - * `--feature-gates="IPv6DualStack=true"` * kube-proxy: * `--cluster-cidr=,` - * `--feature-gates="IPv6DualStack=true"` {{< note >}} IPv4 CIDR의 예: `10.244.0.0/16` (자신의 주소 범위를 제공하더라도) IPv6 CIDR의 예: `fdXY:IJKL:MNOP:15::/64` (이 형식으로 표시되지만, 유효한 주소는 아니다 - [RFC 4193](https://tools.ietf.org/html/rfc4193)을 본다.) +1.21부터, IPv4/IPv6 이중 스택은 기본적으로 활성화된다. +필요한 경우 kube-apiserver, kube-controller-manager, kubelet 및 kube-proxy 커맨드 라인에 +`--feature-gates="IPv6DualStack=false"` 를 지정하여 비활성화할 수 있다. {{< /note >}} ## 서비스 -클러스터에 이중 스택이 활성화된 경우 IPv4, IPv6 또는 둘 다를 사용할 수 있는 {{< glossary_tooltip text="서비스" term_id="service" >}}를 만들 수 있다. +IPv4, IPv6 또는 둘 다를 사용할 수 있는 {{< glossary_tooltip text="서비스" term_id="service" >}}를 생성할 수 있다. -서비스의 주소 계열은 기본적으로 첫 번째 서비스 클러스터 IP 범위의 주소 계열로 설정된다. (`--service-cluster-ip-range` 플래그를 통해 kube-controller-manager에 구성) +서비스의 주소 계열은 기본적으로 첫 번째 서비스 클러스터 IP 범위의 주소 계열로 설정된다. (`--service-cluster-ip-range` 플래그를 통해 kube-apiserver에 구성) -서비스를 정의할 때 선택적으로 이중 스택으로 구성할 수 있다. 원하는 동작을 지정하려면 `.spec.ipFamilyPolicy` 필드를 +서비스를 정의할 때 선택적으로 이중 스택으로 구성할 수 있다. 원하는 동작을 지정하려면 `.spec.ipFamilyPolicy` 필드를 다음 값 중 하나로 설정한다. * `SingleStack`: 단일 스택 서비스. 컨트롤 플레인은 첫 번째로 구성된 서비스 클러스터 IP 범위를 사용하여 서비스에 대한 클러스터 IP를 할당한다. * `PreferDualStack`: - * 클러스터에 이중 스택이 활성화된 경우에만 사용된다. 서비스에 대해 IPv4 및 IPv6 클러스터 IP를 할당한다. - * 클러스터에 이중 스택이 활성화되지 않은 경우, 이 설정은 `SingleStack`과 동일한 동작을 따른다. + * 서비스에 IPv4 및 IPv6 클러스터 IP를 할당한다. (클러스터에 `--feature-gates="IPv6DualStack=false"` 가 있는 경우, 이 설정은 `SingleStack` 과 동일한 동작을 따른다.) * `RequireDualStack`: IPv4 및 IPv6 주소 범위 모두에서 서비스 `.spec.ClusterIPs`를 할당한다. * `.spec.ipFamilies` 배열의 첫 번째 요소의 주소 계열을 기반으로 `.spec.ClusterIPs` 목록에서 `.spec.ClusterIP`를 선택한다. - * 클러스터에는 이중 스택 네트워킹이 구성되어 있어야 한다. 단일 스택에 사용할 IP 계열을 정의하거나 이중 스택에 대한 IP 군의 순서를 정의하려는 경우, 서비스에서 옵션 필드 `.spec.ipFamilies`를 설정하여 주소 군을 선택할 수 있다. @@ -121,7 +119,7 @@ IPv6 CIDR의 예: `fdXY:IJKL:MNOP:15::/64` (이 형식으로 표시되지만, #### 기존 서비스의 이중 스택 기본값 -이 예제는 서비스가 이미있는 클러스터에서 이중 스택이 새로 활성화된 경우의 기본 동작을 보여준다. +이 예제는 서비스가 이미 있는 클러스터에서 이중 스택이 새로 활성화된 경우의 기본 동작을 보여준다. (`--feature-gates="IPv6DualStack=false"` 가 설정되지 않은 경우 기존 클러스터를 1.21로 업그레이드하면 이중 스택이 활성화된다.) 1. 
클러스터에서 이중 스택이 활성화된 경우 기존 서비스 (`IPv4` 또는 `IPv6`)는 컨트롤 플레인이 `.spec.ipFamilyPolicy`를 `SingleStack`으로 지정하고 `.spec.ipFamilies`를 기존 서비스의 주소 계열로 설정한다. 기존 서비스 클러스터 IP는 `.spec.ClusterIPs`에 저장한다. @@ -158,7 +156,7 @@ status: loadBalancer: {} ``` -1. 클러스터에서 이중 스택이 활성화된 경우, 셀렉터가 있는 기존 [헤드리스 서비스](/ko/docs/concepts/services-networking/service/#헤드리스-headless-서비스)는 `.spec.ClusterIP`가 `None`이라도 컨트롤 플레인이 `.spec.ipFamilyPolicy`을 `SingleStack`으로 지정하고 `.spec.ipFamilies`는 첫 번째 서비스 클러스터 IP 범위(kube-controller-manager에 대한 `--service-cluster-ip-range` 플래그를 통해 구성)의 주소 계열으로 지정한다. +1. 클러스터에서 이중 스택이 활성화된 경우, 셀렉터가 있는 기존 [헤드리스 서비스](/ko/docs/concepts/services-networking/service/#헤드리스-headless-서비스)는 `.spec.ClusterIP`가 `None`이라도 컨트롤 플레인이 `.spec.ipFamilyPolicy`을 `SingleStack`으로 지정하고 `.spec.ipFamilies`는 첫 번째 서비스 클러스터 IP 범위(kube-apiserver에 대한 `--service-cluster-ip-range` 플래그를 통해 구성)의 주소 계열으로 지정한다. {{< codenew file="service/networking/dual-stack-default-svc.yaml" >}} @@ -237,3 +235,5 @@ spec: * [IPv4/IPv6 이중 스택 검증](/ko/docs/tasks/network/validate-dual-stack) 네트워킹 +* [kubeadm을 사용하여 이중 스택 네트워킹 활성화 +](/docs/setup/production-environment/tools/kubeadm/dual-stack-support/) diff --git a/content/ko/docs/concepts/services-networking/endpoint-slices.md b/content/ko/docs/concepts/services-networking/endpoint-slices.md index f75dc819c3c6e..4e12cf9ff210d 100644 --- a/content/ko/docs/concepts/services-networking/endpoint-slices.md +++ b/content/ko/docs/concepts/services-networking/endpoint-slices.md @@ -1,13 +1,13 @@ --- title: 엔드포인트슬라이스 content_type: concept -weight: 35 +weight: 45 --- -{{< feature-state for_k8s_version="v1.17" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} _엔드포인트슬라이스_ 는 쿠버네티스 클러스터 내의 네트워크 엔드포인트를 추적하는 간단한 방법을 제공한다. 이것은 엔드포인트를 더 확장하고, 확장 가능한 @@ -50,7 +50,7 @@ term_id="selector" >}}가 지정되면 컨트롤 플레인은 자동으로 리소스 샘플이 있다. ```yaml -apiVersion: discovery.k8s.io/v1beta1 +apiVersion: discovery.k8s.io/v1 kind: EndpointSlice metadata: name: example-abc @@ -67,13 +67,12 @@ endpoints: conditions: ready: true hostname: pod-1 - topology: - kubernetes.io/hostname: node-1 - topology.kubernetes.io/zone: us-west2-a + nodeName: node-1 + zone: us-west2-a ``` 기본적으로, 컨트롤 플레인은 각각 100개 이하의 엔드포인트를 -갖도록 엔드포인트슬라이스를 +갖도록 엔드포인트슬라이스를 생성하고 관리한다. `--max-endpoints-per-slice` {{< glossary_tooltip text="kube-controller-manager" term_id="kube-controller-manager" >}} 플래그를 사용하여, 최대 1000개까지 구성할 수 있다. @@ -98,9 +97,9 @@ endpoints: #### 준비 -`ready`는 파드의 `Ready` 조건에 매핑되는 조건이다. `Ready` 조건이 `True`로 설정된 실행 중인 파드는 -이 엔드포인트슬라이스 조건도 `true`로 설정되어야 한다. 호환성의 -이유로, 파드가 종료될 때 `ready`는 절대 `true`가 되면 안 된다. 컨슈머는 `serving` 조건을 참조하여 +`ready`는 파드의 `Ready` 조건에 매핑되는 조건이다. `Ready` 조건이 `True`로 설정된 실행 중인 파드는 +이 엔드포인트슬라이스 조건도 `true`로 설정되어야 한다. 호환성의 +이유로, 파드가 종료될 때 `ready`는 절대 `true`가 되면 안 된다. 컨슈머는 `serving` 조건을 참조하여 파드 종료 준비 상태(readiness)를 검사해야 한다. 이 규칙의 유일한 예외는 `spec.publishNotReadyAddresses`가 `true`로 설정된 서비스이다. 이러한 서비스의 엔드 포인트는 항상 `ready`조건이 `true`로 설정된다. @@ -110,16 +109,16 @@ endpoints: {{< feature-state for_k8s_version="v1.20" state="alpha" >}} `serving`은 종료 상태를 고려하지 않는다는 점을 제외하면 `ready` 조건과 동일하다. -엔드포인트슬라이스 API 컨슈머는 파드가 종료되는 동안 파드 준비 상태에 관심이 있다면 +엔드포인트슬라이스 API 컨슈머는 파드가 종료되는 동안 파드 준비 상태에 관심이 있다면 이 조건을 확인해야 한다. {{< note >}} `serving`은 `ready`와 거의 동일하지만 `ready`의 기존 의미가 깨지는 것을 방지하기 위해 추가되었다. -엔드포인트를 종료하기 위해 `ready`가 `true` 일 수 있다면 기존 클라이언트에게는 예상치 못한 일이 될 수 있다. +엔드포인트를 종료하기 위해 `ready`가 `true` 일 수 있다면 기존 클라이언트에게는 예상치 못한 일이 될 수 있다. 역사적으로 종료된 엔드포인트는 처음부터 엔드포인트 또는 엔드포인트슬라이스 API에 포함되지 않았기 때문이다. 
-이러한 이유로 `ready`는 엔드포인트 종료를 위해 _always_ `false`이며, -클라이언트가 `ready`에 대한 기존 의미와 관계없이 파드 종료 준비 상태를 +이러한 이유로 `ready`는 엔드포인트 종료를 위해 _always_ `false`이며, +클라이언트가 `ready`에 대한 기존 의미와 관계없이 파드 종료 준비 상태를 추적 할 수 있도록 v1.20에 새로운 조건 `serving`이 추가되었다. {{< /note >}} @@ -133,30 +132,26 @@ endpoints: ### 토폴로지 정보 {#토폴로지} -{{< feature-state for_k8s_version="v1.20" state="deprecated" >}} +엔드포인트슬라이스 내의 각 엔드 포인트는 관련 토폴로지 정보를 포함할 수 있다. +토폴로지 정보에는 엔드 포인트의 위치와 해당 노드 및 +영역에 대한 정보가 포함된다. 엔드포인트슬라이스의 다음의 엔드 포인트별 +필드에서 사용할 수 있다. + +*`nodeName` - 이 엔드 포인트가 있는 노드의 이름이다. +*`zone` - 이 엔드 포인트가 있는 영역이다. {{< note >}} -엔드포인트슬라이스의 토폴로지 필드는 사용 중단되었으며 향후 릴리스에서 제거된다. -토폴로지에서 `kubernetes.io/hostname`을 설정하는 대신 새로운 `nodeName` 필드가 -사용된다. 영역 및 리전을 커버하는 다른 토폴로지 필드는 -엔드포인트슬라이스 내의 모든 엔드포인트에 적용되는 -엔드포인트슬라이스 레이블을 이용해 더 잘 표현될 수 있다. +v1 API에서는, 전용 필드 `nodeName` 및 `zone` 을 위해 엔드 포인트별 +`topology` 가 효과적으로 제거되었다. + +`EndpointSlice` 리소스의 `endpoint` 필드에 임의의 토폴로지 필드를 +설정하는 것은 더 이상 사용되지 않으며, v1 API에서 지원되지 않는다. 대신, +v1 API는 개별 `nodeName` 및 `zone` 필드 설정을 지원한다. 이러한 +필드는 API 버전 간에 자동으로 번역된다. 예를 들어, +v1beta1 API의 `topology` 필드에 있는 `"topology.kubernetes.io/zone"` +키 값은 v1 API의 `zone` 필드로 접근할 수 있다. {{< /note >}} -엔드포인트슬라이스 내 각 엔드포인트는 연관된 토폴로지 정보를 포함할 수 있다. -이는 해당 노드, 영역 그리고 지역에 대한 정보가 포함된 -엔드포인트가 있는 위치를 나타나는데 사용 한다. 값을 사용할 수 있으면, -컨트롤 플레인은 엔드포인트슬라이스에 대해 다음의 토폴로지 레이블을 설정한다. - -* `kubernetes.io/hostname` - 이 엔드포인트가 있는 노드의 이름. -* `topology.kubernetes.io/zone` - 이 엔드포인트가 있는 영역의 이름. -* `topology.kubernetes.io/region` - 이 엔드포인트가 있는 지역의 이름. - -이런 레이블 값은 슬라이스의 각 엔드포인트와 연관된 리소스에서 -파생된다. 호스트 이름 레이블은 해당 파드의 -NodeName 필드 값을 나타낸다. 영역 및 지역 레이블은 해당 -노드에서 이름이 같은 값을 나타낸다. - ### 관리 대부분의 경우, 컨트롤 플레인(특히, 엔드포인트 슬라이스 diff --git a/content/ko/docs/concepts/services-networking/ingress-controllers.md b/content/ko/docs/concepts/services-networking/ingress-controllers.md index 3af939488e267..41524039f0ced 100644 --- a/content/ko/docs/concepts/services-networking/ingress-controllers.md +++ b/content/ko/docs/concepts/services-networking/ingress-controllers.md @@ -9,11 +9,11 @@ weight: 40 인그레스 리소스가 작동하려면, 클러스터는 실행 중인 인그레스 컨트롤러가 반드시 필요하다. -kube-controller-manager 바이너리의 일부로 실행되는 컨트롤러의 다른 타입과 달리 인그레스 컨트롤러는 +`kube-controller-manager` 바이너리의 일부로 실행되는 컨트롤러의 다른 타입과 달리 인그레스 컨트롤러는 클러스터와 함께 자동으로 실행되지 않는다. 클러스터에 가장 적합한 인그레스 컨트롤러 구현을 선택하는데 이 페이지를 사용한다. -프로젝트로써 쿠버네티스는 [AWS](https://github.com/kubernetes-sigs/aws-load-balancer-controller#readme), [GCE](https://git.k8s.io/ingress-gce/README.md#readme)와 +프로젝트로서 쿠버네티스는 [AWS](https://github.com/kubernetes-sigs/aws-load-balancer-controller#readme), [GCE](https://git.k8s.io/ingress-gce/README.md#readme)와 [nginx](https://git.k8s.io/ingress-nginx/README.md#readme) 인그레스 컨트롤러를 지원하고 유지한다. @@ -23,32 +23,35 @@ kube-controller-manager 바이너리의 일부로 실행되는 컨트롤러의 {{% thirdparty-content %}} -* [AKS 애플리케이션 게이트웨이 인그레스 컨트롤러] (https://azure.github.io/application-gateway-kubernetes-ingress/)는 [Azure 애플리케이션 게이트웨이](https://docs.microsoft.com)를 구성하는 인그레스 컨트롤러다. +* [AKS 애플리케이션 게이트웨이 인그레스 컨트롤러](https://azure.github.io/application-gateway-kubernetes-ingress/)는 [Azure 애플리케이션 게이트웨이](https://docs.microsoft.com)를 구성하는 인그레스 컨트롤러다. * [Ambassador](https://www.getambassador.io/) API 게이트웨이는 [Envoy](https://www.envoyproxy.io) 기반 인그레스 컨트롤러다. +* [Apache APISIX 인그레스 컨트롤러](https://github.com/apache/apisix-ingress-controller)는 [Apache APISIX](https://github.com/apache/apisix) 기반의 인그레스 컨트롤러이다. 
* [Avi 쿠버네티스 오퍼레이터](https://github.com/vmware/load-balancer-and-ingress-services-for-kubernetes)는 [VMware NSX Advanced Load Balancer](https://avinetworks.com/)을 사용하는 L4-L7 로드 밸런싱을 제공한다. * [Citrix 인그레스 컨트롤러](https://github.com/citrix/citrix-k8s-ingress-controller#readme)는 Citrix 애플리케이션 딜리버리 컨트롤러에서 작동한다. * [Contour](https://projectcontour.io/)는 [Envoy](https://www.envoyproxy.io/) 기반 인그레스 컨트롤러다. +* [EnRoute](https://getenroute.io/)는 인그레스 컨트롤러로 실행할 수 있는 [Envoy](https://www.envoyproxy.io) 기반 API 게이트웨이다. * F5 BIG-IP [쿠버네티스 용 컨테이너 인그레스 서비스](https://clouddocs.f5.com/containers/latest/userguide/kubernetes/)를 이용하면 인그레스를 사용하여 F5 BIG-IP 가상 서버를 구성할 수 있다. * [Gloo](https://gloo.solo.io)는 API 게이트웨이 기능을 제공하는 [Envoy](https://www.envoyproxy.io) 기반의 오픈소스 인그레스 컨트롤러다. -* [HAProxy 인그레스](https://haproxy-ingress.github.io/)는 [HAProxy](http://www.haproxy.org/#desc)의 +* [HAProxy 인그레스](https://haproxy-ingress.github.io/)는 [HAProxy](https://www.haproxy.org/#desc)의 인그레스 컨트롤러다. -* [쿠버네티스 용 HAProxy 인그레스 컨트롤러](https://github.com/haproxytech/kubernetes-ingress#readme)는 [HAProxy](http://www.haproxy.org/#desc) 용 +* [쿠버네티스 용 HAProxy 인그레스 컨트롤러](https://github.com/haproxytech/kubernetes-ingress#readme)는 [HAProxy](https://www.haproxy.org/#desc) 용 인그레스 컨트롤러이기도 하다. * [Istio 인그레스](https://istio.io/latest/docs/tasks/traffic-management/ingress/kubernetes-ingress/)는 [Istio](https://istio.io/) 기반 인그레스 컨트롤러다. * [쿠버네티스 용 Kong 인그레스 컨트롤러](https://github.com/Kong/kubernetes-ingress-controller#readme)는 [Kong 게이트웨이](https://konghq.com/kong/)를 구동하는 인그레스 컨트롤러다. -* [쿠버네티스 용 NGINX 인그레스 컨트롤러](https://www.nginx.com/products/nginx/kubernetes-ingress-controller)는 [NGINX](https://www.nginx.com/resources/glossary) +* [쿠버네티스 용 NGINX 인그레스 컨트롤러](https://www.nginx.com/products/nginx-ingress-controller/)는 [NGINX](https://www.nginx.com/resources/glossary/nginx/) 웹서버(프록시로 사용)와 함께 작동한다. * [Skipper](https://opensource.zalando.com/skipper/kubernetes/ingress-controller/)는 사용자의 커스텀 프록시를 구축하기 위한 라이브러리로 설계된 쿠버네티스 인그레스와 같은 유스케이스를 포함한 서비스 구성을 위한 HTTP 라우터 및 역방향 프록시다. * [Traefik 쿠버네티스 인그레스 제공자](https://doc.traefik.io/traefik/providers/kubernetes-ingress/)는 [Traefik](https://traefik.io/traefik/) 프록시 용 인그레스 컨트롤러다. +* [Tyk 오퍼레이터](https://github.com/TykTechnologies/tyk-operator)는 사용자 지정 리소스로 인그레스를 확장하여 API 관리 기능을 인그레스로 가져온다. Tyk 오퍼레이터는 오픈 소스 Tyk 게이트웨이 및 Tyk 클라우드 컨트롤 플레인과 함께 작동한다. * [Voyager](https://appscode.com/products/voyager)는 - [HAProxy](http://www.haproxy.org/#desc)의 인그레스 컨트롤러다. + [HAProxy](https://www.haproxy.org/#desc)의 인그레스 컨트롤러다. ## 여러 인그레스 컨트롤러 사용 @@ -63,7 +66,7 @@ kube-controller-manager 바이너리의 일부로 실행되는 컨트롤러의 다양한 인그레스 컨트롤러는 약간 다르게 작동한다. {{< note >}} -인그레스 컨트롤러의 설명서를 검토하여 선택 시 주의 사항을 이해해야한다. +인그레스 컨트롤러의 설명서를 검토하여 선택 시 주의 사항을 이해해야 한다. {{< /note >}} diff --git a/content/ko/docs/concepts/services-networking/ingress.md b/content/ko/docs/concepts/services-networking/ingress.md index 5b91356437030..802cc486bfff2 100644 --- a/content/ko/docs/concepts/services-networking/ingress.md +++ b/content/ko/docs/concepts/services-networking/ingress.md @@ -167,7 +167,7 @@ Events: ### 예제 -| 종류 | 경로 | 요청 경로 | 일치 여부 | +| 종류 | 경로 | 요청 경로 | 일치 여부 | |--------|---------------------------------|-------------------------------|------------------------------------| | Prefix | `/` | (모든 경로) | 예 | | Exact | `/foo` | `/foo` | 예 | @@ -218,7 +218,19 @@ Events: {{< codenew file="service/networking/external-lb.yaml" >}} IngressClass 리소스에는 선택적인 파라미터 필드가 있다. 이 클래스에 대한 -추가 구성을 참조하는데 사용할 수 있다. +추가 구현 별 구성을 참조하는데 사용할 수 있다. 
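A hedged sketch of such an IngressClass, mirroring the `external-lb` example referenced above; the controller string and the `IngressClassParams` kind are assumptions standing in for whatever the installed controller actually defines:

```yaml
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  name: external-lb               # hypothetical class name
spec:
  # must match the identifier that the installed ingress controller watches for
  controller: example.com/ingress-controller
  parameters:
    # a hypothetical controller-specific resource carrying extra configuration
    apiGroup: k8s.example.com
    kind: IngressClassParams
    name: external-lb-params
```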
+ +#### 네임스페이스 범위의 파라미터 + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +`Parameters` 필드에는 인그레스 클래스 구성을 위해 네임스페이스 별 리소스를 참조하는 데 +사용할 수 있는 `scope` 및 `namespace` 필드가 있다. +`Scope` 필드의 기본값은 `Cluster` 이다. 즉, 기본값은 클러스터 범위의 +리소스이다. `Scope` 를 `Namespace` 로 설정하고 `Namespace` 필드를 +설정하면 특정 네임스페이스의 파라미터 리소스를 참조한다. + +{{< codenew file="service/networking/namespaced-params.yaml" >}} ### 사용중단(Deprecated) 어노테이션 @@ -257,7 +269,7 @@ IngressClass 리소스에는 선택적인 파라미터 필드가 있다. 이 클 {{< codenew file="service/networking/test-ingress.yaml" >}} -만약 `kubectl apply -f` 를 사용해서 생성한다면 방금 추가한 인그레스의 +만약 `kubectl apply -f` 를 사용해서 생성한다면 추가한 인그레스의 상태를 볼 수 있어야 한다. ```bash @@ -376,7 +388,7 @@ graph LR; 트래픽을 일치 시킬 수 있다. 예를 들어, 다음 인그레스는 `first.bar.com`에 요청된 트래픽을 -`service1`로, `second.foo.com`는 `service2`로, 호스트 이름이 정의되지 +`service1`로, `second.bar.com`는 `service2`로, 호스트 이름이 정의되지 않은(즉, 요청 헤더가 표시 되지 않는) IP 주소로의 모든 트래픽은 `service3`로 라우팅 한다. diff --git a/content/ko/docs/concepts/services-networking/network-policies.md b/content/ko/docs/concepts/services-networking/network-policies.md index d7872b1d92e02..c68d6f2862d82 100644 --- a/content/ko/docs/concepts/services-networking/network-policies.md +++ b/content/ko/docs/concepts/services-networking/network-policies.md @@ -220,18 +220,72 @@ __ipBlock__: 인그레스 소스 또는 이그레스 대상으로 허용할 IP C SCTP 프로토콜 네트워크폴리시를 지원하는 {{< glossary_tooltip text="CNI" term_id="cni" >}} 플러그인을 사용하고 있어야 한다. {{< /note >}} +## 포트 범위 지정 + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +네트워크폴리시를 작성할 때, 단일 포트 대신 포트 범위를 대상으로 지정할 수 있다. + +다음 예와 같이 `endPort` 필드를 사용하면, 이 작업을 수행할 수 있다. + +```yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: multi-port-egress + namespace: default +spec: + podSelector: + matchLabels: + role: db + policyTypes: + - Egress + egress: + - to: + - ipBlock: + cidr: 10.0.0.0/24 + ports: + - protocol: TCP + port: 32000 + endPort: 32768 +``` + +위 규칙은 대상 포트가 32000에서 32768 사이에 있는 경우, 네임스페이스 `default` 에 레이블이 `db` 인 모든 파드가 TCP를 통해 `10.0.0.0/24` 범위 내의 모든 IP와 통신하도록 허용한다. + +이 필드를 사용할 때 다음의 제한 사항이 적용된다. +* 알파 기능으로, 기본적으로 비활성화되어 있다. 클러스터 수준에서 `endPort` 필드를 활성화하려면, 사용자(또는 클러스터 관리자)가 `--feature-gates=NetworkPolicyEndPort=true,…` 가 있는 API 서버에 대해 `NetworkPolicyEndPort` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화해야 한다. +* `endPort` 필드는 `port` 필드보다 크거나 같아야 한다. +* `endPort` 는 `port` 도 정의된 경우에만 정의할 수 있다. +* 두 포트 모두 숫자여야 한다. + +{{< note >}} +클러스터는 {{< glossary_tooltip text="CNI" term_id="cni" >}} 플러그인을 사용해야 한다. +네트워크폴리시 명세에서 `endPort` 필드를 지원한다. +{{< /note >}} + +## 이름으로 네임스페이스 지정 + +{{< feature-state state="beta" for_k8s_version="1.21" >}} + +쿠버네티스 컨트롤 플레인은 `NamespaceDefaultLabelName` +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)가 활성화된 경우 +모든 네임스페이스에 변경할 수 없는(immutable) 레이블 `kubernetes.io/metadata.name` 을 설정한다. +레이블의 값은 네임스페이스 이름이다. + +네트워크폴리시는 일부 오브젝트 필드가 있는 이름으로 네임스페이스를 대상으로 지정할 수 없지만, 표준화된 레이블을 사용하여 +특정 네임스페이스를 대상으로 지정할 수 있다. + ## 네트워크 정책으로 할 수 없는 것(적어도 아직은 할 수 없는) -쿠버네티스 1.20부터 다음의 기능은 네트워크폴리시 API에 존재하지 않지만, 운영 체제 컴포넌트(예: SELinux, OpenVSwitch, IPTables 등) 또는 Layer 7 기술(인그레스 컨트롤러, 서비스 메시 구현) 또는 어드미션 컨트롤러를 사용하여 제2의 해결책을 구현할 수 있다. 쿠버네티스의 네트워크 보안을 처음 사용하는 경우, 네트워크폴리시 API를 사용하여 다음의 사용자 스토리를 (아직) 구현할 수 없다는 점에 유의할 가치가 있다. 이러한 사용자 스토리 중 일부(전부는 아님)가 네트워크폴리시 API의 향후 릴리스에서 활발히 논의되고 있다. +쿠버네티스 {{< skew latestVersion >}}부터 다음의 기능은 네트워크폴리시 API에 존재하지 않지만, 운영 체제 컴포넌트(예: SELinux, OpenVSwitch, IPTables 등) 또는 Layer 7 기술(인그레스 컨트롤러, 서비스 메시 구현) 또는 어드미션 컨트롤러를 사용하여 제2의 해결책을 구현할 수 있다. 
쿠버네티스의 네트워크 보안을 처음 사용하는 경우, 네트워크폴리시 API를 사용하여 다음의 사용자 스토리를 (아직) 구현할 수 없다는 점에 유의할 필요가 있다. - 내부 클러스터 트래픽이 공통 게이트웨이를 통과하도록 강제한다(서비스 메시나 기타 프록시와 함께 제공하는 것이 가장 좋을 수 있음). - TLS와 관련된 모든 것(이를 위해 서비스 메시나 인그레스 컨트롤러 사용). - 노드별 정책(이에 대해 CIDR 표기법을 사용할 수 있지만, 특히 쿠버네티스 ID로 노드를 대상으로 지정할 수 없음). -- 이름으로 네임스페이스나 서비스를 타겟팅한다(그러나, {{< glossary_tooltip text="레이블" term_id="label" >}}로 파드나 네임스페이스를 타겟팅할 수 있으며, 이는 종종 실행할 수 있는 해결 방법임). +- 이름으로 서비스를 타겟팅한다(그러나, {{< glossary_tooltip text="레이블" term_id="label" >}}로 파드나 네임스페이스를 타겟팅할 수 있으며, 이는 종종 실행할 수 있는 해결 방법임). - 타사 공급사가 이행한 "정책 요청"의 생성 또는 관리. - 모든 네임스페이스나 파드에 적용되는 기본 정책(이를 수행할 수 있는 타사 공급사의 쿠버네티스 배포본 및 프로젝트가 있음). - 고급 정책 쿼리 및 도달 가능성 도구. -- 단일 정책 선언에서 포트 범위를 대상으로 하는 기능. - 네트워크 보안 이벤트를 기록하는 기능(예: 차단되거나 수락된 연결). - 명시적으로 정책을 거부하는 기능(현재 네트워크폴리시 모델은 기본적으로 거부하며, 허용 규칙을 추가하는 기능만 있음). - 루프백 또는 들어오는 호스트 트래픽을 방지하는 기능(파드는 현재 로컬 호스트 접근을 차단할 수 없으며, 상주 노드의 접근을 차단할 수 있는 기능도 없음). diff --git a/content/ko/docs/concepts/services-networking/service-topology.md b/content/ko/docs/concepts/services-networking/service-topology.md index 567b9987911dd..47799ba9f73fc 100644 --- a/content/ko/docs/concepts/services-networking/service-topology.md +++ b/content/ko/docs/concepts/services-networking/service-topology.md @@ -1,10 +1,8 @@ --- -title: 서비스 토폴로지 -feature: - title: 서비스 토폴로지 - description: > - 클러스터 토폴로지를 기반으로 서비스 트래픽 라우팅. + + +title: 토폴로지 키를 사용하여 토폴로지-인지 트래픽 라우팅 content_type: concept weight: 10 --- @@ -12,7 +10,16 @@ weight: 10 -{{< feature-state for_k8s_version="v1.17" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="deprecated" >}} + +{{< note >}} + +이 기능, 특히 알파 `topologyKeys` API는 쿠버네티스 v1.21부터 +더 이상 사용되지 않는다. +쿠버네티스 v1.21에 도입된 [토폴로지 인지 힌트](/docs/concepts/services-networking/topology-aware-hints/)는 +유사한 기능을 제공한다. + +{{}} _서비스 토폴로지_ 를 활성화 하면 서비스는 클러스터의 노드 토폴로지를 기반으로 트래픽을 라우팅한다. 예를 들어, 서비스는 트래픽을 @@ -20,33 +27,33 @@ _서비스 토폴로지_ 를 활성화 하면 서비스는 클러스터의 노 우선적으로 라우팅되도록 지정할 수 있다. - ## 소개 기본적으로 `ClusterIP` 또는 `NodePort` 서비스로 전송된 트래픽은 서비스의 -모든 백엔드 주소로 라우팅 될 수 있다. 쿠버네티스 1.7부터는 "외부(external)" -트래픽을 수신한 노드에서 실행중인 파드로 라우팅할 수 있었지만, -`ClusterIP` 서비스에서는 지원되지 않으며 더 복잡한 -토폴로지 — 영역별 라우팅과 같은 — 에서는 불가능 했다. -_서비스 토폴로지_ 기능은 서비스 생성자가 발신 노드와 수신 노드에 대해서 -노드 레이블에 기반한 트래픽 라우팅 정책을 정의할 수 있도록 -함으로써 이 문제를 해결한다. - -소스와 목적지의 노드 레이블 일치를 사용하여 운영자는 운영자의 요구 사항에 -적합한 메트릭에 대해서 서로 "근접(closer)" 하거나 "먼(farther)" -노드 그룹을 지정할 수 있다. 공용 클라우드의 많은 운영자들이 서비스 트래픽을 -동일한 영역에서 유지하는 것을 선호하는 것을 필요성의 예제로 볼 수 있다. 그 이유는 -지역간의 트래픽에는 관련 비용이 발생하지만 지역 내의 트래픽은 발생하지 않기 때문이다. -다른 일반적인 필요성으로는 DaemonSet이 관리하는 로컬 파드로 -트래픽을 라우팅 하거나, 대기시간을 최소화하기 위해 동일한 랙 상단(top-of-rack) 스위치에 -연결된 노드로 트래픽을 유지하는 것이 있다. +모든 백엔드 주소로 라우팅될 수 있다. 쿠버네티스 1.7을 사용하면 트래픽을 수신한 +동일한 노드에서 실행 중인 파드로 "외부(external)" 트래픽을 라우팅할 수 +있다. `ClusterIP` 서비스의 경우, 라우팅에 대한 동일한 노드 기본 설정이 +불가능했다. 또한 동일한 영역 내의 엔드 포인트에 대한 라우팅을 선호하도록 +클러스터를 구성할 수도 없다. +서비스에 `topologyKeys` 를 설정하면, 출발 및 대상 노드에 대한 +노드 레이블을 기반으로 트래픽을 라우팅하는 정책을 정의할 수 있다. + +소스와 목적지 사이의 레이블 일치를 통해 클러스터 운영자는 +서로 "근접(closer)"하거나 "먼(father)" 노드 그룹을 지정할 수 있다. +자신의 요구 사항에 맞는 메트릭을 나타내는 레이블을 정의할 수 있다. +예를 들어, 퍼블릭 클라우드에서는 지역 간의 트래픽에는 관련 비용이 발생(지역 내 +트래픽은 일반적으로 그렇지 않다)하기 때문에, 네트워크 트래픽을 동일한 지역 내에 유지하는 것을 +선호할 수 있다. 다른 일반적인 필요성으로는 데몬셋(DaemonSet)이 관리하는 +로컬 파드로 트래픽을 라우팅하거나, 대기 시간을 최소화하기 위해 +동일한 랙 상단(top-of-rack) 스위치에 연결된 노드로 트래픽을 +유지하는 것이 있다. ## 서비스 토폴로지 사용하기 -만약 클러스터에서 서비스 토폴로지가 활성화된 경우, 서비스 사양에서 +만약 클러스터에서 `ServiceTopology` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)가 활성화된 경우, 서비스 사양에서 `topologyKeys` 필드를 지정해서 서비스 트래픽 라우팅을 제어할 수 있다. 이 필드는 이 서비스에 접근할 때 엔드포인트를 정렬하는데 사용되는 노드 레이블의 우선 순위 목록이다. 
트래픽은 첫 번째 레이블 값이 해당 레이블의 @@ -196,5 +203,3 @@ spec: * [서비스 토폴로지 활성화하기](/docs/tasks/administer-cluster/enabling-service-topology)를 읽어보기. * [서비스와 애플리케이션 연결하기](/ko/docs/concepts/services-networking/connect-applications-service/)를 읽어보기. - - diff --git a/content/ko/docs/concepts/services-networking/service.md b/content/ko/docs/concepts/services-networking/service.md index da9d353d6f86e..e5aa794ae0bdb 100644 --- a/content/ko/docs/concepts/services-networking/service.md +++ b/content/ko/docs/concepts/services-networking/service.md @@ -134,7 +134,7 @@ spec: * 한 서비스에서 다른 {{< glossary_tooltip term_id="namespace" text="네임스페이스">}} 또는 다른 클러스터의 서비스를 지정하려고 한다. * 워크로드를 쿠버네티스로 마이그레이션하고 있다. 해당 방식을 평가하는 동안, - 쿠버네티스에서는 일정 비율의 백엔드만 실행한다. + 쿠버네티스에서는 백엔드의 일부만 실행한다. 이러한 시나리오 중에서 파드 셀렉터 _없이_ 서비스를 정의 할 수 있다. 예를 들면 @@ -187,9 +187,14 @@ ExternalName 서비스는 셀렉터가 없고 DNS명을 대신 사용하는 특수한 상황의 서비스이다. 자세한 내용은 이 문서 뒷부분의 [ExternalName](#externalname) 섹션을 참조한다. +### 초과 용량 엔드포인트 +엔드포인트 리소스에 1,000개가 넘는 엔드포인트가 있는 경우 쿠버네티스 v1.21(또는 그 이상) +클러스터는 해당 엔드포인트에 `endpoints.kubernetes.io/over-capacity: warning` 어노테이션을 추가한다. +이 어노테이션은 영향을 받는 엔드포인트 오브젝트가 용량을 초과했음을 나타낸다. + ### 엔드포인트슬라이스 -{{< feature-state for_k8s_version="v1.17" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} 엔드포인트슬라이스는 엔드포인트에 보다 확장 가능한 대안을 제공할 수 있는 API 리소스이다. 개념적으로 엔드포인트와 매우 유사하지만, 엔드포인트슬라이스를 @@ -311,7 +316,7 @@ IPVS는 트래픽을 백엔드 파드로 밸런싱하기 위한 추가 옵션을 {{< note >}} IPVS 모드에서 kube-proxy를 실행하려면, kube-proxy를 시작하기 전에 노드에서 IPVS를 -사용 가능하도록 해야한다. +사용 가능하도록 해야 한다. kube-proxy가 IPVS 프록시 모드에서 시작될 때, IPVS 커널 모듈을 사용할 수 있는지 확인한다. IPVS 커널 모듈이 감지되지 않으면, kube-proxy는 @@ -430,8 +435,8 @@ CoreDNS와 같은, 클러스터-인식 DNS 서버는 새로운 서비스를 위 예를 들면, 쿠버네티스 네임스페이스 `my-ns`에 `my-service`라는 서비스가 있는 경우, 컨트롤 플레인과 DNS 서비스가 함께 작동하여 `my-service.my-ns`에 대한 DNS 레코드를 만든다. `my-ns` 네임 스페이스의 파드들은 -간단히 `my-service`에 대한 이름 조회를 수행하여 찾을 수 있어야 한다 -(`my-service.my-ns` 역시 동작함). +`my-service`(`my-service.my-ns` 역시 동작함)에 대한 이름 조회를 +수행하여 서비스를 찾을 수 있어야 한다. 다른 네임스페이스의 파드들은 이름을 `my-service.my-ns`으로 사용해야 한다. 이 이름은 서비스에 할당된 클러스터 IP로 변환된다. @@ -463,7 +468,7 @@ DNS SRV 쿼리를 수행할 수 있다. 셀렉터를 정의하는 헤드리스 서비스의 경우, 엔드포인트 컨트롤러는 API에서 `엔드포인트` 레코드를 생성하고, DNS 구성을 수정하여 -`서비스` 를 지원하는 `파드` 를 직접 가리키는 레코드 (주소)를 반환한다. +`서비스` 를 지원하는 `파드` 를 직접 가리키는 A 레코드(IP 주소)를 반환한다. ### 셀렉터가 없는 경우 @@ -513,8 +518,12 @@ API에서 `엔드포인트` 레코드를 생성하고, DNS 구성을 수정하 각 노드는 해당 포트 (모든 노드에서 동일한 포트 번호)를 서비스로 프록시한다. 서비스는 할당된 포트를 `.spec.ports[*].nodePort` 필드에 나타낸다. -포트를 프록시하기 위해 특정 IP를 지정하려면 kube-proxy의 `--nodeport-addresses` 플래그를 특정 IP 블록으로 설정할 수 있다. 이것은 쿠버네티스 v1.10부터 지원된다. -이 플래그는 쉼표로 구분된 IP 블록 목록 (예: 10.0.0.0/8, 192.0.2.0/25)을 사용하여 kube-proxy가 로컬 노드로 고려해야 하는 IP 주소 범위를 지정한다. +포트를 프록시하기 위해 특정 IP를 지정하려면, kube-proxy에 대한 +`--nodeport-addresses` 플래그 또는 +[kube-proxy 구성 파일](/docs/reference/config-api/kube-proxy-config.v1alpha1/)의 +동등한 `nodePortAddresses` 필드를 +특정 IP 블록으로 설정할 수 있다. +이 플래그는 쉼표로 구분된 IP 블록 목록(예: `10.0.0.0/8`, `192.0.2.0/25`)을 사용하여 kube-proxy가 로컬 노드로 고려해야 하는 IP 주소 범위를 지정한다. 예를 들어, `--nodeport-addresses=127.0.0.0/8` 플래그로 kube-proxy를 시작하면, kube-proxy는 NodePort 서비스에 대하여 루프백(loopback) 인터페이스만 선택한다. `--nodeport-addresses`의 기본 값은 비어있는 목록이다. 이것은 kube-proxy가 NodePort에 대해 사용 가능한 모든 네트워크 인터페이스를 고려해야 한다는 것을 의미한다. (이는 이전 쿠버네티스 릴리스와도 호환된다). @@ -530,7 +539,9 @@ NodePort를 사용하면 자유롭게 자체 로드 밸런싱 솔루션을 설 하나 이상의 노드 IP를 직접 노출시킬 수 있다. 이 서비스는 `:spec.ports[*].nodePort`와 -`.spec.clusterIP:spec.ports[*].port`로 표기된다. (kube-proxy에서 `--nodeport-addresses` 플래그가 설정되면, 는 NodeIP를 필터링한다.) +`.spec.clusterIP:spec.ports[*].port`로 표기된다. 
+kube-proxy에 대한 `--nodeport-addresses` 플래그 또는 kube-proxy 구성 파일의 +동등한 필드가 설정된 경우, `` 는 노드 IP를 필터링한다. 예를 들면 @@ -628,6 +639,25 @@ v1.20부터는 `spec.allocateLoadBalancerNodePorts` 필드를 `false`로 설정 이러한 노드 포트를 할당 해제하려면 모든 서비스 포트에서 `nodePorts` 항목을 명시적으로 제거해야 한다. 이 필드를 사용하려면 `ServiceLBNodePortControl` 기능 게이트를 활성화해야 한다. +#### 로드 밸런서 구현 클래스 지정 {#load-balancer-class} + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +v1.21부터는, `spec.loadBalancerClass` 필드를 설정하여 `LoadBalancer` 서비스 유형에 +대한 로드 밸런서 구현 클래스를 선택적으로 지정할 수 있다. +기본적으로, `spec.loadBalancerClass` 는 `nil` 이고 `LoadBalancer` 유형의 서비스는 +클라우드 공급자의 기본 로드 밸런서 구현을 사용한다. +`spec.loadBalancerClass` 가 지정되면, 지정된 클래스와 일치하는 로드 밸런서 +구현이 서비스를 감시하고 있다고 가정한다. +모든 기본 로드 밸런서 구현(예: 클라우드 공급자가 제공하는 +로드 밸런서 구현)은 이 필드가 설정된 서비스를 무시한다. +`spec.loadBalancerClass` 는 `LoadBalancer` 유형의 서비스에서만 설정할 수 있다. +한 번 설정하면 변경할 수 없다. +`spec.loadBalancerClass` 의 값은 "`internal-vip`" 또는 +"`example.com/internal-vip`" 와 같은 선택적 접두사가 있는 레이블 스타일 식별자여야 한다. +접두사가 없는 이름은 최종 사용자를 위해 예약되어 있다. +이 필드를 사용하려면 `ServiceLoadBalancerClass` 기능 게이트를 활성화해야 한다. + #### 내부 로드 밸런서 혼재된 환경에서는 서비스의 트래픽을 동일한 (가상) 네트워크 주소 블록 내로 @@ -785,8 +815,7 @@ TCP 및 SSL은 4 계층 프록시를 선택한다. ELB는 헤더를 수정하지 ``` 위의 예에서, 서비스에 `80`, `443`, `8443`의 3개 포트가 포함된 경우, -`443`, `8443`은 SSL 인증서를 사용하지만, `80`은 단순히 -프록시만 하는 HTTP이다. +`443`, `8443`은 SSL 인증서를 사용하지만, `80`은 프록시하는 HTTP이다. 쿠버네티스 v1.9부터는 서비스에 대한 HTTPS 또는 SSL 리스너와 함께 [사전에 정의된 AWS SSL 정책](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html)을 사용할 수 있다. 사용 가능한 정책을 확인하려면, `aws` 커맨드라인 툴을 사용한다. @@ -958,7 +987,8 @@ NLB는 특정 인스턴스 클래스에서만 작동한다. 지원되는 인스 | 규칙 | 프로토콜 | 포트 | IP 범위 | IP 범위 설명 | |------|----------|---------|------------|---------------------| -| 헬스 체크 | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | VPC CIDR | kubernetes.io/rule/nlb/health=\ | +| 헬스 체크 | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | Subnet CIDR | kubernetes.io/rule/nlb/health=\ | + | 클라이언트 트래픽 | TCP | NodePort(s) | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/client=\ | | MTU 탐색 | ICMP | 3,4 | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/mtu=\ | @@ -1120,7 +1150,7 @@ VIP용 유저스페이스 프록시를 사용하면 중소 규모의 스케일 않아도 된다. 그것은 격리 실패이다. 서비스에 대한 포트 번호를 선택할 수 있도록 하기 위해, 두 개의 -서비스가 충돌하지 않도록 해야한다. 쿠버네티스는 각 서비스에 고유한 IP 주소를 +서비스가 충돌하지 않도록 해야 한다. 쿠버네티스는 각 서비스에 고유한 IP 주소를 할당하여 이를 수행한다. 각 서비스가 고유한 IP를 받도록 하기 위해, 내부 할당기는 @@ -1164,7 +1194,7 @@ IP 주소(예 : 10.0.0.1)를 할당한다. 서비스 포트를 1234라고 가정 이는 서비스 소유자가 충돌 위험 없이 원하는 어떤 포트든 선택할 수 있음을 의미한다. 클라이언트는 실제로 접근하는 파드를 몰라도, IP와 포트에 -간단히 연결할 수 있다. +연결할 수 있다. #### iptables diff --git a/content/ko/docs/concepts/storage/dynamic-provisioning.md b/content/ko/docs/concepts/storage/dynamic-provisioning.md index 412645e734208..d39289106e63d 100644 --- a/content/ko/docs/concepts/storage/dynamic-provisioning.md +++ b/content/ko/docs/concepts/storage/dynamic-provisioning.md @@ -75,8 +75,8 @@ parameters: 사용자는 `PersistentVolumeClaim` 에 스토리지 클래스를 포함시켜 동적으로 프로비전된 스토리지를 요청한다. 쿠버네티스 v1.6 이전에는 `volume.beta.kubernetes.io/storage-class` 어노테이션을 통해 수행되었다. 그러나 이 어노테이션은 -v1.6부터 더 이상 사용하지 않는다. 사용자는 이제 `PersistentVolumeClaim` 오브젝트의 -`storageClassName` 필드를 사용할 수 있기에 대신하여 사용해야 한다. 이 필드의 값은 +v1.9부터는 더 이상 사용하지 않는다. 사용자는 이제 `PersistentVolumeClaim` 오브젝트의 +`storageClassName` 필드를 사용해야 한다. 이 필드의 값은 관리자가 구성한 `StorageClass` 의 이름과 일치해야 한다. 
([아래](#동적-프로비저닝-활성화하기)를 참고) diff --git a/content/ko/docs/concepts/storage/persistent-volumes.md b/content/ko/docs/concepts/storage/persistent-volumes.md index 9ce1eba6cf21f..3a85139cd207b 100644 --- a/content/ko/docs/concepts/storage/persistent-volumes.md +++ b/content/ko/docs/concepts/storage/persistent-volumes.md @@ -29,7 +29,7 @@ _퍼시스턴트볼륨_ (PV)은 관리자가 프로비저닝하거나 [스토리 _퍼시스턴트볼륨클레임_ (PVC)은 사용자의 스토리지에 대한 요청이다. 파드와 비슷하다. 파드는 노드 리소스를 사용하고 PVC는 PV 리소스를 사용한다. 파드는 특정 수준의 리소스(CPU 및 메모리)를 요청할 수 있다. 클레임은 특정 크기 및 접근 모드를 요청할 수 있다(예: ReadWriteOnce, ReadOnlyMany 또는 ReadWriteMany로 마운트 할 수 있음. [AccessModes](#접근-모드) 참고). -퍼시스턴트볼륨클레임을 사용하면 사용자가 추상화된 스토리지 리소스를 사용할 수 있지만, 다른 문제들 때문에 성능과 같은 다양한 속성을 가진 퍼시스턴트볼륨이 필요한 경우가 일반적이다. 클러스터 관리자는 사용자에게 해당 볼륨의 구현 방법에 대한 세부 정보를 제공하지 않고 단순히 크기와 접근 모드와는 다른 방식으로 다양한 퍼시스턴트볼륨을 제공할 수 있어야 한다. 이러한 요구에는 _스토리지클래스_ 리소스가 있다. +퍼시스턴트볼륨클레임을 사용하면 사용자가 추상화된 스토리지 리소스를 사용할 수 있지만, 다른 문제들 때문에 성능과 같은 다양한 속성을 가진 퍼시스턴트볼륨이 필요한 경우가 일반적이다. 클러스터 관리자는 사용자에게 해당 볼륨의 구현 방법에 대한 세부 정보를 제공하지 않고 크기와 접근 모드와는 다른 방식으로 다양한 퍼시스턴트볼륨을 제공할 수 있어야 한다. 이러한 요구에는 _스토리지클래스_ 리소스가 있다. [실습 예제와 함께 상세한 내용](/ko/docs/tasks/configure-pod-container/configure-persistent-volume-storage/)을 참고하길 바란다. @@ -487,7 +487,7 @@ PV는 `storageClassName` 속성을 * VsphereVolume * iSCSI -마운트 옵션의 유효성이 검사되지 않으므로 마운트 옵션이 유효하지 않으면 마운트가 실패한다. +마운트 옵션의 유효성이 검사되지 않는다. 마운트 옵션이 유효하지 않으면, 마운트가 실패한다. 이전에는 `mountOptions` 속성 대신 `volume.beta.kubernetes.io/mount-options` 어노테이션이 사용되었다. 이 어노테이션은 아직까지는 사용할 수 있지만, @@ -629,6 +629,11 @@ spec: 퍼시스턴트볼륨 바인딩은 배타적이며, 퍼시스턴트볼륨클레임은 네임스페이스 오브젝트이므로 "다중" 모드(`ROX`, `RWX`)를 사용한 클레임은 하나의 네임스페이스 내에서만 가능하다. +### `hostPath` 유형의 퍼시스턴트볼륨 + +`hostPath` 퍼시스턴트볼륨은 노드의 파일이나 디렉터리를 사용하여 네트워크 연결 스토리지를 에뮬레이션한다. +[`hostPath` 유형 볼륨의 예](/ko/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#퍼시스턴트볼륨-생성하기)를 참고한다. + ## 원시 블록 볼륨 지원 {{< feature-state for_k8s_version="v1.18" state="stable" >}} diff --git a/content/ko/docs/concepts/storage/storage-classes.md b/content/ko/docs/concepts/storage/storage-classes.md index 8bc6f7b1bf1d4..94577ca182cab 100644 --- a/content/ko/docs/concepts/storage/storage-classes.md +++ b/content/ko/docs/concepts/storage/storage-classes.md @@ -143,8 +143,8 @@ CSI | 1.14 (alpha), 1.16 (beta) 클래스의 `mountOptions` 필드에 지정된 마운트 옵션을 가진다. 만약 볼륨 플러그인이 마운트 옵션을 지원하지 않는데, 마운트 -옵션을 지정하면 프로비저닝은 실패한다. 마운트 옵션은 클래스 또는 PV 에서 -검증되지 않으므로 PV 마운트가 유효하지 않으면 마운트가 실패하게 된다. +옵션을 지정하면 프로비저닝은 실패한다. 마운트 옵션은 클래스 또는 PV에서 +검증되지 않는다. PV 마운트가 유효하지 않으면, 마운트가 실패하게 된다. ### 볼륨 바인딩 모드 diff --git a/content/ko/docs/concepts/storage/volume-pvc-datasource.md b/content/ko/docs/concepts/storage/volume-pvc-datasource.md index e6ff2caa38f79..e9857885d7a09 100644 --- a/content/ko/docs/concepts/storage/volume-pvc-datasource.md +++ b/content/ko/docs/concepts/storage/volume-pvc-datasource.md @@ -19,7 +19,7 @@ weight: 30 복제는 표준 볼륨처럼 소비할 수 있는 쿠버네티스 볼륨의 복제본으로 정의된다. 유일한 차이점은 프로비저닝할 때 "새" 빈 볼륨을 생성하는 대신에 백엔드 장치가 지정된 볼륨의 정확한 복제본을 생성한다는 것이다. -쿠버네티스 API의 관점에서 복제를 구현하면 새로운 PVC 생성 중에 기존 PVC를 데이터 소스로 지정할 수 있는 기능이 추가된다. 소스 PVC는 바인딩되어있고, 사용가능해야 한다(사용 중이 아니어야함). +쿠버네티스 API의 관점에서 복제를 구현하면 새로운 PVC 생성 중에 기존 PVC를 데이터 소스로 지정할 수 있는 기능이 추가된다. 소스 PVC는 바인딩되어 있고, 사용 가능해야 한다(사용 중이 아니어야 함). 사용자는 이 기능을 사용할 때 다음 사항을 알고 있어야 한다. @@ -64,5 +64,3 @@ spec: ## 사용 새 PVC를 사용할 수 있게 되면, 복제된 PVC는 다른 PVC와 동일하게 소비된다. 또한, 이 시점에서 새롭게 생성된 PVC는 독립된 오브젝트이다. 원본 dataSource PVC와는 무관하게 독립적으로 소비하고, 복제하고, 스냅샷의 생성 또는 삭제를 할 수 있다. 이는 소스가 새롭게 생성된 복제본에 어떤 방식으로든 연결되어 있지 않으며, 새롭게 생성된 복제본에 영향 없이 수정하거나, 삭제할 수도 있는 것을 의미한다. 
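A minimal sketch of the cloning flow described above; the PVC names, storage class, and size are assumptions, and the source PVC must be bound, available, and in the same namespace:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: clone-of-pvc-1            # hypothetical name
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: csi-storage   # assumed; must be a CSI class that supports cloning
  resources:
    requests:
      storage: 10Gi               # must be at least as large as the source volume
  dataSource:
    kind: PersistentVolumeClaim
    name: pvc-1                   # the existing source PVC being cloned
```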
- - diff --git a/content/ko/docs/concepts/storage/volume-snapshot-classes.md b/content/ko/docs/concepts/storage/volume-snapshot-classes.md index e5b6002e6ee2a..862c900feedea 100644 --- a/content/ko/docs/concepts/storage/volume-snapshot-classes.md +++ b/content/ko/docs/concepts/storage/volume-snapshot-classes.md @@ -68,7 +68,7 @@ parameters: ### 드라이버 볼륨 스냅샷 클래스에는 볼륨스냅샷의 프로비저닝에 사용되는 CSI 볼륨 플러그인을 -결정하는 드라이버를 가지고 있다. 이 필드는 반드시 지정해야한다. +결정하는 드라이버를 가지고 있다. 이 필드는 반드시 지정해야 한다. ### 삭제정책(DeletionPolicy) diff --git a/content/ko/docs/concepts/storage/volumes.md b/content/ko/docs/concepts/storage/volumes.md index 2e37dc0a6755d..698ee14e726a6 100644 --- a/content/ko/docs/concepts/storage/volumes.md +++ b/content/ko/docs/concepts/storage/volumes.md @@ -31,9 +31,10 @@ weight: 10 임시 볼륨 유형은 파드의 수명을 갖지만, 퍼시스턴트 볼륨은 파드의 수명을 넘어 존재한다. 결과적으로, 볼륨은 파드 내에서 실행되는 모든 컨테이너보다 오래 지속되며, 컨테이너를 다시 시작해도 데이터가 보존된다. 파드가 -더 이상 존재하지 않으면, 볼륨은 삭제된다. +더 이상 존재하지 않으면, 쿠버네티스는 임시(ephemeral) 볼륨을 삭제하지만, +퍼시스턴트(persistent) 볼륨은 삭제하지 않는다. -기본적으로 볼륨은 디렉터리일 뿐이며, 일부 데이터가 있을 수 있으며, 파드 +기본적으로 볼륨은 디렉터리이며, 일부 데이터가 있을 수 있으며, 파드 내 컨테이너에서 접근할 수 있다. 디렉터리의 생성 방식, 이를 지원하는 매체와 내용은 사용된 특정 볼륨의 유형에 따라 결정된다. @@ -103,6 +104,8 @@ spec: fsType: ext4 ``` +EBS 볼륨이 파티션된 경우, 선택적 필드인 `partition: ""` 를 제공하여 마운트할 파티션을 지정할 수 있다. + #### AWS EBS CSI 마이그레이션 {{< feature-state for_k8s_version="v1.17" state="beta" >}} @@ -146,14 +149,16 @@ spec: #### azureFile CSI 마이그레이션 -{{< feature-state for_k8s_version="v1.15" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} `azureFile` 의 `CSIMigration` 기능이 활성화된 경우, 기존 트리 내 플러그인에서 `file.csi.azure.com` 컨테이너 스토리지 인터페이스(CSI) 드라이버로 모든 플러그인 작업을 수행한다. 이 기능을 사용하려면, 클러스터에 [Azure 파일 CSI 드라이버](https://github.com/kubernetes-sigs/azurefile-csi-driver) 를 설치하고 `CSIMigration` 과 `CSIMigrationAzureFile` -알파 기능을 활성화해야 한다. +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화해야 한다. + +Azure File CSI 드라이버는 동일한 볼륨을 다른 fsgroup에서 사용하는 것을 지원하지 않는다. Azurefile CSI 마이그레이션이 활성화된 경우, 다른 fsgroup에서 동일한 볼륨을 사용하는 것은 전혀 지원되지 않는다. ### cephfs @@ -202,14 +207,17 @@ spec: #### 오픈스택 CSI 마이그레이션 -{{< feature-state for_k8s_version="v1.18" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} -Cinder의 `CSIMigration` 기능이 활성화된 경우, 기존 트리 내 플러그인에서 -`cinder.csi.openstack.org` 컨테이너 스토리지 인터페이스(CSI) -드라이버로 모든 플러그인 작업을 수행한다. 이 기능을 사용하려면, 클러스터에 [오픈스택 Cinder CSI -드라이버](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/using-cinder-csi-plugin.md) -를 설치하고 `CSIMigration` 과 `CSIMigrationOpenStack` -베타 기능을 활성화해야 한다. +Cinder의`CSIMigration` 기능은 Kubernetes 1.21에서 기본적으로 활성화됩니다. +기존 트리 내 플러그인에서 `cinder.csi.openstack.org` 컨테이너 스토리지 인터페이스(CSI) +드라이버로 모든 플러그인 작업을 수행한다. +[오픈스택 Cinder CSI 드라이버](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md)가 +클러스터에 설치되어 있어야 한다. +`CSIMigrationOpenStack` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 +`false` 로 설정하여 클러스터에 대한 Cinder CSI 마이그레이션을 비활성화할 수 있다. +`CSIMigrationOpenStack` 기능을 비활성화하면, 트리 내 Cinder 볼륨 플러그인이 +Cinder 볼륨 스토리지 관리의 모든 측면을 담당한다. ### 컨피그맵(configMap) {#configmap} @@ -534,7 +542,7 @@ glusterfs 볼륨에 데이터를 미리 채울 수 있으며, 파드 간에 데 | 값 | 행동 | |:------|:---------| -| | 빈 문자열 (기본값)은 이전 버전과의 호환성을 위한 것으로, hostPash 볼륨은 마운트 하기 전에 아무런 검사도 수행되지 않는다. | +| | 빈 문자열 (기본값)은 이전 버전과의 호환성을 위한 것으로, hostPath 볼륨은 마운트 하기 전에 아무런 검사도 수행되지 않는다. | | `DirectoryOrCreate` | 만약 주어진 경로에 아무것도 없다면, 필요에 따라 Kubelet이 가지고 있는 동일한 그룹과 소유권, 권한을 0755로 설정한 빈 디렉터리를 생성한다. 
| | `Directory` | 주어진 경로에 디렉터리가 있어야 함 | | `FileOrCreate` | 만약 주어진 경로에 아무것도 없다면, 필요에 따라 Kubelet이 가지고 있는 동일한 그룹과 소유권, 권한을 0644로 설정한 빈 디렉터리를 생성한다. | @@ -922,7 +930,7 @@ CSI 는 쿠버네티스 내에서 Quobyte 볼륨을 사용하기 위해 권장 ### rbd `rbd` 볼륨을 사용하면 -[Rados Block Device](https://ceph.com/docs/master/rbd/rbd/)(RBD) 볼륨을 파드에 마운트할 수 +[Rados Block Device](https://docs.ceph.com/en/latest/rbd/)(RBD) 볼륨을 파드에 마운트할 수 있다. 파드를 제거할 때 지워지는 `emptyDir` 와는 다르게 `rbd` 볼륨의 내용은 유지되고, 볼륨은 마운트 해제만 된다. 이 의미는 RBD 볼륨에 데이터를 미리 채울 수 있으며, 데이터를 @@ -1330,7 +1338,7 @@ CSI 호환 볼륨 드라이버가 쿠버네티스 클러스터에 배포되면 * `controllerPublishSecretRef`: CSI의 `ControllerPublishVolume` 그리고 `ControllerUnpublishVolume` 호출을 완료하기 위해 CSI 드라이버에 전달하려는 민감한 정보가 포함된 시크릿 오브젝트에 대한 참조이다. 이 필드는 - 선택사항이며, 시크릿이 필요하지 않은 경우 비어있을 수 있다. 만약 시크릿에 + 선택 사항이며, 시크릿이 필요하지 않은 경우 비어있을 수 있다. 만약 시크릿에 둘 이상의 시크릿이 포함된 경우에도 모든 시크릿이 전달된다. * `nodeStageSecretRef`: CSI의 `NodeStageVolume` 호출을 완료하기위해 CSI 드라이버에 전달하려는 민감한 정보가 포함 된 시크릿 diff --git a/content/ko/docs/concepts/workloads/controllers/cron-jobs.md b/content/ko/docs/concepts/workloads/controllers/cron-jobs.md index ed29659a7e5fa..6935cf8fb404e 100644 --- a/content/ko/docs/concepts/workloads/controllers/cron-jobs.md +++ b/content/ko/docs/concepts/workloads/controllers/cron-jobs.md @@ -10,7 +10,7 @@ weight: 80 -{{< feature-state for_k8s_version="v1.8" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} _크론잡은_ 반복 일정에 따라 {{< glossary_tooltip term_id="job" text="잡" >}}을 만든다. @@ -89,6 +89,11 @@ kube-controller-manager 컨테이너에 설정된 시간대는 `concurrencyPolicy` 가 `Allow` 로 설정될 경우, 잡은 항상 적어도 한 번은 실행될 것이다. +{{< caution >}} +`startingDeadlineSeconds` 가 10초 미만의 값으로 설정되면, 크론잡이 스케줄되지 않을 수 있다. 이는 크론잡 컨트롤러가 10초마다 항목을 확인하기 때문이다. +{{< /caution >}} + + 모든 크론잡에 대해 크론잡 {{< glossary_tooltip term_id="controller" text="컨트롤러" >}} 는 마지막 일정부터 지금까지 얼마나 많은 일정이 누락되었는지 확인한다. 만약 100회 이상의 일정이 누락되었다면, 잡을 실행하지 않고 아래와 같은 에러 로그를 남긴다. ```` @@ -110,12 +115,17 @@ Cannot determine if job needs to be started. Too many missed start time (> 100). 크론잡은 오직 그 일정에 맞는 잡 생성에 책임이 있고, 잡은 그 잡이 대표하는 파드 관리에 책임이 있다. -## 새 컨트롤러 +## 컨트롤러 버전 {#new-controller} -쿠버네티스 1.20부터 알파 기능으로 사용할 수 있는 크론잡 컨트롤러의 대체 구현이 있다. 크론잡 컨트롤러의 버전 2를 선택하려면, 다음의 [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/) 플래그를 {{< glossary_tooltip term_id="kube-controller-manager" text="kube-controller-manager" >}}에 전달한다. +쿠버네티스 v1.21부터 크론잡 컨트롤러의 두 번째 버전이 +기본 구현이다. 기본 크론잡 컨트롤러를 비활성화하고 +대신 원래 크론잡 컨트롤러를 사용하려면, `CronJobControllerV2` +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/) +플래그를 {{< glossary_tooltip term_id="kube-controller-manager" text="kube-controller-manager" >}}에 전달하고, +이 플래그를 `false` 로 설정한다. 예를 들면, 다음과 같다. ``` ---feature-gates="CronJobControllerV2=true" +--feature-gates="CronJobControllerV2=false" ``` diff --git a/content/ko/docs/concepts/workloads/controllers/daemonset.md b/content/ko/docs/concepts/workloads/controllers/daemonset.md index 589fe7c1ddd03..d7d583d142803 100644 --- a/content/ko/docs/concepts/workloads/controllers/daemonset.md +++ b/content/ko/docs/concepts/workloads/controllers/daemonset.md @@ -141,8 +141,8 @@ nodeAffinity: | ---------------------------------------- | ---------- | ------- | ------------------------------------------------------------ | | `node.kubernetes.io/not-ready` | NoExecute | 1.13+ | 네트워크 파티션과 같은 노드 문제가 발생해도 데몬셋 파드는 축출되지 않는다. | | `node.kubernetes.io/unreachable` | NoExecute | 1.13+ | 네트워크 파티션과 같은 노드 문제가 발생해도 데몬셋 파드는 축출되지 않는다. 
| -| `node.kubernetes.io/disk-pressure` | NoSchedule | 1.8+ | | -| `node.kubernetes.io/memory-pressure` | NoSchedule | 1.8+ | | +| `node.kubernetes.io/disk-pressure` | NoSchedule | 1.8+ | 데몬셋 파드는 기본 스케줄러에서 디스크-압박(disk-pressure) 속성을 허용한다. | +| `node.kubernetes.io/memory-pressure` | NoSchedule | 1.8+ | 데몬셋 파드는 기본 스케줄러에서 메모리-압박(memory-pressure) 속성을 허용한다. | | `node.kubernetes.io/unschedulable` | NoSchedule | 1.12+ | 데몬셋 파드는 기본 스케줄러의 스케줄할 수 없는(unschedulable) 속성을 극복한다. | | `node.kubernetes.io/network-unavailable` | NoSchedule | 1.12+ | 호스트 네트워크를 사용하는 데몬셋 파드는 기본 스케줄러에 의해 이용할 수 없는 네트워크(network-unavailable) 속성을 극복한다. | diff --git a/content/ko/docs/concepts/workloads/controllers/deployment.md b/content/ko/docs/concepts/workloads/controllers/deployment.md index 779fcfbe34965..ac782e700821e 100644 --- a/content/ko/docs/concepts/workloads/controllers/deployment.md +++ b/content/ko/docs/concepts/workloads/controllers/deployment.md @@ -45,14 +45,14 @@ _디플로이먼트(Deployment)_ 는 {{< glossary_tooltip text="파드" term_id= * `.metadata.name` 필드에 따라 `nginx-deployment` 이름으로 디플로이먼트가 생성된다. * `.spec.replicas` 필드에 따라 디플로이먼트는 3개의 레플리카 파드를 생성한다. * `.spec.selector` 필드는 디플로이먼트가 관리할 파드를 찾는 방법을 정의한다. - 이 사례에서는 간단하게 파드 템플릿에 정의된 레이블(`app: nginx`)을 선택한다. + 이 사례에서는 파드 템플릿에 정의된 레이블(`app: nginx`)을 선택한다. 그러나 파드 템플릿 자체의 규칙이 만족되는 한, 보다 정교한 선택 규칙의 적용이 가능하다. {{< note >}} `.spec.selector.matchLabels` 필드는 {key,value}의 쌍으로 매핑되어있다. `matchLabels` 에 매핑된 - 단일 {key,value}은 `matchExpressions` 의 요소에 해당하며, 키 필드는 "key"에 그리고 연산자는 "In"에 대응되며 - 값 배열은 "value"만 포함한다. + 단일 {key,value}은 `matchExpressions` 의 요소에 해당하며, `key` 필드는 "key"에 그리고 `operator`는 "In"에 대응되며 + `value` 배열은 "value"만 포함한다. 매칭을 위해서는 `matchLabels` 와 `matchExpressions` 의 모든 요건이 충족되어야 한다. {{< /note >}} @@ -169,13 +169,15 @@ kubectl apply -f https://k8s.io/examples/controllers/nginx-deployment.yaml ```shell kubectl --record deployment.apps/nginx-deployment set image deployment.v1.apps/nginx-deployment nginx=nginx:1.16.1 ``` - 또는 간단하게 다음의 명령어를 사용한다. + + 또는 다음의 명령어를 사용한다. ```shell kubectl set image deployment/nginx-deployment nginx=nginx:1.16.1 --record ``` - 이와 유사하게 출력된다. + 다음과 유사하게 출력된다. + ``` deployment.apps/nginx-deployment image updated ``` @@ -186,7 +188,8 @@ kubectl apply -f https://k8s.io/examples/controllers/nginx-deployment.yaml kubectl edit deployment.v1.apps/nginx-deployment ``` - 이와 유사하게 출력된다. + 다음과 유사하게 출력된다. + ``` deployment.apps/nginx-deployment edited ``` @@ -198,10 +201,13 @@ kubectl apply -f https://k8s.io/examples/controllers/nginx-deployment.yaml ``` 이와 유사하게 출력된다. + ``` Waiting for rollout to finish: 2 out of 3 new replicas have been updated... ``` + 또는 + ``` deployment "nginx-deployment" successfully rolled out ``` @@ -210,10 +216,11 @@ kubectl apply -f https://k8s.io/examples/controllers/nginx-deployment.yaml * 롤아웃이 성공하면 `kubectl get deployments` 를 실행해서 디플로이먼트를 볼 수 있다. 이와 유사하게 출력된다. - ``` - NAME READY UP-TO-DATE AVAILABLE AGE - nginx-deployment 3/3 3 3 36s - ``` + + ```ini + NAME READY UP-TO-DATE AVAILABLE AGE + nginx-deployment 3/3 3 3 36s + ``` * `kubectl get rs` 를 실행해서 디플로이먼트가 새 레플리카셋을 생성해서 파드를 업데이트 했는지 볼 수 있고, 새 레플리카셋을 최대 3개의 레플리카로 스케일 업, 이전 레플리카셋을 0개의 레플리카로 스케일 다운한다. @@ -334,7 +341,7 @@ kubectl apply -f https://k8s.io/examples/controllers/nginx-deployment.yaml API 버전 `apps/v1` 에서 디플로이먼트의 레이블 셀렉터는 생성 이후에는 변경할 수 없다. {{< /note >}} -* 셀렉터 추가 시 디플로이먼트의 사양에 있는 파드 템플릿 레이블도 새 레이블로 업데이트 해야한다. +* 셀렉터 추가 시 디플로이먼트의 사양에 있는 파드 템플릿 레이블도 새 레이블로 업데이트해야 한다. 그렇지 않으면 유효성 검사 오류가 반환된다. 이 변경은 겹치지 않는 변경으로 새 셀렉터가 이전 셀렉터로 만든 레플리카셋과 파드를 선택하지 않게 되고, 그 결과로 모든 기존 레플리카셋은 고아가 되며, 새로운 레플리카셋을 생성하게 된다. 
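The selector note in the deployment hunks above can be made concrete: the two selector fragments below (a sketch, not tied to any manifest in this diff) are equivalent, with the single {key,value} pair of `matchLabels` expanding to one `matchExpressions` element whose `operator` is `In`:

```yaml
# shorthand form
selector:
  matchLabels:
    app: nginx
---
# equivalent expanded form
selector:
  matchExpressions:
  - key: app
    operator: In
    values:
    - nginx
```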
@@ -699,7 +706,7 @@ nginx-deployment-618515232 11 11 11 7m 하나 이상의 업데이트를 트리거하기 전에 디플로이먼트를 일시 중지한 다음 다시 시작할 수 있다. 이렇게 하면 불필요한 롤아웃을 트리거하지 않고 일시 중지와 재개 사이에 여러 수정 사항을 적용할 수 있다. -* 예를 들어, 방금 생성된 디플로이먼트의 경우 +* 예를 들어, 생성된 디플로이먼트의 경우 디플로이먼트 상세 정보를 가져온다. ```shell kubectl get deploy @@ -1030,7 +1037,7 @@ echo $? ## 카나리 디플로이먼트 -만약 디플로이먼트를 이용해서 일부 사용자 또는 서버에 릴리즈를 롤아웃 하기 위해서는 +만약 디플로이먼트를 이용해서 일부 사용자 또는 서버에 릴리스를 롤아웃 하기 위해서는 [리소스 관리](/ko/docs/concepts/cluster-administration/manage-deployment/#카나리-canary-디플로이먼트)에 설명된 카나리 패던에 따라 각 릴리스 마다 하나씩 여러 디플로이먼트를 생성할 수 있다. @@ -1053,7 +1060,7 @@ echo $? 이것은 {{< glossary_tooltip text="파드" term_id="pod" >}}와 정확하게 동일한 스키마를 가지고 있고, 중첩된 것을 제외하면 `apiVersion` 과 `kind` 를 가지고 있지 않는다. 파드에 필요한 필드 외에 디플로이먼트 파드 템플릿은 적절한 레이블과 적절한 재시작 정책을 명시해야 한다. -레이블의 경우 다른 컨트롤러와 겹치지 않도록 해야한다. 자세한 것은 [셀렉터](#셀렉터)를 참조한다. +레이블의 경우 다른 컨트롤러와 겹치지 않도록 해야 한다. 자세한 것은 [셀렉터](#셀렉터)를 참조한다. [`.spec.template.spec.restartPolicy`](/ko/docs/concepts/workloads/pods/pod-lifecycle/#재시작-정책) 에는 오직 `Always` 만 허용되고, 명시되지 않으면 기본값이 된다. diff --git a/content/ko/docs/concepts/workloads/controllers/job.md b/content/ko/docs/concepts/workloads/controllers/job.md index 0f04051ff1f09..b9411ecc31226 100644 --- a/content/ko/docs/concepts/workloads/controllers/job.md +++ b/content/ko/docs/concepts/workloads/controllers/job.md @@ -13,10 +13,11 @@ weight: 50 -잡에서 하나 이상의 파드를 생성하고 지정된 수의 파드가 성공적으로 종료되도록 한다. +잡에서 하나 이상의 파드를 생성하고 지정된 수의 파드가 성공적으로 종료될 때까지 계속해서 파드의 실행을 재시도한다. 파드가 성공적으로 완료되면, 성공적으로 완료된 잡을 추적한다. 지정된 수의 성공 완료에 도달하면, 작업(즉, 잡)이 완료된다. 잡을 삭제하면 잡이 생성한 -파드가 정리된다. +파드가 정리된다. 작업을 일시 중지하면 작업이 다시 재개될 때까지 활성 파드가 +삭제된다. 간단한 사례는 잡 오브젝트를 하나 생성해서 파드 하나를 안정적으로 실행하고 완료하는 것이다. 첫 번째 파드가 실패 또는 삭제된 경우(예로는 노드 하드웨어의 실패 또는 @@ -98,8 +99,8 @@ echo $pods pi-5rwd7 ``` -여기서 셀렉터는 잡의 셀렉터와 동일하다. `--output=jsonpath` 옵션은 반환된 목록의 -각각의 파드에서 이름을 가져와서 표현하는 방식을 지정한다. +여기서 셀렉터는 잡의 셀렉터와 동일하다. `--output = jsonpath` 옵션은 반환된 +목록에 있는 각 파드의 이름으로 표현식을 지정한다. 파드 중 하나를 표준 출력으로 본다. @@ -145,8 +146,8 @@ kubectl logs $pods - 파드가 성공적으로 종료하자마자 즉시 잡이 완료된다. 1. *고정적(fixed)인 완료 횟수* 를 가진 병렬 잡: - `.spec.completions` 에 0이 아닌 양수 값을 지정한다. - - 잡은 전체 작업을 나타내며 1에서 `.spec.completions` 까지의 범위의 각 값에 대해 한 개씩 성공한 파드가 있으면 완료된다. - - **아직 구현되지 않음:** 각 파드에게는 1부터 `.spec.completions` 까지의 범위 내의 서로 다른 인덱스가 전달된다. + - 잡은 전체 작업을 나타내며, `.spec.completions` 성공한 파드가 있을 때 완료된다. + - `.spec.completionMode="Indexed"` 를 사용할 때, 각 파드는 0에서 `.spec.completions-1` 범위 내의 서로 다른 인덱스를 가져온다. 1. *작업 큐(queue)* 가 있는 병렬 잡: - `.spec.completions` 를 지정하지 않고, `.spec.parallelism` 를 기본으로 한다. - 파드는 각자 또는 외부 서비스 간에 조정을 통해 각각의 작업을 결정해야 한다. 예를 들어 파드는 작업 큐에서 최대 N 개의 항목을 일괄로 가져올(fetch) 수 있다. @@ -166,7 +167,6 @@ _작업 큐_ 잡은 `.spec.completions` 를 설정하지 않은 상태로 두고 다른 유형의 잡을 사용하는 방법에 대한 더 자세한 정보는 [잡 패턴](#잡-패턴) 섹션을 본다. - #### 병렬 처리 제어하기 요청된 병렬 처리(`.spec.parallelism`)는 음수가 아닌 값으로 설정할 수 있다. @@ -185,6 +185,33 @@ _작업 큐_ 잡은 `.spec.completions` 를 설정하지 않은 상태로 두고 - 잡 컨트롤러는 동일한 잡에서 과도하게 실패한 이전 파드들로 인해 새로운 파드의 생성을 조절할 수 있다. - 파드가 정상적으로(gracefully) 종료되면, 중지하는데 시간이 소요된다. +### 완료 모드 + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +{{< note >}} +인덱싱된 잡을 생성하려면, [API 서버](/docs/reference/command-line-tools-reference/kube-apiserver/) +및 [컨트롤러 관리자](/docs/reference/command-line-tools-reference/kube-controller-manager/)에서 +`IndexedJob` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 +활성화해야 한다. +{{< /note >}} + +완료 횟수가 _고정적인 완료 횟수_ 즉, null이 아닌 `.spec.completions` 가 있는 잡은 +`.spec.completionMode` 에 지정된 완료 모드를 가질 수 있다. + +- `NonIndexed` (기본값): `.spec.completions` 가 성공적으로 + 완료된 파드가 있는 경우 작업이 완료된 것으로 간주된다. 즉, 각 파드 + 완료는 서로 상동하다(homologous). 
null `.spec.completions` 가 있는 + 잡은 암시적으로 `NonIndexed` 이다. +- `Indexed`: 잡의 파드는 `batch.kubernetes.io/job-completion-index` + 어노테이션에서 사용할 수 있는 0에서 `.spec.completions-1` 까지 연결된 완료 인덱스를 가져온다. + 각 인덱스에 대해 성공적으로 완료된 파드가 하나 있으면 작업이 완료된 것으로 + 간주된다. 이 모드를 사용하는 방법에 대한 자세한 내용은 + [정적 작업 할당을 사용한 병렬 처리를 위해 인덱싱된 잡](/docs/tasks/job/indexed-parallel-processing-static/)을 참고한다. + 참고로, 드물기는 하지만, 동일한 인덱스에 대해 둘 이상의 파드를 시작할 수 + 있지만, 그 중 하나만 완료 횟수에 포함된다. + + ## 파드와 컨테이너 장애 처리하기 파드내 컨테이너의 프로세스가 0이 아닌 종료 코드로 종료되었거나 컨테이너 메모리 제한을 @@ -348,12 +375,12 @@ spec: 여기에 트레이드오프가 요약되어있고, 2열에서 4열까지가 위의 트레이드오프에 해당한다. 패턴 이름은 예시와 더 자세한 설명을 위한 링크이다. -| 패턴 | 단일 잡 오브젝트 | 작업 항목보다 파드가 적은가? | 수정하지 않은 앱을 사용하는가? | Kube 1.1에서 작동하는가? | -| -------------------------------------------------------------------- |:-----------------:|:---------------------------:|:-------------------:|:-------------------:| -| [잡 템플릿 확장](/ko/docs/tasks/job/parallel-processing-expansion/) | | | ✓ | ✓ | -| [작업 항목 당 파드가 있는 큐](/docs/tasks/job/coarse-parallel-processing-work-queue/) | ✓ | | 때때로 | ✓ | -| [가변 파드 수를 가진 큐](/ko/docs/tasks/job/fine-parallel-processing-work-queue/) | ✓ | ✓ | | ✓ | -| 정적 작업이 할당된 단일 잡 | ✓ | | ✓ | | +| 패턴 | 단일 잡 오브젝트 | 작업 항목보다 파드가 적은가? | 수정되지 않은 앱을 사용하는가? | +| ----------------------------------------- |:-----------------:|:---------------------------:|:-------------------:| +| [작업 항목 당 파드가 있는 큐] | ✓ | | 때때로 | +| [가변 파드 수를 가진 큐] | ✓ | ✓ | | +| [정적 작업 할당을 사용한 인덱싱된 잡] | ✓ | | ✓ | +| [잡 템플릿 확장] | | | ✓ | `.spec.completions` 로 완료를 지정할 때, 잡 컨트롤러에 의해 생성된 각 파드는 동일한 [`사양`](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status)을 갖는다. 이 의미는 @@ -364,16 +391,121 @@ spec: 이 표는 각 패턴에 필요한 `.spec.parallelism` 그리고 `.spec.completions` 설정을 보여준다. 여기서 `W` 는 작업 항목의 수이다. -| 패턴 | `.spec.completions` | `.spec.parallelism` | -| -------------------------------------------------------------------- |:-------------------:|:--------------------:| -| [잡 템플릿 확장](/ko/docs/tasks/job/parallel-processing-expansion/) | 1 | 1이어야 함 | -| [작업 항목 당 파드가 있는 큐](/docs/tasks/job/coarse-parallel-processing-work-queue/) | W | any | -| [가변 파드 수를 가진 큐](/ko/docs/tasks/job/fine-parallel-processing-work-queue/) | 1 | any | -| 정적 작업이 할당된 단일 잡 | W | any | +| 패턴 | `.spec.completions` | `.spec.parallelism` | +| ----------------------------------------- |:-------------------:|:--------------------:| +| [작업 항목 당 파드가 있는 큐] | W | any | +| [가변 파드 수를 가진 큐] | null | any | +| [정적 작업 할당을 사용한 인덱싱된 잡] | W | any | +| [잡 템플릿 확장] | 1 | 1이어야 함 | +[작업 항목 당 파드가 있는 큐]: /docs/tasks/job/coarse-parallel-processing-work-queue/ +[가변 파드 수를 가진 큐]: /docs/tasks/job/fine-parallel-processing-work-queue/ +[정적 작업 할당을 사용한 인덱싱된 잡]: /docs/tasks/job/indexed-parallel-processing-static/ +[잡 템플릿 확장]: /docs/tasks/job/parallel-processing-expansion/ ## 고급 사용법 +### 잡 일시 중지 + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +{{< note >}} +잡 일시 중지는 쿠버네티스 버전 1.21 이상에서 사용할 수 있다. 이 기능을 +사용하려면 [API 서버](/docs/reference/command-line-tools-reference/kube-apiserver/) +및 [컨트롤러 관리자](/docs/reference/command-line-tools-reference/kube-controller-manager/)에서 +`SuspendJob` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 +활성화해야 한다. +{{< /note >}} + +잡이 생성되면, 잡 컨트롤러는 잡의 요구 사항을 충족하기 위해 +즉시 파드 생성을 시작하고 잡이 완료될 때까지 +계속한다. 그러나, 잡의 실행을 일시적으로 중단하고 나중에 +다시 시작할 수도 있다. 잡을 일시 중지하려면, 잡의 `.spec.suspend` 필드를 true로 +업데이트할 수 있다. 나중에, 다시 재개하려면, false로 업데이트한다. +`.spec.suspend` 로 설정된 잡을 생성하면 일시 중지된 상태로 +생성된다. + +잡이 일시 중지에서 재개되면, 해당 `.status.startTime` 필드가 +현재 시간으로 재설정된다. 
즉, 잡이 일시 중지 및 재개되면 `.spec.activeDeadlineSeconds` +타이머가 중지되고 재설정된다. + +잡을 일시 중지하면 모든 활성 파드가 삭제된다. 잡이 +일시 중지되면, SIGTERM 시그널로 [파드가 종료된다](/ko/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination). +파드의 정상 종료 기간이 적용되며 사용자의 파드는 이 기간 동안에 +이 시그널을 처리해야 한다. 나중에 진행 상황을 저장하거나 +변경 사항을 취소하는 작업이 포함될 수 있다. 이 방법으로 종료된 파드는 +잡의 `completions` 수에 포함되지 않는다. + +일시 중지된 상태의 잡 정의 예시는 다음과 같다. + +```shell +kubectl get job myjob -o yaml +``` + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: myjob +spec: + suspend: true + parallelism: 1 + completions: 5 + template: + spec: + ... +``` + +잡의 상태를 사용하여 잡이 일시 중지되었는지 또는 과거에 일시 중지되었는지 +확인할 수 있다. + +```shell +kubectl get jobs/myjob -o yaml +``` + +```json +apiVersion: batch/v1 +kind: Job +# .metadata and .spec omitted +status: + conditions: + - lastProbeTime: "2021-02-05T13:14:33Z" + lastTransitionTime: "2021-02-05T13:14:33Z" + status: "True" + type: Suspended + startTime: "2021-02-05T13:13:48Z" +``` + +"True" 상태인 "Suspended" 유형의 잡의 컨디션은 잡이 +일시 중지되었음을 의미한다. 이 `lastTransitionTime` 필드는 잡이 일시 중지된 +기간을 결정하는 데 사용할 수 있다. 해당 컨디션의 상태가 "False"이면, 잡이 +이전에 일시 중지되었다가 현재 실행 중이다. 이러한 컨디션이 +잡의 상태에 없으면, 잡이 중지되지 않은 것이다. + +잡이 일시 중지 및 재개될 때에도 이벤트가 생성된다. + +```shell +kubectl describe jobs/myjob +``` + +``` +Name: myjob +... +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 12m job-controller Created pod: myjob-hlrpl + Normal SuccessfulDelete 11m job-controller Deleted pod: myjob-hlrpl + Normal Suspended 11m job-controller Job suspended + Normal SuccessfulCreate 3s job-controller Created pod: myjob-jvb44 + Normal Resumed 3s job-controller Job resumed +``` + +마지막 4개의 이벤트, 특히 "Suspended" 및 "Resumed" 이벤트는 +`.spec.suspend` 필드를 전환한 결과이다. 이 두 이벤트 사이의 시간동안 +파드가 생성되지 않았지만, 잡이 재개되자마자 파드 생성이 다시 +시작되었음을 알 수 있다. + ### 자신의 파드 셀렉터를 지정하기 일반적으로 잡 오브젝트를 생성할 때 `.spec.selector` 를 지정하지 않는다. diff --git a/content/ko/docs/concepts/workloads/controllers/replicaset.md b/content/ko/docs/concepts/workloads/controllers/replicaset.md index 8966029620015..7cf399d24275c 100644 --- a/content/ko/docs/concepts/workloads/controllers/replicaset.md +++ b/content/ko/docs/concepts/workloads/controllers/replicaset.md @@ -222,7 +222,7 @@ pod2 1/1 Running 0 36s ## 레플리카셋 매니페스트 작성하기 레플리카셋은 모든 쿠버네티스 API 오브젝트와 마찬가지로 `apiVersion`, `kind`, `metadata` 필드가 필요하다. -레플리카셋에 대한 kind 필드의 값은 항상 레플리카셋이다. +레플리카셋에 대한 `kind` 필드의 값은 항상 레플리카셋이다. 쿠버네티스 1.9에서의 레플리카셋의 kind에 있는 API 버전 `apps/v1`은 현재 버전이며, 기본으로 활성화 되어있다. API 버전 `apps/v1beta2`은 사용 중단(deprecated)되었다. API 버전에 대해서는 `frontend.yaml` 예제의 첫 번째 줄을 참고한다. @@ -237,7 +237,7 @@ API 버전에 대해서는 `frontend.yaml` 예제의 첫 번째 줄을 참고한 우리는 `frontend.yaml` 예제에서 `tier: frontend`이라는 레이블을 하나 가지고 있다. 이 파드를 다른 컨트롤러가 취하지 않도록 다른 컨트롤러의 셀렉터와 겹치지 않도록 주의해야 한다. -템플릿의 [재시작 정책](/ko/docs/concepts/workloads/pods/pod-lifecycle/#재시작-정책) 필드인 +템플릿의 [재시작 정책](/ko/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) 필드인 `.spec.template.spec.restartPolicy`는 기본값인 `Always`만 허용된다. ### 파드 셀렉터 @@ -307,9 +307,51 @@ curl -X DELETE 'localhost:8080/apis/apps/v1/namespaces/default/replicasets/fron ### 레플리카셋의 스케일링 -레플리카셋을 손쉽게 스케일 업 또는 다운하는 방법은 단순히 `.spec.replicas` 필드를 업데이트 하면 된다. +레플리카셋을 손쉽게 스케일 업 또는 다운하는 방법은 단순히 `.spec.replicas` 필드를 업데이트하면 된다. 레플리카셋 컨트롤러는 일치하는 레이블 셀렉터가 있는 파드가 의도한 수 만큼 가용하고 운영 가능하도록 보장한다. +스케일 다운할 때, 레플리카셋 컨트롤러는 스케일 다운할 파드의 +우선순위를 정하기 위해 다음의 기준으로 가용 파드를 정렬하여 삭제할 파드를 결정한다. + 1. Pending 상태인 (+ 스케줄링할 수 없는) 파드가 먼저 스케일 다운된다. + 2. `controller.kubernetes.io/pod-deletion-cost` 어노테이션이 설정되어 있는 + 파드에 대해서는, 낮은 값을 갖는 파드가 먼저 스케일 다운된다. + 3. 
더 많은 레플리카가 있는 노드의 파드가 더 적은 레플리카가 있는 노드의 파드보다 먼저 스케일 다운된다. + 4. 파드 생성 시간이 다르면, 더 최근에 생성된 파드가 + 이전에 생성된 파드보다 먼저 스케일 다운된다. + (`LogarithmicScaleDown` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)가 활성화되어 있으면 생성 시간이 정수 로그 스케일로 버킷화된다) + +모든 기준에 대해 동등하다면, 스케일 다운할 파드가 임의로 선택된다. + +### 파드 삭제 비용 +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +[`controller.kubernetes.io/pod-deletion-cost`](/docs/reference/labels-annotations-taints/#pod-deletion-cost) 어노테이션을 이용하여, +레플리카셋을 스케일 다운할 때 어떤 파드부터 먼저 삭제할지에 대한 우선순위를 설정할 수 있다. + +이 어노테이션은 파드에 설정되어야 하며, [-2147483647, 2147483647] 범위를 갖는다. +이 어노테이션은 하나의 레플리카셋에 있는 다른 파드와의 상대적 삭제 비용을 나타낸다. +삭제 비용이 낮은 파드는 삭제 비용이 높은 파드보다 삭제 우선순위가 높다. + +파드에 대해 이 값을 명시하지 않으면 기본값은 0이다. 음수로도 설정할 수 있다. +유효하지 않은 값은 API 서버가 거부한다. + +이 기능은 알파 상태이며 기본적으로는 비활성화되어 있다. +kube-apiserver와 kube-controller-manager에서 `PodDeletionCost` +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 켜서 활성화할 수 있다. + +{{< note >}} +- 이 기능은 best-effort 방식으로 동작하므로, 파드 삭제 순서를 보장하지는 않는다. +- 이 값을 자주 바꾸는 것은 피해야 한다 (예: 메트릭 값에 따라 변경). +apiserver에서 많은 양의 파드 업데이트를 동반하기 때문이다. +{{< /note >}} + +#### 사용 예시 +한 애플리케이션 내의 여러 파드는 각각 사용률이 다를 수 있다. 스케일 다운 시, +애플리케이션은 사용률이 낮은 파드를 먼저 삭제하고 싶을 수 있다. 파드를 자주 +업데이트하는 것을 피하기 위해, 애플리케이션은 `controller.kubernetes.io/pod-deletion-cost` 값을 +스케일 다운하기 전에 1회만 업데이트해야 한다 (파드 사용률에 비례하는 값으로 설정). +이 방식은 Spark 애플리케이션의 드라이버 파드처럼 애플리케이션이 스스로 다운스케일링을 수행하는 경우에 유효하다. + ### 레플리카셋을 Horizontal Pod Autoscaler 대상으로 설정 레플리카셋은 diff --git a/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md b/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md index 9c3450851a580..db69cf921c8d9 100644 --- a/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md @@ -49,12 +49,14 @@ kubectl 명령에서 숏컷으로 사용된다. {{< codenew file="controllers/replication.yaml" >}} -예제 파일을 다운로드 한 후 다음 명령을 실행하여 예제 작업을 실행하라. +예제 파일을 다운로드한 후 다음 명령을 실행하여 예제 작업을 실행하라. ```shell kubectl apply -f https://k8s.io/examples/controllers/replication.yaml ``` + 출력 결과는 다음과 같다. + ``` replicationcontroller/nginx created ``` @@ -64,7 +66,9 @@ replicationcontroller/nginx created ```shell kubectl describe replicationcontrollers/nginx ``` + 출력 결과는 다음과 같다. + ``` Name: nginx Namespace: default @@ -103,14 +107,16 @@ Pods Status: 3 Running / 0 Waiting / 0 Succeeded / 0 Failed pods=$(kubectl get pods --selector=app=nginx --output=jsonpath={.items..metadata.name}) echo $pods ``` + 출력 결과는 다음과 같다. + ``` nginx-3ntk0 nginx-4ok8v nginx-qrm3m ``` 여기서 셀렉터는 레플리케이션컨트롤러(`kubectl describe` 의 출력에서 보인)의 셀렉터와 같고, -다른 형식의 파일인 `replication.yaml` 의 것과 동일하다. `--output=jsonpath` 옵션은 -반환된 목록의 각 파드에서 이름을 가져오는 표현식을 지정한다. +다른 형식의 파일인 `replication.yaml` 의 것과 동일하다. `--output=jsonpath` 은 +반환된 목록의 각 파드의 이름을 출력하도록 하는 옵션이다. ## 레플리케이션 컨트롤러의 Spec 작성 @@ -118,7 +124,7 @@ nginx-3ntk0 nginx-4ok8v nginx-qrm3m 다른 모든 쿠버네티스 컨피그와 마찬가지로 레플리케이션 컨트롤러는 `apiVersion`, `kind`, `metadata` 와 같은 필드가 필요하다. 레플리케이션 컨트롤러 오브젝트의 이름은 유효한 [DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. -컨피그 파일의 동작에 관련된 일반적인 정보는 [쿠버네티스 오브젝트 관리](/ko/docs/concepts/overview/working-with-objects/object-management/)를 참고한다. +환경설정 파일의 동작에 관련된 일반적인 정보는 [쿠버네티스 오브젝트 관리](/ko/docs/concepts/overview/working-with-objects/object-management/)를 참고한다. 레플리케이션 컨트롤러는 또한 [`.spec` section](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status)도 필요하다. 
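참고로, 위 요구 사항(`apiVersion`, `kind`, `metadata` 와 `.spec` 섹션)을 모두 갖춘 최소한의
레플리케이션 컨트롤러 매니페스트를 스케치해 보면 대략 다음과 같다. 여기서 이름과
레이블(`nginx-example`), 이미지 버전(`nginx:1.14.2`)은 설명을 위해 임의로 정한 가정이다.

```shell
# 설명을 위한 최소한의 스케치이다. 이름과 레이블은 임의로 정한 값이다.
kubectl apply -f - <<EOF
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx-example        # 유효한 DNS 서브도메인 이름이어야 한다
spec:
  replicas: 3                # 의도하는 파드 수
  selector:
    app: nginx-example       # 파드 템플릿의 레이블과 일치해야 한다
  template:
    metadata:
      labels:
        app: nginx-example
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
EOF
```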
@@ -180,7 +186,7 @@ delete`](/docs/reference/generated/kubectl/kubectl-commands#delete) 를 사용 Kubectl은 레플리케이션 컨트롤러를 0으로 스케일하고 레플리케이션 컨트롤러 자체를 삭제하기 전에 각 파드를 삭제하기를 기다린다. 이 kubectl 명령이 인터럽트되면 다시 시작할 수 있다. -REST API나 go 클라이언트 라이브러리를 사용하는 경우 명시적으로 단계를 수행해야 한다 (레플리카를 0으로 스케일하고 파드의 삭제를 기다린 이후, +REST API나 Go 클라이언트 라이브러리를 사용하는 경우 명시적으로 단계를 수행해야 한다(레플리카를 0으로 스케일하고 파드의 삭제를 기다린 이후, 레플리케이션 컨트롤러를 삭제). ### 레플리케이션 컨트롤러만 삭제 @@ -189,7 +195,7 @@ REST API나 go 클라이언트 라이브러리를 사용하는 경우 명시적 kubectl을 사용하여, [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands#delete)에 옵션으로 `--cascade=false`를 지정하라. -REST API나 go 클라이언트 라이브러리를 사용하는 경우 간단히 레플리케이션 컨트롤러 오브젝트를 삭제하라. +REST API나 Go 클라이언트 라이브러리를 사용하는 경우 레플리케이션 컨트롤러 오브젝트를 삭제하라. 원본이 삭제되면 대체할 새로운 레플리케이션 컨트롤러를 생성하여 교체할 수 있다. 오래된 파드와 새로운 파드의 `.spec.selector` 가 동일하다면, 새로운 레플리케이션 컨트롤러는 오래된 파드를 채택할 것이다. 그러나 기존 파드를 @@ -198,7 +204,7 @@ REST API나 go 클라이언트 라이브러리를 사용하는 경우 간단히 ### 레플리케이션 컨트롤러에서 파드 격리 -파드는 레이블을 변경하여 레플리케이션 컨트롤러의 대상 셋에서 제거될 수 있다. 이 기술은 디버깅, 데이터 복구 등을 위해 서비스에서 파드를 제거하는데 사용될 수 있다. 이 방법으로 제거된 파드는 자동으로 교체된다 (레플리카 수가 변경되지 않는다고 가정). +파드는 레이블을 변경하여 레플리케이션 컨트롤러의 대상 셋에서 제거될 수 있다. 이 기술은 디버깅과 데이터 복구를 위해 서비스에서 파드를 제거하는 데 사용될 수 있다. 이 방법으로 제거된 파드는 자동으로 교체된다 (레플리카 수가 변경되지 않는다고 가정). ## 일반적인 사용법 패턴 @@ -208,7 +214,7 @@ REST API나 go 클라이언트 라이브러리를 사용하는 경우 간단히 ### 스케일링 -레플리케이션 컨트롤러는 `replicas` 필드를 업데이트함으로써 수동으로 또는 오토 스케일링 제어 에이전트로 레플리카의 수를 쉽게 스케일 업하거나 스케일 다운할 수 있다. +레플리케이션컨트롤러는 `replicas` 필드를 업데이트하여, 수동으로 또는 오토 스케일링 제어 에이전트를 통해, 레플리카의 수를 늘리거나 줄일 수 있다. ### 롤링 업데이트 @@ -239,13 +245,12 @@ REST API나 go 클라이언트 라이브러리를 사용하는 경우 간단히 ## 레플리케이션 컨트롤러의 책임 -레플리케이션 컨트롤러는 의도한 수의 파드가 해당 레이블 선택기와 일치하고 동작하는지를 단순히 확인한다. 현재, 종료된 파드만 해당 파드의 수에서 제외된다. 향후 시스템에서 사용할 수 있는 [readiness](https://issue.k8s.io/620) 및 기타 정보가 고려될 수 있으며 교체 정책에 대한 통제를 더 추가 할 수 있고 외부 클라이언트가 임의로 정교한 교체 또는 스케일 다운 정책을 구현하기 위해 사용할 수 있는 이벤트를 내보낼 계획이다. +레플리케이션 컨트롤러는 의도한 수의 파드가 해당 레이블 셀렉터와 일치하고 동작하는지를 확인한다. 현재, 종료된 파드만 해당 파드의 수에서 제외된다. 향후 시스템에서 사용할 수 있는 [readiness](https://issue.k8s.io/620) 및 기타 정보가 고려될 수 있으며 교체 정책에 대한 통제를 더 추가 할 수 있고 외부 클라이언트가 임의로 정교한 교체 또는 스케일 다운 정책을 구현하기 위해 사용할 수 있는 이벤트를 내보낼 계획이다. 레플리케이션 컨트롤러는 이 좁은 책임에 영원히 제약을 받는다. 그 자체로는 준비성 또는 활성 프로브를 실행하지 않을 것이다. 오토 스케일링을 수행하는 대신, 외부 오토 스케일러 ([#492](https://issue.k8s.io/492)에서 논의된)가 레플리케이션 컨트롤러의 `replicas` 필드를 변경함으로써 제어되도록 의도되었다. 레플리케이션 컨트롤러에 스케줄링 정책 (예를 들어 [spreading](https://issue.k8s.io/367#issuecomment-48428019))을 추가하지 않을 것이다. 오토사이징 및 기타 자동화 된 프로세스를 방해할 수 있으므로 제어된 파드가 현재 지정된 템플릿과 일치하는지 확인해야 한다. 마찬가지로 기한 완료, 순서 종속성, 구성 확장 및 기타 기능은 다른 곳에 속한다. 대량의 파드 생성 메커니즘 ([#170](https://issue.k8s.io/170))까지도 고려해야 한다. 레플리케이션 컨트롤러는 조합 가능한 빌딩-블록 프리미티브가 되도록 고안되었다. 향후 사용자의 편의를 위해 더 상위 수준의 API 및/또는 도구와 그리고 다른 보완적인 기본 요소가 그 위에 구축 될 것으로 기대한다. 현재 kubectl이 지원하는 "매크로" 작업 (실행, 스케일)은 개념 증명의 예시이다. 예를 들어 [Asgard](https://techblog.netflix.com/2012/06/asgard-web-based-cloud-management-and.html)와 같이 레플리케이션 컨트롤러, 오토 스케일러, 서비스, 정책 스케줄링, 카나리 등을 관리할 수 있다. - ## API 오브젝트 레플리케이션 컨트롤러는 쿠버네티스 REST API의 최상위 수준의 리소스이다. @@ -260,8 +265,7 @@ API 오브젝트에 대한 더 자세한 것은 이것은 주로 [디플로이먼트](/ko/docs/concepts/workloads/controllers/deployment/)에 의해 파드의 생성, 삭제 및 업데이트를 오케스트레이션 하는 메커니즘으로 사용된다. 사용자 지정 업데이트 조정이 필요하거나 업데이트가 필요하지 않은 경우가 아니면 레플리카셋을 직접 사용하는 대신 디플로이먼트를 사용하는 것이 좋다. - -### 디플로이먼트 (권장되는) +### 디플로이먼트 (권장됨) [`Deployment`](/ko/docs/concepts/workloads/controllers/deployment/)는 기본 레플리카셋과 그 파드를 업데이트하는 상위 수준의 API 오브젝트이다. 선언적이며, 서버 사이드이고, 추가 기능이 있기 때문에 롤링 업데이트 기능을 원한다면 디플로이먼트를 권장한다. 
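앞에서 권장한 대로 레플리카셋을 직접 다루는 대신 디플로이먼트를 사용하는 가장 간단한 흐름을
스케치하면 다음과 같다. 디플로이먼트 이름(`nginx-deployment`)과 이미지 버전은 설명을 위해
임의로 정한 값이다.

```shell
# 디플로이먼트를 생성한다. 이름과 이미지는 설명을 위한 가정이다.
kubectl create deployment nginx-deployment --image=nginx:1.14.2 --replicas=3

# 이미지를 변경하면 디플로이먼트가 새로운 레플리카셋을 만들어 점진적으로 교체한다.
kubectl set image deployment/nginx-deployment nginx=nginx:1.16.1

# 롤링 업데이트가 끝날 때까지 진행 상황을 지켜본다.
kubectl rollout status deployment/nginx-deployment
```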
diff --git a/content/ko/docs/concepts/workloads/controllers/statefulset.md b/content/ko/docs/concepts/workloads/controllers/statefulset.md index 6b8299a0c3d3a..3a1f784259870 100644 --- a/content/ko/docs/concepts/workloads/controllers/statefulset.md +++ b/content/ko/docs/concepts/workloads/controllers/statefulset.md @@ -107,7 +107,7 @@ spec: ## 파드 셀렉터 -스테이트풀셋의 `.spec.selector` 필드는 `.spec.template.metadata.labels` 레이블과 일치하도록 설정 해야 한다. 쿠버네티스 1.8 이전에서는 생략시에 `.spec.selector` 필드가 기본 설정 되었다. 1.8 과 이후 버전에서는 파드 셀렉터를 명시하지 않으면 스테이트풀셋 생성시 유효성 검증 오류가 발생하는 결과가 나오게 된다. +스테이트풀셋의 `.spec.selector` 필드는 `.spec.template.metadata.labels` 레이블과 일치하도록 설정해야 한다. 쿠버네티스 1.8 이전에서는 생략시에 `.spec.selector` 필드가 기본 설정 되었다. 1.8 과 이후 버전에서는 파드 셀렉터를 명시하지 않으면 스테이트풀셋 생성시 유효성 검증 오류가 발생하는 결과가 나오게 된다. ## 파드 신원 @@ -173,7 +173,7 @@ N개의 레플리카가 있는 스테이트풀셋은 스테이트풀셋에 있 파드의 `volumeMounts` 는 퍼시스턴트 볼륨 클레임과 관련된 퍼시스턴트 볼륨이 마운트 된다. 참고로, 파드 퍼시스턴트 볼륨 클레임과 관련된 퍼시스턴트 볼륨은 파드 또는 스테이트풀셋이 삭제되더라도 삭제되지 않는다. -이것은 반드시 수동으로 해야한다. +이것은 반드시 수동으로 해야 한다. ### 파드 이름 레이블 diff --git a/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md index a9412662309d4..4703b63b4a3c9 100644 --- a/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md +++ b/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md @@ -6,7 +6,7 @@ weight: 70 -{{< feature-state for_k8s_version="v1.12" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} TTL 컨트롤러는 실행이 완료된 리소스 오브젝트의 수명을 제한하는 TTL (time to live) 메커니즘을 제공한다. TTL 컨트롤러는 현재 @@ -14,9 +14,9 @@ TTL 컨트롤러는 실행이 완료된 리소스 오브젝트의 수명을 처리하며, 파드와 커스텀 리소스와 같이 실행을 완료할 다른 리소스를 처리하도록 확장될 수 있다. -알파(Alpha) 고지 사항: 이 기능은 현재 알파이고, -kube-apiserver와 kube-controller-manager와 함께 -[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)로 `TTLAfterFinished` 를 활성화할 수 있다. +이 기능은 현재 베타이고 기본적으로 활성화되어 있다. +kube-apiserver와 kube-controller-manager에서 `TTLAfterFinished` +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 이용하여 비활성화할 수 있다. @@ -76,4 +76,4 @@ TTL 컨트롤러는 쿠버네티스 리소스에 * [자동으로 잡 정리](/ko/docs/concepts/workloads/controllers/job/#완료된-잡을-자동으로-정리) -* [디자인 문서](https://github.com/kubernetes/enhancements/blob/master/keps/sig-apps/0026-ttl-after-finish.md) +* [디자인 문서](https://github.com/kubernetes/enhancements/blob/master/keps/sig-apps/592-ttl-after-finish/README.md) diff --git a/content/ko/docs/concepts/workloads/pods/disruptions.md b/content/ko/docs/concepts/workloads/pods/disruptions.md index 02647adb70a75..9d2319ae6af96 100644 --- a/content/ko/docs/concepts/workloads/pods/disruptions.md +++ b/content/ko/docs/concepts/workloads/pods/disruptions.md @@ -89,7 +89,7 @@ weight: 60 ## 파드 disruption budgets -{{< feature-state for_k8s_version="v1.5" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} 쿠버네티스는 자발적인 중단이 자주 발생하는 경우에도 고 가용성 애플리케이션을 실행하는 데 도움이 되는 기능을 제공한다. @@ -103,7 +103,7 @@ PDB는 자발적 중단으로 일정 비율 이하로 떨어지지 않도록 보장할 수 있다. 클러스터 관리자와 호스팅 공급자는 직접적으로 파드나 디플로이먼트를 제거하는 대신 -[Eviction API](/docs/tasks/administer-cluster/safely-drain-node/#the-eviction-api)로 +[Eviction API](/docs/tasks/administer-cluster/safely-drain-node/#eviction-api)로 불리는 PodDisruptionBudget을 준수하는 도구를 이용해야 한다. 
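다음은 이러한 PodDisruptionBudget을 정의하는 간단한 스케치이다. 이름(`zk-pdb`),
레이블 셀렉터(`app: zookeeper`), 그리고 `minAvailable: 2` 값은 설명을 위한 가정이다.

```shell
# app=zookeeper 파드 중 최소 2개가 항상 가용하도록 보장하는 PDB 스케치이다.
kubectl apply -f - <<EOF
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
spec:
  minAvailable: 2            # 자발적 중단 중에도 유지되어야 하는 최소 파드 수
  selector:
    matchLabels:
      app: zookeeper
EOF
```

이렇게 정의해 두면, Eviction API를 사용하는 도구가 자발적 중단 시 이 예산을 준수한다.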
예를 들어, `kubectl drain` 하위 명령을 사용하면 노드를 서비스 중단으로 표시할 수 diff --git a/content/ko/docs/concepts/workloads/pods/ephemeral-containers.md b/content/ko/docs/concepts/workloads/pods/ephemeral-containers.md index 27e48b192fc43..9aa9e9bf51764 100644 --- a/content/ko/docs/concepts/workloads/pods/ephemeral-containers.md +++ b/content/ko/docs/concepts/workloads/pods/ephemeral-containers.md @@ -76,7 +76,7 @@ API에서 특별한 `ephemeralcontainers` 핸들러를 사용해서 만들어지 임시 컨테이너를 사용해서 문제를 해결하는 예시는 [임시 디버깅 컨테이너로 디버깅하기] -(/docs/tasks/debug-application-cluster/debug-running-pod/#debugging-with-ephemeral-debug-container)를 참조한다. +(/docs/tasks/debug-application-cluster/debug-running-pod/#ephemeral-container)를 참조한다. ## 임시 컨테이너 API @@ -100,7 +100,7 @@ API에서 특별한 `ephemeralcontainers` 핸들러를 사용해서 만들어지 "apiVersion": "v1", "kind": "EphemeralContainers", "metadata": { - "name": "example-pod" + "name": "example-pod" }, "ephemeralContainers": [{ "command": [ diff --git a/content/ko/docs/concepts/workloads/pods/init-containers.md b/content/ko/docs/concepts/workloads/pods/init-containers.md index 56f59c1d304c1..b7a1241fc26c6 100644 --- a/content/ko/docs/concepts/workloads/pods/init-containers.md +++ b/content/ko/docs/concepts/workloads/pods/init-containers.md @@ -313,17 +313,16 @@ myapp-pod 1/1 Running 0 9m 파드는 다음과 같은 사유로, 초기화 컨테이너들의 재-실행을 일으키는, 재시작을 수행할 수 있다. -* 사용자가 초기화 컨테이너 이미지의 변경을 일으키는 파드 스펙 업데이트를 수행했다. - Init Container 이미지를 변경하면 파드가 다시 시작된다. 앱 컨테이너 - 이미지의 변경은 앱 컨테이너만 재시작시킨다. -* 파드 인프라스트럭처 컨테이너가 재시작되었다. 이는 일반적인 상황이 아니며 노드에 +* 파드 인프라스트럭처 컨테이너가 재시작된 상황. 이는 일반적인 상황이 아니며 노드에 대해서 root 접근 권한을 가진 누군가에 의해서 수행됐을 것이다. -* 파드 내의 모든 컨테이너들이, 재시작을 강제하는 `restartPolicy` 가 항상(Always)으로 설정되어 있는, - 동안 종료되었다. 그리고 초기화 컨테이너의 완료 기록이 가비지 수집 - 때문에 유실되었다. - - - +* 초기화 컨테이너의 완료 기록이 가비지 수집 때문에 유실된 상태에서, + `restartPolicy`가 Always로 설정된 파드의 모든 컨테이너가 종료되어 + 모든 컨테이너를 재시작해야 하는 상황 + +초기화 컨테이너 이미지가 변경되거나 초기화 컨테이너의 완료 기록이 가비지 수집 +때문에 유실된 상태이면 파드는 재시작되지 않는다. 이는 쿠버네티스 버전 1.20 이상에 +적용된다. 이전 버전의 쿠버네티스를 사용하는 경우 해당 쿠버네티스 버전의 문서를 +참고한다. ## {{% heading "whatsnext" %}} diff --git a/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md b/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md index 1066e3eb83878..71523e183a515 100644 --- a/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md @@ -38,8 +38,7 @@ ID([UID](/ko/docs/concepts/overview/working-with-objects/names/#uids))가 타임아웃 기간 후에 [삭제되도록 스케줄된다](#pod-garbage-collection). 파드는 자체적으로 자가 치유되지 않는다. 파드가 -{{< glossary_tooltip text="노드" term_id="node" >}}에 스케줄된 후에 실패하거나, -스케줄 작업 자체가 실패하면, 파드는 삭제된다. 마찬가지로, 파드는 +{{< glossary_tooltip text="노드" term_id="node" >}}에 스케줄된 후에 해당 노드가 실패하면, 파드는 삭제된다. 마찬가지로, 파드는 리소스 부족 또는 노드 유지 관리 작업으로 인해 축출되지 않는다. 쿠버네티스는 {{< glossary_tooltip term_id="controller" text="컨트롤러" >}}라 부르는 하이-레벨 추상화를 사용하여 @@ -313,8 +312,8 @@ kubelet은 실행 중인 컨테이너들에 대해서 선택적으로 세 가지 준비성 프로브는 활성 프로브와는 다르게 준비성에 특정된 엔드포인트를 확인한다. {{< note >}} -파드가 삭제될 때 단지 요청들을 흘려 보낼(drain) 목적으로, -준비성 프로브가 필요하지는 않다는 점을 유념해야 한다. 삭제 시에, 파드는 +파드가 삭제될 때 요청들을 흘려 보내기(drain) 위해 +준비성 프로브가 꼭 필요한 것은 아니다. 삭제 시에, 파드는 프로브의 존재 여부와 무관하게 자동으로 스스로를 준비되지 않은 상태(unready)로 변경한다. 파드는 파드 내의 모든 컨테이너들이 중지될 때까지 준비되지 않은 상태로 남아 있다. 
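참고로, 위에서 설명한 준비성 프로브를 파드 스펙에 정의하는 형태를 스케치하면 다음과 같다.
파드 이름(`readiness-demo`)과 프로브 경로, 시간 값들은 설명을 위해 임의로 정한 가정이다.

```shell
# HTTP GET 준비성 프로브를 갖는 파드의 스케치이다.
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: readiness-demo
spec:
  containers:
  - name: nginx
    image: nginx:1.14.2
    ports:
    - containerPort: 80
    readinessProbe:
      httpGet:
        path: /              # 준비 여부를 확인할 엔드포인트
        port: 80
      initialDelaySeconds: 5 # 첫 프로브 전 대기 시간
      periodSeconds: 10      # 프로브 주기
EOF
```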
diff --git a/content/ko/docs/contribute/generate-ref-docs/_index.md b/content/ko/docs/contribute/generate-ref-docs/_index.md
index 756c509206395..ad9d221f1599d 100644
--- a/content/ko/docs/contribute/generate-ref-docs/_index.md
+++ b/content/ko/docs/contribute/generate-ref-docs/_index.md
@@ -1,11 +1,11 @@
 ---
-title: 참조 문서 개요
+title: 레퍼런스 문서 개요
 main_menu: true
 weight: 80
 ---

-이 섹션은 쿠버네티스 참조 가이드를 생성하는 방법에 대해 설명한다.
+이 섹션은 쿠버네티스 레퍼런스 가이드를 생성하는 방법에 대해 설명한다.

-참조 문서화 시스템을 빌드하려면, 다음의 가이드를 참고한다.
+레퍼런스 문서를 생성하려면, 다음의 가이드를 참고한다.

-* [참조 문서 생성에 대한 퀵스타트 가이드](/docs/contribute/generate-ref-docs/quickstart/)
\ No newline at end of file
+* [레퍼런스 문서 생성에 대한 퀵스타트 가이드](/ko/docs/contribute/generate-ref-docs/quickstart/)
diff --git a/content/ko/docs/contribute/generate-ref-docs/prerequisites-ref-docs.md b/content/ko/docs/contribute/generate-ref-docs/prerequisites-ref-docs.md
new file mode 100644
index 0000000000000..b7c77889d8278
--- /dev/null
+++ b/content/ko/docs/contribute/generate-ref-docs/prerequisites-ref-docs.md
@@ -0,0 +1,22 @@
+
+### 필요 사항: {#Requirements}
+
+- 리눅스 또는 macOS 로 구동되는 개발 환경이 필요하다.
+
+- 다음의 도구들이 설치되어 있어야 한다.
+
+  - [Python](https://www.python.org/downloads/) v3.7.x
+  - [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
+  - [Golang](https://golang.org/doc/install) version 1.13+
+  - [Pip](https://pypi.org/project/pip/) (PyYAML 설치에 필요함)
+  - [PyYAML](https://pyyaml.org/) v5.1.2
+  - [make](https://www.gnu.org/software/make/)
+  - [gcc compiler/linker](https://gcc.gnu.org/)
+  - [Docker](https://docs.docker.com/engine/installation/) (`kubectl` 명령어 레퍼런스 업데이트에만 필요함)
+
+- 위에 나열된 도구들 (예: `Go` 바이너리나 `python`) 을 사용할 수 있도록 `PATH` 환경 변수를 알맞게 설정해야 한다.
+
+- GitHub 저장소로 풀 리퀘스트를 생성하는 방법을 알고 있어야 한다.
+이를 위해 `kubernetes/website` 저장소를 개인 계정으로 포크해야 한다.
+더 자세한 내용은 [로컬 포크에서 작업하기](/ko/docs/contribute/new-content/open-a-pr/#fork-the-repo)를 참조한다.
+
diff --git a/content/ko/docs/contribute/generate-ref-docs/quickstart.md b/content/ko/docs/contribute/generate-ref-docs/quickstart.md
new file mode 100644
index 0000000000000..6e3fbb52636e1
--- /dev/null
+++ b/content/ko/docs/contribute/generate-ref-docs/quickstart.md
@@ -0,0 +1,257 @@
+---
+title: 퀵스타트 가이드
+content_type: task
+weight: 40
+---
+
+이 문서에서는 `update-imported-docs` 스크립트를 사용하여
+쿠버네티스 레퍼런스 문서를 생성하는 방법에 대해 설명한다.
+이 스크립트는 특정 쿠버네티스 릴리스 버전에 대해 빌드 설정을 자동으로 수행하고 레퍼런스 문서를 생성한다.
+
+## {{% heading "prerequisites" %}}
+
+{{< include "prerequisites-ref-docs.md" >}}
+
+## `website` 저장소 클론하기 {#Getting-the-docs-repository}
+
+개인 계정에 있는 포크 버전의 `website` 저장소가 `kubernetes/website` 저장소의 master 브랜치만큼 최신인지 확인한 뒤,
+개인 계정에 있는 포크 버전의 `website` 저장소를 로컬 개발 환경으로 클론한다.
+
+```shell
+mkdir github.com
+cd github.com
+git clone git@github.com:<github_username>/website.git
+```
+
+아래에서 사용될 '베이스 디렉터리'를 숙지해야 한다. 예를 들어 위에 안내된 대로
+저장소를 클론했다면, 베이스 디렉터리는
+`github.com/website` 가 된다. 이제 이 문서의 나머지 부분에서 `<web-base>` 라는 구문이 나오면
+이 부분에 당신의 베이스 디렉터리를 대입하면 된다.
+
+{{< note >}}
+만약 쿠버네티스 구성 도구와 API 레퍼런스에 기여하고 싶다면,
+[업스트림 코드에 기여하기 (영문)](/docs/contribute/generate-ref-docs/contribute-upstream) 를 참조한다.
+{{< /note >}}
+
+## `update-imported-docs` 스크립트 개요 {#Overview-of-update-imported-docs}
+
+`update-imported-docs` 스크립트는 `<web-base>/update-imported-docs/`
+디렉터리에 존재한다.
+
+이 스크립트는 다음 레퍼런스를 생성한다.
+
+* 구성요소 및 도구 레퍼런스 페이지
+* `kubectl` 명령어 레퍼런스
+* 쿠버네티스 API 레퍼런스
+
+`update-imported-docs` 스크립트는 쿠버네티스 소스코드로부터 레퍼런스 문서를
+생성한다. 스크립트가 실행되면 개발 머신의 `/tmp` 디렉터리 아래에 임시 디렉터리를
+생성하고, 이 임시 디렉터리 아래에 레퍼런스 문서 생성에 필요한 `kubernetes/kubernetes` 저장소와
+`kubernetes-sigs/reference-docs` 저장소를 클론하며,
+`GOPATH` 환경 변수를 이 임시 디렉터리로 지정한다.
+또한 이 스크립트는 다음의 환경 변수를 설정한다.
+
+* `K8S_RELEASE`
+* `K8S_ROOT`
+* `K8S_WEBROOT`
+
+스크립트가 정상적으로 실행되려면 인자 2개를 전달해야 한다.
+
+* 환경설정 YAML 파일 (`reference.yml`)
+* 쿠버네티스 릴리스 버전 (예: `1.17`)
+
+환경설정 파일은 `generate-command` 라는 필드를 포함하는데,
+이 필드에는
+`kubernetes-sigs/reference-docs/Makefile` 에 있는 Make 타겟들을 활용하여 빌드하는 일련의 과정이 명시되어 있다.
+`K8S_RELEASE` 환경 변수는 릴리스 버전을 결정한다.
+
+`update-imported-docs` 스크립트는 다음의 과정을 수행한다.
+
+1. 환경설정 파일에 있는 관련 저장소를 클론한다.
+   레퍼런스 문서 생성을 위해
+   기본적으로는 `kubernetes-sigs/reference-docs` 저장소를 클론하도록 되어 있다.
+1. 클론한 저장소 안에서, 문서 생성에 필요한 사항을 준비하기 위한 명령어를 실행한 뒤,
+   HTML 파일과 마크다운 파일을 생성한다.
+1. 생성된 HTML 파일과 마크다운 파일을
+   환경설정 파일에 명시된 규칙에 따라 `<web-base>` 로 복사한다.
+1. `kubectl.md` 에 있는 `kubectl` 명령어 링크들이
+   `kubectl` 명령어 레퍼런스 페이지의 올바른 섹션으로 연결되도록 업데이트한다.
+
+생성된 파일이 `<web-base>` 아래에 복사되었으면,
+`kubernetes/website` 저장소로 [풀 리퀘스트를 생성](/ko/docs/contribute/new-content/open-a-pr/)
+할 수 있다.
+
+## 환경설정 파일 형식 {#Configuration-file-format}
+
+각 환경설정 파일은 레퍼런스 생성을 위해 필요한 여러 저장소의 정보를 담을 수 있다.
+필요한 경우, 환경설정 파일을 직접 수정하여 사용할 수도 있다.
+또는, 다른 그룹의 문서를 임포트하기 위해 새로운 환경설정 파일을 작성할 수도 있다.
+다음은 환경설정 YAML 파일의 예시이다.
+
+```yaml
+repos:
+- name: community
+  remote: https://github.com/kubernetes/community.git
+  branch: master
+  files:
+  - src: contributors/devel/README.md
+    dst: docs/imported/community/devel.md
+  - src: contributors/guide/README.md
+    dst: docs/imported/community/guide.md
+```
+
+이 도구에 의해 처리될 단일 페이지 마크다운 문서는
+[문서 스타일 가이드](/docs/contribute/style/style-guide/)의 내용을 만족해야 한다.
+
+## reference.yml 환경설정 파일 다루기 {#Customizing-reference-yml}
+
+`<web-base>/update-imported-docs/reference.yml` 환경설정 파일을 열어 수정할 수 있다.
+레퍼런스 문서 생성을 위해 명령어들이 어떻게 사용되고 있는지 파악하지 못했다면,
+`generate-command` 필드의 내용은 수정하지 말아야 한다.
+대부분의 경우 `reference.yml` 을 직접 수정해야 할 필요는 없다.
+때때로, 업스트림 소스코드 업데이트 때문에 이 환경설정 파일을 수정해야 할 수도 있다.
+(예: Golang 버전 의존성, 서드파티 라이브러리 변경 등)
+만약 스크립트 사용 시 빌드 문제가 있다면,
+[쿠버네티스 슬랙의 #sig-docs 채널](https://kubernetes.slack.com/archives/C1J0BPD2M)에서 SIG-Docs 팀에 문의하면 된다.
+
+{{< note >}}
+`generate-command` 는 특정 저장소로부터 문서를 만들기 위한
+명령어나 스크립트를 실행하기 위해 사용할 수 있는 선택적 필드이다.
+{{< /note >}}
+
+`reference.yml` 환경설정 파일에서, `files` 필드는 `src` 와 `dst` 필드를 포함한다.
+`src` 필드에는 `kubernetes-sigs/reference-docs` 디렉터리 아래에 있는 생성된 마크다운 파일의 위치를 명시하고,
+`dst` 필드에는 이 파일을
+`kubernetes/website` 디렉터리 아래의 어느 위치로 복사할지를 명시한다.
+예시는 다음과 같다.
+
+```yaml
+repos:
+- name: reference-docs
+  remote: https://github.com/kubernetes-sigs/reference-docs.git
+  files:
+  - src: gen-compdocs/build/kube-apiserver.md
+    dst: content/en/docs/reference/command-line-tools-reference/kube-apiserver.md
+  ...
+```
+
+만약 하나의 `src` 디렉터리에서 하나의 `dst` 디렉터리로 많은 파일이 복사되어야 한다면,
+`src` 필드에 와일드카드를 사용할 수 있다.
+이 경우, `dst` 필드에는 단일 파일의 경로가 아니라 디렉터리의 경로를 명시해야 한다.
+예시는 다음과 같다.
+
+```yaml
+  files:
+  - src: gen-compdocs/build/kubeadm*.md
+    dst: content/en/docs/reference/setup-tools/kubeadm/generated/
+```
+
+## `update-imported-docs` 도구 실행하기 {#Running-the-update-imported-docs-tool}
+
+다음과 같이 `update-imported-docs` 도구를 실행할 수 있다.
+
+```shell
+cd <web-base>/update-imported-docs
+./update-imported-docs <configuration-file> <release-version>
+```
+
+예를 들면 다음과 같다.
+
+```shell
+./update-imported-docs reference.yml 1.17
+```
+
+## 링크 업데이트하기 {#Fixing-Links}
+
+`release.yml` 환경설정 파일은 상대경로 링크를 수정하는 방법을 포함하고 있다.
+임포트하는 파일 안에 있는 상대경로 링크를 수정하려면, `gen-absolute-links` 필드를
+`true` 로 명시한다. 이에 대한 예시는
+[`release.yml`](https://github.com/kubernetes/website/blob/master/update-imported-docs/release.yml) 에서 볼 수 있다.
+
+## `kubernetes/website` 의 변경사항을 커밋하기 {#Adding-and-committing-changes-in-kubernetes-website}
+
+다음의 명령을 실행하여, 스크립트에 의해 생성된 뒤 `<web-base>` 아래에 복사된 파일의 목록을 볼 수 있다.
+```shell
+cd <web-base>
+git status
+```
+
+위의 명령을 실행하면 새로 추가된 파일과 수정된 파일의 목록을 볼 수 있다.
+아래의 결과 예시는 업스트림 소스코드의 변경사항에 따라 다르게 나타날 수 있다.
+
+### 생성된 구성요소 도구 레퍼런스 {#Generated-component-tool-files}
+
+```
+content/en/docs/reference/command-line-tools-reference/cloud-controller-manager.md
+content/en/docs/reference/command-line-tools-reference/kube-apiserver.md
+content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md
+content/en/docs/reference/command-line-tools-reference/kube-proxy.md
+content/en/docs/reference/command-line-tools-reference/kube-scheduler.md
+content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md
+content/en/docs/reference/kubectl/kubectl.md
+```
+
+### 생성된 kubectl 명령어 레퍼런스 {#Generated-kubectl-command-reference-files}
+
+```
+static/docs/reference/generated/kubectl/kubectl-commands.html
+static/docs/reference/generated/kubectl/navData.js
+static/docs/reference/generated/kubectl/scroll.js
+static/docs/reference/generated/kubectl/stylesheet.css
+static/docs/reference/generated/kubectl/tabvisibility.js
+static/docs/reference/generated/kubectl/node_modules/bootstrap/dist/css/bootstrap.min.css
+static/docs/reference/generated/kubectl/node_modules/highlight.js/styles/default.css
+static/docs/reference/generated/kubectl/node_modules/jquery.scrollto/jquery.scrollTo.min.js
+static/docs/reference/generated/kubectl/node_modules/jquery/dist/jquery.min.js
+static/docs/reference/generated/kubectl/css/font-awesome.min.css
+```
+
+### 생성된 쿠버네티스 API 레퍼런스 디렉터리와 파일 {#Generated-Kubernetes-API-reference-directories-and-files}
+
+```
+static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/index.html
+static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/js/navData.js
+static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/js/scroll.js
+static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/js/query.scrollTo.min.js
+static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/css/font-awesome.min.css
+static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/css/bootstrap.min.css
+static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/css/stylesheet.css
+static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/FontAwesome.otf
+static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.eot
+static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.svg
+static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.ttf
+static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.woff
+static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.woff2
+```
+
+`git add` 와 `git commit` 명령을 실행하여 추가/변경된 파일을 커밋한다.
+
+## 풀 리퀘스트 만들기 {#Creating-a-pull-request}
+
+`kubernetes/website` 저장소에 풀 리퀘스트를 등록한다.
+등록한 풀 리퀘스트를 모니터하고, 리뷰 코멘트가 달리면 그에 대해 대응을 한다.
+풀 리퀘스트가 머지될 때까지 계속 모니터한다.
+
+풀 리퀘스트가 머지된 뒤 몇 분이 지나면,
+변경사항을
+[쿠버네티스 문서 홈페이지](/docs/home/)에서 확인할 수 있다.
+
+## {{% heading "whatsnext" %}}
+
+수동으로 빌드 저장소를 설정하고 빌드 타겟을 실행하여 개별 레퍼런스 문서를 생성하려면,
+다음의 가이드를 참고한다.
+ +* [쿠버네티스 구성요소와 도구에 대한 레퍼런스 문서 생성하기](/docs/contribute/generate-ref-docs/kubernetes-components/) +* [kubectl 명령어에 대한 레퍼런스 문서 생성하기](/docs/contribute/generate-ref-docs/kubectl/) +* [쿠버네티스 API에 대한 레퍼런스 문서 생성하기](/docs/contribute/generate-ref-docs/kubernetes-api/) + + diff --git a/content/ko/docs/contribute/new-content/open-a-pr.md b/content/ko/docs/contribute/new-content/open-a-pr.md index 516c22bfea384..8697159261b25 100644 --- a/content/ko/docs/contribute/new-content/open-a-pr.md +++ b/content/ko/docs/contribute/new-content/open-a-pr.md @@ -224,7 +224,7 @@ website의 컨테이너 이미지를 만들거나 Hugo를 로컬에서 실행할 {{% tab name="Hugo 컨테이너" %}} {{< note >}} -아래 명령은 도커를 기본 컨테이너 엔진으로 사용한다. 이 동작을 무시하려면 `CONTAINER_ENGINE` 환경변수를 설정한다. +아래 명령은 도커를 기본 컨테이너 엔진으로 사용한다. 이 동작을 무시하려면 `CONTAINER_ENGINE` 환경 변수를 설정한다. {{< /note >}} 1. 로컬에서 이미지를 빌드한다. diff --git a/content/ko/docs/contribute/participate/roles-and-responsibilities.md b/content/ko/docs/contribute/participate/roles-and-responsibilities.md index 354e9d669fd44..448502c0c3763 100644 --- a/content/ko/docs/contribute/participate/roles-and-responsibilities.md +++ b/content/ko/docs/contribute/participate/roles-and-responsibilities.md @@ -51,7 +51,7 @@ GitHub 계정을 가진 누구나 쿠버네티스에 기여할 수 있다. SIG D - 풀 리퀘스트에 `/lgtm` 코멘트를 사용하여 LGTM(looks good to me) 레이블을 추가한다. {{< note >}} - `/lgtm` 사용은 자동화를 트리거한다. 만약 구속력 없는 승인을 제공하려면, 단순히 "LGTM" 코멘트를 남기는 것도 좋다! + `/lgtm` 사용은 자동화를 트리거한다. 만약 구속력 없는 승인을 제공하려면, "LGTM" 코멘트를 남기는 것도 좋다! {{< /note >}} - `/hold` 코멘트를 사용하여 풀 리퀘스트에 대한 병합을 차단한다. diff --git a/content/ko/docs/reference/_index.md b/content/ko/docs/reference/_index.md index 14fee6ee9caa2..4c0b0b8177664 100644 --- a/content/ko/docs/reference/_index.md +++ b/content/ko/docs/reference/_index.md @@ -6,6 +6,7 @@ linkTitle: "레퍼런스" main_menu: true weight: 70 content_type: concept +no_list: true --- @@ -18,10 +19,15 @@ content_type: concept ## API 레퍼런스 -* [쿠버네티스 API 레퍼런스 {{< param "version" >}}](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) +* [표준 용어집](/ko/docs/reference/glossary/) - 포괄적이고, 표준화 된 쿠버네티스 용어 목록 + +* [쿠버네티스 API 레퍼런스](/docs/reference/kubernetes-api/) +* [쿠버네티스 {{< param "version" >}}용 원페이지(One-page) API 레퍼런스](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) * [쿠버네티스 API 사용](/ko/docs/reference/using-api/) - 쿠버네티스 API에 대한 개요 +* [API 접근 제어](/ko/docs/reference/access-authn-authz/) - 쿠버네티스가 API 접근을 제어하는 방법에 대한 세부사항 +* [잘 알려진 레이블, 어노테이션과 테인트](/docs/reference/kubernetes-api/labels-annotations-taints/) -## API 클라이언트 라이브러리 +## 공식적으로 지원되는 클라이언트 라이브러리 프로그래밍 언어에서 쿠버네티스 API를 호출하기 위해서, [클라이언트 라이브러리](/ko/docs/reference/using-api/client-libraries/)를 사용할 수 있다. @@ -31,22 +37,46 @@ content_type: concept - [쿠버네티스 Python 클라이언트 라이브러리](https://github.com/kubernetes-client/python) - [쿠버네티스 Java 클라이언트 라이브러리](https://github.com/kubernetes-client/java) - [쿠버네티스 JavaScript 클라이언트 라이브러리](https://github.com/kubernetes-client/javascript) +- [쿠버네티스 Dotnet 클라이언트 라이브러리](https://github.com/kubernetes-client/csharp) +- [쿠버네티스 Haskell 클라이언트 라이브러리](https://github.com/kubernetes-client/haskell) -## CLI 레퍼런스 +## CLI * [kubectl](/ko/docs/reference/kubectl/overview/) - 명령어를 실행하거나 쿠버네티스 클러스터를 관리하기 위해 사용하는 주된 CLI 도구. * [JSONPath](/ko/docs/reference/kubectl/jsonpath/) - kubectl에서 [JSONPath 표현](https://goessner.net/articles/JsonPath/)을 사용하기 위한 문법 가이드. * [kubeadm](/ko/docs/reference/setup-tools/kubeadm/) - 안정적인 쿠버네티스 클러스터를 쉽게 프로비전하기 위한 CLI 도구. 
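위 목록에 있는 JSONPath 문법을 `kubectl` 과 함께 사용하는 간단한 예시는 다음과 같다.
네임스페이스(`kube-system`)는 설명을 위해 임의로 고른 값이다.

```shell
# 모든 파드의 이름만 출력한다.
kubectl get pods -n kube-system -o jsonpath='{.items[*].metadata.name}'

# 파드 이름과 해당 파드가 스케줄된 노드를 한 줄에 하나씩 출력한다.
kubectl get pods -n kube-system \
  -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}'
```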
-## 컴포넌트 레퍼런스 +## 컴포넌트 -* [kubelet](/docs/reference/command-line-tools-reference/kubelet/) - 각 노드에서 구동되는 주요한 *노드 에이전트*. kubelet은 PodSpecs 집합을 가지며 기술된 컨테이너가 구동되고 있는지, 정상 작동하는지를 보장한다. -* [kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/) - 파드, 서비스, 레플리케이션 컨트롤러와 같은 API 오브젝트에 대한 검증과 구성을 수행하는 REST API. +* [kubelet](/docs/reference/command-line-tools-reference/kubelet/) - 각 +노드에서 구동되는 주요한 에이전트. kubelet은 PodSpecs 집합을 가지며 +기술된 컨테이너가 구동되고 있는지, 정상 작동하는지를 보장한다. +* [kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/) - +파드, 서비스, 레플리케이션 컨트롤러와 같은 API 오브젝트에 대한 검증과 구성을 +수행하는 REST API. * [kube-controller-manager](/docs/reference/command-line-tools-reference/kube-controller-manager/) - 쿠버네티스에 탑재된 핵심 제어 루프를 포함하는 데몬. -* [kube-proxy](/docs/reference/command-line-tools-reference/kube-proxy/) - 간단한 TCP/UDP 스트림 포워딩이나 백-엔드 집합에 걸쳐서 라운드-로빈 TCP/UDP 포워딩을 할 수 있다. +* [kube-proxy](/docs/reference/command-line-tools-reference/kube-proxy/) - 간단한 +TCP/UDP 스트림 포워딩이나 백-엔드 집합에 걸쳐서 라운드-로빈 TCP/UDP 포워딩을 +할 수 있다. * [kube-scheduler](/docs/reference/command-line-tools-reference/kube-scheduler/) - 가용성, 성능 및 용량을 관리하는 스케줄러. + * [kube-scheduler 정책](/ko/docs/reference/scheduling/policies) - * [kube-scheduler 프로파일](/docs/reference/scheduling/config#profiles) + * [kube-scheduler 프로파일](/ko/docs/reference/scheduling/config/#여러-프로파일) + +## 환경설정 API + +이 섹션은 쿠버네티스 구성요소 또는 도구를 환경설정하는 데에 사용되는 +"미발표된" API를 다룬다. 이 API들은 사용자나 관리자가 클러스터를 +사용/관리하는 데에 중요하지만, 이들 API의 대부분은 아직 API 서버가 +제공하지 않는다. + +* [kubelet 환경설정 (v1beta1)](/docs/reference/config-api/kubelet-config.v1beta1/) +* [kube-scheduler 환경설정 (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) +* [kube-scheduler 정책 레퍼런스 (v1)](/docs/reference/config-api/kube-scheduler-policy-config.v1/) +* [kube-proxy 환경설정 (v1alpha1)](/docs/reference/config-api/kube-proxy-config.v1alpha1/) +* [`audit.k8s.io/v1` API](/docs/reference/config-api/apiserver-audit.v1/) +* [클라이언트 인증 API (v1beta1)](/docs/reference/config-api/client-authentication.v1beta1/) +* [WebhookAdmission 환경설정 (v1)](/docs/reference/config-api/apiserver-webhookadmission.v1/) ## 설계 문서 diff --git a/content/ko/docs/reference/access-authn-authz/_index.md b/content/ko/docs/reference/access-authn-authz/_index.md index 9e23c5d767838..0c910199b7676 100644 --- a/content/ko/docs/reference/access-authn-authz/_index.md +++ b/content/ko/docs/reference/access-authn-authz/_index.md @@ -1,6 +1,6 @@ --- title: API 접근 제어 -weight: 20 +weight: 15 no_list: true --- diff --git a/content/ko/docs/reference/access-authn-authz/authorization.md b/content/ko/docs/reference/access-authn-authz/authorization.md index b34f68e392db0..a9370ea74b9a0 100644 --- a/content/ko/docs/reference/access-authn-authz/authorization.md +++ b/content/ko/docs/reference/access-authn-authz/authorization.md @@ -99,6 +99,9 @@ DELETE | delete(개별 리소스), deletecollection(리소스 모음) ```bash kubectl auth can-i create deployments --namespace dev ``` + +다음과 유사하게 출력된다. + ``` yes ``` @@ -106,6 +109,9 @@ yes ```shell kubectl auth can-i create deployments --namespace prod ``` + +다음과 유사하게 출력된다. + ``` no ``` @@ -116,6 +122,9 @@ no ```bash kubectl auth can-i list secrets --namespace dev --as dave ``` + +다음과 유사하게 출력된다. + ``` no ``` @@ -145,7 +154,7 @@ EOF ``` 생성된 `SelfSubjectAccessReview` 는 다음과 같다. 
-``` +```yaml apiVersion: authorization.k8s.io/v1 kind: SelfSubjectAccessReview metadata: diff --git a/content/ko/docs/reference/access-authn-authz/service-accounts-admin.md b/content/ko/docs/reference/access-authn-authz/service-accounts-admin.md index f9bc6a1112722..a64633ed52c80 100644 --- a/content/ko/docs/reference/access-authn-authz/service-accounts-admin.md +++ b/content/ko/docs/reference/access-authn-authz/service-accounts-admin.md @@ -55,9 +55,9 @@ weight: 50 1. `/var/run/secrets/kubernetes.io/serviceaccount` 에 마운트된 파드의 각 컨테이너에 `volumeSource` 를 추가한다. #### 바인딩된 서비스 어카운트 토큰 볼륨 -{{< feature-state for_k8s_version="v1.13" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} -`BoundServiceAccountTokenVolume` 기능 게이트가 활성화되면, 서비스 어카운트 어드미션 컨트롤러가 +`BoundServiceAccountTokenVolume` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)가 활성화되면, 서비스 어카운트 어드미션 컨트롤러가 시크릿 볼륨 대신 프로젝티드 서비스 어카운트 토큰 볼륨을 추가한다. 서비스 어카운트 토큰은 기본적으로 1시간 후에 만료되거나 파드가 삭제된다. [프로젝티드 볼륨](/docs/tasks/configure-pod-container/configure-projected-volume-storage/)에 대한 자세한 내용을 참고한다. 이 기능은 모든 네임스페이스에 "kube-root-ca.crt" 컨피그맵을 게시하는 활성화된 `RootCAConfigMap` 기능 게이트에 따라 다르다. 이 컨피그맵에는 kube-apiserver에 대한 연결을 확인하는 데 사용되는 CA 번들이 포함되어 있다. diff --git a/content/ko/docs/reference/command-line-tools-reference/_index.md b/content/ko/docs/reference/command-line-tools-reference/_index.md index 14025d0f71e73..d975db37b1708 100644 --- a/content/ko/docs/reference/command-line-tools-reference/_index.md +++ b/content/ko/docs/reference/command-line-tools-reference/_index.md @@ -1,4 +1,4 @@ --- -title: 커맨드 라인 도구 레퍼런스 +title: 컴포넌트 도구 weight: 60 --- diff --git a/content/ko/docs/reference/command-line-tools-reference/feature-gates.md b/content/ko/docs/reference/command-line-tools-reference/feature-gates.md index 6fa2c58a56f9c..a0f970d7c40b0 100644 --- a/content/ko/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/ko/docs/reference/command-line-tools-reference/feature-gates.md @@ -48,20 +48,21 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | 기능 | 디폴트 | 단계 | 도입 | 종료 | |---------|---------|-------|-------|-------| -| `AnyVolumeDataSource` | `false` | 알파 | 1.18 | | | `APIListChunking` | `false` | 알파 | 1.8 | 1.8 | | `APIListChunking` | `true` | 베타 | 1.9 | | | `APIPriorityAndFairness` | `false` | 알파 | 1.17 | 1.19 | | `APIPriorityAndFairness` | `true` | 베타 | 1.20 | | -| `APIResponseCompression` | `false` | 알파 | 1.7 | | +| `APIResponseCompression` | `false` | 알파 | 1.7 | 1.15 | +| `APIResponseCompression` | `false` | 베타 | 1.16 | | | `APIServerIdentity` | `false` | 알파 | 1.20 | | +| `AllowInsecureBackendProxy` | `true` | 베타 | 1.17 | | +| `AnyVolumeDataSource` | `false` | 알파 | 1.18 | | | `AppArmor` | `true` | 베타 | 1.4 | | | `BalanceAttachedNodeVolumes` | `false` | 알파 | 1.11 | | -| `BoundServiceAccountTokenVolume` | `false` | 알파 | 1.13 | | +| `BoundServiceAccountTokenVolume` | `false` | 알파 | 1.13 | 1.20 | +| `BoundServiceAccountTokenVolume` | `true` | 베타 | 1.21 | | | `CPUManager` | `false` | 알파 | 1.8 | 1.9 | | `CPUManager` | `true` | 베타 | 1.10 | | -| `CRIContainerLogRotation` | `false` | 알파 | 1.10 | 1.10 | -| `CRIContainerLogRotation` | `true` | 베타| 1.11 | | | `CSIInlineVolume` | `false` | 알파 | 1.15 | 1.15 | | `CSIInlineVolume` | `true` | 베타 | 1.16 | - | | `CSIMigration` | `false` | 알파 | 1.14 | 1.16 | @@ -72,43 +73,42 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `CSIMigrationAzureDisk` | `false` | 알파 | 1.15 | 1.18 | | `CSIMigrationAzureDisk` | `false` | 베타 | 1.19 | | | `CSIMigrationAzureDiskComplete` | `false` | 알파 | 1.17 
| | -| `CSIMigrationAzureFile` | `false` | 알파 | 1.15 | | +| `CSIMigrationAzureFile` | `false` | 알파 | 1.15 | 1.19 | +| `CSIMigrationAzureFile` | `false` | 베타 | 1.21 | | | `CSIMigrationAzureFileComplete` | `false` | 알파 | 1.17 | | | `CSIMigrationGCE` | `false` | 알파 | 1.14 | 1.16 | | `CSIMigrationGCE` | `false` | 베타 | 1.17 | | | `CSIMigrationGCEComplete` | `false` | 알파 | 1.17 | | -| `CSIMigrationOpenStack` | `false` | 알파 | 1.14 | | +| `CSIMigrationOpenStack` | `false` | 알파 | 1.14 | 1.17 | +| `CSIMigrationOpenStack` | `true` | 베타 | 1.18 | | | `CSIMigrationOpenStackComplete` | `false` | 알파 | 1.17 | | | `CSIMigrationvSphere` | `false` | 베타 | 1.19 | | | `CSIMigrationvSphereComplete` | `false` | 베타 | 1.19 | | -| `CSIServiceAccountToken` | `false` | 알파 | 1.20 | | -| `CSIStorageCapacity` | `false` | 알파 | 1.19 | | +| `CSIServiceAccountToken` | `false` | 알파 | 1.20 | 1.20 | +| `CSIServiceAccountToken` | `true` | 베타 | 1.21 | | +| `CSIStorageCapacity` | `false` | 알파 | 1.19 | 1.20 | +| `CSIStorageCapacity` | `true` | 베타 | 1.21 | | | `CSIVolumeFSGroupPolicy` | `false` | 알파 | 1.19 | 1.19 | | `CSIVolumeFSGroupPolicy` | `true` | 베타 | 1.20 | | | `ConfigurableFSGroupPolicy` | `false` | 알파 | 1.18 | 1.19 | | `ConfigurableFSGroupPolicy` | `true` | 베타 | 1.20 | | -| `CronJobControllerV2` | `false` | 알파 | 1.20 | | +| `CronJobControllerV2` | `false` | 알파 | 1.20 | 1.20 | +| `CronJobControllerV2` | `true` | 베타 | 1.21 | | | `CustomCPUCFSQuotaPeriod` | `false` | 알파 | 1.12 | | -| `CustomResourceDefaulting` | `false` | 알파| 1.15 | 1.15 | -| `CustomResourceDefaulting` | `true` | 베타 | 1.16 | | | `DefaultPodTopologySpread` | `false` | 알파 | 1.19 | 1.19 | | `DefaultPodTopologySpread` | `true` | 베타 | 1.20 | | | `DevicePlugins` | `false` | 알파 | 1.8 | 1.9 | | `DevicePlugins` | `true` | 베타 | 1.10 | | | `DisableAcceleratorUsageMetrics` | `false` | 알파 | 1.19 | 1.19 | -| `DisableAcceleratorUsageMetrics` | `true` | 베타 | 1.20 | 1.22 | -| `DownwardAPIHugePages` | `false` | 알파 | 1.20 | | -| `DryRun` | `false` | 알파 | 1.12 | 1.12 | -| `DryRun` | `true` | 베타 | 1.13 | | +| `DisableAcceleratorUsageMetrics` | `true` | 베타 | 1.20 | | +| `DownwardAPIHugePages` | `false` | 알파 | 1.20 | 1.20 | +| `DownwardAPIHugePages` | `false` | 베타 | 1.21 | | | `DynamicKubeletConfig` | `false` | 알파 | 1.4 | 1.10 | | `DynamicKubeletConfig` | `true` | 베타 | 1.11 | | -| `EndpointSlice` | `false` | 알파 | 1.16 | 1.16 | -| `EndpointSlice` | `false` | 베타 | 1.17 | | -| `EndpointSlice` | `true` | 베타 | 1.18 | | -| `EndpointSliceNodeName` | `false` | 알파 | 1.20 | | +| `EfficientWatchResumption` | `false` | 알파 | 1.20 | | | `EndpointSliceProxying` | `false` | 알파 | 1.18 | 1.18 | | `EndpointSliceProxying` | `true` | 베타 | 1.19 | | -| `EndpointSliceTerminating` | `false` | 알파 | 1.20 | | +| `EndpointSliceTerminatingCondition` | `false` | 알파 | 1.20 | | | `EphemeralContainers` | `false` | 알파 | 1.16 | | | `ExpandCSIVolumes` | `false` | 알파 | 1.14 | 1.15 | | `ExpandCSIVolumes` | `true` | 베타 | 1.16 | | @@ -117,64 +117,76 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `ExpandPersistentVolumes` | `false` | 알파 | 1.8 | 1.10 | | `ExpandPersistentVolumes` | `true` | 베타 | 1.11 | | | `ExperimentalHostUserNamespaceDefaulting` | `false` | 베타 | 1.5 | | -| `GenericEphemeralVolume` | `false` | 알파 | 1.19 | | -| `GracefulNodeShutdown` | `false` | 알파 | 1.20 | | +| `GenericEphemeralVolume` | `false` | 알파 | 1.19 | 1.20 | +| `GenericEphemeralVolume` | `true` | 베타 | 1.21 | | +| `GracefulNodeShutdown` | `false` | 알파 | 1.20 | 1.20 | +| `GracefulNodeShutdown` | `true` | 베타 | 1.21 | | +| `HPAContainerMetrics` | `false` | 알파 
| 1.20 | | | `HPAScaleToZero` | `false` | 알파 | 1.16 | | | `HugePageStorageMediumSize` | `false` | 알파 | 1.18 | 1.18 | | `HugePageStorageMediumSize` | `true` | 베타 | 1.19 | | -| `HyperVContainer` | `false` | 알파 | 1.10 | | -| `ImmutableEphemeralVolumes` | `false` | 알파 | 1.18 | 1.18 | -| `ImmutableEphemeralVolumes` | `true` | 베타 | 1.19 | | -| `IPv6DualStack` | `false` | 알파 | 1.16 | | -| `LegacyNodeRoleBehavior` | `true` | 알파 | 1.16 | | +| `IngressClassNamespacedParams` | `false` | 알파 | 1.21 | | +| `IPv6DualStack` | `false` | 알파 | 1.15 | 1.20 | +| `IPv6DualStack` | `true` | 베타 | 1.21 | | +| `KubeletCredentialProviders` | `false` | 알파 | 1.20 | | +| `KubeletPodResources` | `true` | 알파 | 1.13 | 1.14 | +| `KubeletPodResources` | `true` | 베타 | 1.15 | | +| `LegacyNodeRoleBehavior` | `false` | 알파 | 1.16 | 1.18 | +| `LegacyNodeRoleBehavior` | `true` | True | 1.19 | | | `LocalStorageCapacityIsolation` | `false` | 알파 | 1.7 | 1.9 | | `LocalStorageCapacityIsolation` | `true` | 베타 | 1.10 | | | `LocalStorageCapacityIsolationFSQuotaMonitoring` | `false` | 알파 | 1.15 | | +| `LogarithmicScaleDown` | `false` | 알파 | 1.21 | | +| `KubeletPodResourcesGetAllocatable` | `false` | 알파 | 1.21 | | | `MixedProtocolLBService` | `false` | 알파 | 1.20 | | -| `MountContainers` | `false` | 알파 | 1.9 | | +| `NamespaceDefaultLabelName` | `true` | 베타 | 1.21 | | +| `NetworkPolicyEndPort` | `false` | 알파 | 1.21 | | | `NodeDisruptionExclusion` | `false` | 알파 | 1.16 | 1.18 | | `NodeDisruptionExclusion` | `true` | 베타 | 1.19 | | | `NonPreemptingPriority` | `false` | 알파 | 1.15 | 1.18 | | `NonPreemptingPriority` | `true` | 베타 | 1.19 | | -| `PodDisruptionBudget` | `false` | 알파 | 1.3 | 1.4 | -| `PodDisruptionBudget` | `true` | 베타 | 1.5 | | +| `PodDeletionCost` | `false` | 알파 | 1.21 | | +| `PodAffinityNamespaceSelector` | `false` | 알파 | 1.21 | | | `PodOverhead` | `false` | 알파 | 1.16 | 1.17 | | `PodOverhead` | `true` | 베타 | 1.18 | | +| `ProbeTerminationGracePeriod` | `false` | 알파 | 1.21 | | | `ProcMountType` | `false` | 알파 | 1.12 | | | `QOSReserved` | `false` | 알파 | 1.11 | | | `RemainingItemCount` | `false` | 알파 | 1.15 | | -| `RootCAConfigMap` | `false` | 알파 | 1.13 | 1.19 | -| `RootCAConfigMap` | `true` | 베타 | 1.20 | | +| `RemoveSelfLink` | `false` | 알파 | 1.16 | 1.19 | +| `RemoveSelfLink` | `true` | 베타 | 1.20 | | | `RotateKubeletServerCertificate` | `false` | 알파 | 1.7 | 1.11 | | `RotateKubeletServerCertificate` | `true` | 베타 | 1.12 | | | `RunAsGroup` | `true` | 베타 | 1.14 | | -| `RuntimeClass` | `false` | 알파 | 1.12 | 1.13 | -| `RuntimeClass` | `true` | 베타 | 1.14 | | | `SCTPSupport` | `false` | 알파 | 1.12 | 1.18 | | `SCTPSupport` | `true` | 베타 | 1.19 | | | `ServerSideApply` | `false` | 알파 | 1.14 | 1.15 | | `ServerSideApply` | `true` | 베타 | 1.16 | | -| `ServiceAccountIssuerDiscovery` | `false` | 알파 | 1.18 | | -| `ServiceLBNodePortControl` | `false` | 알파 | 1.20 | 1.20 | +| `ServiceInternalTrafficPolicy` | `false` | 알파 | 1.21 | | +| `ServiceLBNodePortControl` | `false` | 알파 | 1.20 | | +| `ServiceLoadBalancerClass` | `false` | 알파 | 1.21 | | | `ServiceNodeExclusion` | `false` | 알파 | 1.8 | 1.18 | | `ServiceNodeExclusion` | `true` | 베타 | 1.19 | | | `ServiceTopology` | `false` | 알파 | 1.17 | | -| `SizeMemoryBackedVolumes` | `false` | 알파 | 1.20 | | | `SetHostnameAsFQDN` | `false` | 알파 | 1.19 | 1.19 | | `SetHostnameAsFQDN` | `true` | 베타 | 1.20 | | +| `SizeMemoryBackedVolumes` | `false` | 알파 | 1.20 | | +| `StorageVersionAPI` | `false` | 알파 | 1.20 | | | `StorageVersionHash` | `false` | 알파 | 1.14 | 1.14 | | `StorageVersionHash` | `true` | 베타 | 1.15 | | -| `Sysctls` 
| `true` | 베타 | 1.11 | | +| `SuspendJob` | `false` | 알파 | 1.21 | | | `TTLAfterFinished` | `false` | 알파 | 1.12 | | +| `TopologyAwareHints` | `false` | 알파 | 1.21 | | | `TopologyManager` | `false` | 알파 | 1.16 | 1.17 | | `TopologyManager` | `true` | 베타 | 1.18 | | | `ValidateProxyRedirects` | `false` | 알파 | 1.12 | 1.13 | | `ValidateProxyRedirects` | `true` | 베타 | 1.14 | | -| `WindowsEndpointSliceProxying` | `false` | 알파 | 1.19 | | -| `WindowsGMSA` | `false` | 알파 | 1.14 | | -| `WindowsGMSA` | `true` | 베타 | 1.16 | | +| `WarningHeaders` | `true` | 베타 | 1.19 | | | `WinDSR` | `false` | 알파 | 1.14 | | -| `WinOverlay` | `false` | 알파 | 1.14 | | +| `WinOverlay` | `false` | 알파 | 1.14 | 1.19 | +| `WinOverlay` | `true` | 베타 | 1.20 | | +| `WindowsEndpointSliceProxying` | `false` | 알파 | 1.19 | 1.20 | +| `WindowsEndpointSliceProxying` | `true` | beta | 1.21 | | {{< /table >}} ### GA 또는 사용 중단된 기능을 위한 기능 게이트 @@ -195,6 +207,9 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `BlockVolume` | `false` | 알파 | 1.9 | 1.12 | | `BlockVolume` | `true` | 베타 | 1.13 | 1.17 | | `BlockVolume` | `true` | GA | 1.18 | - | +| `CRIContainerLogRotation` | `false` | 알파 | 1.10 | 1.10 | +| `CRIContainerLogRotation` | `true` | 베타 | 1.11 | 1.20 | +| `CRIContainerLogRotation` | `true` | GA | 1.21 | - | | `CSIBlockVolume` | `false` | 알파 | 1.11 | 1.13 | | `CSIBlockVolume` | `true` | 베타 | 1.14 | 1.17 | | `CSIBlockVolume` | `true` | GA | 1.18 | - | @@ -210,6 +225,7 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `CSIPersistentVolume` | `false` | 알파 | 1.9 | 1.9 | | `CSIPersistentVolume` | `true` | 베타 | 1.10 | 1.12 | | `CSIPersistentVolume` | `true` | GA | 1.13 | - | +| `CSIVolumeHealth` | `false` | 알파 | 1.21 | - | | `CustomPodDNS` | `false` | 알파 | 1.9 | 1.9 | | `CustomPodDNS` | `true` | 베타| 1.10 | 1.13 | | `CustomPodDNS` | `true` | GA | 1.14 | - | @@ -228,14 +244,24 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `CustomResourceWebhookConversion` | `false` | 알파 | 1.13 | 1.14 | | `CustomResourceWebhookConversion` | `true` | 베타 | 1.15 | 1.15 | | `CustomResourceWebhookConversion` | `true` | GA | 1.16 | - | +| `DryRun` | `false` | 알파 | 1.12 | 1.12 | +| `DryRun` | `true` | 베타 | 1.13 | 1.18 | +| `DryRun` | `true` | GA | 1.19 | - | | `DynamicAuditing` | `false` | 알파 | 1.13 | 1.18 | | `DynamicAuditing` | - | 사용중단 | 1.19 | - | | `DynamicProvisioningScheduling` | `false` | 알파 | 1.11 | 1.11 | | `DynamicProvisioningScheduling` | - | 사용중단| 1.12 | - | | `DynamicVolumeProvisioning` | `true` | 알파 | 1.3 | 1.7 | | `DynamicVolumeProvisioning` | `true` | GA | 1.8 | - | +| `EnableAggregatedDiscoveryTimeout` | `true` | 사용중단 | 1.16 | - | | `EnableEquivalenceClassCache` | `false` | 알파 | 1.8 | 1.14 | | `EnableEquivalenceClassCache` | - | 사용중단 | 1.15 | - | +| `EndpointSlice` | `false` | 알파 | 1.16 | 1.16 | +| `EndpointSlice` | `false` | 베타 | 1.17 | 1.17 | +| `EndpointSlice` | `true` | 베타 | 1.18 | 1.21 | +| `EndpointSlice` | `true` | GA | 1.21 | - | +| `EndpointSliceNodeName` | `false` | 알파 | 1.20 | 1.21 | +| `EndpointSliceNodeName` | `true` | GA | 1.21 | - | | `ExperimentalCriticalPodAnnotation` | `false` | 알파 | 1.5 | 1.12 | | `ExperimentalCriticalPodAnnotation` | `false` | 사용중단 | 1.13 | - | | `EvenPodsSpread` | `false` | 알파 | 1.16 | 1.17 | @@ -247,26 +273,38 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `HugePages` | `false` | 알파 | 1.8 | 1.9 | | `HugePages` | `true` | 베타| 1.10 | 1.13 | | `HugePages` | `true` | GA | 1.14 | - | +| `HyperVContainer` | `false` | 알파 | 1.10 | 1.19 | +| `HyperVContainer` | `false` | 사용중단 | 1.20 | - | +| `ImmutableEphemeralVolumes` | `false` | 알파 | 1.18 | 1.18 | +| 
`ImmutableEphemeralVolumes` | `true` | 베타 | 1.19 | 1.20 | +| `ImmutableEphemeralVolumes` | `true` | GA | 1.21 | | +| `IndexedJob` | `false` | 알파 | 1.21 | | | `Initializers` | `false` | 알파 | 1.7 | 1.13 | | `Initializers` | - | 사용중단 | 1.14 | - | | `KubeletConfigFile` | `false` | 알파 | 1.8 | 1.9 | | `KubeletConfigFile` | - | 사용중단 | 1.10 | - | -| `KubeletCredentialProviders` | `false` | 알파 | 1.20 | 1.20 | | `KubeletPluginsWatcher` | `false` | 알파 | 1.11 | 1.11 | | `KubeletPluginsWatcher` | `true` | 베타 | 1.12 | 1.12 | | `KubeletPluginsWatcher` | `true` | GA | 1.13 | - | | `KubeletPodResources` | `false` | 알파 | 1.13 | 1.14 | | `KubeletPodResources` | `true` | 베타 | 1.15 | | | `KubeletPodResources` | `true` | GA | 1.20 | | +| `MountContainers` | `false` | 알파 | 1.9 | 1.16 | +| `MountContainers` | `false` | 사용중단 | 1.17 | - | | `MountPropagation` | `false` | 알파 | 1.8 | 1.9 | | `MountPropagation` | `true` | 베타 | 1.10 | 1.11 | | `MountPropagation` | `true` | GA | 1.12 | - | | `NodeLease` | `false` | 알파 | 1.12 | 1.13 | | `NodeLease` | `true` | 베타 | 1.14 | 1.16 | | `NodeLease` | `true` | GA | 1.17 | - | +| `PVCProtection` | `false` | 알파 | 1.9 | 1.9 | +| `PVCProtection` | - | 사용중단 | 1.10 | - | | `PersistentLocalVolumes` | `false` | 알파 | 1.7 | 1.9 | | `PersistentLocalVolumes` | `true` | 베타 | 1.10 | 1.13 | | `PersistentLocalVolumes` | `true` | GA | 1.14 | - | +| `PodDisruptionBudget` | `false` | 알파 | 1.3 | 1.4 | +| `PodDisruptionBudget` | `true` | 베타 | 1.5 | 1.20 | +| `PodDisruptionBudget` | `true` | GA | 1.21 | - | | `PodPriority` | `false` | 알파 | 1.8 | 1.10 | | `PodPriority` | `true` | 베타 | 1.11 | 1.13 | | `PodPriority` | `true` | GA | 1.14 | - | @@ -276,14 +314,15 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `PodShareProcessNamespace` | `false` | 알파 | 1.10 | 1.11 | | `PodShareProcessNamespace` | `true` | 베타 | 1.12 | 1.16 | | `PodShareProcessNamespace` | `true` | GA | 1.17 | - | -| `PVCProtection` | `false` | 알파 | 1.9 | 1.9 | -| `PVCProtection` | - | 사용중단 | 1.10 | - | | `RequestManagement` | `false` | 알파 | 1.15 | 1.16 | | `ResourceLimitsPriorityFunction` | `false` | 알파 | 1.9 | 1.18 | | `ResourceLimitsPriorityFunction` | - | 사용중단 | 1.19 | - | | `ResourceQuotaScopeSelectors` | `false` | 알파 | 1.11 | 1.11 | | `ResourceQuotaScopeSelectors` | `true` | 베타 | 1.12 | 1.16 | | `ResourceQuotaScopeSelectors` | `true` | GA | 1.17 | - | +| `RootCAConfigMap` | `false` | 알파 | 1.13 | 1.19 | +| `RootCAConfigMap` | `true` | 베타 | 1.20 | 1.20 | +| `RootCAConfigMap` | `true` | GA | 1.21 | - | | `RotateKubeletClientCertificate` | `true` | 베타 | 1.8 | 1.18 | | `RotateKubeletClientCertificate` | `true` | GA | 1.19 | - | | `RuntimeClass` | `false` | 알파 | 1.12 | 1.13 | @@ -295,6 +334,9 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `SCTPSupport` | `false` | 알파 | 1.12 | 1.18 | | `SCTPSupport` | `true` | 베타 | 1.19 | 1.19 | | `SCTPSupport` | `true` | GA | 1.20 | - | +| `ServiceAccountIssuerDiscovery` | `false` | 알파 | 1.18 | 1.19 | +| `ServiceAccountIssuerDiscovery` | `true` | 베타 | 1.20 | 1.20 | +| `ServiceAccountIssuerDiscovery` | `true` | GA | 1.21 | - | | `ServiceAppProtocol` | `false` | 알파 | 1.18 | 1.18 | | `ServiceAppProtocol` | `true` | 베타 | 1.19 | | | `ServiceAppProtocol` | `true` | GA | 1.20 | - | @@ -319,6 +361,8 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `SupportPodPidsLimit` | `false` | 알파 | 1.10 | 1.13 | | `SupportPodPidsLimit` | `true` | 베타 | 1.14 | 1.19 | | `SupportPodPidsLimit` | `true` | GA | 1.20 | - | +| `Sysctls` | `true` | 베타 | 1.11 | 1.20 | +| `Sysctls` | `true` | GA | 1.21 | | | `TaintBasedEvictions` | `false` | 알파 | 1.6 | 1.12 | 
| `TaintBasedEvictions` | `true` | 베타 | 1.13 | 1.17 | | `TaintBasedEvictions` | `true` | GA | 1.18 | - | @@ -331,6 +375,7 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `TokenRequestProjection` | `false` | 알파 | 1.11 | 1.11 | | `TokenRequestProjection` | `true` | 베타 | 1.12 | 1.19 | | `TokenRequestProjection` | `true` | GA | 1.20 | - | +| `VolumeCapacityPriority` | `false` | 알파 | 1.21 | - | | `VolumeSnapshotDataSource` | `false` | 알파 | 1.12 | 1.16 | | `VolumeSnapshotDataSource` | `true` | 베타 | 1.17 | 1.19 | | `VolumeSnapshotDataSource` | `true` | GA | 1.20 | - | @@ -340,7 +385,7 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `VolumeScheduling` | `false` | 알파 | 1.9 | 1.9 | | `VolumeScheduling` | `true` | 베타 | 1.10 | 1.12 | | `VolumeScheduling` | `true` | GA | 1.13 | - | -| `VolumeSubpath` | `true` | GA | 1.13 | - | +| `VolumeSubpath` | `true` | GA | 1.10 | - | | `VolumeSubpathEnvExpansion` | `false` | 알파 | 1.14 | 1.14 | | `VolumeSubpathEnvExpansion` | `true` | 베타 | 1.15 | 1.16 | | `VolumeSubpathEnvExpansion` | `true` | GA | 1.17 | - | @@ -398,62 +443,134 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 각 기능 게이트는 특정 기능을 활성화/비활성화하도록 설계되었다. +- `APIListChunking`: API 클라이언트가 API 서버에서 (`LIST` 또는 `GET`) + 리소스를 청크(chunks)로 검색할 수 있도록 한다. +- `APIPriorityAndFairness`: 각 서버의 우선 순위와 공정성을 통해 동시 요청을 + 관리할 수 있다. (`RequestManagement` 에서 이름이 변경됨) +- `APIResponseCompression`: `LIST` 또는 `GET` 요청에 대한 API 응답을 압축한다. +- `APIServerIdentity`: 클러스터의 각 API 서버에 ID를 할당한다. - `Accelerators`: 도커 사용 시 Nvidia GPU 지원 활성화한다. - `AdvancedAuditing`: [고급 감사](/docs/tasks/debug-application-cluster/audit/#advanced-audit) 기능을 활성화한다. -- `AffinityInAnnotations`(*사용 중단됨*): [파드 어피니티 또는 안티-어피니티](/ko/docs/concepts/scheduling-eviction/assign-pod-node/#어피니티-affinity-와-안티-어피니티-anti-affinity) 설정을 활성화한다. +- `AffinityInAnnotations`(*사용 중단됨*): [파드 어피니티 또는 안티-어피니티](/ko/docs/concepts/scheduling-eviction/assign-pod-node/#어피니티-affinity-와-안티-어피니티-anti-affinity) + 설정을 활성화한다. - `AllowExtTrafficLocalEndpoints`: 서비스가 외부 요청을 노드의 로컬 엔드포인트로 라우팅할 수 있도록 한다. +- `AllowInsecureBackendProxy`: 사용자가 파드 로그 요청에서 kubelet의 + TLS 확인을 건너뛸 수 있도록 한다. - `AnyVolumeDataSource`: {{< glossary_tooltip text="PVC" term_id="persistent-volume-claim" >}}의 `DataSource` 로 모든 사용자 정의 리소스 사용을 활성화한다. -- `APIListChunking`: API 클라이언트가 API 서버에서 (`LIST` 또는 `GET`) 리소스를 청크(chunks)로 검색할 수 있도록 한다. -- `APIPriorityAndFairness`: 각 서버의 우선 순위와 공정성을 통해 동시 요청을 관리할 수 있다. (`RequestManagement` 에서 이름이 변경됨) -- `APIResponseCompression`: `LIST` 또는 `GET` 요청에 대한 API 응답을 압축한다. -- `APIServerIdentity`: 클러스터의 각 kube-apiserver에 ID를 할당한다. - `AppArmor`: 도커를 사용할 때 리눅스 노드에서 AppArmor 기반의 필수 접근 제어를 활성화한다. - 자세한 내용은 [AppArmor 튜토리얼](/ko/docs/tutorials/clusters/apparmor/)을 참고한다. + 자세한 내용은 [AppArmor 튜토리얼](/ko/docs/tutorials/clusters/apparmor/)을 참고한다. - `AttachVolumeLimit`: 볼륨 플러그인이 노드에 연결될 수 있는 볼륨 수에 대한 제한을 보고하도록 한다. - 자세한 내용은 [동적 볼륨 제한](/ko/docs/concepts/storage/storage-limits/#동적-볼륨-한도)을 참고한다. + 자세한 내용은 [동적 볼륨 제한](/ko/docs/concepts/storage/storage-limits/#동적-볼륨-한도)을 참고한다. - `BalanceAttachedNodeVolumes`: 스케줄링 시 균형 잡힌 리소스 할당을 위해 고려할 노드의 볼륨 수를 포함한다. 스케줄러가 결정을 내리는 동안 CPU, 메모리 사용률 및 볼륨 수가 더 가까운 노드가 선호된다. - `BlockVolume`: 파드에서 원시 블록 장치의 정의와 사용을 활성화한다. - 자세한 내용은 [원시 블록 볼륨 지원](/ko/docs/concepts/storage/persistent-volumes/#원시-블록-볼륨-지원)을 - 참고한다. + 자세한 내용은 [원시 블록 볼륨 지원](/ko/docs/concepts/storage/persistent-volumes/#원시-블록-볼륨-지원)을 + 참고한다. - `BoundServiceAccountTokenVolume`: ServiceAccountTokenVolumeProjection으로 구성된 프로젝션 볼륨을 사용하도록 서비스어카운트 볼륨을 - 마이그레이션한다. 클러스터 관리자는 `serviceaccount_stale_tokens_total` 메트릭을 사용하여 - 확장 토큰에 의존하는 워크로드를 모니터링 할 수 있다. 
이러한 워크로드가 없는 경우 `--service-account-extend-token-expiration=false` 플래그로 - `kube-apiserver`를 시작하여 확장 토큰 기능을 끈다. - 자세한 내용은 [바운드 서비스 계정 토큰](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md)을 - 확인한다. -- `ConfigurableFSGroupPolicy`: 파드에 볼륨을 마운트할 때 fsGroups에 대한 볼륨 권한 변경 정책을 구성할 수 있다. 자세한 내용은 [파드에 대한 볼륨 권한 및 소유권 변경 정책 구성](/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods)을 참고한다. --`CronJobControllerV2` : {{< glossary_tooltip text="크론잡" term_id="cronjob" >}} 컨트롤러의 대체 구현을 사용한다. 그렇지 않으면 동일한 컨트롤러의 버전 1이 선택된다. 버전 2 컨트롤러는 실험적인 성능 향상을 제공한다. -- `CPUManager`: 컨테이너 수준의 CPU 어피니티 지원을 활성화한다. [CPU 관리 정책](/docs/tasks/administer-cluster/cpu-management-policies/)을 참고한다. -- `CRIContainerLogRotation`: cri 컨테이너 런타임에 컨테이너 로그 로테이션을 활성화한다. -- `CSIBlockVolume`: 외부 CSI 볼륨 드라이버가 블록 스토리지를 지원할 수 있게 한다. 자세한 내용은 [`csi` 원시 블록 볼륨 지원](/ko/docs/concepts/storage/volumes/#csi-원시-raw-블록-볼륨-지원) 문서를 참고한다. -- `CSIDriverRegistry`: csi.storage.k8s.io에서 CSIDriver API 오브젝트와 관련된 모든 로직을 활성화한다. + 마이그레이션한다. 클러스터 관리자는 `serviceaccount_stale_tokens_total` 메트릭을 사용하여 + 확장 토큰에 의존하는 워크로드를 모니터링 할 수 있다. 이러한 워크로드가 없는 경우 `--service-account-extend-token-expiration=false` 플래그로 + `kube-apiserver`를 시작하여 확장 토큰 기능을 끈다. + 자세한 내용은 [바운드 서비스 계정 토큰](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md)을 + 확인한다. +- `CPUManager`: 컨테이너 수준의 CPU 어피니티 지원을 활성화한다. + [CPU 관리 정책](/docs/tasks/administer-cluster/cpu-management-policies/)을 참고한다. +- `CRIContainerLogRotation`: cri 컨테이너 런타임에 컨테이너 로그 로테이션을 활성화한다. 로그 파일 사이즈 기본값은 10MB이며, +컨테이너 당 최대 로그 파일 수 기본값은 5이다. 이 값은 kubelet 환경설정으로 변경할 수 있다. +더 자세한 내용은 [노드 레벨에서의 로깅](/ko/docs/concepts/cluster-administration/logging/#노드-레벨에서의-로깅)을 참고한다. +- `CSIBlockVolume`: 외부 CSI 볼륨 드라이버가 블록 스토리지를 지원할 수 있게 한다. + 자세한 내용은 [`csi` 원시 블록 볼륨 지원](/ko/docs/concepts/storage/volumes/#csi-원시-raw-블록-볼륨-지원) + 문서를 참고한다. +- `CSIDriverRegistry`: csi.storage.k8s.io에서 CSIDriver API 오브젝트와 관련된 + 모든 로직을 활성화한다. - `CSIInlineVolume`: 파드에 대한 CSI 인라인 볼륨 지원을 활성화한다. -- `CSIMigration`: shim 및 변환 로직을 통해 볼륨 작업을 인-트리 플러그인에서 사전 설치된 해당 CSI 플러그인으로 라우팅할 수 있다. -- `CSIMigrationAWS`: shim 및 변환 로직을 통해 볼륨 작업을 AWS-EBS 인-트리 플러그인에서 EBS CSI 플러그인으로 라우팅할 수 있다. 노드에 EBS CSI 플러그인이 설치와 구성이 되어 있지 않은 경우 인-트리 EBS 플러그인으로 폴백(falling back)을 지원한다. CSIMigration 기능 플래그가 필요하다. -- `CSIMigrationAWSComplete`: kubelet 및 볼륨 컨트롤러에서 EBS 인-트리 플러그인 등록을 중지하고 shim 및 변환 로직을 사용하여 볼륨 작업을 AWS-EBS 인-트리 플러그인에서 EBS CSI 플러그인으로 라우팅할 수 있다. 클러스터의 모든 노드에 CSIMigration과 CSIMigrationAWS 기능 플래그가 활성화되고 EBS CSI 플러그인이 설치 및 구성이 되어 있어야 한다. -- `CSIMigrationAzureDisk`: shim 및 변환 로직을 통해 볼륨 작업을 Azure-Disk 인-트리 플러그인에서 AzureDisk CSI 플러그인으로 라우팅할 수 있다. 노드에 AzureDisk CSI 플러그인이 설치와 구성이 되어 있지 않은 경우 인-트리 AzureDisk 플러그인으로 폴백을 지원한다. CSIMigration 기능 플래그가 필요하다. -- `CSIMigrationAzureDiskComplete`: kubelet 및 볼륨 컨트롤러에서 Azure-Disk 인-트리 플러그인 등록을 중지하고 shim 및 변환 로직을 사용하여 볼륨 작업을 Azure-Disk 인-트리 플러그인에서 AzureDisk CSI 플러그인으로 라우팅할 수 있다. 클러스터의 모든 노드에 CSIMigration과 CSIMigrationAzureDisk 기능 플래그가 활성화되고 AzureDisk CSI 플러그인이 설치 및 구성이 되어 있어야 한다. -- `CSIMigrationAzureFile`: shim 및 변환 로직을 통해 볼륨 작업을 Azure-File 인-트리 플러그인에서 AzureFile CSI 플러그인으로 라우팅할 수 있다. 노드에 AzureFile CSI 플러그인이 설치 및 구성이 되어 있지 않은 경우 인-트리 AzureFile 플러그인으로 폴백을 지원한다. CSIMigration 기능 플래그가 필요하다. -- `CSIMigrationAzureFileComplete`: kubelet 및 볼륨 컨트롤러에서 Azure 파일 인-트리 플러그인 등록을 중지하고 shim 및 변환 로직을 통해 볼륨 작업을 Azure 파일 인-트리 플러그인에서 AzureFile CSI 플러그인으로 라우팅할 수 있다. 
클러스터의 모든 노드에 CSIMigration과 CSIMigrationAzureFile 기능 플래그가 활성화되고 AzureFile CSI 플러그인이 설치 및 구성이 되어 있어야 한다. -- `CSIMigrationGCE`: shim 및 변환 로직을 통해 볼륨 작업을 GCE-PD 인-트리 플러그인에서 PD CSI 플러그인으로 라우팅할 수 있다. 노드에 PD CSI 플러그인이 설치 및 구성이 되어 있지 않은 경우 인-트리 GCE 플러그인으로 폴백을 지원한다. CSIMigration 기능 플래그가 필요하다. -- `CSIMigrationGCEComplete`: kubelet 및 볼륨 컨트롤러에서 GCE-PD 인-트리 플러그인 등록을 중지하고 shim 및 변환 로직을 통해 볼륨 작업을 GCE-PD 인-트리 플러그인에서 PD CSI 플러그인으로 라우팅할 수 있다. CSIMigration과 CSIMigrationGCE 기능 플래그가 필요하다. -- `CSIMigrationOpenStack`: shim 및 변환 로직을 통해 볼륨 작업을 Cinder 인-트리 플러그인에서 Cinder CSI 플러그인으로 라우팅할 수 있다. 노드에 Cinder CSI 플러그인이 설치 및 구성이 되어 있지 않은 경우 인-트리 Cinder 플러그인으로 폴백을 지원한다. CSIMigration 기능 플래그가 필요하다. -- `CSIMigrationOpenStackComplete`: kubelet 및 볼륨 컨트롤러에서 Cinder 인-트리 플러그인 등록을 중지하고 shim 및 변환 로직이 Cinder 인-트리 플러그인에서 Cinder CSI 플러그인으로 볼륨 작업을 라우팅할 수 있도록 한다. 클러스터의 모든 노드에 CSIMigration과 CSIMigrationOpenStack 기능 플래그가 활성화되고 Cinder CSI 플러그인이 설치 및 구성이 되어 있어야 한다. -- `CSIMigrationvSphere`: vSphere 인-트리 플러그인에서 vSphere CSI 플러그인으로 볼륨 작업을 라우팅하는 shim 및 변환 로직을 사용한다. 노드에 vSphere CSI 플러그인이 설치 및 구성이 되어 있지 않은 경우 인-트리 vSphere 플러그인으로 폴백을 지원한다. CSIMigration 기능 플래그가 필요하다. -- `CSIMigrationvSphereComplete`: kubelet 및 볼륨 컨트롤러에서 vSphere 인-트리 플러그인 등록을 중지하고 shim 및 변환 로직을 활성화하여 vSphere 인-트리 플러그인에서 vSphere CSI 플러그인으로 볼륨 작업을 라우팅할 수 있도록 한다. CSIMigration 및 CSIMigrationvSphere 기능 플래그가 활성화되고 vSphere CSI 플러그인이 클러스터의 모든 노드에 설치 및 구성이 되어 있어야 한다. +- `CSIMigration`: shim 및 변환 로직을 통해 볼륨 작업을 인-트리 플러그인에서 + 사전 설치된 해당 CSI 플러그인으로 라우팅할 수 있다. +- `CSIMigrationAWS`: shim 및 변환 로직을 통해 볼륨 작업을 + AWS-EBS 인-트리 플러그인에서 EBS CSI 플러그인으로 라우팅할 수 있다. 노드에 + EBS CSI 플러그인이 설치와 구성이 되어 있지 않은 경우 인-트리 EBS 플러그인으로 + 폴백(falling back)을 지원한다. CSIMigration 기능 플래그가 필요하다. +- `CSIMigrationAWSComplete`: kubelet 및 볼륨 컨트롤러에서 EBS 인-트리 + 플러그인 등록을 중지하고 shim 및 변환 로직을 사용하여 볼륨 작업을 AWS-EBS + 인-트리 플러그인에서 EBS CSI 플러그인으로 라우팅할 수 있다. + 클러스터의 모든 노드에 CSIMigration과 CSIMigrationAWS 기능 플래그가 활성화되고 + EBS CSI 플러그인이 설치 및 구성이 되어 있어야 한다. +- `CSIMigrationAzureDisk`: shim 및 변환 로직을 통해 볼륨 작업을 + Azure-Disk 인-트리 플러그인에서 AzureDisk CSI 플러그인으로 라우팅할 수 있다. + 노드에 AzureDisk CSI 플러그인이 설치와 구성이 되어 있지 않은 경우 인-트리 + AzureDisk 플러그인으로 폴백을 지원한다. CSIMigration 기능 플래그가 + 필요하다. +- `CSIMigrationAzureDiskComplete`: kubelet 및 볼륨 컨트롤러에서 Azure-Disk 인-트리 + 플러그인 등록을 중지하고 shim 및 변환 로직을 사용하여 볼륨 작업을 + Azure-Disk 인-트리 플러그인에서 AzureDisk CSI 플러그인으로 + 라우팅할 수 있다. 클러스터의 모든 노드에 CSIMigration과 CSIMigrationAzureDisk 기능 + 플래그가 활성화되고 AzureDisk CSI 플러그인이 설치 및 구성이 되어 + 있어야 한다. +- `CSIMigrationAzureFile`: shim 및 변환 로직을 통해 볼륨 작업을 + Azure-File 인-트리 플러그인에서 AzureFile CSI 플러그인으로 라우팅할 수 있다. + 노드에 AzureFile CSI 플러그인이 설치 및 구성이 되어 있지 않은 경우 인-트리 + AzureFile 플러그인으로 폴백을 지원한다. CSIMigration 기능 플래그가 + 필요하다. +- `CSIMigrationAzureFileComplete`: kubelet 및 볼륨 컨트롤러에서 Azure 파일 인-트리 + 플러그인 등록을 중지하고 shim 및 변환 로직을 통해 볼륨 작업을 + Azure 파일 인-트리 플러그인에서 AzureFile CSI 플러그인으로 + 라우팅할 수 있다. 클러스터의 모든 노드에 CSIMigration과 CSIMigrationAzureFile 기능 + 플래그가 활성화되고 AzureFile CSI 플러그인이 설치 및 구성이 되어 + 있어야 한다. +- `CSIMigrationGCE`: shim 및 변환 로직을 통해 볼륨 작업을 + GCE-PD 인-트리 플러그인에서 PD CSI 플러그인으로 라우팅할 수 있다. 노드에 + PD CSI 플러그인이 설치 및 구성이 되어 있지 않은 경우 인-트리 GCE 플러그인으로 폴백을 + 지원한다. CSIMigration 기능 플래그가 필요하다. +- `CSIMigrationGCEComplete`: kubelet 및 볼륨 컨트롤러에서 GCE-PD + 인-트리 플러그인 등록을 중지하고 shim 및 변환 로직을 통해 볼륨 작업을 GCE-PD + 인-트리 플러그인에서 PD CSI 플러그인으로 라우팅할 수 있다. + CSIMigration과 CSIMigrationGCE 기능 플래그가 활성화되고 PD CSI + 플러그인이 클러스터의 모든 노드에 설치 및 구성이 되어 있어야 한다. +- `CSIMigrationOpenStack`: shim 및 변환 로직을 통해 볼륨 작업을 + Cinder 인-트리 플러그인에서 Cinder CSI 플러그인으로 라우팅할 수 있다. 노드에 + Cinder CSI 플러그인이 설치 및 구성이 되어 있지 않은 경우 인-트리 + Cinder 플러그인으로 폴백을 지원한다. CSIMigration 기능 플래그가 필요하다. 
+- `CSIMigrationOpenStackComplete`: kubelet 및 볼륨 컨트롤러에서 + Cinder 인-트리 플러그인 등록을 중지하고 shim 및 변환 로직이 Cinder 인-트리 + 플러그인에서 Cinder CSI 플러그인으로 볼륨 작업을 라우팅할 수 있도록 한다. + 클러스터의 모든 노드에 CSIMigration과 CSIMigrationOpenStack 기능 플래그가 활성화되고 + Cinder CSI 플러그인이 설치 및 구성이 되어 있어야 한다. +- `CSIMigrationvSphere`: vSphere 인-트리 플러그인에서 vSphere CSI 플러그인으로 볼륨 작업을 + 라우팅하는 shim 및 변환 로직을 사용한다. + 노드에 vSphere CSI 플러그인이 설치 및 구성이 되어 있지 않은 경우 + 인-트리 vSphere 플러그인으로 폴백을 지원한다. CSIMigration 기능 플래그가 필요하다. +- `CSIMigrationvSphereComplete`: kubelet 및 볼륨 컨트롤러에서 vSphere 인-트리 + 플러그인 등록을 중지하고 shim 및 변환 로직을 활성화하여 vSphere 인-트리 플러그인에서 + vSphere CSI 플러그인으로 볼륨 작업을 라우팅할 수 있도록 한다. CSIMigration 및 + CSIMigrationvSphere 기능 플래그가 활성화되고 vSphere CSI 플러그인이 + 클러스터의 모든 노드에 설치 및 구성이 되어 있어야 한다. - `CSINodeInfo`: csi.storage.k8s.io에서 CSINodeInfo API 오브젝트와 관련된 모든 로직을 활성화한다. - `CSIPersistentVolume`: [CSI (Container Storage Interface)](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/container-storage-interface.md) 호환 볼륨 플러그인을 통해 프로비저닝된 볼륨을 감지하고 마운트할 수 있다. -- `CSIServiceAccountToken` : 볼륨을 마운트하는 파드의 서비스 계정 토큰을 받을 수 있도록 CSI 드라이버를 활성화한다. [토큰 요청](https://kubernetes-csi.github.io/docs/token-requests.html)을 참조한다. -- `CSIStorageCapacity`: CSI 드라이버가 스토리지 용량 정보를 게시하고 쿠버네티스 스케줄러가 파드를 스케줄할 때 해당 정보를 사용하도록 한다. [스토리지 용량](/docs/concepts/storage/storage-capacity/)을 참고한다. +- `CSIServiceAccountToken` : 볼륨을 마운트하는 파드의 서비스 계정 토큰을 받을 수 있도록 + CSI 드라이버를 활성화한다. + [토큰 요청](https://kubernetes-csi.github.io/docs/token-requests.html)을 참조한다. +- `CSIStorageCapacity`: CSI 드라이버가 스토리지 용량 정보를 게시하고 + 쿠버네티스 스케줄러가 파드를 스케줄할 때 해당 정보를 사용하도록 한다. + [스토리지 용량](/docs/concepts/storage/storage-capacity/)을 참고한다. 자세한 내용은 [`csi` 볼륨 유형](/ko/docs/concepts/storage/volumes/#csi) 문서를 확인한다. -- `CSIVolumeFSGroupPolicy`: CSI드라이버가 `fsGroupPolicy` 필드를 사용하도록 허용한다. 이 필드는 CSI드라이버에서 생성된 볼륨이 마운트될 때 볼륨 소유권과 권한 수정을 지원하는지 여부를 제어한다. -- `CustomCPUCFSQuotaPeriod`: 노드가 CPUCFSQuotaPeriod를 변경하도록 한다. +- `CSIVolumeFSGroupPolicy`: CSI드라이버가 `fsGroupPolicy` 필드를 사용하도록 허용한다. + 이 필드는 CSI드라이버에서 생성된 볼륨이 마운트될 때 볼륨 소유권과 + 권한 수정을 지원하는지 여부를 제어한다. +- `CSIVolumeHealth`: 노드에서의 CSI 볼륨 상태 모니터링 기능을 활성화한다. +- `ConfigurableFSGroupPolicy`: 사용자가 파드에 볼륨을 마운트할 때 fsGroups에 대한 + 볼륨 권한 변경 정책을 구성할 수 있다. 자세한 내용은 + [파드의 볼륨 권한 및 소유권 변경 정책 구성](/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods)을 + 참고한다. +- `CronJobControllerV2`: {{< glossary_tooltip text="크론잡(CronJob)" term_id="cronjob" >}} + 컨트롤러의 대체 구현을 사용한다. 그렇지 않으면, + 동일한 컨트롤러의 버전 1이 선택된다. + 버전 2 컨트롤러는 실험적인 성능 향상을 제공한다. +- `CustomCPUCFSQuotaPeriod`: [kubelet config](/docs/tasks/administer-cluster/kubelet-config-file/)에서 + `cpuCFSQuotaPeriod` 를 노드가 변경할 수 있도록 한다. - `CustomPodDNS`: `dnsConfig` 속성을 사용하여 파드의 DNS 설정을 사용자 정의할 수 있다. 자세한 내용은 [파드의 DNS 설정](/ko/docs/concepts/services-networking/dns-pod-service/#pod-dns-config)을 확인한다. @@ -465,148 +582,278 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 생성된 리소스에서 스키마 기반 유효성 검사를 활성화한다. - `CustomResourceWebhookConversion`: [커스텀리소스데피니션](/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources/)에서 생성된 리소스에 대해 웹 훅 기반의 변환을 활성화한다. - 실행 중인 파드 문제를 해결한다. -- `DisableAcceleratorUsageMetrics`: [kubelet이 수집한 액셀러레이터 지표 비활성화](/ko/docs/concepts/cluster-administration/system-metrics/#액셀러레이터-메트릭-비활성화). -- `DevicePlugins`: 노드에서 [장치 플러그인](/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) - 기반 리소스 프로비저닝을 활성화한다. 
- `DefaultPodTopologySpread`: `PodTopologySpread` 스케줄링 플러그인을 사용하여 [기본 분배](/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints/#내부-기본-제약)를 수행한다.
-- `DownwardAPIHugePages`: 다운워드 API에서 hugepages 사용을 활성화한다.
+- `DevicePlugins`: 노드에서 [장치 플러그인](/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/)
+ 기반 리소스 프로비저닝을 활성화한다.
+- `DisableAcceleratorUsageMetrics`:
+ [kubelet이 수집한 액셀러레이터 지표 비활성화](/ko/docs/concepts/cluster-administration/system-metrics/#액셀러레이터-메트릭-비활성화).
+- `DownwardAPIHugePages`: [다운워드 API](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information)에서
+ hugepages 사용을 활성화한다.
- `DryRun`: 서버 측의 [dry run](/docs/reference/using-api/api-concepts/#dry-run) 요청을 활성화하여 커밋하지 않고 유효성 검사, 병합 및 변화를 테스트할 수 있다.
- `DynamicAuditing`(*사용 중단됨*): v1.19 이전의 버전에서 동적 감사를 활성화하는 데 사용된다.
-- `DynamicKubeletConfig`: kubelet의 동적 구성을 활성화한다. [kubelet 재구성](/docs/tasks/administer-cluster/reconfigure-kubelet/)을 참고한다.
-- `DynamicProvisioningScheduling`: 볼륨 스케줄을 인식하고 PV 프로비저닝을 처리하도록 기본 스케줄러를 확장한다.
+- `DynamicKubeletConfig`: kubelet의 동적 구성을 활성화한다.
+ [kubelet 재구성](/docs/tasks/administer-cluster/reconfigure-kubelet/)을 참고한다.
+- `DynamicProvisioningScheduling`: 볼륨 토폴로지를 인식하고 PV 프로비저닝을 처리하도록
+ 기본 스케줄러를 확장한다.
 이 기능은 v1.12의 `VolumeScheduling` 기능으로 대체되었다.
-- `DynamicVolumeProvisioning`(*사용 중단됨*): 파드에 퍼시스턴트 볼륨의 [동적 프로비저닝](/ko/docs/concepts/storage/dynamic-provisioning/)을 활성화한다.
-- `EnableAggregatedDiscoveryTimeout` (*사용 중단됨*): 수집된 검색 호출에서 5초 시간 초과를 활성화한다.
-- `EnableEquivalenceClassCache`: 스케줄러가 파드를 스케줄링할 때 노드의 동등성을 캐시할 수 있게 한다.
-- `EphemeralContainers`: 파드를 실행하기 위한 {{< glossary_tooltip text="임시 컨테이너"
-  term_id="ephemeral-container" >}}를 추가할 수 있다.
-- `EvenPodsSpread`: 토폴로지 도메인 간에 파드를 균등하게 스케줄링할 수 있다. [파드 토폴로지 분배 제약 조건](/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints/)을 참고한다.
--`ExecProbeTimeout` : kubelet이 exec 프로브 시간 초과를 준수하는지 확인한다. 이 기능 게이트는 기존 워크로드가 쿠버네티스가 exec 프로브 제한 시간을 무시한 현재 수정된 결함에 의존하는 경우 존재한다. [준비성 프로브](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes)를 참조한다.
-- `ExpandInUsePersistentVolumes`: 사용 중인 PVC를 확장할 수 있다. [사용 중인 퍼시스턴트볼륨클레임 크기 조정](/ko/docs/concepts/storage/persistent-volumes/#사용-중인-퍼시스턴트볼륨클레임-크기-조정)을 참고한다.
-- `ExpandPersistentVolumes`: 퍼시스턴트 볼륨 확장을 활성화한다. [퍼시스턴트 볼륨 클레임 확장](/ko/docs/concepts/storage/persistent-volumes/#퍼시스턴트-볼륨-클레임-확장)을 참고한다.
-- `ExperimentalCriticalPodAnnotation`: 특정 파드에 *critical* 로 어노테이션을 달아서 [스케줄링이 보장되도록](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) 한다.
+- `DynamicVolumeProvisioning`(*사용 중단됨*): 파드에 퍼시스턴트 볼륨의
+ [동적 프로비저닝](/ko/docs/concepts/storage/dynamic-provisioning/)을 활성화한다.
+- `EfficientWatchResumption`: 스토리지에서 생성된 북마크(진행
+ 알림) 이벤트를 사용자에게 전달할 수 있다. 이것은 감시 작업에만
+ 적용된다.
+- `EnableAggregatedDiscoveryTimeout` (*사용 중단됨*): 수집된 검색 호출에서 5초
+ 시간 초과를 활성화한다.
+- `EnableEquivalenceClassCache`: 스케줄러가 파드를 스케줄링할 때 노드의
+ 동등성을 캐시할 수 있게 한다.
+- `EndpointSlice`: 보다 스케일링 가능하고 확장 가능한 네트워크 엔드포인트에 대한
+ 엔드포인트슬라이스(EndpointSlices)를 활성화한다. [엔드포인트슬라이스 활성화](/docs/tasks/administer-cluster/enabling-endpointslices/)를 참고한다.
+- `EndpointSliceNodeName` : 엔드포인트슬라이스 `nodeName` 필드를 활성화한다.
+- `EndpointSliceProxying`: 활성화되면, 리눅스에서 실행되는
+ kube-proxy는 엔드포인트 대신 엔드포인트슬라이스를
+ 기본 데이터 소스로 사용하여 확장성과 성능을 향상시킨다.
+ [엔드포인트 슬라이스 활성화](/docs/tasks/administer-cluster/enabling-endpointslices/)를 참고한다.
+- `EndpointSliceTerminatingCondition`: 엔드포인트슬라이스 `terminating` 및 `serving`
+ 조건 필드를 활성화한다.
+- `EphemeralContainers`: 파드를 실행하기 위한 + {{< glossary_tooltip text="임시 컨테이너" term_id="ephemeral-container" >}}를 + 추가할 수 있다. +- `EvenPodsSpread`: 토폴로지 도메인 간에 파드를 균등하게 스케줄링할 수 있다. + [파드 토폴로지 분배 제약 조건](/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints/)을 참고한다. +- `ExecProbeTimeout` : kubelet이 exec 프로브 시간 초과를 준수하는지 확인한다. + 이 기능 게이트는 기존 워크로드가 쿠버네티스가 exec 프로브 제한 시간을 무시한 + 현재 수정된 결함에 의존하는 경우 존재한다. + [준비성 프로브](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes)를 참조한다. +- `ExpandCSIVolumes`: CSI 볼륨 확장을 활성화한다. +- `ExpandInUsePersistentVolumes`: 사용 중인 PVC를 확장할 수 있다. + [사용 중인 퍼시스턴트볼륨클레임 크기 조정](/ko/docs/concepts/storage/persistent-volumes/#사용-중인-퍼시스턴트볼륨클레임-크기-조정)을 참고한다. +- `ExpandPersistentVolumes`: 퍼시스턴트 볼륨 확장을 활성화한다. + [퍼시스턴트 볼륨 클레임 확장](/ko/docs/concepts/storage/persistent-volumes/#퍼시스턴트-볼륨-클레임-확장)을 참고한다. +- `ExperimentalCriticalPodAnnotation`: 특정 파드에 *critical* 로 + 어노테이션을 달아서 [스케줄링이 보장되도록](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) 한다. 이 기능은 v1.13부터 파드 우선 순위 및 선점으로 인해 사용 중단되었다. - `ExperimentalHostUserNamespaceDefaultingGate`: 사용자 네임스페이스를 호스트로 기본 활성화한다. 이것은 다른 호스트 네임스페이스, 호스트 마운트, 권한이 있는 컨테이너 또는 특정 비-네임스페이스(non-namespaced) 기능(예: `MKNODE`, `SYS_MODULE` 등)을 사용하는 컨테이너를 위한 것이다. 도커 데몬에서 사용자 네임스페이스 재 매핑이 활성화된 경우에만 활성화해야 한다. -- `EndpointSlice`: 보다 스케일링 가능하고 확장 가능한 네트워크 엔드포인트에 대한 - 엔드포인트 슬라이스를 활성화한다. [엔드포인트 슬라이스 활성화](/docs/tasks/administer-cluster/enabling-endpointslices/)를 참고한다. --`EndpointSliceNodeName` : 엔드포인트슬라이스 `nodeName` 필드를 활성화한다. --`EndpointSliceTerminating` : 엔드포인트슬라이스 `terminating` 및 `serving` 조건 필드를 - 활성화한다. -- `EndpointSliceProxying`: 이 기능 게이트가 활성화되면, 리눅스에서 실행되는 - kube-proxy는 엔드포인트 대신 엔드포인트슬라이스를 - 기본 데이터 소스로 사용하여 확장성과 성능을 향상시킨다. - [엔드포인트 슬라이스 활성화](/docs/tasks/administer-cluster/enabling-endpointslices/)를 참고한다. -- `WindowsEndpointSliceProxying`: 이 기능 게이트가 활성화되면, 윈도우에서 실행되는 - kube-proxy는 엔드포인트 대신 엔드포인트슬라이스를 - 기본 데이터 소스로 사용하여 확장성과 성능을 향상시킨다. - [엔드포인트 슬라이스 활성화](/docs/tasks/administer-cluster/enabling-endpointslices/)를 참고한다. - `GCERegionalPersistentDisk`: GCE에서 지역 PD 기능을 활성화한다. -- `GenericEphemeralVolume`: 일반 볼륨의 모든 기능을 지원하는 임시, 인라인 볼륨을 활성화한다(타사 스토리지 공급 업체, 스토리지 용량 추적, 스냅샷으로부터 복원 등에서 제공할 수 있음). [임시 볼륨](/docs/concepts/storage/ephemeral-volumes/)을 참고한다. --`GracefulNodeShutdown` : kubelet에서 정상 종료를 지원한다. 시스템 종료 중에 kubelet은 종료 이벤트를 감지하고 노드에서 실행중인 파드를 정상적으로 종료하려고 시도한다. 자세한 내용은 [Graceful Node Shutdown](/ko/docs/concepts/architecture/nodes/#그레이스풀-graceful-노드-셧다운)을 참조한다. -- `HugePages`: 사전 할당된 [huge page](/ko/docs/tasks/manage-hugepages/scheduling-hugepages/)의 할당 및 사용을 활성화한다. -- `HugePageStorageMediumSize`: 사전 할당된 [huge page](/ko/docs/tasks/manage-hugepages/scheduling-hugepages/)의 여러 크기를 지원한다. -- `HyperVContainer`: 윈도우 컨테이너를 위한 [Hyper-V 격리](https://docs.microsoft.com/ko-kr/virtualization/windowscontainers/manage-containers/hyperv-container) 기능을 활성화한다. -- `HPAScaleToZero`: 사용자 정의 또는 외부 메트릭을 사용할 때 `HorizontalPodAutoscaler` 리소스에 대해 `minReplicas` 를 0으로 설정한다. -- `ImmutableEphemeralVolumes`: 안정성과 성능 향상을 위해 개별 시크릿(Secret)과 컨피그맵(ConfigMap)을 변경할 수 없는(immutable) 것으로 표시할 수 있다. -- `KubeletConfigFile`: 구성 파일을 사용하여 지정된 파일에서 kubelet 구성을 로드할 수 있다. - 자세한 내용은 [구성 파일을 통해 kubelet 파라미터 설정](/docs/tasks/administer-cluster/kubelet-config-file/)을 참고한다. +- `GenericEphemeralVolume`: 일반 볼륨의 모든 기능을 지원하는 임시, 인라인 + 볼륨을 활성화한다(타사 스토리지 공급 업체, 스토리지 용량 추적, 스냅샷으로부터 복원 + 등에서 제공할 수 있음). + [임시 볼륨](/docs/concepts/storage/ephemeral-volumes/)을 참고한다. +- `GracefulNodeShutdown` : kubelet에서 정상 종료를 지원한다. 
+ 시스템 종료 중에 kubelet은 종료 이벤트를 감지하고 노드에서 실행 중인
+ 파드를 정상적으로 종료하려고 시도한다. 자세한 내용은
+ [Graceful Node Shutdown](/ko/docs/concepts/architecture/nodes/#그레이스풀-graceful-노드-셧다운)을
+ 참조한다.
+- `HPAContainerMetrics`: `HorizontalPodAutoscaler`를 활성화하여 대상 파드의
+ 개별 컨테이너 메트릭을 기반으로 확장한다.
+- `HPAScaleToZero`: 사용자 정의 또는 외부 메트릭을 사용할 때 `HorizontalPodAutoscaler` 리소스에 대해
+ `minReplicas` 를 0으로 설정한다.
+- `HugePages`: 사전 할당된 [huge page](/ko/docs/tasks/manage-hugepages/scheduling-hugepages/)의
+ 할당 및 사용을 활성화한다.
+- `HugePageStorageMediumSize`: 사전 할당된 [huge page](/ko/docs/tasks/manage-hugepages/scheduling-hugepages/)의
+ 여러 크기를 지원한다.
+- `HyperVContainer`: 윈도우 컨테이너를 위한
+ [Hyper-V 격리](https://docs.microsoft.com/ko-kr/virtualization/windowscontainers/manage-containers/hyperv-container)
+ 기능을 활성화한다.
+- `ImmutableEphemeralVolumes`: 안정성과 성능 향상을 위해 개별 시크릿(Secret)과 컨피그맵(ConfigMap)을
+ 변경할 수 없는(immutable) 것으로 표시할 수 있다.
+- `IndexedJob`: [잡](/ko/docs/concepts/workloads/controllers/job/) 컨트롤러가
+ 완료 횟수를 기반으로 파드 완료를 관리할 수 있도록 한다.
+- `IngressClassNamespacedParams`: `IngressClass` 리소스가 네임스페이스 범위로
+ 한정된 파라미터를 이용할 수 있도록 한다. 이 기능은 `IngressClass.spec.parameters` 에
+ `Scope` 와 `Namespace` 2개의 필드를 추가한다.
+- `IPv6DualStack`: IPv6을 위한 [이중 스택](/ko/docs/concepts/services-networking/dual-stack/)
+ 기능을 활성화한다.
+- `KubeletConfigFile`: 구성 파일을 사용하여 지정된 파일에서
+ kubelet 구성을 로드할 수 있다.
+ 자세한 내용은 [구성 파일을 통해 kubelet 파라미터 설정](/docs/tasks/administer-cluster/kubelet-config-file/)을
+ 참고한다.
- `KubeletCredentialProviders`: 이미지 풀 자격 증명에 대해 kubelet exec 자격 증명 공급자를 활성화한다.
- `KubeletPluginsWatcher`: kubelet이 [CSI 볼륨 드라이버](/ko/docs/concepts/storage/volumes/#csi)와 같은 플러그인을 검색할 수 있도록 프로브 기반 플러그인 감시자(watcher) 유틸리티를 사용한다.
-- `KubeletPodResources`: kubelet의 파드 리소스 grpc 엔드포인트를 활성화한다.
- 자세한 내용은 [장치 모니터링 지원](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/compute-device-assignment.md)을 참고한다.
-- `LegacyNodeRoleBehavior`: 비활성화되면, 서비스 로드 밸런서 및 노드 중단의 레거시 동작은 `NodeDisruptionExclusion` 과 `ServiceNodeExclusion` 에 의해 제공된 기능별 레이블을 대신하여 `node-role.kubernetes.io/master` 레이블을 무시한다.
-- `LocalStorageCapacityIsolation`: [로컬 임시 스토리지](/ko/docs/concepts/configuration/manage-resources-containers/)와 [emptyDir 볼륨](/ko/docs/concepts/storage/volumes/#emptydir)의 `sizeLimit` 속성을 사용할 수 있게 한다.
-- `LocalStorageCapacityIsolationFSQuotaMonitoring`: [로컬 임시 스토리지](/ko/docs/concepts/configuration/manage-resources-containers/)에 `LocalStorageCapacityIsolation` 이 활성화되고 [emptyDir 볼륨](/ko/docs/concepts/storage/volumes/#emptydir)의 백업 파일시스템이 프로젝트 쿼터를 지원하고 활성화된 경우, 파일시스템 사용보다는 프로젝트 쿼터를 사용하여 [emptyDir 볼륨](/ko/docs/concepts/storage/volumes/#emptydir) 스토리지 사용을 모니터링하여 성능과 정확성을 향상시킨다.
-- `MixedProtocolLBService`: 동일한 로드밸런서 유형 서비스 인스턴스에서 다른 프로토콜 사용을 활성화한다.
-- `MountContainers`: 호스트의 유틸리티 컨테이너를 볼륨 마운터로 사용할 수 있다.
+- `KubeletPodResources`: kubelet의 파드 리소스 gRPC 엔드포인트를 활성화한다. 자세한 내용은
+ [장치 모니터링 지원](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/606-compute-device-assignment/README.md)을
+ 참고한다.
+- `KubeletPodResourcesGetAllocatable`: kubelet의 파드 리소스 `GetAllocatableResources` 기능을 활성화한다.
+ 이 API는 클라이언트가 노드의 여유 컴퓨팅 자원을 잘 파악할 수 있도록, 할당 가능 자원에 대한 정보를
+ [자원 할당 보고](/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#장치-플러그인-리소스-모니터링)한다.
+- `LegacyNodeRoleBehavior`: 비활성화되면, 서비스 로드 밸런서 및 노드 중단의 레거시 동작은
+ `NodeDisruptionExclusion` 과 `ServiceNodeExclusion` 에 의해 제공된 기능별 레이블을 대신하여
+ `node-role.kubernetes.io/master` 레이블을 무시한다.
+- `LocalStorageCapacityIsolation`: + [로컬 임시 스토리지](/ko/docs/concepts/configuration/manage-resources-containers/)와 + [emptyDir 볼륨](/ko/docs/concepts/storage/volumes/#emptydir)의 + `sizeLimit` 속성을 사용할 수 있게 한다. +- `LocalStorageCapacityIsolationFSQuotaMonitoring`: [로컬 임시 스토리지](/ko/docs/concepts/configuration/manage-resources-containers/)에 + `LocalStorageCapacityIsolation` 이 활성화되고 + [emptyDir 볼륨](/ko/docs/concepts/storage/volumes/#emptydir)의 + 백업 파일시스템이 프로젝트 쿼터를 지원하고 활성화된 경우, 파일시스템 사용보다는 + 프로젝트 쿼터를 사용하여 [emptyDir 볼륨](/ko/docs/concepts/storage/volumes/#emptydir) + 스토리지 사용을 모니터링하여 성능과 정확성을 + 향상시킨다. +- `LogarithmicScaleDown`: 컨트롤러 스케일 다운 시에 파드 타임스탬프를 로그 스케일로 버켓화하여 + 축출할 파드를 반-랜덤하게 선택하는 기법을 활성화한다. +- `MixedProtocolLBService`: 동일한 로드밸런서 유형 서비스 인스턴스에서 다른 프로토콜 + 사용을 활성화한다. +- `MountContainers` (*사용 중단됨*): 호스트의 유틸리티 컨테이너를 볼륨 마운터로 + 사용할 수 있다. - `MountPropagation`: 한 컨테이너에서 다른 컨테이너 또는 파드로 마운트된 볼륨을 공유할 수 있다. 자세한 내용은 [마운트 전파(propagation)](/ko/docs/concepts/storage/volumes/#마운트-전파-propagation)을 참고한다. -- `NodeDisruptionExclusion`: 영역(zone) 장애 시 노드가 제외되지 않도록 노드 레이블 `node.kubernetes.io/exclude-disruption` 사용을 활성화한다. +- `NamespaceDefaultLabelName`: API 서버로 하여금 모든 네임스페이스에 대해 변경할 수 없는 (immutable) + {{< glossary_tooltip text="레이블" term_id="label" >}} `kubernetes.io/metadata.name`을 설정하도록 한다. (네임스페이스의 이름도 변경 불가) +- `NetworkPolicyEndPort`: 네트워크폴리시(NetworkPolicy) 오브젝트에서 단일 포트를 지정하는 것 대신에 포트 범위를 지정할 수 있도록, `endPort` 필드의 사용을 활성화한다. +- `NodeDisruptionExclusion`: 영역(zone) 장애 시 노드가 제외되지 않도록 노드 레이블 `node.kubernetes.io/exclude-disruption` + 사용을 활성화한다. - `NodeLease`: 새로운 리스(Lease) API가 노드 상태 신호로 사용될 수 있는 노드 하트비트(heartbeats)를 보고할 수 있게 한다. -- `NonPreemptingPriority`: 프라이어리티클래스(PriorityClass)와 파드에 NonPreempting 옵션을 활성화한다. +- `NonPreemptingPriority`: 프라이어리티클래스(PriorityClass)와 파드에 `preemptionPolicy` 필드를 활성화한다. +- `PVCProtection`: 파드에서 사용 중일 때 퍼시스턴트볼륨클레임(PVC)이 + 삭제되지 않도록 한다. +- `PodDeletionCost`: 레플리카셋 다운스케일 시 삭제될 파드의 우선순위를 사용자가 조절할 수 있도록, + [파드 삭제 비용](/ko/docs/concepts/workloads/controllers/replicaset/#파드-삭제-비용) 기능을 활성화한다. - `PersistentLocalVolumes`: 파드에서 `local` 볼륨 유형의 사용을 활성화한다. `local` 볼륨을 요청하는 경우 파드 어피니티를 지정해야 한다. - `PodDisruptionBudget`: [PodDisruptionBudget](/docs/tasks/run-application/configure-pdb/) 기능을 활성화한다. -- `PodOverhead`: 파드 오버헤드를 판단하기 위해 [파드오버헤드(PodOverhead)](/ko/docs/concepts/scheduling-eviction/pod-overhead/) 기능을 활성화한다. -- `PodPriority`: [우선 순위](/ko/docs/concepts/configuration/pod-priority-preemption/)를 기반으로 파드의 스케줄링 취소와 선점을 활성화한다. +- `PodAffinityNamespaceSelector`: [파드 어피니티 네임스페이스 셀렉터](/ko/docs/concepts/scheduling-eviction/assign-pod-node/#네임스페이스-셀렉터) 기능과 + [CrossNamespacePodAffinity](/ko/docs/concepts/policy/resource-quotas/#네임스페이스-간-파드-어피니티-쿼터) 쿼터 범위 기능을 활성화한다. +- `PodOverhead`: 파드 오버헤드를 판단하기 위해 [파드오버헤드(PodOverhead)](/ko/docs/concepts/scheduling-eviction/pod-overhead/) + 기능을 활성화한다. +- `PodPriority`: [우선 순위](/ko/docs/concepts/configuration/pod-priority-preemption/)를 + 기반으로 파드의 스케줄링 취소와 선점을 활성화한다. - `PodReadinessGates`: 파드 준비성 평가를 확장하기 위해 `PodReadinessGate` 필드 설정을 활성화한다. 자세한 내용은 [파드의 준비성 게이트](/ko/docs/concepts/workloads/pods/pod-lifecycle/#pod-readiness-gate)를 참고한다. - `PodShareProcessNamespace`: 파드에서 실행되는 컨테이너 간에 단일 프로세스 네임스페이스를 공유하기 위해 파드에서 `shareProcessNamespace` 설정을 활성화한다. 자세한 내용은 [파드의 컨테이너 간 프로세스 네임스페이스 공유](/docs/tasks/configure-pod-container/share-process-namespace/)에서 확인할 수 있다. -- `ProcMountType`: 컨테이너의 ProcMountType 제어를 활성화한다. -- `PVCProtection`: 파드에서 사용 중일 때 퍼시스턴트볼륨클레임(PVC)이 - 삭제되지 않도록 한다. -- `QOSReserved`: QoS 수준에서 리소스 예약을 허용하여 낮은 QoS 수준의 파드가 더 높은 QoS 수준에서 - 요청된 리소스로 파열되는 것을 방지한다(현재 메모리만 해당). 
+- `ProbeTerminationGracePeriod`: 파드의 [프로브-수준
+ `terminationGracePeriodSeconds` 설정하기](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#probe-level-terminationgraceperiodseconds) 기능을 활성화한다.
+ 더 자세한 사항은 [기능개선 제안](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2238-liveness-probe-grace-period)을 참고한다.
+- `ProcMountType`: SecurityContext의 `procMount` 필드를 설정하여
+ 컨테이너의 proc 타입의 마운트를 제어할 수 있다.
+- `QOSReserved`: QoS 수준에서 리소스 예약을 허용하여 낮은 QoS 수준의 파드가
+ 더 높은 QoS 수준에서 요청된 리소스로 파열되는 것을 방지한다
+ (현재 메모리만 해당).
+- `RemainingItemCount`: API 서버가
+ [청크(chunking) 목록 요청](/docs/reference/using-api/api-concepts/#retrieving-large-results-sets-in-chunks)에 대한
+ 응답에서 남은 항목 수를 표시하도록 허용한다.
+- `RemoveSelfLink`: ObjectMeta 및 ListMeta에서 `selfLink` 를 사용하지 않고
+ 제거한다.
- `ResourceLimitsPriorityFunction` (*사용 중단됨*): 입력 파드의 CPU 및 메모리 한도 중 하나 이상을 만족하는 노드에 가능한 최저 점수 1을 할당하는 스케줄러 우선 순위 기능을 활성화한다. 의도는 동일한 점수를 가진 노드 사이의 관계를 끊는 것이다.
- `ResourceQuotaScopeSelectors`: 리소스 쿼터 범위 셀렉터를 활성화한다.
-- `RootCAConfigMap`: 모든 네임 스페이스에 `kube-root-ca.crt`라는 {{< glossary_tooltip text="컨피그맵" term_id="configmap" >}}을 게시하도록 kube-controller-manager를 구성한다. 이 컨피그맵에는 kube-apiserver에 대한 연결을 확인하는 데 사용되는 CA 번들이 포함되어 있다.
- 자세한 내용은 [바운드 서비스 계정 토큰](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md)을 참조한다.
+- `RootCAConfigMap`: 모든 네임스페이스에 `kube-root-ca.crt`라는
+ {{< glossary_tooltip text="컨피그맵" term_id="configmap" >}}을 게시하도록
+ `kube-controller-manager` 를 구성한다. 이 컨피그맵에는 kube-apiserver에 대한 연결을 확인하는 데
+ 사용되는 CA 번들이 포함되어 있다. 자세한 내용은
+ [바운드 서비스 계정 토큰](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md)을
+ 참조한다.
- `RotateKubeletClientCertificate`: kubelet에서 클라이언트 TLS 인증서의 로테이션을 활성화한다. 자세한 내용은 [kubelet 구성](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration)을 참고한다.
- `RotateKubeletServerCertificate`: kubelet에서 서버 TLS 인증서의 로테이션을 활성화한다.
- 자세한 내용은 [kubelet 구성](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration)을 참고한다.
-- `RunAsGroup`: 컨테이너의 init 프로세스에 설정된 기본 그룹 ID 제어를 활성화한다.
-- `RuntimeClass`: 컨테이너 런타임 구성을 선택하기 위해 [런타임클래스(RuntimeClass)](/ko/docs/concepts/containers/runtime-class/) 기능을 활성화한다.
-- `ScheduleDaemonSetPods`: 데몬셋(DaemonSet) 컨트롤러 대신 기본 스케줄러로 데몬셋 파드를 스케줄링할 수 있다.
-- `SCTPSupport`: 파드, 서비스, 엔드포인트, 엔드포인트슬라이스 및 네트워크폴리시 정의에서 _SCTP_ `protocol` 값을 활성화한다.
-- `ServerSideApply`: API 서버에서 [SSA(Sever Side Apply)](/docs/reference/using-api/server-side-apply/) 경로를 활성화한다.
-- `ServiceAccountIssuerDiscovery`: API 서버에서 서비스 어카운트 발행자에 대해 OIDC 디스커버리 엔드포인트(발급자 및 JWKS URL)를 활성화한다. 자세한 내용은 [파드의 서비스 어카운트 구성](/docs/tasks/configure-pod-container/configure-service-account/#service-account-issuer-discovery)을 참고한다.
+- `RunAsGroup`: 컨테이너의 init 프로세스에 설정된 기본 그룹 ID 제어를
+ 활성화한다.
+- `RuntimeClass`: 컨테이너 런타임 구성을 선택하기 위해 [런타임클래스(RuntimeClass)](/ko/docs/concepts/containers/runtime-class/)
+ 기능을 활성화한다.
+- `ScheduleDaemonSetPods`: 데몬셋(DaemonSet) 컨트롤러 대신 기본 스케줄러로 데몬셋 파드를
+ 스케줄링할 수 있다.
+- `SCTPSupport`: 파드, 서비스, 엔드포인트, 엔드포인트슬라이스 및 네트워크폴리시 정의에서
+ _SCTP_ `protocol` 값을 활성화한다.
+- `ServerSideApply`: API 서버에서 [SSA(Server Side Apply)](/docs/reference/using-api/server-side-apply/)
+ 경로를 활성화한다.
+- `ServiceAccountIssuerDiscovery`: API 서버에서 서비스 어카운트 발행자에 대해 OIDC 디스커버리 엔드포인트(발급자 및
+ JWKS URL)를 활성화한다. 자세한 내용은
+ [파드의 서비스 어카운트 구성](/docs/tasks/configure-pod-container/configure-service-account/#service-account-issuer-discovery)을
+ 참고한다.
- `ServiceAppProtocol`: 서비스와 엔드포인트에서 `AppProtocol` 필드를 활성화한다.
+- `ServiceInternalTrafficPolicy`: 서비스에서 `InternalTrafficPolicy` 필드를 활성화한다.
-- `ServiceLBNodePortControl`: 서비스에서`spec.allocateLoadBalancerNodePorts` 필드를 활성화한다.
+- `ServiceLBNodePortControl`: 서비스에서 `spec.allocateLoadBalancerNodePorts` 필드를
+ 활성화한다.
+- `ServiceLoadBalancerClass`: 서비스에서 `LoadBalancerClass` 필드를 활성화한다. 자세한 내용은 [로드밸런서 구현체의 종류 확인하기](/ko/docs/concepts/services-networking/service/#load-balancer-class)를 참고한다.
- `ServiceLoadBalancerFinalizer`: 서비스 로드 밸런서에 대한 Finalizer 보호를 활성화한다.
-- `ServiceNodeExclusion`: 클라우드 제공자가 생성한 로드 밸런서에서 노드를 제외할 수 있다.
- "`alpha.service-controller.kubernetes.io/exclude-balancer`" 키 또는 `node.kubernetes.io/exclude-from-external-load-balancers` 로 레이블이 지정된 경우 노드를 제외할 수 있다.
-- `ServiceTopology`: 서비스가 클러스터의 노드 토폴로지를 기반으로 트래픽을 라우팅할 수 있도록 한다. 자세한 내용은 [서비스토폴로지(ServiceTopology)](/ko/docs/concepts/services-networking/service-topology/)를 참고한다.
-- `SizeMemoryBackedVolumes`: kubelet 지원을 사용하여 메모리 백업 볼륨의 크기를 조정한다. 자세한 내용은 [volumes](/ko/docs/concepts/storage/volumes)를 참조한다.
-- `SetHostnameAsFQDN`: 전체 주소 도메인 이름(FQDN)을 파드의 호스트 이름으로 설정하는 기능을 활성화한다. [파드의 `setHostnameAsFQDN` 필드](/ko/docs/concepts/services-networking/dns-pod-service/#파드의-sethostnameasfqdn-필드)를 참고한다.
-- `StartupProbe`: kubelet에서 [스타트업](/ko/docs/concepts/workloads/pods/pod-lifecycle/#언제-스타트업-프로브를-사용해야-하는가) 프로브를 활성화한다.
+- `ServiceNodeExclusion`: 클라우드 제공자가 생성한 로드 밸런서에서 노드를
+ 제외할 수 있다. "`node.kubernetes.io/exclude-from-external-load-balancers`"로
+ 레이블이 지정된 경우 노드를 제외할 수 있다.
+- `ServiceTopology`: 서비스가 클러스터의 노드 토폴로지를 기반으로 트래픽을 라우팅할 수
+ 있도록 한다. 자세한 내용은
+ [서비스토폴로지(ServiceTopology)](/ko/docs/concepts/services-networking/service-topology/)를
+ 참고한다.
+- `SetHostnameAsFQDN`: 전체 주소 도메인 이름(FQDN)을 파드의 호스트 이름으로
+ 설정하는 기능을 활성화한다.
+ [파드의 `setHostnameAsFQDN` 필드](/ko/docs/concepts/services-networking/dns-pod-service/#파드의-sethostnameasfqdn-필드)를 참고한다.
+- `StartupProbe`: kubelet에서
+ [스타트업](/ko/docs/concepts/workloads/pods/pod-lifecycle/#언제-스타트업-프로브를-사용해야-하는가)
+ 프로브를 활성화한다.
- `StorageObjectInUseProtection`: 퍼시스턴트볼륨 또는 퍼시스턴트볼륨클레임 오브젝트가 여전히 사용 중인 경우 삭제를 연기한다.
-- `StorageVersionHash`: API 서버가 디스커버리에서 스토리지 버전 해시를 노출하도록 허용한다.
+- `StorageVersionAPI`: [스토리지 버전 API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#storageversion-v1alpha1-internal-apiserver-k8s-io)를
+ 활성화한다.
+- `StorageVersionHash`: API 서버가 디스커버리에서 스토리지 버전 해시를 노출하도록
+ 허용한다.
- `StreamingProxyRedirects`: 스트리밍 요청을 위해 백엔드(kubelet)에서 리디렉션을 가로채서 따르도록 API 서버에 지시한다. 스트리밍 요청의 예로는 `exec`, `attach` 및 `port-forward` 요청이 있다.
- `SupportIPVSProxyMode`: IPVS를 사용하여 클러스터 내 서비스 로드 밸런싱을 제공한다. 자세한 내용은 [서비스 프록시](/ko/docs/concepts/services-networking/service/#가상-ip와-서비스-프록시)를 참고한다.
- `SupportPodPidsLimit`: 파드의 PID 제한을 지원한다.
-- `SupportNodePidsLimit`: 노드에서 PID 제한 지원을 활성화한다. `--system-reserved` 및 `--kube-reserved` 옵션의 `pid=` 매개 변수를 지정하여 지정된 수의 프로세스 ID가 시스템 전체와 각각 쿠버네티스 시스템 데몬에 대해 예약되도록 할 수 있다.
-- `Sysctls`: 각 파드에 설정할 수 있는 네임스페이스 커널 파라미터(sysctl)를 지원한다.
- 자세한 내용은 [sysctl](/docs/tasks/administer-cluster/sysctl-cluster/)을 참고한다.
-- `TaintBasedEvictions`: 노드의 테인트(taint) 및 파드의 톨러레이션(toleration)을 기반으로 노드에서 파드를 축출할 수 있다.
- 자세한 내용은 [테인트와 톨러레이션](/ko/docs/concepts/scheduling-eviction/taint-and-toleration/)을 참고한다.
-- `TaintNodesByCondition`: [노드 컨디션](/ko/docs/concepts/architecture/nodes/#condition)을 기반으로 자동 테인트 노드를 활성화한다.
+- `SupportNodePidsLimit`: 노드에서 PID 제한 지원을 활성화한다.
+ `--system-reserved` 및 `--kube-reserved` 옵션의 `pid=`
+ 파라미터를 지정하여 지정된 수의 프로세스 ID가
+ 시스템 전체와 각각 쿠버네티스 시스템 데몬에 대해 예약되도록
+ 할 수 있다.
+- `SuspendJob`: 잡 중지/재시작 기능을 활성화한다.
+ 자세한 내용은 [잡 문서](/ko/docs/concepts/workloads/controllers/job/)를 + 참고한다. +- `Sysctls`: 각 파드에 설정할 수 있는 네임스페이스 커널 + 파라미터(sysctl)를 지원한다. 자세한 내용은 + [sysctl](/docs/tasks/administer-cluster/sysctl-cluster/)을 참고한다. +- `TTLAfterFinished`: [TTL 컨트롤러](/ko/docs/concepts/workloads/controllers/ttlafterfinished/)가 + 실행이 끝난 후 리소스를 정리하도록 + 허용한다. +- `TaintBasedEvictions`: 노드의 테인트(taint) 및 파드의 톨러레이션(toleration)을 기반으로 + 노드에서 파드를 축출할 수 있다. + 자세한 내용은 [테인트와 톨러레이션](/ko/docs/concepts/scheduling-eviction/taint-and-toleration/)을 + 참고한다. +- `TaintNodesByCondition`: [노드 컨디션](/ko/docs/concepts/architecture/nodes/#condition)을 + 기반으로 자동 테인트 노드를 활성화한다. - `TokenRequest`: 서비스 어카운트 리소스에서 `TokenRequest` 엔드포인트를 활성화한다. -- `TokenRequestProjection`: [`projected` 볼륨](/ko/docs/concepts/storage/volumes/#projected)을 통해 서비스 어카운트 - 토큰을 파드에 주입할 수 있다. -- `TopologyManager`: 쿠버네티스의 다른 컴포넌트에 대한 세분화된 하드웨어 리소스 할당을 조정하는 메커니즘을 활성화한다. [노드의 토폴로지 관리 정책 제어](/docs/tasks/administer-cluster/topology-manager/)를 참고한다. -- `TTLAfterFinished`: [TTL 컨트롤러](/ko/docs/concepts/workloads/controllers/ttlafterfinished/)가 실행이 끝난 후 리소스를 정리하도록 허용한다. +- `TokenRequestProjection`: [`projected` 볼륨](/ko/docs/concepts/storage/volumes/#projected)을 통해 + 서비스 어카운트 토큰을 파드에 주입할 수 있다. +- `TopologyAwareHints`: 엔드포인트슬라이스(EndpointSlices)에서 토폴로지 힌트 기반 + 토폴로지-어웨어 라우팅을 활성화한다. 자세한 내용은 + [토폴로지 어웨어 힌트](/docs/concepts/services-networking/topology-aware-hints/) + 를 참고한다. +- `TopologyManager`: 쿠버네티스의 다른 컴포넌트에 대한 세분화된 하드웨어 리소스 + 할당을 조정하는 메커니즘을 활성화한다. + [노드의 토폴로지 관리 정책 제어](/docs/tasks/administer-cluster/topology-manager/)를 참고한다. +- `VolumeCapacityPriority`: 가용 PV 용량을 기반으로 + 여러 토폴로지에 있는 노드들의 우선순위를 정하는 기능을 활성화한다. - `VolumePVCDataSource`: 기존 PVC를 데이터 소스로 지정하는 기능을 지원한다. - `VolumeScheduling`: 볼륨 토폴로지 인식 스케줄링을 활성화하고 퍼시스턴트볼륨클레임(PVC) 바인딩이 스케줄링 결정을 인식하도록 한다. 또한 `PersistentLocalVolumes` 기능 게이트와 함께 사용될 때 [`local`](/ko/docs/concepts/storage/volumes/#local) 볼륨 유형을 사용할 수 있다. - `VolumeSnapshotDataSource`: 볼륨 스냅샷 데이터 소스 지원을 활성화한다. -- `VolumeSubpathEnvExpansion`: 환경 변수를 `subPath`로 확장하기 위해 `subPathExpr` 필드를 활성화한다. +- `VolumeSubpathEnvExpansion`: 환경 변수를 `subPath`로 확장하기 위해 + `subPathExpr` 필드를 활성화한다. +- `WarningHeaders`: API 응답에서 경고 헤더를 보낼 수 있다. - `WatchBookmark`: 감시자 북마크(watch bookmark) 이벤트 지원을 활성화한다. -- `WindowsGMSA`: 파드에서 컨테이너 런타임으로 GMSA 자격 증명 스펙을 전달할 수 있다. -- `WindowsRunAsUserName` : 기본 사용자가 아닌(non-default) 사용자로 윈도우 컨테이너에서 애플리케이션을 실행할 수 있도록 지원한다. - 자세한 내용은 [RunAsUserName 구성](/docs/tasks/configure-pod-container/configure-runasusername)을 참고한다. - `WinDSR`: kube-proxy가 윈도우용 DSR 로드 밸런서를 생성할 수 있다. - `WinOverlay`: kube-proxy가 윈도우용 오버레이 모드에서 실행될 수 있도록 한다. +- `WindowsGMSA`: 파드에서 컨테이너 런타임으로 GMSA 자격 증명 스펙을 전달할 수 있다. +- `WindowsRunAsUserName` : 기본 사용자가 아닌(non-default) 사용자로 윈도우 컨테이너에서 + 애플리케이션을 실행할 수 있도록 지원한다. 자세한 내용은 + [RunAsUserName 구성](/docs/tasks/configure-pod-container/configure-runasusername)을 + 참고한다. +- `WindowsEndpointSliceProxying`: 활성화되면, 윈도우에서 실행되는 kube-proxy는 + 엔드포인트 대신 엔드포인트슬라이스를 기본 데이터 소스로 사용하여 + 확장성과 성능을 향상시킨다. + [엔드포인트 슬라이스 활성화하기](/docs/tasks/administer-cluster/enabling-endpointslices/)를 참고한다. 
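위에 설명된 기능 게이트는 각 컴포넌트를 시작할 때 `--feature-gates` 플래그에 쉼표로 구분된 `키=값` 쌍으로 지정할 수 있다. 아래는 이해를 돕기 위한 간단한 스케치이며, 어떤 게이트를 어떤 컴포넌트에 지정할지는 설명을 위해 가정한 예시이다.

```bash
# kube-apiserver에서 두 개의 기능 게이트를 명시적으로 활성화하는 예시 (조합은 가정이다)
kube-apiserver --feature-gates=APIPriorityAndFairness=true,APIListChunking=true

# kubelet에서 GracefulNodeShutdown 기능 게이트를 활성화하는 예시
kubelet --feature-gates=GracefulNodeShutdown=true
```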
## {{% heading "whatsnext" %}} diff --git a/content/ko/docs/reference/command-line-tools-reference/kube-proxy.md b/content/ko/docs/reference/command-line-tools-reference/kube-proxy.md index 565c3b4faf486..e4c790c491097 100644 --- a/content/ko/docs/reference/command-line-tools-reference/kube-proxy.md +++ b/content/ko/docs/reference/command-line-tools-reference/kube-proxy.md @@ -2,8 +2,21 @@ title: kube-proxy content_type: tool-reference weight: 30 +auto_generated: true --- + + + ## {{% heading "synopsis" %}} @@ -32,308 +45,308 @@ kube-proxy [flags] --azure-container-registry-config string -Azure 컨테이너 레지스트리 구성 정보가 들어 있는 파일의 경로. +

    Azure 컨테이너 레지스트리 구성 정보가 들어 있는 파일의 경로.

    ---bind-address ip     기본값: 0.0.0.0 +--bind-address string     기본값: 0.0.0.0 -프록시 서버가 서비스할 IP 주소(모든 IPv4 인터페이스의 경우 '0.0.0.0'으로 설정, 모든 IPv6 인터페이스의 경우 '::'로 설정) +

    프록시 서버가 서비스할 IP 주소(모든 IPv4 인터페이스의 경우 '0.0.0.0'으로 설정, 모든 IPv6 인터페이스의 경우 '::'로 설정)

    --bind-address-hard-fail -true인 경우 kube-proxy는 포트 바인딩 실패를 치명적인 것으로 간주하고 종료한다. +

    true인 경우 kube-proxy는 포트 바인딩 실패를 치명적인 것으로 간주하고 종료한다.

    --cleanup -true인 경우 iptables 및 ipvs 규칙을 제거하고 종료한다. +

    true인 경우 iptables 및 ipvs 규칙을 제거하고 종료한다.

    --cluster-cidr string -클러스터에 있는 파드의 CIDR 범위. 구성 후에는 이 범위 밖에서 서비스 클러스터 IP로 전송되는 트래픽은 마스커레이드되고 파드에서 외부 LoadBalancer IP로 전송된 트래픽은 대신 해당 클러스터 IP로 전송된다 +

    클러스터에 있는 파드의 CIDR 범위. 구성 후에는 이 범위 밖에서 서비스 클러스터 IP로 전송되는 트래픽은 마스커레이드되고 파드에서 외부 LoadBalancer IP로 전송된 트래픽은 대신 해당 클러스터 IP로 전송된다.

    --config string -설정 파일의 경로. +

    설정 파일의 경로.

    --config-sync-period duration     기본값: 15m0s -apiserver의 설정이 갱신되는 빈도. 0보다 커야 한다. +

    apiserver의 설정이 갱신되는 빈도. 0보다 커야 한다.

    --conntrack-max-per-core int32     기본값: 32768 -CPU 코어당 추적할 최대 NAT 연결 수(한도(limit)를 그대로 두고 contrack-min을 무시하려면 0으로 설정한다)( +

    CPU 코어당 추적할 최대 NAT 연결 수(한도(limit)를 그대로 두고 conntrack-min을 무시하려면 0으로 설정한다).

    --conntrack-min int32     기본값: 131072 -conntrack-max-per-core와 관계없이 할당할 최소 conntrack 항목 수(한도를 그대로 두려면 conntrack-max-per-core값을 0으로 설정). +

    conntrack-max-per-core와 관계없이 할당할 최소 conntrack 항목 수(한도를 그대로 두려면 conntrack-max-per-core 값을 0으로 설정).

    --conntrack-tcp-timeout-close-wait duration     기본값: 1h0m0s -CLOSE_WAIT 상태의 TCP 연결에 대한 NAT 시간 초과 +

    CLOSE_WAIT 상태의 TCP 연결에 대한 NAT 시간 초과

    --conntrack-tcp-timeout-established duration     기본값: 24h0m0s -설정된 TCP 연결에 대한 유휴시간 초과(값이 0이면 그대로 유지) +

    설정된 TCP 연결에 대한 유휴시간 초과(값이 0이면 그대로 유지)

    --detect-local-mode LocalMode -로컬 트래픽을 탐지하는 데 사용할 모드 +

    로컬 트래픽을 탐지하는 데 사용할 모드

    --feature-gates mapStringBool -알파/실험 기능에 대한 기능 게이트를 설명하는 키=값 쌍 집합. 옵션은 다음과 같다.
    APIListChunking=true|false (BETA - 기본값=true)
    APIPriorityAndFairness=true|false (ALPHA - 기본값=false)
    APIResponseCompression=true|false (BETA - 기본값=true)
    AllAlpha=true|false (ALPHA - 기본값=false)
    AllBeta=true|false (BETA - 기본값=false)
    AllowInsecureBackendProxy=true|false (BETA - 기본값=true)
    AnyVolumeDataSource=true|false (ALPHA - 기본값=false)
    AppArmor=true|false (BETA - 기본값=true)
    BalanceAttachedNodeVolumes=true|false (ALPHA - 기본값=false)
    BoundServiceAccountTokenVolume=true|false (ALPHA - 기본값=false)
    CPUManager=true|false (BETA - 기본값=true)
    CRIContainerLogRotation=true|false (BETA - 기본값=true)
    CSIInlineVolume=true|false (BETA - 기본값=true)
    CSIMigration=true|false (BETA - 기본값=true)
    CSIMigrationAWS=true|false (BETA - 기본값=false)
    CSIMigrationAWSComplete=true|false (ALPHA - 기본값=false)
    CSIMigrationAzureDisk=true|false (BETA - 기본값=false)
    CSIMigrationAzureDiskComplete=true|false (ALPHA - 기본값=false)
    CSIMigrationAzureFile=true|false (ALPHA - 기본값=false)
    CSIMigrationAzureFileComplete=true|false (ALPHA - 기본값=false)
    CSIMigrationGCE=true|false (BETA - 기본값=false)
    CSIMigrationGCEComplete=true|false (ALPHA - 기본값=false)
    CSIMigrationOpenStack=true|false (BETA - 기본값=false)
    CSIMigrationOpenStackComplete=true|false (ALPHA - 기본값=false)
    CSIMigrationvSphere=true|false (BETA - 기본값=false)
    CSIMigrationvSphereComplete=true|false (BETA - 기본값=false)
    CSIStorageCapacity=true|false (ALPHA - 기본값=false)
    CSIVolumeFSGroupPolicy=true|false (ALPHA - 기본값=false)
    ConfigurableFSGroupPolicy=true|false (ALPHA - 기본값=false)
    CustomCPUCFSQuotaPeriod=true|false (ALPHA - 기본값=false)
    DefaultPodTopologySpread=true|false (ALPHA - 기본값=false)
    DevicePlugins=true|false (BETA - 기본값=true)
    DisableAcceleratorUsageMetrics=true|false (ALPHA - 기본값=false)
    DynamicKubeletConfig=true|false (BETA - 기본값=true)
    EndpointSlice=true|false (BETA - 기본값=true)
    EndpointSliceProxying=true|false (BETA - 기본값=true)
    EphemeralContainers=true|false (ALPHA - 기본값=false)
    ExpandCSIVolumes=true|false (BETA - 기본값=true)
    ExpandInUsePersistentVolumes=true|false (BETA - 기본값=true)
    ExpandPersistentVolumes=true|false (BETA - 기본값=true)
    ExperimentalHostUserNamespaceDefaulting=true|false (BETA - 기본값=false)
    GenericEphemeralVolume=true|false (ALPHA - 기본값=false)
    HPAScaleToZero=true|false (ALPHA - 기본값=false)
    HugePageStorageMediumSize=true|false (BETA - 기본값=true)
    HyperVContainer=true|false (ALPHA - 기본값=false)
    IPv6DualStack=true|false (ALPHA - 기본값=false)
    ImmutableEphemeralVolumes=true|false (BETA - 기본값=true)
    KubeletPodResources=true|false (BETA - 기본값=true)
    LegacyNodeRoleBehavior=true|false (BETA - 기본값=true)
    LocalStorageCapacityIsolation=true|false (BETA - 기본값=true)
    LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - 기본값=false)
    NodeDisruptionExclusion=true|false (BETA - 기본값=true)
    NonPreemptingPriority=true|false (BETA - 기본값=true)
    PodDisruptionBudget=true|false (BETA - 기본값=true)
    PodOverhead=true|false (BETA - 기본값=true)
    ProcMountType=true|false (ALPHA - 기본값=false)
    QOSReserved=true|false (ALPHA - 기본값=false)
    RemainingItemCount=true|false (BETA - 기본값=true)
    RemoveSelfLink=true|false (ALPHA - 기본값=false)
    RotateKubeletServerCertificate=true|false (BETA - 기본값=true)
    RunAsGroup=true|false (BETA - 기본값=true)
    RuntimeClass=true|false (BETA - 기본값=true)
    SCTPSupport=true|false (BETA - 기본값=true)
    SelectorIndex=true|false (BETA - 기본값=true)
    ServerSideApply=true|false (BETA - 기본값=true)
    ServiceAccountIssuerDiscovery=true|false (ALPHA - 기본값=false)
    ServiceAppProtocol=true|false (BETA - 기본값=true)
    ServiceNodeExclusion=true|false (BETA - 기본값=true)
    ServiceTopology=true|false (ALPHA - 기본값=false)
    SetHostnameAsFQDN=true|false (ALPHA - 기본값=false)
    StartupProbe=true|false (BETA - 기본값=true)
    StorageVersionHash=true|false (BETA - 기본값=true)
    SupportNodePidsLimit=true|false (BETA - 기본값=true)
    SupportPodPidsLimit=true|false (BETA - 기본값=true)
    Sysctls=true|false (BETA - 기본값=true)
    TTLAfterFinished=true|false (ALPHA - 기본값=false)
    TokenRequest=true|false (BETA - 기본값=true)
    TokenRequestProjection=true|false (BETA - 기본값=true)
    TopologyManager=true|false (BETA - 기본값=true)
    ValidateProxyRedirects=true|false (BETA - 기본값=true)
    VolumeSnapshotDataSource=true|false (BETA - 기본값=true)
    WarningHeaders=true|false (BETA - 기본값=true)
    WinDSR=true|false (ALPHA - 기본값=false)
    WinOverlay=true|false (ALPHA - 기본값=false)
    WindowsEndpointSliceProxying=true|false (ALPHA - 기본값=false) +

    알파/실험 기능에 대한 기능 게이트를 설명하는 키=값 쌍 집합. 옵션은 다음과 같다.
    APIListChunking=true|false (BETA - 기본값=true)
    APIPriorityAndFairness=true|false (ALPHA - 기본값=false)
    APIResponseCompression=true|false (BETA - 기본값=true)
    AllAlpha=true|false (ALPHA - 기본값=false)
    AllBeta=true|false (BETA - 기본값=false)
    AllowInsecureBackendProxy=true|false (BETA - 기본값=true)
    AnyVolumeDataSource=true|false (ALPHA - 기본값=false)
    AppArmor=true|false (BETA - 기본값=true)
    BalanceAttachedNodeVolumes=true|false (ALPHA - 기본값=false)
    BoundServiceAccountTokenVolume=true|false (ALPHA - 기본값=false)
    CPUManager=true|false (BETA - 기본값=true)
    CRIContainerLogRotation=true|false (BETA - 기본값=true)
    CSIInlineVolume=true|false (BETA - 기본값=true)
    CSIMigration=true|false (BETA - 기본값=true)
    CSIMigrationAWS=true|false (BETA - 기본값=false)
    CSIMigrationAWSComplete=true|false (ALPHA - 기본값=false)
    CSIMigrationAzureDisk=true|false (BETA - 기본값=false)
    CSIMigrationAzureDiskComplete=true|false (ALPHA - 기본값=false)
    CSIMigrationAzureFile=true|false (ALPHA - 기본값=false)
    CSIMigrationAzureFileComplete=true|false (ALPHA - 기본값=false)
    CSIMigrationGCE=true|false (BETA - 기본값=false)
    CSIMigrationGCEComplete=true|false (ALPHA - 기본값=false)
    CSIMigrationOpenStack=true|false (BETA - 기본값=false)
    CSIMigrationOpenStackComplete=true|false (ALPHA - 기본값=false)
    CSIMigrationvSphere=true|false (BETA - 기본값=false)
    CSIMigrationvSphereComplete=true|false (BETA - 기본값=false)
    CSIStorageCapacity=true|false (ALPHA - 기본값=false)
    CSIVolumeFSGroupPolicy=true|false (ALPHA - 기본값=false)
    ConfigurableFSGroupPolicy=true|false (ALPHA - 기본값=false)
    CustomCPUCFSQuotaPeriod=true|false (ALPHA - 기본값=false)
    DefaultPodTopologySpread=true|false (ALPHA - 기본값=false)
    DevicePlugins=true|false (BETA - 기본값=true)
    DisableAcceleratorUsageMetrics=true|false (ALPHA - 기본값=false)
    DynamicKubeletConfig=true|false (BETA - 기본값=true)
    EndpointSlice=true|false (BETA - 기본값=true)
    EndpointSliceProxying=true|false (BETA - 기본값=true)
    EphemeralContainers=true|false (ALPHA - 기본값=false)
    ExpandCSIVolumes=true|false (BETA - 기본값=true)
    ExpandInUsePersistentVolumes=true|false (BETA - 기본값=true)
    ExpandPersistentVolumes=true|false (BETA - 기본값=true)
    ExperimentalHostUserNamespaceDefaulting=true|false (BETA - 기본값=false)
    GenericEphemeralVolume=true|false (ALPHA - 기본값=false)
    HPAScaleToZero=true|false (ALPHA - 기본값=false)
    HugePageStorageMediumSize=true|false (BETA - 기본값=true)
    HyperVContainer=true|false (ALPHA - 기본값=false)
    IPv6DualStack=true|false (ALPHA - 기본값=false)
    ImmutableEphemeralVolumes=true|false (BETA - 기본값=true)
    KubeletPodResources=true|false (BETA - 기본값=true)
    LegacyNodeRoleBehavior=true|false (BETA - 기본값=true)
    LocalStorageCapacityIsolation=true|false (BETA - 기본값=true)
    LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - 기본값=false)
    NodeDisruptionExclusion=true|false (BETA - 기본값=true)
    NonPreemptingPriority=true|false (BETA - 기본값=true)
    PodDisruptionBudget=true|false (BETA - 기본값=true)
    PodOverhead=true|false (BETA - 기본값=true)
    ProcMountType=true|false (ALPHA - 기본값=false)
    QOSReserved=true|false (ALPHA - 기본값=false)
    RemainingItemCount=true|false (BETA - 기본값=true)
    RemoveSelfLink=true|false (ALPHA - 기본값=false)
    RotateKubeletServerCertificate=true|false (BETA - 기본값=true)
    RunAsGroup=true|false (BETA - 기본값=true)
    RuntimeClass=true|false (BETA - 기본값=true)
    SCTPSupport=true|false (BETA - 기본값=true)
    SelectorIndex=true|false (BETA - 기본값=true)
    ServerSideApply=true|false (BETA - 기본값=true)
    ServiceAccountIssuerDiscovery=true|false (ALPHA - 기본값=false)
    ServiceAppProtocol=true|false (BETA - 기본값=true)
    ServiceNodeExclusion=true|false (BETA - 기본값=true)
    ServiceTopology=true|false (ALPHA - 기본값=false)
    SetHostnameAsFQDN=true|false (ALPHA - 기본값=false)
    StartupProbe=true|false (BETA - 기본값=true)
    StorageVersionHash=true|false (BETA - 기본값=true)
    SupportNodePidsLimit=true|false (BETA - 기본값=true)
    SupportPodPidsLimit=true|false (BETA - 기본값=true)
    Sysctls=true|false (BETA - 기본값=true)
    TTLAfterFinished=true|false (ALPHA - 기본값=false)
    TokenRequest=true|false (BETA - 기본값=true)
    TokenRequestProjection=true|false (BETA - 기본값=true)
    TopologyManager=true|false (BETA - 기본값=true)
    ValidateProxyRedirects=true|false (BETA - 기본값=true)
    VolumeSnapshotDataSource=true|false (BETA - 기본값=true)
    WarningHeaders=true|false (BETA - 기본값=true)
    WinDSR=true|false (ALPHA - 기본값=false)
    WinOverlay=true|false (ALPHA - 기본값=false)
    WindowsEndpointSliceProxying=true|false (ALPHA - 기본값=false)

    --healthz-bind-address ipport     기본값: 0.0.0.0:10256 -헬스 체크 서버가 서비스할 포트가 있는 IP 주소(모든 IPv4의 인터페이스의 경우 '0.0.0.0:10256', 모든 IPv6의 인터페이스인 경우 '[::]:10256'로 설정). 사용 안 할 경우 빈칸으로 둠. +

    헬스 체크 서버가 서비스할 포트가 있는 IP 주소(모든 IPv4의 인터페이스의 경우 '0.0.0.0:10256', 모든 IPv6의 인터페이스인 경우 '[::]:10256'로 설정). 사용 안 할 경우 빈칸으로 둠.

    -h, --help -kube-proxy에 대한 도움말. +

    kube-proxy에 대한 도움말.

    --hostname-override string -문자열 값이 있으면, 이 값을 실제 호스트네임 대신에 ID로 사용한다. +

    문자열 값이 있으면, 이 값을 실제 호스트네임 대신에 ID로 사용한다.

    --iptables-masquerade-bit int32     기본값: 14 -순수 iptable 프록시를 사용하는 경우 SNAT가 필요한 패킷을 표시하는 fwmark 스페이스 비트. [0, 31] 범위 안에 있어야 한다. +

    순수 iptables 프록시를 사용하는 경우 SNAT가 필요한 패킷을 표시하는 fwmark 스페이스 비트. [0, 31] 범위 안에 있어야 한다.

    --iptables-min-sync-period duration     기본값: 1s -엔드포인트 및 서비스가 변경될 때 iptable 규칙을 새로 고칠 수 있는 빈도의 최소 간격(예: '5s', '1m', '2h22m'). +

    엔드포인트 및 서비스가 변경될 때 iptables 규칙을 새로 고칠 수 있는 빈도의 최소 간격(예: '5s', '1m', '2h22m').

    --iptables-sync-period duration     기본값: 30s -iptable 규칙을 새로 고치는 빈도의 최대 간격(예: '5s', '1m', '2h22m'). 0 보다 커야 한다. +

    iptables 규칙을 새로 고치는 빈도의 최대 간격(예: '5s', '1m', '2h22m'). 0보다 커야 한다.

    --ipvs-exclude-cidrs stringSlice -IPVS 규칙을 정리할 때 ipvs 프록시가 건드리지 않아야 하는 쉼표로 구분된 CIDR 목록. +

    IPVS 규칙을 정리할 때 ipvs 프록시가 건드리지 않아야 하는 쉼표로 구분된 CIDR 목록.

    --ipvs-min-sync-period duration -엔드포인트 및 서비스가 변경될 때 ipvs 규칙을 새로 고칠 수 있는 빈도의 최소 간격(예: '5s', '1m', '2h22m'). +

    엔드포인트 및 서비스가 변경될 때 ipvs 규칙을 새로 고칠 수 있는 빈도의 최소 간격(예: '5s', '1m', '2h22m').

    --ipvs-scheduler string -프록시 모드가 ipvs인 경우 ipvs 스케줄러 유형. +

    프록시 모드가 ipvs인 경우 ipvs 스케줄러 유형.

    --ipvs-strict-arp -arp_ignore를 1로 설정하고 arp_annotes를 2로 설정하여 엄격한 ARP를 사용. +

    arp_ignore를 1로 설정하고 arp_announce를 2로 설정하여 엄격한 ARP를 사용.

    --ipvs-sync-period duration     기본값: 30s -ipvs 규칙이 새로 갱신되는 빈도의 최대 간격(예: '5s', '1m', '2h22m'). 0 보다 커야 한다. +

    ipvs 규칙이 새로 갱신되는 빈도의 최대 간격(예: '5s', '1m', '2h22m'). 0보다 커야 한다.

    --ipvs-tcp-timeout duration -유휴 IPVS TCP 연결에 대한 시간 초과. 0이면 그대로 유지(예: '5s', '1m', '2h22m'). +

    유휴 IPVS TCP 연결에 대한 시간 초과. 0이면 그대로 유지(예: '5s', '1m', '2h22m').

    --ipvs-tcpfin-timeout duration -FIN 패킷을 수신한 후 IPVS TCP 연결에 대한 시간 초과. 0이면 그대로 유지(예: '5s', '1m', '2h22m'). +

    FIN 패킷을 수신한 후 IPVS TCP 연결에 대한 시간 초과. 0이면 그대로 유지(예: '5s', '1m', '2h22m').

    --ipvs-udp-timeout duration -IPVS UDP 패킷에 대한 시간 초과. 0이면 그대로 유지(예: '5s', '1m', '2h22m'). +

    IPVS UDP 패킷에 대한 시간 초과. 0이면 그대로 유지(예: '5s', '1m', '2h22m').

    --kube-api-burst int32     기본값: 10 -쿠버네티스 api 서버와 통신하는 동안 사용할 burst. +

    쿠버네티스 api 서버와 통신하는 동안 사용할 burst.

    --kube-api-content-type string     기본값: "application/vnd.kubernetes.protobuf" -api 서버에 보낸 요청의 내용 유형. +

    api 서버에 보낸 요청의 내용 유형.

    --kube-api-qps float32     기본값: 5 -쿠버네티스 api 서버와 통신할 때 사용할 QPS. +

    쿠버네티스 api 서버와 통신할 때 사용할 QPS.

    --kubeconfig string -인증 정보가 있는 kubeconfig 파일의 경로(마스터 위치는 마스터 플래그로 설정됨). +

    인증 정보가 있는 kubeconfig 파일의 경로(마스터 위치는 마스터 플래그로 설정됨).

    --log-flush-frequency duration     기본값: 5s -로그 플러시 사이의 최대 시간 +

    로그 플러시 사이의 최대 시간

    --masquerade-all -순수 iptables 프록시를 사용하는 경우 서비스 클러스터 IP를 통해 전송된 모든 트래픽을 SNAT함(일반적으로 필요하지 않음). +

    순수 iptables 프록시를 사용하는 경우 서비스 클러스터 IP를 통해 전송된 모든 트래픽을 SNAT함(일반적으로 필요하지 않음).

    --master string -쿠버네티스 API 서버의 주소(kubeconfig의 모든 값 덮어쓰기). +

    쿠버네티스 API 서버의 주소(kubeconfig의 모든 값 덮어쓰기).

    --metrics-bind-address ipport     기본값: 127.0.0.1:10249 -메트릭 서버가 서비스할 포트가 있는 IP 주소(모든 IPv4 인터페이스의 경우 '0.0.0.0:10249', 모든 IPv6 인터페이스의 경우 '[::]:10249'로 설정됨). 사용하지 않으려면 비워둘 것. +

    메트릭 서버가 서비스할 포트가 있는 IP 주소(모든 IPv4 인터페이스의 경우 '0.0.0.0:10249', 모든 IPv6 인터페이스의 경우 '[::]:10249'로 설정됨). 사용하지 않으려면 비워둘 것.

    --nodeport-addresses stringSlice -NodePort에 사용할 주소를 지정하는 값의 문자열 조각. 값은 유효한 IP 블록(예: 1.2.3.0/24, 1.2.3.4/32). 기본값인 빈 문자열 조각값은([]) 모든 로컬 주소를 사용하는 것을 의미한다. +

    NodePort에 사용할 주소를 지정하는 값의 문자열 조각. 값은 유효한 IP 블록(예: 1.2.3.0/24, 1.2.3.4/32). 기본값인 빈 문자열 조각([])은 모든 로컬 주소를 사용하는 것을 의미한다.

    --oom-score-adj int32     기본값: -999 -kube-proxy 프로세스에 대한 oom-score-adj 값. 값은 [-1000, 1000] 범위 내에 있어야 한다. +

    kube-proxy 프로세스에 대한 oom-score-adj 값. 값은 [-1000, 1000] 범위 내에 있어야 한다.

    --profiling -값이 true이면 /debug/pprof 핸들러에서 웹 인터페이스를 통한 프로파일링을 활성화한다. +

    값이 true이면 /debug/pprof 핸들러에서 웹 인터페이스를 통한 프로파일링을 활성화한다.

    --proxy-mode ProxyMode -사용할 프록시 모드: 'userspace' (이전) or 'iptables' (빠름) or 'ipvs' or 'kernelspace' (윈도우). 공백인 경우 가장 잘 사용할 수 있는 프록시(현재는 iptables)를 사용한다. iptables 프록시를 선택했지만, 시스템의 커널 또는 iptables 버전이 맞지 않으면, 항상 userspace 프록시로 변경된다. +

    사용할 프록시 모드: 'userspace' (이전), 'iptables' (빠름), 'ipvs' 또는 'kernelspace' (윈도우). 공백인 경우 가장 잘 사용할 수 있는 프록시(현재는 iptables)를 사용한다. iptables 프록시를 선택했지만, 시스템의 커널 또는 iptables 버전이 맞지 않으면, 항상 userspace 프록시로 변경된다.

    --proxy-port-range port-range -서비스 트래픽을 프록시하기 위해 사용할 수 있는 호스트 포트 범위(beginPort-endPort, single port 또는 beginPort+offset 포함). 만약 범위가 0, 0-0, 혹은 지정되지 않으면, 포트는 무작위로 선택된다. +

    서비스 트래픽을 프록시하기 위해 사용할 수 있는 호스트 포트 범위(beginPort-endPort, single port 또는 beginPort+offset 포함). 만약 범위가 0, 0-0, 혹은 지정되지 않으면, 포트는 무작위로 선택된다.

    --show-hidden-metrics-for-version string -숨겨진 메트릭을 표시할 이전 버전. +

    숨겨진 메트릭을 표시할 이전 버전.

    --udp-timeout duration     기본값: 250ms -유휴 UDP 연결이 열린 상태로 유지되는 시간(예: '250ms', '2s'). 값은 0보다 커야 한다. 프록시 모드 userspace에만 적용 가능함. +

    유휴 UDP 연결이 열린 상태로 유지되는 시간(예: '250ms', '2s'). 값은 0보다 커야 한다. 프록시 모드 userspace에만 적용 가능함.

    --version version[=true] -버전 정보를 인쇄하고 종료. +

    버전 정보를 인쇄하고 종료.

    --write-config-to string -기본 구성 값을 이 파일에 옮겨쓰고 종료한다. +

    기본 구성 값을 이 파일에 옮겨쓰고 종료한다.

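참고로, 위에 설명된 플래그 몇 가지를 조합한 간단한 실행 스케치는 다음과 같다. CIDR, kubeconfig 경로, 스케줄러 값은 설명을 위해 가정한 예시이며, 실제 환경에 맞게 조정해야 한다.

```bash
# ipvs 모드로 kube-proxy를 실행하는 예시 (플래그 값은 가정이다)
kube-proxy \
  --proxy-mode=ipvs \
  --ipvs-scheduler=rr \
  --cluster-cidr=10.244.0.0/16 \
  --kubeconfig=/var/lib/kube-proxy/kubeconfig
```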
diff --git a/content/ko/docs/reference/glossary/api-group.md b/content/ko/docs/reference/glossary/api-group.md
index 0c27d3181e070..96f32bd9ce200 100644
--- a/content/ko/docs/reference/glossary/api-group.md
+++ b/content/ko/docs/reference/glossary/api-group.md
@@ -2,7 +2,7 @@
title: API 그룹(API Group)
id: api-group
date: 2019-09-02
-full_link: /ko/docs/concepts/overview/kubernetes-api/#api-groups
+full_link: /ko/docs/concepts/overview/kubernetes-api/#api-그룹과-버전-규칙
short_description: >
 쿠버네티스 API의 연관된 경로들의 집합.

@@ -11,9 +11,9 @@ tags:
- fundamental
- architecture
---
-쿠버네티스 API의 연관된 경로들의 집합.
+쿠버네티스 API의 연관된 경로들의 집합.

API 서버의 구성을 변경하여 각 API 그룹을 활성화하거나 비활성화할 수 있다. 특정 리소스에 대한 경로를 비활성화하거나 활성화할 수도 있다. API 그룹을 사용하면 쿠버네티스 API를 더 쉽게 확장할 수 있다. API 그룹은 REST 경로 및 직렬화된 오브젝트의 `apiVersion` 필드에 지정된다.

-* 자세한 내용은 [API 그룹(/ko/docs/concepts/overview/kubernetes-api/#api-groups)을 참조한다.
+* 자세한 내용은 [API 그룹](/ko/docs/concepts/overview/kubernetes-api/#api-그룹과-버전-규칙)을 참조한다.
diff --git a/content/ko/docs/reference/glossary/cloud-controller-manager.md b/content/ko/docs/reference/glossary/cloud-controller-manager.md
index 20121a9371e98..d4eb23111b2a7 100644
--- a/content/ko/docs/reference/glossary/cloud-controller-manager.md
+++ b/content/ko/docs/reference/glossary/cloud-controller-manager.md
@@ -5,16 +5,16 @@ date: 2018-04-12
full_link: /ko/docs/concepts/architecture/cloud-controller/
short_description: >
 쿠버네티스를 타사 클라우드 공급자와 통합하는 컨트롤 플레인 컴포넌트.
-aka: 
+aka:
tags:
- core-object
- architecture
- operation
---
- 클라우드별 컨트롤 로직을 포함하는 쿠버네티스
+클라우드별 컨트롤 로직을 포함하는 쿠버네티스
{{< glossary_tooltip text="컨트롤 플레인" term_id="control-plane" >}} 컴포넌트이다.
-클라우트 컨트롤러 매니저를 통해 클러스터를 클라우드 공급자의 API에 연결하고,
-해당 클라우드 플랫폼과 상호 작용하는 컴포넌트와 클러스터와 상호 작용하는 컴포넌트를 분리할 수 있다.
+클라우드 컨트롤러 매니저를 통해 클러스터를 클라우드 공급자의 API에 연결하고,
+해당 클라우드 플랫폼과 상호 작용하는 컴포넌트와 클러스터와만 상호 작용하는 컴포넌트를 구분할 수 있게 해 준다.
diff --git a/content/ko/docs/reference/glossary/index.md b/content/ko/docs/reference/glossary/index.md
index 9af1b3d078553..024d8414ba927 100755
--- a/content/ko/docs/reference/glossary/index.md
+++ b/content/ko/docs/reference/glossary/index.md
@@ -1,5 +1,5 @@
---
-title: 표준 용어집
+title: 용어집
layout: glossary
noedit: true
default_active_tag: fundamental
diff --git a/content/ko/docs/reference/glossary/persistent-volume-claim.md b/content/ko/docs/reference/glossary/persistent-volume-claim.md
new file mode 100644
index 0000000000000..122b754d239f2
--- /dev/null
+++ b/content/ko/docs/reference/glossary/persistent-volume-claim.md
@@ -0,0 +1,18 @@
+---
+title: 퍼시스턴트 볼륨 클레임(Persistent Volume Claim)
+id: persistent-volume-claim
+date: 2018-04-12
+full_link: /ko/docs/concepts/storage/persistent-volumes/
+short_description: >
+  컨테이너의 볼륨으로 마운트될 수 있도록 퍼시스턴트볼륨(PersistentVolume)에 정의된 스토리지 리소스를 요청한다.
+
+aka:
+tags:
+- core-object
+- storage
+---
+ {{< glossary_tooltip text="컨테이너" term_id="container" >}}의 볼륨으로 마운트될 수 있도록 {{< glossary_tooltip text="퍼시스턴트볼륨(PersistentVolume)" term_id="persistent-volume" >}}에 정의된 스토리지 리소스를 요청한다.
+
+
+
+스토리지의 양, 스토리지에 액세스하는 방법(읽기 전용, 읽기 그리고/또는 쓰기) 및 재확보(보존, 재활용 혹은 삭제) 방법을 지정한다. 스토리지 자체에 관한 내용은 퍼시스턴트볼륨 오브젝트에 설명되어 있다.
diff --git a/content/ko/docs/reference/glossary/quantity.md b/content/ko/docs/reference/glossary/quantity.md
new file mode 100644
index 0000000000000..450307841ad05
--- /dev/null
+++ b/content/ko/docs/reference/glossary/quantity.md
@@ -0,0 +1,33 @@
+---
+title: 수량(Quantity)
+id: quantity
+date: 2018-08-07
+full_link:
+short_description: >
+  SI 접미사를 사용하는 작거나 큰 숫자의 정수(whole-number) 표현.
+
+aka:
+tags:
+- core-object
+---
+ SI 접미사를 사용하는 작거나 큰 숫자의 정수(whole-number) 표현.
+ + + +수량은 SI 접미사가 포함된 간결한 정수 표기법을 통해서 작거나 큰 숫자를 표현한 것이다. +분수는 밀리(milli) 단위로 표시되는 반면, +큰 숫자는 킬로(kilo), 메가(mega), 또는 기가(giga) +단위로 표시할 수 있다. + + +예를 들어, 숫자 `1.5`는 `1500m`으로, 숫자 `1000`은 `1k`로, `1000000`은 +`1M`으로 표시할 수 있다. 또한, 이진 표기법 접미사도 명시 가능하므로, +숫자 2048은 `2Ki`로 표기될 수 있다. + +허용되는 10진수(10의 거듭 제곱) 단위는 `m` (밀리), `k` (킬로, 의도적인 소문자), +`M` (메가), `G` (기가), `T` (테라), `P` (페타), +`E` (엑사)가 있다. + +허용되는 2진수(2의 거듭 제곱) 단위는 `Ki` (키비), `Mi` (메비), `Gi` (기비), +`Ti` (테비), `Pi` (페비), `Ei` (엑비)가 있다. + diff --git a/content/ko/docs/reference/glossary/secret.md b/content/ko/docs/reference/glossary/secret.md new file mode 100644 index 0000000000000..63637adc1a238 --- /dev/null +++ b/content/ko/docs/reference/glossary/secret.md @@ -0,0 +1,18 @@ +--- +title: 시크릿(Secret) +id: secret +date: 2018-04-12 +full_link: /ko/docs/concepts/configuration/secret/ +short_description: > + 비밀번호, OAuth 토큰 및 ssh 키와 같은 민감한 정보를 저장한다. + +aka: +tags: +- core-object +- security +--- + 비밀번호, OAuth 토큰 및 ssh 키와 같은 민감한 정보를 저장한다. + + + +민감한 정보를 사용하는 방식에 대해 더 세밀하게 제어할 수 있으며, 유휴 상태의 [암호화](/docs/tasks/administer-cluster/encrypt-data/#ensure-all-secrets-are-encrypted)를 포함하여 우발적인 노출 위험을 줄인다. {{< glossary_tooltip text="파드(Pod)" term_id="pod" >}}는 시크릿을 마운트된 볼륨의 파일로 참조하거나, 파드의 이미지를 풀링하는 kubelet이 시크릿을 참조한다. 시크릿은 기밀 데이터에 적합하고 [컨피그맵](/docs/tasks/configure-pod-container/configure-pod-configmap/)은 기밀이 아닌 데이터에 적합하다. diff --git a/content/ko/docs/reference/glossary/storage-class.md b/content/ko/docs/reference/glossary/storage-class.md new file mode 100644 index 0000000000000..63bd655b68d26 --- /dev/null +++ b/content/ko/docs/reference/glossary/storage-class.md @@ -0,0 +1,20 @@ +--- +title: 스토리지 클래스(Storage Class) +id: storageclass +date: 2018-04-12 +full_link: /ko/docs/concepts/storage/storage-classes +short_description: > + 스토리지클래스는 관리자가 사용 가능한 다양한 스토리지 유형을 설명할 수 있는 방법을 제공한다. + +aka: +tags: +- core-object +- storage +--- + 스토리지클래스는 관리자가 사용 가능한 다양한 스토리지 유형을 설명할 수 있는 방법을 제공한다. + + + +스토리지 클래스는 서비스 품질 수준, 백업 정책 혹은 클러스터 관리자가 결정한 임의의 정책에 매핑할 수 있다. 각 스토리지클래스에는 클래스에 속한 {{< glossary_tooltip text="퍼시스턴트 볼륨(Persistent Volume)" term_id="persistent-volume" >}}을 동적으로 프로비저닝해야 할 때 사용되는 `provisioner`, `parameters` 및 `reclaimPolicy` 필드가 있다. 사용자는 스토리지클래스 객체의 이름을 사용하여 특정 클래스를 요청할 수 있다. + + diff --git a/content/ko/docs/reference/glossary/volume-plugin.md b/content/ko/docs/reference/glossary/volume-plugin.md new file mode 100644 index 0000000000000..779f0303ca5e3 --- /dev/null +++ b/content/ko/docs/reference/glossary/volume-plugin.md @@ -0,0 +1,19 @@ +--- +title: 볼륨 플러그인(Volume Plugin) +id: volumeplugin +date: 2018-04-12 +full_link: +short_description: > + 볼륨 플러그인은 파드 내에서의 스토리지 통합을 가능하게 한다. + +aka: +tags: +- core-object +- storage +--- + 볼륨 플러그인은 {{< glossary_tooltip text="파드(Pod)" term_id="pod" >}} 내에서의 스토리지 통합을 가능하게 한다. + + + +볼륨 플러그인을 사용하면 {{< glossary_tooltip text="파드" term_id="pod" >}}에서 사용할 스토리지 볼륨을 연결하고 마운트할 수 있다. 볼륨 플러그인은 _인-트리(in tree)_ 혹은 _아웃-오브-트리(out of tree)_ 일 수 있다. _인-트리_ 플러그인은 쿠버네티스 코드 리포지터리의 일부이며 동일한 릴리즈 주기를 따른다. _아웃-오브-트리_ 플러그인은 독립적으로 개발된다. 
+ diff --git a/content/ko/docs/reference/issues-security/_index.md b/content/ko/docs/reference/issues-security/_index.md index 596a6e01a46b5..12ac2f375906e 100644 --- a/content/ko/docs/reference/issues-security/_index.md +++ b/content/ko/docs/reference/issues-security/_index.md @@ -1,4 +1,4 @@ --- title: 쿠버네티스 이슈와 보안 -weight: 10 +weight: 40 --- diff --git a/content/ko/docs/reference/kubectl/_index.md b/content/ko/docs/reference/kubectl/_index.md index 7b6c2d720b12a..765adb6fe8790 100755 --- a/content/ko/docs/reference/kubectl/_index.md +++ b/content/ko/docs/reference/kubectl/_index.md @@ -1,5 +1,5 @@ --- -title: "kubectl CLI" +title: "kubectl" weight: 60 --- diff --git a/content/ko/docs/reference/kubectl/cheatsheet.md b/content/ko/docs/reference/kubectl/cheatsheet.md index 2ac5f6076cae1..4ee9c5406e9e2 100644 --- a/content/ko/docs/reference/kubectl/cheatsheet.md +++ b/content/ko/docs/reference/kubectl/cheatsheet.md @@ -191,7 +191,7 @@ JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.ty && kubectl get nodes -o jsonpath="$JSONPATH" | grep "Ready=True" # 외부 도구 없이 디코딩된 시크릿 출력 -kubectl get secret ${secret_name} -o go-template='{{range $k,$v := .data}}{{$k}}={{$v|base64decode}}{{"\n"}}{{end}}' +kubectl get secret my-secret -o go-template='{{range $k,$v := .data}}{{"### "}}{{$k}}{{"\n"}}{{$v|base64decode}}{{"\n\n"}}{{end}}' # 파드에 의해 현재 사용되고 있는 모든 시크릿 목록 조회 kubectl get pods -o json | jq '.items[].spec.containers[].env[]?.valueFrom.secretKeyRef.name' | grep -v null | sort | uniq @@ -293,12 +293,12 @@ kubectl get pods -n mynamespace --no-headers=true | awk '/pattern1|pattern2/{pr ## 실행 중인 파드와 상호 작용 ```bash -kubectl logs my-pod # 파드 로그(stdout) 덤프 +kubectl logs my-pod # 파드 로그 덤프 (stdout) kubectl logs -l name=myLabel # name이 myLabel인 파드 로그 덤프 (stdout) -kubectl logs my-pod --previous # 컨테이너의 이전 인스턴스 생성에 대한 파드 로그(stdout) 덤프 -kubectl logs my-pod -c my-container # 파드 로그(stdout, 멀티-컨테이너 경우) 덤프 +kubectl logs my-pod --previous # 컨테이너의 이전 인스턴스 생성에 대한 파드 로그 덤프 (stdout) +kubectl logs my-pod -c my-container # 파드 로그 덤프 (stdout, 멀티-컨테이너 경우) kubectl logs -l name=myLabel -c my-container # name이 myLabel인 파드 로그 덤프 (stdout) -kubectl logs my-pod -c my-container --previous # 컨테이너의 이전 인스턴스 생성에 대한 파드 로그(stdout, 멀티-컨테이너 경우) 덤프 +kubectl logs my-pod -c my-container --previous # 컨테이너의 이전 인스턴스 생성에 대한 파드 로그 덤프 (stdout, 멀티-컨테이너 경우) kubectl logs -f my-pod # 실시간 스트림 파드 로그(stdout) kubectl logs -f my-pod -c my-container # 실시간 스트림 파드 로그(stdout, 멀티-컨테이너 경우) kubectl logs -f -l name=myLabel --all-containers # name이 myLabel인 모든 파드의 로그 스트리밍 (stdout) @@ -317,6 +317,18 @@ kubectl top pod POD_NAME --containers # 특정 파드와 해당 kubectl top pod POD_NAME --sort-by=cpu # 지정한 파드에 대한 메트릭을 표시하고 'cpu' 또는 'memory'별로 정렬 ``` +## 디플로이먼트, 서비스와 상호 작용 +```bash +kubectl logs deploy/my-deployment # 디플로이먼트에 대한 파드 로그 덤프 (단일-컨테이너 경우) +kubectl logs deploy/my-deployment -c my-container # 디플로이먼트에 대한 파드 로그 덤프 (멀티-컨테이너 경우) + +kubectl port-forward svc/my-service 5000 # 로컬 머신의 5000번 포트를 리스닝하고, my-service의 동일한(5000번) 포트로 전달 +kubectl port-forward svc/my-service 5000:my-service-port # 로컬 머신의 5000번 포트를 리스닝하고, my-service의 my-service-port 라는 이름을 가진 포트로 전달 + +kubectl port-forward deploy/my-deployment 5000:6000 # 로컬 머신의 5000번 포트를 리스닝하고, my-deployment 에 의해 생성된 파드의 6000번 포트로 전달 +kubectl exec deploy/my-deployment -- ls # my-deployment 에 의해 생성된 첫번째 파드의 첫번째 컨테이너에 명령어 실행 (단일- 또는 다중-컨테이너 경우) +``` + ## 노드, 클러스터와 상호 작용 ```bash @@ -334,7 +346,7 @@ kubectl taint nodes foo dedicated=special-user:NoSchedule ### 리소스 타입 -단축명, [API 그룹](/ko/docs/concepts/overview/kubernetes-api/#api-그룹)과 함께 지원되는 모든 리소스 유형들, 그것들의
[네임스페이스](/ko/docs/concepts/overview/working-with-objects/namespaces)와 [종류(Kind)](/ko/docs/concepts/overview/working-with-objects/kubernetes-objects)를 나열: +단축명, [API 그룹](/ko/docs/concepts/overview/kubernetes-api/#api-그룹과-버전-규칙)과 함께 지원되는 모든 리소스 유형들, 그것들의 [네임스페이스](/ko/docs/concepts/overview/working-with-objects/namespaces)와 [종류(Kind)](/ko/docs/concepts/overview/working-with-objects/kubernetes-objects)를 나열: ```bash kubectl api-resources @@ -345,7 +357,7 @@ API 리소스를 탐색하기 위한 다른 작업: ```bash kubectl api-resources --namespaced=true # 네임스페이스를 가지는 모든 리소스 kubectl api-resources --namespaced=false # 네임스페이스를 가지지 않는 모든 리소스 -kubectl api-resources -o name # 모든 리소스의 단순한 (리소스 이름 만) 출력 +kubectl api-resources -o name # 모든 리소스의 단순한 (리소스 이름만) 출력 kubectl api-resources -o wide # 모든 리소스의 확장된 ("wide"로 알려진) 출력 kubectl api-resources --verbs=list,get # "list"와 "get"의 요청 동사를 지원하는 모든 리소스 출력 kubectl api-resources --api-group=extensions # "extensions" API 그룹의 모든 리소스 @@ -372,6 +384,9 @@ kubectl api-resources --api-group=extensions # "extensions" API 그룹의 모든 # 클러스터에서 실행 중인 모든 이미지 kubectl get pods -A -o=custom-columns='DATA:spec.containers[*].image' +# `default` 네임스페이스의 모든 이미지를 파드별로 그룹지어 출력 +kubectl get pods --namespace default --output=custom-columns="NAME:.metadata.name,IMAGE:.spec.containers[*].image" + # "k8s.gcr.io/coredns:1.6.2" 를 제외한 모든 이미지 kubectl get pods -A -o=custom-columns='DATA:spec.containers[?(@.image!="k8s.gcr.io/coredns:1.6.2")].image' diff --git a/content/ko/docs/reference/kubectl/docker-cli-to-kubectl.md b/content/ko/docs/reference/kubectl/docker-cli-to-kubectl.md index 1679059871b57..12b41b1d980e9 100644 --- a/content/ko/docs/reference/kubectl/docker-cli-to-kubectl.md +++ b/content/ko/docs/reference/kubectl/docker-cli-to-kubectl.md @@ -7,7 +7,7 @@ content_type: concept --- -당신은 쿠버네티스 커맨드 라인 도구인 kubectl을 사용하여 API 서버와 상호 작용할 수 있다. 만약 도커 커맨드 라인 도구에 익숙하다면 kubectl을 사용하는 것은 간단하다. 다음 섹션에서는 도커의 하위 명령을 보여주고 kubectl과 같은 명령어를 설명한다. +당신은 쿠버네티스 커맨드 라인 도구인 `kubectl`을 사용하여 API 서버와 상호 작용할 수 있다. 만약 도커 커맨드 라인 도구에 익숙하다면 `kubectl`을 사용하는 것은 간단하다. 다음 섹션에서는 도커의 하위 명령을 보여주고 `kubectl`과 같은 명령어를 설명한다. diff --git a/content/ko/docs/reference/kubectl/overview.md b/content/ko/docs/reference/kubectl/overview.md index 7ddfe5b095e8c..3d8179a08a9ef 100644 --- a/content/ko/docs/reference/kubectl/overview.md +++ b/content/ko/docs/reference/kubectl/overview.md @@ -17,7 +17,7 @@ KUBECONFIG 환경 변수를 설정하거나 [`--kubeconfig`](/ko/docs/concepts/c 이 개요는 `kubectl` 구문을 다루고, 커맨드 동작을 설명하며, 일반적인 예제를 제공한다. 지원되는 모든 플래그 및 하위 명령을 포함한 각 명령에 대한 자세한 내용은 [kubectl](/docs/reference/generated/kubectl/kubectl-commands/) 참조 문서를 참고한다. -설치 방법에 대해서는 [kubectl 설치](/ko/docs/tasks/tools/install-kubectl/)를 참고한다. +설치 방법에 대해서는 [kubectl 설치](/ko/docs/tasks/tools/)를 참고한다. diff --git a/content/ko/docs/reference/scheduling/config.md b/content/ko/docs/reference/scheduling/config.md index 0f3f120d77ce3..5da54ed813bba 100644 --- a/content/ko/docs/reference/scheduling/config.md +++ b/content/ko/docs/reference/scheduling/config.md @@ -18,12 +18,10 @@ weight: 20 각 단계는 익스텐션 포인트(extension point)를 통해 노출된다. 플러그인은 이러한 익스텐션 포인트 중 하나 이상을 구현하여 스케줄링 동작을 제공한다. 
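[편집자 주] 바로 위 문단이 설명하는 "플러그인이 익스텐션 포인트를 구현하여 스케줄링 동작을 제공한다"는 개념을 보여 주는 구성 스케치이다. 파일 경로와 예시로 고른 플러그인 이름은 설명을 위한 가정이다.

```shell
# score 익스텐션 포인트에서 특정 플러그인을 비활성화하는 프로파일 구성의 스케치.
# 경로와 플러그인 선택은 설명용 가정이다.
cat <<EOF > /etc/kubernetes/scheduler-config.yaml
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
profiles:
  - schedulerName: default-scheduler
    plugins:
      score:
        disabled:
          - name: NodeResourcesLeastAllocated
EOF
```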
-컴포넌트 구성 API([`v1alpha1`](https://pkg.go.dev/k8s.io/kube-scheduler@v0.18.0/config/v1alpha1?tab=doc#KubeSchedulerConfiguration) -또는 [`v1alpha2`](https://pkg.go.dev/k8s.io/kube-scheduler@v0.18.0/config/v1alpha2?tab=doc#KubeSchedulerConfiguration))를 -사용하고, `kube-scheduler --config `을 실행하여 +[KubeSchedulerConfiguration (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) +구조에 맞게 파일을 작성하고, +`kube-scheduler --config `을 실행하여 스케줄링 프로파일을 지정할 수 있다. -`v1alpha2` API를 사용하면 [여러 프로파일](#여러-프로파일)을 -실행하도록 kube-scheduler를 구성할 수 있다. 최소 구성은 다음과 같다. @@ -149,7 +147,12 @@ profiles: 익스텐션 포인트: `Score`. - `VolumeBinding`: 노드에 요청된 {{< glossary_tooltip text="볼륨" term_id="volume" >}}이 있는지 또는 바인딩할 수 있는지 확인한다. - 익스텐션 포인트: `PreFilter`, `Filter`, `Reserve`, `PreBind`. + 익스텐션 포인트: `PreFilter`, `Filter`, `Reserve`, `PreBind`, `Score`. + {{< note >}} + `Score` 익스텐션 포인트는 `VolumeCapacityPriority` 기능이 + 활성화되어 있어야 활성화되며, + 요청된 볼륨 사이즈를 만족하는 가장 작은 PV들을 우선순위 매긴다. + {{< /note >}} - `VolumeRestrictions`: 노드에 마운트된 볼륨이 볼륨 제공자에 특정한 제한 사항을 충족하는지 확인한다. 익스텐션 포인트: `Filter`. @@ -185,8 +188,6 @@ profiles: - `RequestedToCapacityRatio`: 할당된 리소스의 구성된 기능에 따라 노드를 선호한다. 익스텐션 포인트: `Score`. -- `NodeResourceLimits`: 파드 리소스 제한을 충족하는 노드를 선호한다. - 익스텐션 포인트: `PreScore`, `Score`. - `CinderVolume`: 노드에 대해 OpenStack Cinder 볼륨 제한을 충족할 수 있는지 확인한다. 익스텐션 포인트: `Filter`. @@ -251,3 +252,4 @@ profiles: * [kube-scheduler 레퍼런스](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) 읽어보기 * [스케줄링](/ko/docs/concepts/scheduling-eviction/kube-scheduler/)에 대해 알아보기 +* [kube-scheduler configuration (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) 레퍼런스 읽어보기 diff --git a/content/ko/docs/reference/scheduling/policies.md b/content/ko/docs/reference/scheduling/policies.md index f2cae65b685e6..626e077784e82 100644 --- a/content/ko/docs/reference/scheduling/policies.md +++ b/content/ko/docs/reference/scheduling/policies.md @@ -8,9 +8,7 @@ weight: 10 스케줄링 정책을 사용하여 {{< glossary_tooltip text="kube-scheduler" term_id="kube-scheduler" >}}가 각각 노드를 필터링하고 스코어링(scoring)하기 위해 실행하는 *단정(predicates)* 및 *우선순위(priorities)* 를 지정할 수 있다. -`kube-scheduler --policy-config-file ` 또는 `kube-scheduler --policy-configmap `을 실행하고 [정책 유형](https://pkg.go.dev/k8s.io/kube-scheduler@v0.18.0/config/v1?tab=doc#Policy)을 사용하여 스케줄링 정책을 설정할 수 있다. - - +`kube-scheduler --policy-config-file ` 또는 `kube-scheduler --policy-configmap `을 실행하고 [정책 유형](/docs/reference/config-api/kube-scheduler-policy-config.v1/)을 사용하여 스케줄링 정책을 설정할 수 있다. @@ -110,9 +108,9 @@ weight: 10 - `EvenPodsSpreadPriority`: 선호된 [파드 토폴로지 분배 제약 조건](/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints/)을 구현한다. 
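[편집자 주] 위 config.md 변경에서 언급하는 KubeSchedulerConfiguration (v1beta1) 최소 구성은 대략 아래와 같은 형태가 된다. 스케치이며, 구성 파일 경로와 kubeconfig 경로는 설명을 위한 가정이다.

```shell
# KubeSchedulerConfiguration (v1beta1) 최소 구성의 스케치. 경로는 설명용 가정이다.
cat <<EOF > /etc/kubernetes/scheduler-config.yaml
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: /etc/kubernetes/scheduler.conf
EOF

kube-scheduler --config /etc/kubernetes/scheduler-config.yaml
```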
- - ## {{% heading "whatsnext" %}} * [스케줄링](/ko/docs/concepts/scheduling-eviction/kube-scheduler/)에 대해 배우기 * [kube-scheduler 프로파일](/docs/reference/scheduling/profiles/)에 대해 배우기 +* [kube-scheduler configuration 레퍼런스 (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1) 읽어보기 +* [kube-scheduler Policy 레퍼런스 (v1)](/docs/reference/config-api/kube-scheduler-policy-config.v1/) 읽어보기 diff --git a/content/ko/docs/reference/setup-tools/_index.md b/content/ko/docs/reference/setup-tools/_index.md index 051fba4c5d79d..717b9cb2ac7a0 100644 --- a/content/ko/docs/reference/setup-tools/_index.md +++ b/content/ko/docs/reference/setup-tools/_index.md @@ -1,4 +1,4 @@ --- -title: 설치 도구 레퍼런스 +title: 설치 도구 weight: 50 --- diff --git a/content/ko/docs/reference/setup-tools/kubeadm/_index.md b/content/ko/docs/reference/setup-tools/kubeadm/_index.md index 683f78000b444..ca7a08f5efefe 100644 --- a/content/ko/docs/reference/setup-tools/kubeadm/_index.md +++ b/content/ko/docs/reference/setup-tools/kubeadm/_index.md @@ -16,15 +16,17 @@ kubeadm은 실행 가능한 최소 클러스터를 시작하고 실행하는 데 ## 설치 방법 -kubeadm을 설치하려면, [설치 가이드](/docs/setup/production-environment/tools/kubeadm/install-kubeadm)를 참고한다. +kubeadm을 설치하려면, [설치 가이드](/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm/)를 참고한다. ## {{% heading "whatsnext" %}} -* [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init): 쿠버네티스 컨트롤 플레인 노드를 부트스트랩한다. -* [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join): 쿠버네티스 워커(worker) 노드를 부트스트랩하고 클러스터에 조인시킨다. -* [kubeadm upgrade](/docs/reference/setup-tools/kubeadm/kubeadm-upgrade): 쿠버네티스 클러스터를 새로운 버전으로 업그레이드한다. -* [kubeadm config](/docs/reference/setup-tools/kubeadm/kubeadm-config): kubeadm v1.7.x 이하의 버전을 사용하여 클러스터를 초기화한 경우, `kubeadm upgrade` 를 위해 사용자의 클러스터를 구성한다. -* [kubeadm token](/docs/reference/setup-tools/kubeadm/kubeadm-token): `kubeadm join` 을 위한 토큰을 관리한다. -* [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset): `kubeadm init` 또는 `kubeadm join` 에 의한 호스트의 모든 변경 사항을 되돌린다. -* [kubeadm version](/docs/reference/setup-tools/kubeadm/kubeadm-version): kubeadm 버전을 출력한다. -* [kubeadm alpha](/docs/reference/setup-tools/kubeadm/kubeadm-alpha): 커뮤니티에서 피드백을 수집하기 위해서 기능 미리 보기를 제공한다. +* [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/): 쿠버네티스 컨트롤 플레인 노드를 부트스트랩한다. +* [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/): 쿠버네티스 워커(worker) 노드를 부트스트랩하고 클러스터에 조인시킨다. +* [kubeadm upgrade](/docs/reference/setup-tools/kubeadm/kubeadm-upgrade/): 쿠버네티스 클러스터를 새로운 버전으로 업그레이드한다. +* [kubeadm config](/docs/reference/setup-tools/kubeadm/kubeadm-config/): kubeadm v1.7.x 이하의 버전을 사용하여 클러스터를 초기화한 경우, `kubeadm upgrade` 를 위해 사용자의 클러스터를 구성한다. +* [kubeadm token](/docs/reference/setup-tools/kubeadm/kubeadm-token/): `kubeadm join` 을 위한 토큰을 관리한다. +* [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/): `kubeadm init` 또는 `kubeadm join` 에 의한 호스트의 모든 변경 사항을 되돌린다. +* [kubeadm certs](/docs/reference/setup-tools/kubeadm/kubeadm-certs): 쿠버네티스 인증서를 관리한다. +* [kubeadm kubeconfig](/docs/reference/setup-tools/kubeadm/kubeadm-kubeconfig): kubeconfig 파일을 관리한다. +* [kubeadm version](/docs/reference/setup-tools/kubeadm/kubeadm-version/): kubeadm 버전을 출력한다. +* [kubeadm alpha](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/): 커뮤니티에서 피드백을 수집하기 위해서 기능 미리 보기를 제공한다. 
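[편집자 주] 위에 나열된 kubeadm 하위 명령들이 일반적으로 어떤 순서로 쓰이는지 보여 주는 스케치이다. IP 주소, 토큰, CA 인증서 해시 값은 모두 설명을 위한 가정이다.

```shell
# 일반적인 kubeadm 부트스트랩 흐름의 스케치. 값들은 모두 설명용 가정이다.
sudo kubeadm init                                  # 컨트롤 플레인 노드에서 실행
kubeadm token list                                 # kubeadm join 용 토큰 확인
kubeadm version                                    # kubeadm 버전 출력

# 워커 노드에서 실행. 주소/토큰/해시는 kubeadm init 의 출력에서 가져온다.
sudo kubeadm join 192.0.2.10:6443 \
  --token abcdef.0123456789abcdef \
  --discovery-token-ca-cert-hash sha256:1111222233334444555566667777888899990000aaaabbbbccccddddeeeeffff

sudo kubeadm reset                                 # init/join 이 만든 변경 사항 되돌리기
```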
diff --git a/content/ko/docs/reference/setup-tools/kubeadm/generated/_index.md b/content/ko/docs/reference/setup-tools/kubeadm/generated/_index.md new file mode 100644 index 0000000000000..7ebf753ae9d46 --- /dev/null +++ b/content/ko/docs/reference/setup-tools/kubeadm/generated/_index.md @@ -0,0 +1,6 @@ +--- +title: "Kubeadm Generated" +weight: 10 +toc_hide: true +--- + diff --git a/content/ko/docs/reference/tools.md b/content/ko/docs/reference/tools/_index.md similarity index 85% rename from content/ko/docs/reference/tools.md rename to content/ko/docs/reference/tools/_index.md index ac0a5fb6c5889..a38158bf14b2d 100644 --- a/content/ko/docs/reference/tools.md +++ b/content/ko/docs/reference/tools/_index.md @@ -16,13 +16,13 @@ content_type: concept ## Kubeadm -[`kubeadm`](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/)은 물리적 환경, 클라우드 서버, 또는 가상머신 상에서 안전한 쿠버네티스를 쉽게 프로비저닝하기 위한 커맨드라인 툴이다(현재는 알파 상태). +[`kubeadm`](/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm/)은 물리적 환경, 클라우드 서버, 또는 가상머신 상에서 안전한 쿠버네티스를 쉽게 프로비저닝하기 위한 커맨드라인 툴이다(현재는 알파 상태). ## Minikube -[`minikube`](https://minikube.sigs.k8s.io/docs/)는 개발과 테스팅 목적으로 하는 +[`minikube`](https://minikube.sigs.k8s.io/docs/)는 개발과 테스팅 목적으로 단일 노드 쿠버네티스 클러스터를 로컬 워크스테이션에서 -쉽게 구동시키는 도구이다. +실행하는 도구이다. ## 대시보드 diff --git a/content/ko/docs/reference/using-api/_index.md b/content/ko/docs/reference/using-api/_index.md index 427cebf529901..423d82e0f2c1c 100644 --- a/content/ko/docs/reference/using-api/_index.md +++ b/content/ko/docs/reference/using-api/_index.md @@ -1,7 +1,8 @@ --- -title: 쿠버네티스 API 개요 +title: API 개요 content_type: concept weight: 10 +no_list: true card: name: 레퍼런스 weight: 50 diff --git a/content/ko/docs/reference/using-api/client-libraries.md b/content/ko/docs/reference/using-api/client-libraries.md index ae0404239d982..f8c1cb91c80d0 100644 --- a/content/ko/docs/reference/using-api/client-libraries.md +++ b/content/ko/docs/reference/using-api/client-libraries.md @@ -65,12 +65,13 @@ API 호출 또는 요청/응답 타입을 직접 구현할 필요는 없다. 
| Python | [github.com/fiaas/k8s](https://github.com/fiaas/k8s) | | Python | [github.com/mnubo/kubernetes-py](https://github.com/mnubo/kubernetes-py) | | Python | [github.com/tomplus/kubernetes_asyncio](https://github.com/tomplus/kubernetes_asyncio) | +| Python | [github.com/Frankkkkk/pykorm](https://github.com/Frankkkkk/pykorm) | | Ruby | [github.com/abonas/kubeclient](https://github.com/abonas/kubeclient) | | Ruby | [github.com/Ch00k/kuber](https://github.com/Ch00k/kuber) | | Ruby | [github.com/kontena/k8s-client](https://github.com/kontena/k8s-client) | | Rust | [github.com/clux/kube-rs](https://github.com/clux/kube-rs) | | Rust | [github.com/ynqa/kubernetes-rust](https://github.com/ynqa/kubernetes-rust) | -| Scala | [github.com/doriordan/skuber](https://github.com/doriordan/skuber) | +| Scala | [github.com/hagay3/skuber](https://github.com/hagay3/skuber) | | Scala | [github.com/joan38/kubernetes-client](https://github.com/joan38/kubernetes-client) | | DotNet | [github.com/tonnyeremin/kubernetes_gen](https://github.com/tonnyeremin/kubernetes_gen) | | Swift | [github.com/swiftkube/client](https://github.com/swiftkube/client) | diff --git a/content/ko/docs/setup/best-practices/certificates.md b/content/ko/docs/setup/best-practices/certificates.md index 5595e0ac3d3fe..77665edbe27fa 100644 --- a/content/ko/docs/setup/best-practices/certificates.md +++ b/content/ko/docs/setup/best-practices/certificates.md @@ -7,7 +7,7 @@ weight: 40 쿠버네티스는 TLS 위에 인증을 위해 PKI 인증서가 필요하다. -만약 [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/)으로 쿠버네티스를 설치했다면, 클러스터에 필요한 인증서는 자동으로 생성된다. +만약 [kubeadm](/ko/docs/reference/setup-tools/kubeadm/)으로 쿠버네티스를 설치했다면, 클러스터에 필요한 인증서는 자동으로 생성된다. 또한 더 안전하게 자신이 소유한 인증서를 생성할 수 있다. 이를 테면, 개인키를 API 서버에 저장하지 않으므로 더 안전하게 보관할 수 있다. 이 페이지는 클러스터에 필요한 인증서를 설명한다. @@ -72,7 +72,7 @@ etcd 역시 클라이언트와 피어 간에 상호 TLS 인증을 구현한다. | kube-apiserver-kubelet-client | kubernetes-ca | system:masters | client | | | front-proxy-client | kubernetes-front-proxy-ca | | client | | -[1]: 클러스터에 접속한 다른 IP 또는 DNS 이름([kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) 이 사용하는 로드 밸런서 안정 IP 또는 DNS 이름, `kubernetes`, `kubernetes.default`, `kubernetes.default.svc`, +[1]: 클러스터에 접속한 다른 IP 또는 DNS 이름([kubeadm](/ko/docs/reference/setup-tools/kubeadm/) 이 사용하는 로드 밸런서 안정 IP 또는 DNS 이름, `kubernetes`, `kubernetes.default`, `kubernetes.default.svc`, `kubernetes.default.svc.cluster`, `kubernetes.default.svc.cluster.local`) `kind`는 하나 이상의 [x509 키 사용](https://godoc.org/k8s.io/api/certificates/v1beta1#KeyUsage) 종류를 가진다. @@ -97,7 +97,7 @@ kubeadm 사용자만 해당: ### 인증서 파일 경로 -인증서는 권고하는 파일 경로에 존재해야 한다([kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/)에서 사용되는 것처럼). 경로는 위치에 관계없이 주어진 파라미터를 사용하여 지정되야 한다. +인증서는 권고하는 파일 경로에 존재해야 한다([kubeadm](/ko/docs/reference/setup-tools/kubeadm/)에서 사용되는 것처럼). 경로는 위치에 관계없이 주어진 파라미터를 사용하여 지정해야 한다. | 기본 CN | 권고되는 키 파일 경로 | 권고하는 인증서 파일 경로 | 명령어 | 키 파라미터 | 인증서 파라미터 | |------------------------------|------------------------------|-----------------------------|----------------|------------------------------|-------------------------------------------| @@ -155,5 +155,5 @@ KUBECONFIG= kubectl config use-context default-system |-------------------------|-------------------------|-----------------------------------------------------------------------| | admin.conf | kubectl | 클러스터 관리자를 설정한다. 
| | kubelet.conf | kubelet | 클러스터 각 노드를 위해 필요하다. | -| controller-manager.conf | kube-controller-manager | 반드시 매니페스트를 `manifests/kube-controller-manager.yaml`에 추가해야한다. | -| scheduler.conf | kube-scheduler | 반드시 매니페스트를 `manifests/kube-scheduler.yaml`에 추가해야한다. | +| controller-manager.conf | kube-controller-manager | 반드시 매니페스트를 `manifests/kube-controller-manager.yaml`에 추가해야 한다. | +| scheduler.conf | kube-scheduler | 반드시 매니페스트를 `manifests/kube-scheduler.yaml`에 추가해야 한다. | diff --git a/content/ko/docs/setup/best-practices/cluster-large.md b/content/ko/docs/setup/best-practices/cluster-large.md index 6f6dbbae0ef69..d67892e6dca11 100644 --- a/content/ko/docs/setup/best-practices/cluster-large.md +++ b/content/ko/docs/setup/best-practices/cluster-large.md @@ -67,9 +67,8 @@ _A_ 영역에 있는 컨트롤 플레인 호스트로만 전달한다. 단일 쿠버네티스 [리소스 제한](/ko/docs/concepts/configuration/manage-resources-containers/)은 파드와 컨테이너가 다른 컴포넌트에 영향을 줄 수 있는 메모리 누수 및 기타 방식의 영향을 -최소화하는 데 도움이 된다. 이러한 리소스 제한은 애플리케이션 워크로드에 적용되는 것과 마찬가지로 -{{< glossary_tooltip text="애드온" term_id="addons" >}}에도 적용될 수 있으며 -적용되어야 한다. +최소화하는 데 도움이 된다. 이러한 리소스 제한은 애플리케이션 워크로드에 적용될 수 있는 것처럼 +{{< glossary_tooltip text="애드온" term_id="addons" >}} 리소스에도 적용될 수 있다. 예를 들어, 로깅 컴포넌트에 대한 CPU 및 메모리 제한을 설정할 수 있다. diff --git a/content/ko/docs/setup/best-practices/multiple-zones.md b/content/ko/docs/setup/best-practices/multiple-zones.md index 189dbed654078..3d825ebd08c18 100644 --- a/content/ko/docs/setup/best-practices/multiple-zones.md +++ b/content/ko/docs/setup/best-practices/multiple-zones.md @@ -55,7 +55,7 @@ content_type: concept 특정 kubelet을 나타내는 노드 오브젝트에 {{< glossary_tooltip text="레이블" term_id="label" >}}을 자동으로 추가한다. 이러한 레이블에는 -[영역 정보](/docs/reference/kubernetes-api/labels-annotations-taints/#topologykubernetesiozone)가 포함될 수 있다. +[영역 정보](/docs/reference/labels-annotations-taints/#topologykubernetesiozone)가 포함될 수 있다. 클러스터가 여러 영역 또는 지역에 걸쳐있는 경우, [파드 토폴로지 분배 제약 조건](/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints/)과 diff --git a/content/ko/docs/setup/production-environment/container-runtimes.md b/content/ko/docs/setup/production-environment/container-runtimes.md index 4b749ab7e51bc..d2638f4433624 100644 --- a/content/ko/docs/setup/production-environment/container-runtimes.md +++ b/content/ko/docs/setup/production-environment/container-runtimes.md @@ -48,7 +48,7 @@ Systemd는 cgroup과의 긴밀한 통합을 통해 프로세스당 cgroup을 할 시스템이 안정화된다. 도커에 대해 구성하려면, `native.cgroupdriver=systemd`를 설정한다. {{< caution >}} -클러스터에 결합되어 있는 노드의 cgroup 관리자를 변경하는 것은 강력하게 권장하지 *않는다*. +클러스터에 결합되어 있는 노드의 cgroup 관리자를 변경하는 것은 신중하게 수행해야 한다. 하나의 cgroup 드라이버의 의미를 사용하여 kubelet이 파드를 생성해왔다면, 컨테이너 런타임을 다른 cgroup 드라이버로 변경하는 것은 존재하는 기존 파드에 대해 파드 샌드박스 재생성을 시도할 때, 에러가 발생할 수 있다. kubelet을 재시작하는 것은 에러를 해결할 수 없을 것이다. @@ -57,13 +57,18 @@ kubelet을 재시작하는 것은 에러를 해결할 수 없을 것이다. 교체하거나, 자동화를 사용하여 다시 설치한다. {{< /caution >}} +### kubeadm으로 생성한 클러스터의 드라이버를 `systemd`로 변경하기 + +kubeadm으로 생성한 클러스터의 cgroup 드라이버를 `systemd`로 변경하려면 +[변경 가이드](/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/)를 참고한다. + ## 컨테이너 런타임 {{% thirdparty-content %}} ### containerd -이 섹션에는 `containerd` 를 CRI 런타임으로 사용하는 데 필요한 단계가 포함되어 있다. +이 섹션에는 containerd 를 CRI 런타임으로 사용하는 데 필요한 단계가 포함되어 있다. 필수 구성 요소를 설치 및 구성한다. @@ -90,163 +95,63 @@ sudo sysctl --system containerd를 설치한다. 
{{< tabs name="tab-cri-containerd-installation" >}} -{{% tab name="Ubuntu 16.04" %}} +{{% tab name="Linux" %}} -```shell -# 도커의 공식 GPG 키 추가 -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - -# (containerd 설치) -## 리포지터리 설정 -### HTTPS를 통해 리포지터리를 사용할 수 있도록 패키지 설치 -sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common -``` +1. 공식 도커 리포지터리에서 `containerd.io` 패키지를 설치한다. 각 리눅스 배포판에 대한 도커 리포지터리를 설정하고 `containerd.io` 패키지를 설치하는 방법은 [도커 엔진 설치](https://docs.docker.com/engine/install/#server)에서 찾을 수 있다. -```shell -## 도커의 공식 GPG 키 추가 -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key --keyring /etc/apt/trusted.gpg.d/docker.gpg add - -``` +2. containerd 설정 -```shell -## 도커 apt 리포지터리 추가 -sudo add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ - $(lsb_release -cs) \ - stable" -``` + ```shell + sudo mkdir -p /etc/containerd + containerd config default | sudo tee /etc/containerd/config.toml + ``` -```shell -## containerd 설치 -sudo apt-get update && sudo apt-get install -y containerd.io -``` +3. containerd 재시작 -```shell -# containerd 구성 -sudo mkdir -p /etc/containerd -sudo containerd config default | sudo tee /etc/containerd/config.toml -``` + ```shell + sudo systemctl restart containerd + ``` -```shell -# containerd 재시작 -sudo systemctl restart containerd -``` {{% /tab %}} -{{% tab name="Ubuntu 18.04/20.04" %}} - -```shell -# (containerd 설치) -sudo apt-get update && sudo apt-get install -y containerd -``` - -```shell -# containerd 구성 -sudo mkdir -p /etc/containerd -sudo containerd config default | sudo tee /etc/containerd/config.toml -``` - -```shell -# containerd 재시작 -sudo systemctl restart containerd -``` -{{% /tab %}} -{{% tab name="Debian 9+" %}} - -```shell -# (containerd 설치) -## 리포지터리 설정 -### HTTPS를 통해 리포지터리를 사용할 수 있도록 패키지 설치 -sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common -``` - -```shell -## 도커의 공식 GPG 키 추가 -curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key --keyring /etc/apt/trusted.gpg.d/docker.gpg add - -``` - -```shell -## 도커 apt 리포지터리 추가 -sudo add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/debian \ - $(lsb_release -cs) \ - stable" -``` - -```shell -## containerd 설치 -sudo apt-get update && sudo apt-get install -y containerd.io -``` - -```shell -# 기본 containerd 구성 설정 -sudo mkdir -p /etc/containerd -containerd config default | sudo tee /etc/containerd/config.toml -``` - -```shell -# containerd 재시작 -sudo systemctl restart containerd -``` -{{% /tab %}} -{{% tab name="CentOS/RHEL 7.4+" %}} - -```shell -# (containerd 설치) -## 리포지터리 설정 -### 필요한 패키지 설치 -sudo yum install -y yum-utils device-mapper-persistent-data lvm2 -``` {{% /tab %}} {{% tab name="Windows (PowerShell)" %}} PowerShell 세션을 시작하고 `$Version`을 원하는 버전(예: `$Version="1.4.3"`)으로 설정한 후 다음 명령을 실행한다. 1. containerd 다운로드 ```powershell curl.exe -L https://github.com/containerd/containerd/releases/download/v$Version/containerd-$Version-windows-amd64.tar.gz -o containerd-windows-amd64.tar.gz tar.exe xvf .\containerd-windows-amd64.tar.gz ``` 2. 추출과 설정 -```shell -## 도커 리포지터리 추가 -sudo yum-config-manager \ - --add-repo \ - https://download.docker.com/linux/centos/docker-ce.repo -``` + ```powershell + Copy-Item -Path ".\bin\" -Destination "$Env:ProgramFiles\containerd" -Recurse -Force + cd $Env:ProgramFiles\containerd\ + .\containerd.exe config default | Out-File config.toml -Encoding ascii -```shell -# containerd 설치 -sudo yum update -y && sudo yum install -y containerd.io -``` + # 설정을 검토한다. 설정에 따라 다음을 조정할 수 있다. + # - sandbox_image (쿠버네티스 일시중지 이미지) + # - cni bin 폴더와 conf 폴더 위치 + Get-Content config.toml -```shell -## containerd 구성 -sudo mkdir -p /etc/containerd -sudo containerd config default | sudo tee /etc/containerd/config.toml -``` + # (선택사항 - 그러나 적극 권장함) Windows 디펜더 검사에서 containerd 제외 + Add-MpPreference -ExclusionProcess "$Env:ProgramFiles\containerd\containerd.exe" + ``` -```shell -# containerd 재시작 -sudo systemctl restart containerd -``` -{{% /tab %}} -{{% tab name="Windows (PowerShell)" %}} -```powershell -# (containerd 설치) -# containerd 다운로드 -cmd /c curl -OL https://github.com/containerd/containerd/releases/download/v1.4.1/containerd-1.4.1-windows-amd64.tar.gz -cmd /c tar xvf .\containerd-1.4.1-windows-amd64.tar.gz -``` + 3. containerd 실행 -```powershell -# 추출 및 구성 -Copy-Item -Path ".\bin\" -Destination "$Env:ProgramFiles\containerd" -Recurse -Force -cd $Env:ProgramFiles\containerd\ -.\containerd.exe config default | Out-File config.toml -Encoding ascii -# 구성을 검토한다. 설정에 따라 조정할 수 있다. -# - sandbox_image (쿠버네티스 pause 이미지) -# - cni bin_dir 및 conf_dir locations -Get-Content config.toml -``` -```powershell -# containerd 시작 -.\containerd.exe --register-service -Start-Service containerd -``` + ```powershell + .\containerd.exe --register-service + Start-Service containerd + ``` {{% /tab %}} {{< /tabs >}} -#### systemd {#containerd-systemd} +#### `systemd` cgroup 드라이버의 사용 {#containerd-systemd} `/etc/containerd/config.toml` 의 `systemd` cgroup 드라이버를 `runc` 에서 사용하려면, 다음과 같이 설정한다. @@ -257,8 +162,14 @@ Start-Service containerd SystemdCgroup = true ``` +이 변경 사항을 적용하는 경우 containerd를 재시작한다. + +```shell +sudo systemctl restart containerd +``` + kubeadm을 사용하는 경우, -[kubelet용 cgroup 드라이버](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-control-plane-node)를 수동으로 구성한다. +[kubelet용 cgroup 드라이버](/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#컨트롤-플레인-노드에서-kubelet이-사용하는-cgroup-드라이버-구성)를 수동으로 구성한다. ### CRI-O @@ -420,7 +331,7 @@ CRI-O를 시작한다. ```shell sudo systemctl daemon-reload -sudo systemctl start crio +sudo systemctl enable crio --now ``` 자세한 사항은 [CRI-O 설치 가이드](https://github.com/cri-o/cri-o/blob/master/install.md)를 참고한다. @@ -446,138 +357,38 @@ CRI-O의 cgroup 드라이버 구성을 동기화 상태로 ### 도커 -각 노드에 도커 CE를 설치한다. - -쿠버네티스 릴리스 정보에서 해당 버전의 쿠버네티스와 호환되는 -도커 버전을 찾을 수 있다. +1. 각 노드에서 [도커 엔진 설치](https://docs.docker.com/engine/install/#server)에 따라 리눅스 배포판용 도커를 설치한다. 이 [의존성 파일](https://git.k8s.io/kubernetes/build/dependencies.yaml)에서 검증된 최신 버전의 도커를 찾을 수 있다. -사용자의 시스템에서 다음의 명령을 이용해 도커를 설치한다. +2. 특히 컨테이너의 cgroup 관리에 systemd를 사용하도록 도커 데몬을 구성한다. -{{< tabs name="tab-cri-docker-installation" >}} -{{% tab name="Ubuntu 16.04+" %}} -```shell -# (도커 CE 설치) -## 리포지터리 설정 -### apt가 HTTPS로 리포지터리를 사용하는 것을 허용하기 위한 패키지 설치 -sudo apt-get update && sudo apt-get install -y \ - apt-transport-https ca-certificates curl software-properties-common gnupg2 -``` + ```shell + sudo mkdir /etc/docker + cat <<EOF | sudo tee /etc/docker/daemon.json + { + "exec-opts": ["native.cgroupdriver=systemd"], + "log-driver": "json-file", + "log-opts": { + "max-size": "100m" + }, + "storage-driver": "overlay2" + } + EOF + ``` + + {{< note >}} + `overlay2`는 리눅스 커널 4.0 이상 또는 3.10.0-514 버전 이상을 사용하는 RHEL 또는 CentOS를 구동하는 시스템에서 선호하는 스토리지 드라이버이다.
+ {{< /note >}} -```shell -# 도커 apt 리포지터리 추가: -sudo add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ - $(lsb_release -cs) \ - stable" -``` +3. 도커 재시작과 부팅시 실행되게 설정 -```shell -# 도커 CE 설치 -sudo apt-get update && sudo apt-get install -y \ - containerd.io=1.2.13-2 \ - docker-ce=5:19.03.11~3-0~ubuntu-$(lsb_release -cs) \ - docker-ce-cli=5:19.03.11~3-0~ubuntu-$(lsb_release -cs) -``` + ```shell + sudo systemctl enable docker + sudo systemctl daemon-reload + sudo systemctl restart docker + ``` -```shell -## /etc/docker 생성 -sudo mkdir /etc/docker -``` - -```shell -# 도커 데몬 설정 -cat <}} - -부팅 시 `docker` 서비스를 시작하려면, 다음 명령을 실행한다. - -```shell -sudo systemctl enable docker -``` - -자세한 내용은 [공식 도커 설치 가이드](https://docs.docker.com/engine/installation/)를 -참조한다. +{{< note >}} +더 자세한 내용은 + - [도커 데몬 설정](https://docs.docker.com/config/daemon/) + - [systemd로 도커 제어](https://docs.docker.com/config/daemon/systemd/) +{{< /note >}} diff --git a/content/ko/docs/setup/production-environment/tools/kops.md b/content/ko/docs/setup/production-environment/tools/kops.md index 4ec5386d2f1bb..9c241e496f58a 100644 --- a/content/ko/docs/setup/production-environment/tools/kops.md +++ b/content/ko/docs/setup/production-environment/tools/kops.md @@ -23,7 +23,7 @@ kops는 자동화된 프로비저닝 시스템인데, ## {{% heading "prerequisites" %}} -* [kubectl](/ko/docs/tasks/tools/install-kubectl/)을 반드시 설치해야 한다. +* [kubectl](/ko/docs/tasks/tools/)을 반드시 설치해야 한다. * 반드시 64-bit (AMD64 그리고 Intel 64)디바이스 아키텍쳐 위에서 `kops` 를 [설치](https://github.com/kubernetes/kops#installing) 한다. @@ -39,19 +39,19 @@ kops는 자동화된 프로비저닝 시스템인데, #### 설치 -[releases page](https://github.com/kubernetes/kops/releases)에서 kops를 다운로드 한다(소스코드로부터 빌드하는것도 역시 어렵지 않다). +[releases page](https://github.com/kubernetes/kops/releases)에서 kops를 다운로드한다(소스 코드로부터 빌드하는 것도 역시 편리하다). {{< tabs name="kops_installation" >}} {{% tab name="macOS" %}} -최신 버전의 릴리즈를 다운받는 명령어: +최신 버전의 릴리스를 다운받는 명령어: ```shell curl -LO https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-darwin-amd64 ``` -특정 버전을 다운로드 받는다면 명령의 다음부분을 특정 kops 버전으로 변경한다. +특정 버전을 다운로드 받는다면 명령의 다음 부분을 특정 kops 버전으로 변경한다. ```shell $(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4) @@ -84,7 +84,7 @@ brew update && brew install kops {{% /tab %}} {{% tab name="리눅스" %}} -최신 릴리즈를 다운로드 받는 명령어: +최신 릴리스를 다운로드 받는 명령어: ```shell curl -LO https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-linux-amd64 @@ -147,8 +147,8 @@ Route53 hosted zone은 서브도메인도 지원한다. 여러분의 hosted zone `dev` NS 레코드를 `example.com`에 생성한다. 만약 이것이 루트 도메인 네임이라면 이 NS 레코드들은 도메인 등록기관을 통해서 생성해야 한다(예를 들어, `example.com`는 `example.com`를 구매한 곳에서 설정 할 수 있다). -이 단계에서 문제가 되기 쉽다.(문제를 만드는 가장 큰 이유이다!) dig 툴을 실행해서 -클러스터 설정이 정확한지 한번 더 확인 한다. +route53 도메인 설정을 확인한다(문제를 만드는 가장 큰 이유이다!). dig 툴을 실행해서 +클러스터 설정이 정확한지 한번 더 확인한다. 
`dig NS dev.example.com` diff --git a/content/ko/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md b/content/ko/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md index 2e6252bf80ba2..358274d143313 100644 --- a/content/ko/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md +++ b/content/ko/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md @@ -76,9 +76,7 @@ kind: ClusterConfiguration kubernetesVersion: v1.16.0 scheduler: extraArgs: - address: 0.0.0.0 + bind-address: 0.0.0.0 config: /home/johndoe/schedconfig.yaml kubeconfig: /home/johndoe/kubeconfig.yaml ``` - - diff --git a/content/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md new file mode 100644 index 0000000000000..a7ce213fdadf1 --- /dev/null +++ b/content/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -0,0 +1,315 @@ +--- +title: kubeadm 설치하기 +content_type: task +weight: 10 +card: + name: setup + weight: 20 + title: kubeadm 설정 도구 설치 +--- + + + +이 페이지에서는 `kubeadm` 툴박스를 설치하는 방법을 보여준다. +이 설치 프로세스를 수행한 후 kubeadm으로 클러스터를 만드는 방법에 대한 자세한 내용은 [kubeadm을 사용하여 클러스터 생성하기](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) 페이지를 참고한다. + + + +## {{% heading "prerequisites" %}} + + +* 호환되는 리눅스 머신. 쿠버네티스 프로젝트는 데비안 기반 배포판, 레드햇 기반 배포판, 그리고 패키지 매니저를 사용하지 않는 경우에 대한 일반적인 가이드를 제공한다. +* 2 GB 이상의 램을 장착한 머신. (이 보다 작으면 사용자의 앱을 위한 공간이 거의 남지 않음) +* 2 이상의 CPU. +* 클러스터의 모든 머신에 걸친 전체 네트워크 연결. (공용 또는 사설 네트워크면 괜찮음) +* 모든 노드에 대해 고유한 호스트 이름, MAC 주소 및 product_uuid. 자세한 내용은 [여기](#verify-mac-address)를 참고한다. +* 컴퓨터의 특정 포트들 개방. 자세한 내용은 [여기](#check-required-ports)를 참고한다. +* 스왑의 비활성화. kubelet이 제대로 작동하게 하려면 **반드시** 스왑을 사용하지 않도록 설정한다. + + + + + +## MAC 주소 및 product_uuid가 모든 노드에 대해 고유한지 확인 {#verify-mac-address} +* 사용자는 `ip link` 또는 `ifconfig -a` 명령을 사용하여 네트워크 인터페이스의 MAC 주소를 확인할 수 있다. +* product_uuid는 `sudo cat /sys/class/dmi/id/product_uuid` 명령을 사용하여 확인할 수 있다. + +일부 가상 머신은 동일한 값을 가질 수 있지만 하드웨어 장치는 고유한 주소를 가질 +가능성이 높다. 쿠버네티스는 이러한 값을 사용하여 클러스터의 노드를 고유하게 식별한다. +이러한 값이 각 노드에 고유하지 않으면 설치 프로세스가 +[실패](https://github.com/kubernetes/kubeadm/issues/31)할 수 있다. + +## 네트워크 어댑터 확인 + +네트워크 어댑터가 두 개 이상이고, 쿠버네티스 컴포넌트가 디폴트 라우트(default route)에서 도달할 수 없는 +경우, 쿠버네티스 클러스터 주소가 적절한 어댑터를 통해 이동하도록 IP 경로를 추가하는 것이 좋다. + +## iptables가 브리지된 트래픽을 보게 하기 + +`br_netfilter` 모듈이 로드되었는지 확인한다. `lsmod | grep br_netfilter` 를 실행하면 된다. 명시적으로 로드하려면 `sudo modprobe br_netfilter` 를 실행한다. + +리눅스 노드의 iptables가 브리지된 트래픽을 올바르게 보기 위한 요구 사항으로, `sysctl` 구성에서 `net.bridge.bridge-nf-call-iptables` 가 1로 설정되어 있는지 확인해야 한다. 다음은 예시이다. + +```bash +cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf +br_netfilter +EOF + +cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf +net.bridge.bridge-nf-call-ip6tables = 1 +net.bridge.bridge-nf-call-iptables = 1 +EOF +sudo sysctl --system +``` + +## 필수 포트 확인 {#check-required-ports} + +### 컨트롤 플레인 노드 + +| 프로토콜 | 방향 | 포트 범위 | 목적 | 사용자 | +|----------|----------|------------|--------------------------|----------------------| +| TCP | 인바운드 | 6443* | 쿠버네티스 API 서버 | 모두 | +| TCP | 인바운드 | 2379-2380 | etcd 서버 클라이언트 API | kube-apiserver, etcd | +| TCP | 인바운드 | 10250 | kubelet API | 자체, 컨트롤 플레인 | +| TCP | 인바운드 | 10251 | kube-scheduler | 자체 | +| TCP | 인바운드 | 10252 | kube-controller-manager | 자체 | + +### 워커 노드 + +| 프로토콜 | 방향 | 포트 범위 | 목적 | 사용자 | +|----------|----------|-------------|-----------------|----------------------| +| TCP | 인바운드 | 10250 | kubelet API | 자체, 컨트롤 플레인 | +| TCP | 인바운드 | 30000-32767 | 노드포트 서비스† | 모두 | + +† [노드포트 서비스](/ko/docs/concepts/services-networking/service/)의 기본 포트 범위이다. + +별표(*)가 표시된 포트 번호는 재정의할 수 있으므로, 사용자 지정 포트도 열려 있는지 확인해야 한다. + +## 런타임 설치 {#installing-runtime} + +파드에서 컨테이너를 실행하기 위해, 쿠버네티스는 +{{< glossary_tooltip term_id="container-runtime" text="컨테이너 런타임" >}}을 사용한다. + +{{< tabs name="container_runtime" >}} +{{% tab name="리눅스 노드" %}} + +기본적으로, 쿠버네티스는 +{{< glossary_tooltip term_id="cri" text="컨테이너 런타임 인터페이스">}}(CRI)를 +사용하여 사용자가 선택한 컨테이너 런타임과 인터페이스한다. + +런타임을 지정하지 않으면, kubeadm은 잘 알려진 유닉스 도메인 소켓 목록을 검색하여 +설치된 컨테이너 런타임을 자동으로 감지하려고 한다. +다음 표에는 컨테이너 런타임 및 관련 소켓 경로가 나열되어 있다. + +{{< table caption = "컨테이너 런타임과 소켓 경로" >}} +| 런타임 | 유닉스 도메인 소켓 경로 | +|------------|-----------------------------------| +| 도커 | `/var/run/dockershim.sock` | +| containerd | `/run/containerd/containerd.sock` | +| CRI-O | `/var/run/crio/crio.sock` | +{{< /table >}} + +도커와 containerd가 모두 감지되면 도커가 우선시된다. 이것이 필요한 이유는 도커 18.09에서 +도커만 설치한 경우에도 containerd와 함께 제공되므로 둘 다 감지될 수 있기 +때문이다. +다른 두 개 이상의 런타임이 감지되면, kubeadm은 오류와 함께 종료된다. + +kubelet은 빌트인 `dockershim` CRI 구현을 통해 도커와 통합된다. + +자세한 내용은 [컨테이너 런타임](/ko/docs/setup/production-environment/container-runtimes/)을 +참고한다. +{{% /tab %}} +{{% tab name="다른 운영 체제" %}} +기본적으로, kubeadm은 컨테이너 런타임으로 {{< glossary_tooltip term_id="docker" >}}를 사용한다. +kubelet은 빌트인 `dockershim` CRI 구현을 통해 도커와 통합된다. + +자세한 내용은 [컨테이너 런타임](/ko/docs/setup/production-environment/container-runtimes/)을 +참고한다. +{{% /tab %}} +{{< /tabs >}} + + +## kubeadm, kubelet 및 kubectl 설치 + +모든 머신에 다음 패키지들을 설치한다. + +* `kubeadm`: 클러스터를 부트스트랩하는 명령이다. + +* `kubelet`: 클러스터의 모든 머신에서 실행되는 파드와 컨테이너 시작과 + 같은 작업을 수행하는 컴포넌트이다. + +* `kubectl`: 클러스터와 통신하기 위한 커맨드 라인 유틸리티이다. + +kubeadm은 `kubelet` 또는 `kubectl` 을 설치하거나 관리하지 **않으므로**, kubeadm이 +설치하려는 쿠버네티스 컨트롤 플레인의 버전과 일치하는지 +확인해야 한다. 그렇지 않으면, 예상치 못한 버그 동작으로 이어질 수 있는 +버전 차이(skew)가 발생할 위험이 있다. 그러나, kubelet과 컨트롤 플레인 사이에 _하나의_ +마이너 버전 차이가 지원되지만, kubelet 버전은 API 서버 버전 보다 +높을 수 없다. 예를 들어, 1.7.0 버전의 kubelet은 1.8.0 API 서버와 완전히 호환되어야 하지만, +그 반대의 경우는 아니다. + +`kubectl` 설치에 대한 정보는 [kubectl 설치 및 설정](/ko/docs/tasks/tools/)을 참고한다. + +{{< warning >}} +이 지침은 모든 시스템 업그레이드에서 모든 쿠버네티스 패키지를 제외한다. +이는 kubeadm 및 쿠버네티스를 +[업그레이드 하는 데 특별한 주의](/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)가 필요하기 때문이다. +{{< /warning >}} + +버전 차이에 대한 자세한 내용은 다음을 참고한다. + +* 쿠버네티스 [버전 및 버전-차이 정책](/docs/setup/release/version-skew-policy/) +* Kubeadm 관련 [버전 차이 정책](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#version-skew-policy) + +{{< tabs name="k8s_install" >}} +{{% tab name="데비안 기반 배포판" %}} + +1. `apt` 패키지 색인을 업데이트하고, 쿠버네티스 `apt` 리포지터리를 사용하는 데 필요한 패키지를 설치한다. + + ```shell + sudo apt-get update + sudo apt-get install -y apt-transport-https ca-certificates curl + ``` + +2. 구글 클라우드의 공개 사이닝 키를 다운로드 한다. + + ```shell + sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg + ``` + +3. 쿠버네티스 `apt` 리포지터리를 추가한다. + + ```shell + echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list + ``` + +4. `apt` 패키지 색인을 업데이트하고, kubelet, kubeadm, kubectl을 설치하고 해당 버전을 고정한다. + + ```shell + sudo apt-get update + sudo apt-get install -y kubelet kubeadm kubectl + sudo apt-mark hold kubelet kubeadm kubectl + ``` + +{{% /tab %}} +{{% tab name="레드햇 기반 배포판" %}} +```bash +cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo +[kubernetes] +name=Kubernetes +baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg +exclude=kubelet kubeadm kubectl +EOF + +# permissive 모드로 SELinux 설정(효과적으로 비활성화) +sudo setenforce 0 +sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config + +sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes + +sudo systemctl enable --now kubelet +``` +{{% /tab %}} +{{% tab name="패키지 매니저를 사용하지 않는 경우" %}} +CNI 플러그인 설치(대부분의 파드 네트워크에 필요): + +```bash +CNI_VERSION="v0.8.2" +sudo mkdir -p /opt/cni/bin +curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-amd64-${CNI_VERSION}.tgz" | sudo tar -C /opt/cni/bin -xz +``` + +명령어 파일을 다운로드할 디렉터리를 정의한다. + +{{< note >}} +`DOWNLOAD_DIR` 변수는 쓰기 가능한 디렉터리로 설정되어야 한다. +Flatcar Container Linux를 실행 중인 경우, `DOWNLOAD_DIR=/opt/bin` 을 설정한다.
+{{< /note >}} + +```bash +DOWNLOAD_DIR=/usr/local/bin +sudo mkdir -p $DOWNLOAD_DIR +``` + +crictl 설치(kubeadm / Kubelet 컨테이너 런타임 인터페이스(CRI)에 필요) + +```bash +CRICTL_VERSION="v1.17.0" +curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | sudo tar -C $DOWNLOAD_DIR -xz +``` + +`kubeadm`, `kubelet`, `kubectl` 설치 및 `kubelet` systemd 서비스 추가 + +```bash +RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)" +cd $DOWNLOAD_DIR +sudo curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl} +sudo chmod +x {kubeadm,kubelet,kubectl} + +RELEASE_VERSION="v0.4.0" +curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service" | sed "s:/usr/bin:${DOWNLOAD_DIR}:g" | sudo tee /etc/systemd/system/kubelet.service +sudo mkdir -p /etc/systemd/system/kubelet.service.d +curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/kubepkg/templates/latest/deb/kubeadm/10-kubeadm.conf" | sed "s:/usr/bin:${DOWNLOAD_DIR}:g" | sudo tee /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +``` + +`kubelet` 활성화 및 시작 + +```bash +systemctl enable --now kubelet +``` + +{{< note >}} +Flatcar Container Linux 배포판은 `/usr` 디렉터리를 읽기 전용 파일시스템으로 마운트한다. +클러스터를 부트스트랩하기 전에, 쓰기 가능한 디렉터리를 구성하기 위한 추가 단계를 수행해야 한다. +쓰기 가능한 디렉터리를 설정하는 방법을 알아 보려면 [Kubeadm 문제 해결 가이드](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/#usr-mounted-read-only/)를 참고한다. +{{< /note >}} +{{% /tab %}} +{{< /tabs >}} + + +kubelet은 이제 kubeadm이 수행할 작업을 알려 줄 때까지 크래시루프(crashloop) 상태로 +기다려야 하므로 몇 초마다 다시 시작된다. + +## cgroup 드라이버 구성 + +컨테이너 런타임과 kubelet은 +["cgroup 드라이버"](/ko/docs/setup/production-environment/container-runtimes/)라는 속성을 갖고 있으며, +cgroup 드라이버는 리눅스 머신의 cgroup 관리 측면에 있어서 중요하다. + +{{< warning >}} +컨테이너 런타임과 kubelet의 cgroup 드라이버를 일치시켜야 하며, 그렇지 않으면 kubelet 프로세스에 오류가 발생한다. + +더 자세한 사항은 [cgroup 드라이버 설정하기](/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/)를 참고한다. +{{< /warning >}} + +## 문제 해결 + +kubeadm에 문제가 있는 경우, [문제 해결 문서](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/)를 참고한다. + +## {{% heading "whatsnext" %}} + +* [kubeadm을 사용하여 클러스터 생성](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) diff --git a/content/ko/docs/setup/production-environment/tools/kubeadm/self-hosting.md b/content/ko/docs/setup/production-environment/tools/kubeadm/self-hosting.md deleted file mode 100644 index cfa18135b1bdb..0000000000000 --- a/content/ko/docs/setup/production-environment/tools/kubeadm/self-hosting.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -reviewers: -title: 컨트롤 플레인을 자체 호스팅하기 위해 쿠버네티스 클러스터 구성하기 -content_type: concept -weight: 100 ---- - - - -### 쿠버네티스 컨트롤 플레인 자체 호스팅하기 {#self-hosting} - -kubeadm은 실험적으로 _자체 호스팅_ 된 쿠버네티스 컨트롤 플레인을 만들 수 있도록 -해준다. API 서버, 컨트롤러 매니저 및 스케줄러와 같은 주요 구성 요소가 정적(static) 파일을 -통해 kubelet에 구성된 [스태틱(static) 파드](/ko/docs/tasks/configure-pod-container/static-pod/) -대신 쿠버네티스 API를 통해 구성된 [데몬셋(DaemonSet) 파드](/ko/docs/concepts/workloads/controllers/daemonset/) -로 실행된다. - -자체 호스팅된 클러스터를 만들려면 [kubeadm alpha selfhosting pivot](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-selfhosting) -명령어를 확인한다. - - - -#### 주의사항 - -{{< caution >}} -이 기능은 클러스터를 지원되지 않는 상태로 전환하여 더 이상 클러스터를 관리할 수 없게 만든다. -이것은 `kubeadm upgrade`를 포함한다. -{{< /caution >}} - -1. 1.8 이후 버전에서 자체 호스팅은 몇 가지 중요한 한계가 있다. 
- 특히 자체 호스팅된 클러스터는 수동 조정 없이는 - _컨트롤 플레인 노드를 재부팅하고 나서 복구할 수 없다._ - -1. 기본적으로 자체 호스팅된 컨트롤 플레인 파드는 - [`hostPath`](/ko/docs/concepts/storage/volumes/#hostpath) 볼륨에서 불러 온 - 자격 증명에 의존한다. 초기 생성을 제외하고, 이러한 자격 증명은 kubeadm에 의해 - 관리되지 않는다. - -1. 컨트롤 플레인의 자체 호스팅된 부분에는 스태틱 파드로 실행되는 etcd가 - 포함되지 않는다. - -#### 프로세스 - -자체 호스팅 부트스트랩 프로세스는 [kubeadm 설계 -문서](https://github.com/kubernetes/kubeadm/blob/master/docs/design/design_v1.9.md#optional-self-hosting)에 기록되어 있다. - -요약하면 `kubeadm alpha selfhosting`은 다음과 같이 작동한다. - - 1. 부트스트랩 스태틱 컨트롤 플레인이 실행되고 정상 상태가 될 때까지 기다린다. - 이것은 자체 호스팅이 없는 `kubeadm init` 프로세스와 동일하다. - - 1. 스태틱 컨트롤 플레인 파드 매니페스트를 사용하여 자체 호스팅된 컨트롤 - 플레인을 실행할 데몬셋 매니페스트 집합을 구성한다. 또한 필요한 경우 - 해당 매니페스트를 수정한다. 예를 들어, 시크릿을 위한 새로운 볼륨을 - 추가한다. - - 1. `kube-system` 네임스페이스에 데몬셋을 생성하고 결과 파드가 실행될 때까지 - 대기한다. - - 1. 일단 자체 호스팅된 파드가 동작하면 관련 스태틱 파드가 삭제되고 - kubeadm은 계속해서 다음 구성 요소를 설치한다. - 이것은 kubelet이 스태틱 파드를 멈추게 한다. - - 1. 기존의 컨트롤 플레인이 멈추면 새롭게 자체 호스팅된 컨트롤 플레인은 - 리스닝 포트에 바인딩하여 활성화할 수 있다. diff --git a/content/ko/docs/setup/production-environment/tools/kubespray.md b/content/ko/docs/setup/production-environment/tools/kubespray.md index f068061a7d9b5..301c7249270b1 100644 --- a/content/ko/docs/setup/production-environment/tools/kubespray.md +++ b/content/ko/docs/setup/production-environment/tools/kubespray.md @@ -22,7 +22,7 @@ Kubespray는 [Ansible](https://docs.ansible.com/) 플레이북, [인벤토리](h * Flatcar Container Linux by Kinvolk * 지속적인 통합 (CI) 테스트 -클러스터를 설치해 줄 도구로 유스케이스와 가장 잘 맞는 것을 고르고 싶다면, kubespray를 [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/), [kops](/ko/docs/setup/production-environment/tools/kops/)와 [비교한 글](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/comparisons.md)을 읽어보자. +클러스터를 설치해 줄 도구로 유스케이스와 가장 잘 맞는 것을 고르고 싶다면, kubespray를 [kubeadm](/ko/docs/reference/setup-tools/kubeadm/), [kops](/ko/docs/setup/production-environment/tools/kops/)와 [비교한 글](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/comparisons.md)을 읽어보자. @@ -68,7 +68,7 @@ Kubespray에서는 디플로이먼트의 많은 속성들을 사용자가 정의 * {{< glossary_tooltip term_id="cri-o" >}} * 인증서 생성 방법 -Kubespray의 [변수 파일들](https://docs.ansible.com/ansible/playbooks_variables.html)을 사용자가 정의할 수 있다. 만약 Kubespray를 막 시작한 경우, kubespray의 기본 설정값을 이용해 클러스터를 배포하고 Kubernetes를 탐색하는 것이 좋다. +Kubespray의 [변수 파일들](https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html)을 사용자가 정의할 수 있다. 만약 Kubespray를 처음 접하는 경우, kubespray의 기본 설정값을 이용해 클러스터를 배포하고 Kubernetes를 탐색하는 것이 좋다. ### (4/5) 클러스터 배포하기 diff --git a/content/ko/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md b/content/ko/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md index 0ea3dc0ce9063..cbe02c49e29a3 100644 --- a/content/ko/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md +++ b/content/ko/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md @@ -12,7 +12,7 @@ weight: 65 ## 쿠버네티스의 윈도우 컨테이너 -쿠버네티스에서 윈도우 컨테이너 오케스트레이션을 활성화하려면, 기존 리눅스 클러스터에 윈도우 노드를 포함하기만 하면 된다. 쿠버네티스의 {{< glossary_tooltip text="파드" term_id="pod" >}}에서 윈도우 컨테이너를 스케줄링하는 것은 리눅스 기반 컨테이너를 스케줄링하는 것만큼 간단하고 쉽다. +쿠버네티스에서 윈도우 컨테이너 오케스트레이션을 활성화하려면, 기존 리눅스 클러스터에 윈도우 노드를 포함한다. 쿠버네티스의 {{< glossary_tooltip text="파드" term_id="pod" >}}에서 윈도우 컨테이너를 스케줄링하는 것은 리눅스 기반 컨테이너를 스케줄링하는 것과 유사하다. 윈도우 컨테이너를 실행하려면, 쿠버네티스 클러스터에 리눅스를 실행하는 컨트롤 플레인 노드와 사용자의 워크로드 요구에 따라 윈도우 또는 리눅스를 실행하는 워커가 있는 여러 운영 체제가 포함되어 있어야 한다. 
윈도우 서버 2019는 윈도우에서 [쿠버네티스 노드](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node)를 활성화하는 유일한 윈도우 운영 체제이다(kubelet, [컨테이너 런타임](https://docs.microsoft.com/ko-kr/virtualization/windowscontainers/deploy-containers/containerd) 및 kube-proxy 포함). 윈도우 배포 채널에 대한 자세한 설명은 [Microsoft 문서](https://docs.microsoft.com/ko-kr/windows-server/get-started-19/servicing-channels-19)를 참고한다. @@ -33,7 +33,7 @@ weight: 65 쿠버네티스의 윈도우 운영 체제 지원은 다음 표를 참조한다. 단일 이기종 쿠버네티스 클러스터에는 윈도우 및 리눅스 워커 노드가 모두 있을 수 있다. 윈도우 컨테이너는 윈도우 노드에서, 리눅스 컨테이너는 리눅스 노드에서 스케줄되어야 한다. | 쿠버네티스 버전 | 윈도우 서버 LTSC 릴리스 | 윈도우 서버 SAC 릴리스 | -| --- | --- | --- | --- | +| --- | --- | --- | | *Kubernetes v1.17* | Windows Server 2019 | Windows Server ver 1809 | | *Kubernetes v1.18* | Windows Server 2019 | Windows Server ver 1809, Windows Server ver 1903, Windows Server ver 1909 | | *Kubernetes v1.19* | Windows Server 2019 | Windows Server ver 1909, Windows Server ver 2004 | @@ -218,7 +218,7 @@ CSI 노드 플러그인(특히 블록 디바이스 또는 공유 파일시스템 #### IPv4/IPv6 이중 스택 -`IPv6DualStack` [기능 게이트](https://kubernetes.io/ko/docs/reference/command-line-tools-reference/feature-gates/)를 사용하여 `l2bridge` 네트워크에 IPv4/IPv6 이중 스택 네트워킹을 활성화할 수 있다. 자세한 내용은 [IPv4/IPv6 이중 스택 활성화](/ko/docs/concepts/services-networking/dual-stack/#ipv4-ipv6-이중-스택-활성화)을 참조한다. +`IPv6DualStack` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 사용하여 `l2bridge` 네트워크에 IPv4/IPv6 이중 스택 네트워킹을 활성화할 수 있다. 자세한 내용은 [IPv4/IPv6 이중 스택 활성화](/ko/docs/concepts/services-networking/dual-stack/#ipv4-ipv6-이중-스택-활성화)를 참조한다. {{< note >}} 윈도우에서 쿠버네티스와 함께 IPv6를 사용하려면 윈도우 서버 버전 2004 (커널 버전 10.0.19041.610) 이상이 필요하다. @@ -230,23 +230,32 @@ CSI 노드 플러그인(특히 블록 디바이스 또는 공유 파일시스템 ### 제한 -#### 컨트롤 플레인 - 윈도우는 쿠버네티스 아키텍처 및 컴포넌트 매트릭스에서 워커 노드로만 지원된다. 즉, 쿠버네티스 클러스터에는 항상 리눅스 마스터 노드가 반드시 포함되어야 하고, 0개 이상의 리눅스 워커 노드 및 0개 이상의 윈도우 워커 노드가 포함된다. -#### 컴퓨트 - -##### 리소스 관리 및 프로세스 격리 +#### 자원 관리 리눅스 cgroup은 리눅스에서 리소스 제어를 위한 파드 경계로 사용된다. 컨테이너는 네트워크, 프로세스 및 파일시스템 격리를 위해 해당 경계 내에 생성된다. cgroups API는 cpu/io/memory 통계를 수집하는 데 사용할 수 있다. 반대로 윈도우는 시스템 네임스페이스 필터가 있는 컨테이너별로 잡(Job) 오브젝트를 사용하여 컨테이너의 모든 프로세스를 포함하고 호스트와의 논리적 격리를 제공한다. 네임스페이스 필터링 없이 윈도우 컨테이너를 실행할 수 있는 방법은 없다. 즉, 시스템 권한은 호스트 컨텍스트에서 삽입 될(assert) 수 없으므로 권한이 있는(privileged) 컨테이너는 윈도우에서 사용할 수 없다. 보안 계정 매니져(Security Account Manager, SAM)가 분리되어 있으므로 컨테이너는 호스트의 ID를 가정할 수 없다. -##### 운영 체제 제한 +#### 자원 예약 -윈도우에는 호스트 OS 버전이 컨테이너 베이스 이미지 OS 버전과 일치해야 하는 엄격한 호환성 규칙이 있다. 윈도우 서버 2019의 컨테이너 운영 체제가 있는 윈도우 컨테이너만 지원된다. 윈도우 컨테이너 이미지 버전의 일부 이전 버전과의 호환성을 가능하게 하는 컨테이너의 Hyper-V 격리는 향후 릴리스로 계획되어 있다. +##### 메모리 예약 +윈도우에는 리눅스에는 있는 메모리 부족 프로세스 킬러가 없다. 윈도우는 모든 사용자-모드 메모리 할당을 항상 가상 메모리처럼 처리하며, 페이지파일이 필수이다. 결과적으로 윈도우에서는 리눅스에서 발생할 수 있는 메모리 부족 상태에 도달하지 않으며, 프로세스는 메모리 부족 (out of memory, OOM) 종료를 겪는 대신 디스크로 페이징한다. 메모리가 오버프로비저닝되고 모든 물리 메모리가 고갈되면 페이징으로 인해 성능이 저하될 수 있다. + +kubelet 파라미터 `--kube-reserved` 를 사용하여 메모리 사용량을 합리적인 범위 내로 유지할 수 있으며, `--system-reserved` 를 사용하여 노드 (컨테이너 외부) 의 메모리 사용량을 예약할 수 있다. 이들을 사용하면 그만큼 [노드 할당(NodeAllocatable)](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable)은 줄어든다. + +{{< note >}} +워크로드를 배포할 때, 컨테이너에 리소스 제한을 설정한다 (제한만 설정하거나, 제한이 요청과 같아야 함). 이 또한 NodeAllocatable 에서 차감되며, 메모리가 꽉 찬 노드에 스케줄러가 파드를 할당하지 않도록 제한한다. +{{< /note >}} + +오버프로비저닝을 방지하는 가장 좋은 방법은 윈도우, 도커, 그리고 쿠버네티스 프로세스를 위해 최소 2GB 이상의 시스템 예약 메모리로 kubelet을 설정하는 것이다. -##### 기능 제한 +##### CPU 예약 +윈도우, 도커, 그리고 다른 쿠버네티스 호스트 프로세스가 이벤트에 잘 응답할 수 있도록, CPU의 일정 비율을 예약하는 것이 좋다. 이 값은 윈도우 노드에 있는 CPU 코어 수에 따라 조정해야 한다. 이 비율을 결정하려면, 각 노드의 최대 파드 밀도(density)를 관찰하고, 시스템 서비스의 CPU 사용량을 모니터링하여 워크로드 요구사항을 충족하는 값을 선택해야 한다. + +kubelet 파라미터 `--kube-reserved` 를 사용하여 CPU 사용량을 합리적인 범위 내로 유지할 수 있으며, `--system-reserved` 를 사용하여 노드 (컨테이너 외부) 의 CPU 사용량을 예약할 수 있다. 이들을 사용하면 그만큼 [노드 할당(NodeAllocatable)](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable)은 줄어든다. + +#### 기능 제한 +* TerminationGracePeriod: 구현되지 않음 * 단일 파일 매핑: CRI-ContainerD로 구현 예정 * 종료 메시지: CRI-ContainerD로 구현 예정 * 특권을 가진(Privileged) 컨테이너: 현재 윈도우 컨테이너에서 지원되지 않음 @@ -254,15 +263,8 @@ CSI 노드 플러그인(특히 블록 디바이스 또는 공유 파일시스템 * 기존 노드 문제 감지기는 리눅스 전용이며 특권을 가진 컨테이너가 필요하다. 윈도우에서 특권을 가진 컨테이너를 지원하지 않기 때문에 일반적으로 윈도우에서 이 기능이 사용될 것으로 예상하지 않는다. * 공유 네임스페이스의 모든 기능이 지원되는 것은 아니다. (자세한 내용은 API 섹션 참조). -##### 메모리 예약 및 처리 - -윈도우에는 리눅스처럼 out-of-memory 프로세스 킬러가 없다. 윈도우는 항상 모든 사용자 모드 메모리 할당을 가상으로 처리하며 페이지 파일은 필수이다. 결과적으로 윈도우는 리눅스와 같은 방식으로 메모리 부족 상태에 도달하지 않고, 메모리 부족(OOM)으로 인한 종료 대신 페이지를 디스크로 처리한다. 메모리가 과도하게 프로비저닝되고 모든 실제 메모리가 고갈되면, 페이징으로 인해 성능이 저하될 수 있다. - -2단계 프로세스를 통해 적절한 범위 내에서 메모리 사용량을 유지할 수 있다. 먼저, kubelet 파라미터 `--kubelet-reserve` 그리고/또는 `--system-reserve`를 사용하여 노드(컨테이너 외부)의 메모리 사용량을 고려한다. 이렇게 하면 [노드 할당(NodeAllocatable)](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable)이 줄어든다. 워크로드를 배포할 때 컨테이너에 리소스 제한을 사용(limits만 설정하거나 limits이 requests과 같아야 함)한다. 또한 NodeAllocatable에서 빼고 노드가 가득차면 스케줄러가 더 많은 파드를 추가하지 못하도록 한다. - -오버 프로비저닝을 방지하는 모범 사례는 윈도우, 도커 및 쿠버네티스 프로세스를 고려하여 최소 2GB의 시스템 예약 메모리로 kubelet을 구성하는 것이다. - -플래그의 동작은 아래에 설명된 대로 다르게 동작한다. +#### 각 플래그의 리눅스와의 차이점 +윈도우 노드에서의 kubelet 플래그는 아래에 설명된 대로 다르게 동작한다. * `--kube-reserved`, `--system-reserved`, `--eviction-hard` 플래그는 Node Allocatable 업데이트 * `--enforce-node-allocatable`을 사용한 축출(Eviction)은 구현되지 않았다. @@ -294,7 +296,7 @@ CSI 노드 플러그인(특히 블록 디바이스 또는 공유 파일시스템 * NFS 기반 스토리지/볼륨 지원 * 마운트된 볼륨 확장(resizefs) -#### 네트워킹 +#### 네트워킹 {#네트워킹-제한} 윈도우 컨테이너 네트워킹은 리눅스 네트워킹과 몇 가지 중요한 면에서 다르다. [윈도우 컨테이너 네트워킹에 대한 Microsoft 문서](https://docs.microsoft.com/ko-kr/virtualization/windowscontainers/container-networking/architecture)에는 추가 세부 정보와 배경이 포함되어 있다. @@ -303,8 +305,9 @@ CSI 노드 플러그인(특히 블록 디바이스 또는 공유 파일시스템 다음 네트워킹 기능은 윈도우 노드에서 지원되지 않는다. * 윈도우 파드에서는 호스트 네트워킹 모드를 사용할 수 없다. -* 노드 자체에서 로컬 NodePort 접근은 실패한다. (다른 노드 또는 외부 클라이언트에서 작동) +* 노드 자체에서 로컬 NodePort 접근은 실패한다. (다른 노드 또는 외부 클라이언트에서는 가능) * 노드에서 서비스 VIP에 접근하는 것은 향후 윈도우 서버 릴리스에서 사용할 수 있다. +* 한 서비스는 최대 64개의 백엔드 파드 또는 고유한 목적지 IP를 지원할 수 있다. * kube-proxy의 오버레이 네트워킹 지원은 알파 릴리스이다. 또한 윈도우 서버 2019에 [KB4482887](https://support.microsoft.com/ko-kr/help/4482887/windows-10-update-kb4482887)을 설치해야 한다. * 로컬 트래픽 정책 및 DSR 모드 * l2bridge, l2tunnel 또는 오버레이 네트워크에 연결된 윈도우 컨테이너는 IPv6 스택을 통한 통신을 지원하지 않는다. 이러한 네트워크 드라이버가 IPv6 주소를 사용하고 kubelet, kube-proxy 및 CNI 플러그인에서 후속 쿠버네티스 작업을 사용할 수 있도록 하는데 필요한 뛰어난 윈도우 플랫폼 작업이 있다. @@ -361,7 +364,7 @@ SELinux, AppArmor, Seccomp, 기능(POSIX 기능)과 같은 리눅스 특유의 * ID - 리눅스는 정수형으로 표시되는 userID(UID) 및 groupID(GID)를 사용한다. 사용자와 그룹 이름은 정식 이름이 아니다. UID+GID에 대한 `/etc/groups` 또는 `/etc/passwd`의 별칭일 뿐이다. 윈도우는 윈도우 보안 계정 관리자(Security Account Manager, SAM) 데이터베이스에 저장된 더 큰 이진 보안 식별자(SID)를 사용한다. 이 데이터베이스는 호스트와 컨테이너 간에 또는 컨테이너들 간에 공유되지 않는다. * 파일 퍼미션 - 윈도우는 권한 및 UUID+GID의 비트 마스크(bitmask) 대신 SID를 기반으로 하는 접근 제어 목록을 사용한다. -* 파일 경로 - 윈도우의 규칙은 `/` 대신 `\`를 사용하는 것이다. Go IO 라이브러리는 일반적으로 두 가지를 모두 허용하고 작동하도록 하지만, 컨테이너 내부에서 해석되는 경로 또는 커맨드 라인을 설정할 때 `\`가 필요할 수 있다. +* 파일 경로 - 윈도우의 규칙은 `/` 대신 `\`를 사용하는 것이다. Go IO 라이브러리는 두 가지 파일 경로 분리자를 모두 허용한다. 하지만, 컨테이너 내부에서 해석되는 경로 또는 커맨드 라인을 설정할 때 `\`가 필요할 수 있다.
* 신호(Signals) - 윈도우 대화형(interactive) 앱은 종료를 다르게 처리하며, 다음 중 하나 이상을 구현할 수 있다. * UI 스레드는 WM_CLOSE를 포함하여 잘 정의된(well-defined) 메시지를 처리한다. * 콘솔 앱은 컨트롤 핸들러(Control Handler)를 사용하여 ctrl-c 또는 ctrl-break를 처리한다. @@ -409,6 +412,10 @@ PodSecurityContext 필드는 윈도우에서 작동하지 않는다. 참조를 * V1.PodSecurityContext.SupplementalGroups - 윈도우에서는 사용할 수 없는 GID를 제공한다. * V1.PodSecurityContext.Sysctls - 이것들은 리눅스 sysctl 인터페이스의 일부이다. 윈도우에는 이에 상응하는 것이 없다. +#### 운영 체제 버전 제한 + +윈도우에는 호스트 OS 버전이 컨테이너 베이스 이미지 OS 버전과 일치해야 하는 엄격한 호환성 규칙이 있다. 윈도우 서버 2019의 컨테이너 운영 체제가 있는 윈도우 컨테이너만 지원된다. 윈도우 컨테이너 이미지 버전의 일부 이전 버전과의 호환성을 가능하게 하는 컨테이너의 Hyper-V 격리는 향후 릴리스로 계획되어 있다. + ## 도움 받기 및 트러블슈팅 {#troubleshooting} 쿠버네티스 클러스터 트러블슈팅을 위한 기본 도움말은 이 [섹션](/docs/tasks/debug-application-cluster/troubleshooting/)에서 먼저 찾아야 한다. 이 섹션에는 몇 가지 추가 윈도우 관련 트러블슈팅 도움말이 포함되어 있다. 로그는 쿠버네티스에서 트러블슈팅하는데 중요한 요소이다. 다른 기여자로부터 트러블슈팅 지원을 구할 때마다 이를 포함해야 한다. SIG-Windows [로그 수집에 대한 기여 가이드](https://github.com/kubernetes/community/blob/master/sig-windows/CONTRIBUTING.md#gathering-logs)의 지침을 따른다. @@ -544,7 +551,7 @@ PodSecurityContext 필드는 윈도우에서 작동하지 않는다. 참조를 1. `start.ps1`을 시작한 후, flanneld가 "Waiting for the Network to be created"에서 멈춘다. - 이 [조사 중인 이슈](https://github.com/coreos/flannel/issues/1066)에 대한 수많은 보고가 있다. 플란넬 네트워크의 관리 IP가 설정될 때 타이밍 이슈일 가능성이 높다. 해결 방법은 간단히 start.ps1을 다시 시작하거나 다음과 같이 수동으로 다시 시작하는 것이다. + 이 [이슈](https://github.com/coreos/flannel/issues/1066)에 대한 수많은 보고가 있다. 플란넬 네트워크의 관리 IP가 설정될 때의 타이밍 이슈일 가능성이 높다. 해결 방법은 start.ps1을 다시 시작하거나 다음과 같이 수동으로 다시 시작하는 것이다. ```powershell PS C:> [Environment]::SetEnvironmentVariable("NODE_NAME", "") diff --git a/content/ko/docs/setup/release/notes.md b/content/ko/docs/setup/release/notes.md index f833d270943be..05de7fb44086a 100644 --- a/content/ko/docs/setup/release/notes.md +++ b/content/ko/docs/setup/release/notes.md @@ -1,5 +1,5 @@ --- -title: v1.20 릴리스 노트 +title: v1.21 릴리스 노트 weight: 10 card: name: release-notes @@ -13,953 +13,760 @@ card: -# v1.20.0 +# v1.21.0 [문서](https://docs.k8s.io) -## v1.20.0 다운로드 +## v1.21.0 다운로드 파일명 | sha512 해시 -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes.tar.gz) | `ebfe49552bbda02807034488967b3b62bf9e3e507d56245e298c4c19090387136572c1fca789e772a5e8a19535531d01dcedb61980e42ca7b0461d3864df2c14` -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-src.tar.gz) | `bcbd67ed0bb77840828c08c6118ad0c9bf2bcda16763afaafd8731fd6ce735be654feef61e554bcc34c77c65b02a25dae565adc5e1dc49a2daaa0d115bf1efe6` +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes.tar.gz) | `19bb76a3fa5ce4b9f043b2a3a77c32365ab1fcb902d8dd6678427fb8be8f49f64a5a03dc46aaef9c7dadee05501cf83412eda46f0edacbb8fc1ed0bf5fb79142` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-src.tar.gz) | `f942e6d6c10007a6e9ce21e94df597015ae646a7bc3e515caf1a3b79f1354efb9aff59c40f2553a8e3d43fe4a01742241f5af18b69666244906ed11a22e3bc49` ### 클라이언트 바이너리 파일명 | sha512 해시 -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-darwin-amd64.tar.gz) | `3609f6483f4244676162232b3294d7a2dc40ae5bdd86a842a05aa768f5223b8f50e1d6420fd8afb2d0ce19de06e1d38e5e5b10154ba0cb71a74233e6dc94d5a0` -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-386.tar.gz) | `e06c08016a08137d39804383fdc33a40bb2567aa77d88a5c3fd5b9d93f5b581c635b2c4faaa718ed3bb2d120cb14fe91649ed4469ba72c3a3dda1e343db545ed` -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-amd64.tar.gz) | 
`081472833601aa4fa78e79239f67833aa4efcb4efe714426cd01d4ddf6f36fbf304ef7e1f5373bff0fdff44a845f7560165c093c108bd359b5ab4189f36b1f2f` -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-arm.tar.gz) | `037f84a2f29fe62d266cab38ac5600d058cce12cbc4851bcf062fafba796c1fbe23a0c2939cd15784854ca7cd92383e5b96a11474fc71fb614b47dbf98a477d9` -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-arm64.tar.gz) | `275727e1796791ca3cbe52aaa713a2660404eab6209466fdc1cfa8559c9b361fe55c64c6bcecbdeba536b6d56213ddf726e58adc60f959b6f77e4017834c5622` -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-ppc64le.tar.gz) | `7a9965293029e9fcdb2b7387467f022d2026953b8461e6c84182abf35c28b7822d2389a6d8e4d8e532d2ea5d5d67c6fee5fb6c351363cb44c599dc8800649b04` -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-s390x.tar.gz) | `85fc449ce1980f5f030cc32e8c8e2198c1cc91a448e04b15d27debc3ca56aa85d283f44b4f4e5fed26ac96904cc12808fa3e9af3d8bf823fc928befb9950d6f5` -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-windows-386.tar.gz) | `4c0a27dba1077aaee943e0eb7a787239dd697e1d968e78d1933c1e60b02d5d233d58541d5beec59807a4ffe3351d5152359e11da120bf64cacb3ee29fbc242e6` -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-windows-amd64.tar.gz) | `29336faf7c596539b8329afbbdceeddc843162501de4afee44a40616278fa1f284d8fc48c241fc7d52c65dab70f76280cc33cec419c8c5dbc2625d9175534af8` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-darwin-amd64.tar.gz) | `be9d1440e418e5253fb8a3d8aba705ca8160746a9bd17325ad626a986b6da9f733af864155a651a32b7bca94b533b8d596005ddbe5248bdeea85db47a1b957ed` +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-darwin-arm64.tar.gz) | `eed0ddc81d104bb2d41ace13f737c490423d5df4ebddc7376e45c18ed66af35933c9376b912c1c3da105945b04056f6ca0870c156bee8a307cf4189ca5eb1dd1` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-386.tar.gz) | `8a2f30c4434199762f2a96141dab4241c1cce2711bea9ea39cc63c2c5e7d31719ed7f076efac1931604e3a94578d3bbf0cfa454965708c96f3cfb91789868746` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-amd64.tar.gz) | `cd3cfa645fa31de3716f1f63506e31b73d2aa8d37bb558bb3b3e8c151f35b3d74d44e03cbd05be67e380f9a5d015aba460222afdac6677815cd99a85c2325cf0` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-arm.tar.gz) | `936042aa11cea0f6dfd2c30fc5dbe655420b34799bede036b1299a92d6831f589ca10290b73b9c9741560b603ae31e450ad024e273f2b4df5354bfac272691d8` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-arm64.tar.gz) | `42beb75364d7bf4bf526804b8a35bd0ab3e124b712e9d1f45c1b914e6be0166619b30695feb24b3eecef134991dacb9ab3597e788bd9e45cf35addddf20dd7f6` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-ppc64le.tar.gz) | `4baba2ed7046b28370eccc22e2378ae79e3ce58220d6f4f1b6791e8233bec8379e30200bb20b971456b83f2b791ea166fdfcf1ea56908bc1eea03590c0eda468` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-s390x.tar.gz) | `37fa0c4d703aef09ce68c10ef3e7362b0313c8f251ce38eea579cd18fae4023d3d2b70e0f31577cabe6958ab9cfc30e98d25a7c64e69048b423057c3cf728339` 
+[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-windows-386.tar.gz) | `6900db36c1e3340edfd6dfd8d720575a904c932d39a8a7fa36401595e971a0235bd42111dbcc1cbb77e7374e47f1380a68c637997c18f96a0d9cdc9f3714c4c9` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-windows-amd64.tar.gz) | `90de67f6f79fc63bcfdf35066e3d84501cc85433265ffad36fd1a7a428a31b446249f0644a1e97495ea8b2a08e6944df6ef30363003750339edaa2aceffe937c` ### 서버 바이너리 파일명 | sha512 해시 -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-amd64.tar.gz) | `fb56486a55dbf7dbacb53b1aaa690bae18d33d244c72a1e2dc95fb0fcce45108c44ba79f8fa04f12383801c46813dc33d2d0eb2203035cdce1078871595e446e` -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-arm.tar.gz) | `735ed9993071fe35b292bf06930ee3c0f889e3c7edb983195b1c8e4d7113047c12c0f8281fe71879fc2fcd871e1ee587f03b695a03c8512c873abad444997a19` -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-arm64.tar.gz) | `ffab155531d5a9b82487ee1abf4f6ef49626ea58b2de340656a762e46cf3e0f470bdbe7821210901fe1114224957c44c1d9cc1e32efb5ee24e51fe63990785b2` -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-ppc64le.tar.gz) | `9d5730d35c4ddfb4c5483173629fe55df35d1e535d96f02459468220ac2c97dc01b995f577432a6e4d1548b6edbfdc90828dc9c1f7cf7464481af6ae10aaf118` -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-s390x.tar.gz) | `6e4c165306940e8b99dd6e590f8542e31aed23d2c7a6808af0357fa425cec1a57016dd66169cf2a95f8eb8ef70e1f29e2d500533aae889e2e3d9290d04ab8721` +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-amd64.tar.gz) | `3941dcc2309ac19ec185603a79f5a086d8a198f98c04efa23f15a177e5e1f34946ea9392ba9f5d24d0d727839438f067fef1001fc6e88b27b8b01e35bbd962ca` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-arm.tar.gz) | `6507abf6c2ec2b336901dc23269f6c577ec0049b8bad3c9dd6ad63f21aa10f09bfbbfa6e064c2466d250411d3e10f8672791a9e10942e38de7bfbaf7a8bcc9da` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-arm64.tar.gz) | `5abe76f867ca6865344e957bf166b81766c049ec4eb183a8a5580c22a7f8474db1edf90fd901a5833e56128b6825811653a1d27f72fd34ce5b1287a8c10da05c` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-ppc64le.tar.gz) | `62507b182ca25396a285d91241536860e58f54fac937e97cbdf91948c83bb41be97d33277400489bf50e85164d560205540b76e94e5d519892312bdc63df1067` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-s390x.tar.gz) | `04f2a1f7d1388e4a7d7d9f597f872a3da36f26839cfed16aad6df07021c03f4dca1df06b19cfda56df09d1c2d9a13ebd0af40ca1b9b6aecfaf427ab7712d88f3` ### 노드 바이너리 파일명 | sha512 해시 -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-amd64.tar.gz) | `3e6c90561dd1c27fa1dff6953c503251c36001f7e0f8eff3ec918c74ae2d9aa25917d8ac87d5b4224b8229f620b1830442e6dce3b2a497043f8497eee3705696` -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-arm.tar.gz) | `26db385d9ae9a97a1051a638e7e3de22c4bbff389d5a419fe40d5893f9e4fa85c8b60a2bd1d370fd381b60c3ca33c5d72d4767c90898caa9dbd4df6bd116a247` -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-arm64.tar.gz) | 
`5b8b63f617e248432b7eb913285a8ef8ba028255216332c05db949666c3f9e9cb9f4c393bbd68d00369bda77abf9bfa2da254a5c9fe0d79ffdad855a77a9d8ed` -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-ppc64le.tar.gz) | `60da7715996b4865e390640525d6e98593ba3cd45c6caeea763aa5355a7f989926da54f58cc5f657f614c8134f97cd3894b899f8b467d100dca48bc22dd4ff63` -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-s390x.tar.gz) | `9407dc55412bd04633f84fcefe3a1074f3eaa772a7cb9302242b8768d6189b75d37677a959f91130e8ad9dc590f9ba8408ba6700a0ceff6827315226dd5ee1e6` -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-windows-amd64.tar.gz) | `9d4261af343cc330e6359582f80dbd6efb57d41f882747a94bbf47b4f93292d43dd19a86214d4944d268941622dfbc96847585e6fec15fddc4dbd93d17015fa8` +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-amd64.tar.gz) | `c1831c708109c31b3878e5a9327ea4b9e546504d0b6b00f3d43db78b5dd7d5114d32ac24a9a505f9cadbe61521f0419933348d2cd309ed8cfe3987d9ca8a7e2c` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-arm.tar.gz) | `b68dd5bcfc7f9ce2781952df40c8c3a64c29701beff6ac22f042d6f31d4de220e9200b7e8272ddf608114327770acdaf3cb9a34a0a5206e784bda717ea080e0f` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-arm64.tar.gz) | `7fa84fc500c28774ed25ca34b6f7b208a2bea29d6e8379f84b9f57bd024aa8fe574418cee7ee26edd55310716d43d65ae7b9cbe11e40c995fe2eac7f66bdb423` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-ppc64le.tar.gz) | `a4278b3f8e458e9581e01f0c5ba8443303c987988ee136075a8f2f25515d70ca549fbd2e4d10eefca816c75c381d62d71494bd70c47034ab47f8315bbef4ae37` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-s390x.tar.gz) | `8de2bc6f22f232ff534b45012986eac23893581ccb6c45bd637e40dbe808ce31d5a92375c00dc578bdbadec342b6e5b70c1b9f3d3a7bb26ccfde97d71f9bf84a` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-windows-amd64.tar.gz) | `b82e94663d330cff7a117f99a7544f27d0bc92b36b5a283b3c23725d5b33e6f15e0ebf784627638f22f2d58c58c0c2b618ddfd226a64ae779693a0861475d355` -## v1.19.0 이후 변경로그(Changelog) +## v1.20.0 이후 변경로그 (Changelog) -## 새로운 소식(주요 테마) +# v1.21.0-rc.0 릴리스 노트 -### Dockershim 사용 중단(deprecation) +[문서](https://docs.k8s.io/docs/home) -Docker as an underlying runtime is being deprecated. Docker-produced images will continue to work in your cluster with all runtimes, as they always have. -The Kubernetes community [has written a blog post about this in detail](https://blog.k8s.io/2020/12/02/dont-panic-kubernetes-and-docker/) with [a dedicated FAQ page for it](https://blog.k8s.io/2020/12/02/dockershim-faq/). +# v1.20.0 이후 변경로그 (Changelog) -### client-go를 위한 외부 자격증명(credential) 제공자 +## 새로운 소식 (주요 테마) -The client-go credential plugins can now be passed in the current cluster information via the `KUBERNETES_EXEC_INFO` environment variable. Learn more about this on [client-go credential plugins documentation](https://docs.k8s.io/reference/access-authn-authz/authentication/#client-go-credential-plugins/). +### Deprecation of PodSecurityPolicy -### 기능 게이트(feature gate)를 통해 크론잡(CronJob) 컨트롤러 v2 활성화 가능 +PSP as an admission controller resource is being deprecated. Deployed PodSecurityPolicies will keep working until version 1.25, their target removal from the codebase.
A new feature, with a working title of "PSP replacement policy", is being developed in [KEP-2579](https://features.k8s.io/2579). To learn more, read [PodSecurityPolicy Deprecation: Past, Present, and Future](https://blog.k8s.io/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/). -An alternative implementation of `CronJob` controller is now available as an alpha feature in this release, which has experimental performance improvement by using informers instead of polling. While this will be the default behavior in the future, you can [try them in this release through a feature gate](https://docs.k8s.io/concepts/workloads/controllers/cron-jobs/). +### Kubernetes API Reference Documentation -### PID 제한(PID Limits)이 안정 기능(General Availability)으로 전환 +The API reference is now generated with [`gen-resourcesdocs`](https://github.com/kubernetes-sigs/reference-docs/tree/c96658d89fb21037b7d00d27e6dbbe6b32375837/gen-resourcesdocs) and is moving to [Kubernetes API](https://docs.k8s.io/reference/kubernetes-api/). -PID Limits features are now generally available on both `SupportNodePidsLimit` (node-to-pod PID isolation) and `SupportPodPidsLimit` (ability to limit PIDs per pod), after being enabled-by-default in beta stage for a year. +### Kustomize Updates in Kubectl -### API 우선순위 및 공정성(API Priority and Fairness)이 베타 단계로 전환 +The [Kustomize](https://github.com/kubernetes-sigs/kustomize) version in kubectl jumped from v2.0.3 to [v4.0.5](https://github.com/kubernetes/kubernetes/pull/98946). Kustomize is now treated as a library, and future updates will be less sporadic. -Initially introduced in 1.18, Kubernetes 1.20 now enables API Priority and Fairness (APF) by default. This allows `kube-apiserver` to [categorize incoming requests by priority levels](https://docs.k8s.io/concepts/cluster-administration/flow-control/). +### Default Container Labels -### IPv4/IPv6이 작동 +A Pod with multiple containers can use the `kubectl.kubernetes.io/default-container` label to have a container preselected for kubectl commands. More can be read in [KEP-2227](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cli/2227-kubectl-default-container/README.md). -IPv4/IPv6 dual-stack has been reimplemented for 1.20 to support dual-stack Services, based on user and community feedback. If your cluster has dual-stack enabled, you can create Services which can use IPv4, IPv6, or both, and you can change this setting for existing Services. Details are available in updated [IPv4/IPv6 dual-stack docs](https://docs.k8s.io/concepts/services-networking/dual-stack/), which cover the nuanced array of options. +### Immutable Secrets and ConfigMaps -We expect this implementation to progress from alpha to beta and GA in coming releases, so we’re eager to have you comment about your dual-stack experiences in [#k8s-dual-stack](https://kubernetes.slack.com/messages/k8s-dual-stack) or in [enhancements #563](https://features.k8s.io/563). +Immutable Secrets and ConfigMaps graduate to GA. This feature allows users to specify that the contents of a particular Secret or ConfigMap are immutable for its object lifetime. For such instances, the kubelet will not watch/poll for changes, thereby reducing apiserver load (a minimal manifest sketch follows below). -### go1.15.5 +### Structured Logging in Kubelet -go1.15.5 has been integrated to Kubernets project as of this release, [including other infrastructure related updates on this effort](https://github.com/kubernetes/kubernetes/pull/95776).
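To make the immutable ConfigMap note above concrete, here is a minimal manifest sketch; the object name and data key are illustrative, not taken from the release notes. Once `immutable: true` is set, the data can no longer be changed; the object must be deleted and recreated to update it.

```yaml
# Minimal sketch of an immutable ConfigMap (name and data are illustrative).
# With immutable: true, the API server rejects later updates to data, and
# the kubelet stops watching/polling the object for changes.
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config   # hypothetical name
data:
  log-level: "info"  # hypothetical key/value
immutable: true
```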
+The kubelet has adopted structured logging, thanks to a community effort that accomplished this within the release timeline. Structured logging in the project remains an ongoing effort -- for folks interested in participating, [keep an eye on, or chime in to, the mailing list discussion](https://groups.google.com/g/kubernetes-dev/c/y4WIw-ntUR8). -### CSI 볼륨 스냅샷(CSI Volume Snapshot)이 안정 기능으로 전환 +### Storage Capacity Tracking -CSI Volume Snapshot moves to GA in the 1.20 release. This feature provides a standard way to trigger volume snapshot operations in Kubernetes and allows Kubernetes users to incorporate snapshot operations in a portable manner on any Kubernetes environment regardless of supporting underlying storage providers. -Additionally, these Kubernetes snapshot primitives act as basic building blocks that unlock the ability to develop advanced, enterprise grade, storage administration features for Kubernetes: including application or cluster level backup solutions. -Note that snapshot support will require Kubernetes distributors to bundle the Snapshot controller, Snapshot CRDs, and validation webhook. In addition, a CSI driver supporting the snapshot functionality must also be deployed on the cluster. +Traditionally, the Kubernetes scheduler was based on the assumption that additional persistent storage is available everywhere in the cluster and has infinite capacity. Topology constraints addressed the first point, but until now pod scheduling was still done without considering that the remaining storage capacity may not be enough to start a new pod. [Storage capacity tracking](https://docs.k8s.io/concepts/storage/storage-capacity/) addresses that by adding an API for a CSI driver to report storage capacity, and by using that information in the Kubernetes scheduler when choosing a node for a pod. This feature serves as a stepping stone for supporting dynamic provisioning for local volumes and other volume types that are more capacity constrained. -### 비재귀적 볼륨 소유(Non-recursive Volume Ownership (FSGroup))가 베타 단계로 전환 +### Generic Ephemeral Volumes -By default, the `fsgroup` setting, if specified, recursively updates permissions for every file in a volume on every mount. This can make mount, and pod startup, very slow if the volume has many files. -This setting enables a pod to specify a `PodFSGroupChangePolicy` that indicates that volume ownership and permissions will be changed only when permission and ownership of the root directory does not match with expected permissions on the volume. +The [generic ephemeral volumes](https://docs.k8s.io/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes) feature allows any existing storage driver that supports dynamic provisioning to be used as an ephemeral volume with the volume’s lifecycle bound to the Pod. It can be used to provide scratch storage that is different from the root disk, for example persistent memory, or a separate local disk on that node. All StorageClass parameters for volume provisioning are supported. All features available with PersistentVolumeClaims are supported, such as storage capacity tracking, snapshots and restore, and volume resizing (see the manifest sketch after this subsection). -### FSGroup를 위한 CSIDriver 정책이 베타 단계로 전환 +### CSI Service Account Token -The FSGroup's CSIDriver Policy is now beta in 1.20. This allows CSIDrivers to explicitly indicate if they want Kubernetes to manage permissions and ownership for their volumes via `fsgroup`. +The CSI Service Account Token feature moves to Beta in 1.21.
This feature improves the security posture and allows CSI drivers to receive pods' [bound service account tokens](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md). This feature also provides a knob to re-publish volumes so that short-lived volumes can be refreshed. -### CSI 드라이버의 보안성 향상(알파) +### CSI Health Monitoring -In 1.20, we introduce a new alpha feature `CSIServiceAccountToken`. This feature allows CSI drivers to impersonate the pods that they mount the volumes for. This improves the security posture in the mounting process where the volumes are ACL’ed on the pods’ service account without handing out unnecessary permissions to the CSI drivers’ service account. This feature is especially important for secret-handling CSI drivers, such as the secrets-store-csi-driver. Since these tokens can be rotated and short-lived, this feature also provides a knob for CSI drivers to receive `NodePublishVolume` RPC calls periodically with the new token. This knob is also useful when volumes are short-lived, e.g. certificates. - -### 그레이스풀 노드 종료(Graceful Node Shutdown) 기능 소개(알파) - -The `GracefulNodeShutdown` feature is now in Alpha. This allows kubelet to be aware of node system shutdowns, enabling graceful termination of pods during a system shutdown. This feature can be [enabled through feature gate](https://docs.k8s.io/concepts/architecture/nodes/#graceful-node-shutdown). - -### 런타임 로그 관리(sanitation) - -Logs can now be configured to use runtime protection from leaking sensitive data. [Details for this experimental feature is available in documentation](https://docs.k8s.io/concepts/cluster-administration/system-logs/#log-sanitization). - -### 파드 리소스 메트릭 - -On-demand metrics calculation is now available through `/metrics/resources`. [When enabled]( -https://docs.k8s.io/concepts/cluster-administration/system-metrics#kube-scheduler-metrics), the endpoint will report the requested resources and the desired limits of all running pods. - -### `RootCAConfigMap` 소개 - -`RootCAConfigMap` graduates to Beta, seperating from `BoundServiceAccountTokenVolume`. The `kube-root-ca.crt` ConfigMap is now available to every namespace, by default. It contains the Certificate Authority bundle for verify kube-apiserver connections. - -### `kubectl debug` 이 베타 단계로 전환 - -`kubectl alpha debug` graduates from alpha to beta in 1.20, becoming `kubectl debug`. -`kubectl debug` provides support for common debugging workflows directly from kubectl. Troubleshooting scenarios supported in this release of `kubectl` include: -Troubleshoot workloads that crash on startup by creating a copy of the pod that uses a different container image or command. -Troubleshoot distroless containers by adding a new container with debugging tools, either in a new copy of the pod or using an ephemeral container. (Ephemeral containers are an alpha feature that are not enabled by default.) -Troubleshoot on a node by creating a container running in the host namespaces and with access to the host’s filesystem. -Note that as a new builtin command, `kubectl debug` takes priority over any `kubectl` plugin named “debug”. You will need to rename the affected plugin. -Invocations using `kubectl alpha debug` are now deprecated and will be removed in a subsequent release. Update your scripts to use `kubectl debug` instead of `kubectl alpha debug`! 
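Returning to the generic ephemeral volumes note above: a minimal sketch of a Pod that requests one, assuming a hypothetical `fast-local` StorageClass; the image, size, and names are illustrative. The PVC is created from the template when the Pod is created and is deleted along with the Pod.

```yaml
# Minimal sketch of a generic ephemeral volume (StorageClass name, size,
# and image are illustrative). A PVC is generated from volumeClaimTemplate
# and shares the Pod's lifecycle.
apiVersion: v1
kind: Pod
metadata:
  name: ephemeral-demo
spec:
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: scratch
      mountPath: /scratch
  volumes:
  - name: scratch
    ephemeral:
      volumeClaimTemplate:
        spec:
          accessModes: ["ReadWriteOnce"]
          storageClassName: fast-local  # hypothetical StorageClass
          resources:
            requests:
              storage: 1Gi
```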
-For more information about kubectl debug, see Debugging Running Pods on the Kubernetes website, kubectl help debug, or reach out to SIG CLI by visiting #sig-cli or commenting on [enhancement #1441](https://features.k8s.io/1441). -### kubeadm에서 사용 중단된 플래그 삭제 -`kubeadm` applies a number of deprecations and removals of deprecated features in this release. More details are available in the Urgent Upgrade Notes and Kind / Deprecation sections. -### 파드의 호스트네임을 FQDN으로 사용하는 것이 베타 단계로 전환 -Previously introduced in 1.19 behind a feature gate, `SetHostnameAsFQDN` is now enabled by default. More details on this behavior is available in [documentation for DNS for Services and Pods](https://docs.k8s.io/concepts/services-networking/dns-pod-service/#pod-sethostnameasfqdn-field) -### `TokenRequest` / `TokenRequestProjection` 이 안정 기능으로 전환 -Service account tokens bound to pod is now a stable feature. The feature gates will be removed in 1.21 release. For more information, refer to notes below on the changelogs. -### 런타임클래스(RuntimeClass)가 안정 기능으로 전환 -The `node.k8s.io` API groups are promoted from `v1beta1` to `v1`. `v1beta1` is now deprecated and will be removed in a future release, please start using `v1`. ([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing] -### 클라우드 컨트롤러 관리자(Cloud Controller Manager)가 이제 각 클라우드 공급자를 통해서만 제공 -Kubernetes will no longer ship an instance of the Cloud Controller Manager binary. Each Cloud Provider is expected to ship their own instance of this binary. Details for a Cloud Provider to create an instance of such a binary can be found under [here](https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/cloud-provider/sample). Anyone with questions on building a Cloud Controller Manager should reach out to SIG Cloud Provider. Questions about the Cloud Controller Manager on a Managed Kubernetes solution should go to the relevant Cloud Provider. Questions about the Cloud Controller Manager on a non managed solution can be brought up with SIG Cloud Provider. +The CSI health monitoring feature is being released as a second Alpha in Kubernetes 1.21. This feature enables CSI Drivers to share abnormal volume conditions from the underlying storage systems with Kubernetes so that they can be reported as events on PVCs or Pods. This feature serves as a stepping stone towards programmatic detection and resolution of individual volume health issues by Kubernetes. ## 알려진 이슈 -### kubelet의 요약(Summary) API는 가속기(accelerator) 메트릭을 가지고 있지 않음 -Currently, cadvisor_stats_provider provides AcceleratorStats but cri_stats_provider does not. As a result, when using cri_stats_provider, kubelet's Summary API does not have accelerator metrics. [There is an open work in progress to fix this](https://github.com/kubernetes/kubernetes/pull/96873). +### `TopologyAwareHints` feature falls back to default behavior + +The feature gate currently falls back to the default behavior in most cases. Enabling the feature gate will add hints to `EndpointSlices`, but functional differences are only observed in the non-dual-stack kube-proxy implementation. [The fix will be available in coming releases](https://github.com/kubernetes/kubernetes/pull/100804). ## 긴급 업그레이드 노트 ### (주의. 업그레이드 전에 반드시 읽어야 함) -- A bug was fixed in kubelet where exec probe timeouts were not respected.
This may result in unexpected behavior since the default timeout (if not specified) is `1s` which may be too small for some exec probes. Ensure that pods relying on this behavior are updated to correctly handle probe timeouts. See [configure probe](https://docs.k8s.io/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes) section of the documentation for more details. - - - This change in behavior may be unexpected for some clusters and can be disabled by turning off the `ExecProbeTimeout` feature gate. This gate will be locked and removed in future releases so that exec probe timeouts are always respected. ([#94115](https://github.com/kubernetes/kubernetes/pull/94115), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Testing] -- RuntimeClass feature graduates to General Availability. Promote `node.k8s.io` API groups from `v1beta1` to `v1`. `v1beta1` is now deprecated and will be removed in a future release, please start using `v1`. ([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing] -- API priority and fairness graduated to beta. 1.19 servers with APF turned on should not be run in a multi-server cluster with 1.20+ servers. ([#96527](https://github.com/kubernetes/kubernetes/pull/96527), [@adtac](https://github.com/adtac)) [SIG API Machinery and Testing] -- For CSI drivers, kubelet no longer creates the target_path for NodePublishVolume in accordance with the CSI spec. Kubelet also no longer checks if staging and target paths are mounts or corrupted. CSI drivers need to be idempotent and do any necessary mount verification. ([#88759](https://github.com/kubernetes/kubernetes/pull/88759), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] -- Kubeadm: http://git.k8s.io/enhancements/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md ([#95382](https://github.com/kubernetes/kubernetes/pull/95382), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] - - The label applied to control-plane nodes "node-role.kubernetes.io/master" is now deprecated and will be removed in a future release after a GA deprecation period. - - Introduce a new label "node-role.kubernetes.io/control-plane" that will be applied in parallel to "node-role.kubernetes.io/master" until the removal of the "node-role.kubernetes.io/master" label. - - Make "kubeadm upgrade apply" add the "node-role.kubernetes.io/control-plane" label on existing nodes that only have the "node-role.kubernetes.io/master" label during upgrade. - - Please adapt your tooling built on top of kubeadm to use the "node-role.kubernetes.io/control-plane" label. - - The taint applied to control-plane nodes "node-role.kubernetes.io/master:NoSchedule" is now deprecated and will be removed in a future release after a GA deprecation period. - - Apply toleration for a new, future taint "node-role.kubernetes.io/control-plane:NoSchedule" to the kubeadm CoreDNS / kube-dns managed manifests. Note that this taint is not yet applied to kubeadm control-plane nodes. - - Please adapt your workloads to tolerate the same future taint preemptively. - -- Kubeadm: improve the validation of serviceSubnet and podSubnet. - ServiceSubnet has to be limited in size, due to implementation details, and the mask can not allocate more than 20 bits. 
- PodSubnet validates against the corresponding cluster "--node-cidr-mask-size" of the kube-controller-manager, it fail if the values are not compatible. - kubeadm no longer sets the node-mask automatically on IPv6 deployments, you must check that your IPv6 service subnet mask is compatible with the default node mask /64 or set it accordenly. - Previously, for IPv6, if the podSubnet had a mask lower than /112, kubeadm calculated a node-mask to be multiple of eight and splitting the available bits to maximise the number used for nodes. ([#95723](https://github.com/kubernetes/kubernetes/pull/95723), [@aojea](https://github.com/aojea)) [SIG Cluster Lifecycle] -- The deprecated flag --experimental-kustomize is now removed from kubeadm commands. Use --experimental-patches instead, which was introduced in 1.19. Migration infromation available in --help description for --exprimental-patches. ([#94871](https://github.com/kubernetes/kubernetes/pull/94871), [@neolit123](https://github.com/neolit123)) -- Windows hyper-v container featuregate is deprecated in 1.20 and will be removed in 1.21 ([#95505](https://github.com/kubernetes/kubernetes/pull/95505), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- The kube-apiserver ability to serve on an insecure port, deprecated since v1.10, has been removed. The insecure address flags `--address` and `--insecure-bind-address` have no effect in kube-apiserver and will be removed in v1.24. The insecure port flags `--port` and `--insecure-port` may only be set to 0 and will be removed in v1.24. ([#95856](https://github.com/kubernetes/kubernetes/pull/95856), [@knight42](https://github.com/knight42), [SIG API Machinery, Node, Testing]) -- Add dual-stack Services (alpha). This is a BREAKING CHANGE to an alpha API. - It changes the dual-stack API wrt Service from a single ipFamily field to 3 - fields: ipFamilyPolicy (SingleStack, PreferDualStack, RequireDualStack), - ipFamilies (a list of families assigned), and clusterIPs (inclusive of - clusterIP). Most users do not need to set anything at all, defaulting will - handle it for them. Services are single-stack unless the user asks for - dual-stack. This is all gated by the "IPv6DualStack" feature gate. ([#91824](https://github.com/kubernetes/kubernetes/pull/91824), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- `TokenRequest` and `TokenRequestProjection` are now GA features. The following flags are required by the API server: - - `--service-account-issuer`, should be set to a URL identifying the API server that will be stable over the cluster lifetime. - - `--service-account-key-file`, set to one or more files containing one or more public keys used to verify tokens. - - `--service-account-signing-key-file`, set to a file containing a private key to use to sign service account tokens. Can be the same file given to `kube-controller-manager` with `--service-account-private-key-file`. ([#95896](https://github.com/kubernetes/kubernetes/pull/95896), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle] -- kubeadm: make the command "kubeadm alpha kubeconfig user" accept a "--config" flag and remove the following flags: - - apiserver-advertise-address / apiserver-bind-port: use either localAPIEndpoint from InitConfiguration or controlPlaneEndpoint from ClusterConfiguration. 
- - cluster-name: use clusterName from ClusterConfiguration - - cert-dir: use certificatesDir from ClusterConfiguration ([#94879](https://github.com/kubernetes/kubernetes/pull/94879), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Resolves non-deterministic behavior of the garbage collection controller when ownerReferences with incorrect data are encountered. Events with a reason of `OwnerRefInvalidNamespace` are recorded when namespace mismatches between child and owner objects are detected. The [kubectl-check-ownerreferences](https://github.com/kubernetes-sigs/kubectl-check-ownerreferences) tool can be run prior to upgrading to locate existing objects with invalid ownerReferences. - - A namespaced object with an ownerReference referencing a uid of a namespaced kind which does not exist in the same namespace is now consistently treated as though that owner does not exist, and the child object is deleted. - - A cluster-scoped object with an ownerReference referencing a uid of a namespaced kind is now consistently treated as though that owner is not resolvable, and the child object is ignored by the garbage collector. ([#92743](https://github.com/kubernetes/kubernetes/pull/92743), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Testing] - +- Kube-proxy's IPVS proxy mode no longer sets the net.ipv4.conf.all.route_localnet sysctl parameter. Nodes being upgraded will have net.ipv4.conf.all.route_localnet set to 1, but new nodes will inherit the system default (usually 0). If you relied on any behavior requiring net.ipv4.conf.all.route_localnet, you must ensure it is enabled, as kube-proxy will no longer set it automatically. This change helps to further mitigate CVE-2020-8558. ([#92938](https://github.com/kubernetes/kubernetes/pull/92938), [@lbernail](https://github.com/lbernail)) [SIG Network and Release] + - Kubeadm: during "init" an empty cgroupDriver value in the KubeletConfiguration is now always set to "systemd" unless the user is explicit about it. This requires existing machine setups to configure the container runtime to use the "systemd" driver. Documentation on this topic can be found here: https://kubernetes.io/docs/setup/production-environment/container-runtimes/. When upgrading existing clusters / nodes using "kubeadm upgrade", the old cgroupDriver value is preserved, but in 1.22 this change will also apply to "upgrade". For more information on migrating to the "systemd" driver or remaining on the "cgroupfs" driver see: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/. ([#99471](https://github.com/kubernetes/kubernetes/pull/99471), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] + - PVs newly provisioned by the EBS plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels. They will use the "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead. ([#99130](https://github.com/kubernetes/kubernetes/pull/99130), [@ayberk](https://github.com/ayberk)) [SIG Cloud Provider, Storage and Testing] + - PVs newly provisioned by the OpenStack Cinder plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels. They will use the "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead.
([#99719](https://github.com/kubernetes/kubernetes/pull/99719), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider and Storage] + - PVs newly provisioned by gce-pd will no longer have the beta FailureDomain label. The gce-pd volume plugin will use the GA topology labels instead. ([#98700](https://github.com/kubernetes/kubernetes/pull/98700), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider, Storage and Testing] + - OpenStack Cinder CSI migration is on by default; the Cinder CSI driver must be installed on OpenStack clusters for Cinder volumes to work. ([#98538](https://github.com/kubernetes/kubernetes/pull/98538), [@dims](https://github.com/dims)) [SIG Storage] + - Remove the alpha `CSIMigrationXXComplete` flag and add the alpha `InTreePluginXXUnregister` flag. Deprecate the `CSIMigrationvSphereComplete` flag; it will be removed in v1.22. ([#98243](https://github.com/kubernetes/kubernetes/pull/98243), [@Jiawei0227](https://github.com/Jiawei0227)) + - Remove the storage metric `storage_operation_errors_total`, since we already have `storage_operation_status_count`. Also add a new `status` field to `storage_operation_duration_seconds`, so that storage operation latency can be broken down by status. ([#98332](https://github.com/kubernetes/kubernetes/pull/98332), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage] + - The metric `storage_operation_errors_total` is not removed, but is marked deprecated, and the metric `storage_operation_status_count` is marked deprecated. In both cases the `storage_operation_duration_seconds` metric can be used to recover equivalent counts (using `status=fail-unknown` in the case of `storage_operations_errors_total`). ([#99045](https://github.com/kubernetes/kubernetes/pull/99045), [@mattcary](https://github.com/mattcary)) + - `ServiceNodeExclusion`, `NodeDisruptionExclusion` and `LegacyNodeRoleBehavior` features have been promoted to GA. `ServiceNodeExclusion` and `NodeDisruptionExclusion` are now unconditionally enabled, while `LegacyNodeRoleBehavior` is unconditionally disabled. To prevent control plane nodes from being added to load balancers automatically, users performing upgrades need to add the "node.kubernetes.io/exclude-from-external-load-balancers" label to control plane nodes. ([#97543](https://github.com/kubernetes/kubernetes/pull/97543), [@pacoxu](https://github.com/pacoxu)) ## 종류(Kind)별 변경 사항 ### 사용 중단 -- Docker support in the kubelet is now deprecated and will be removed in a future release. The kubelet uses a module called "dockershim" which implements CRI support for Docker and it has seen maintenance issues in the Kubernetes community. We encourage you to evaluate moving to a container runtime that is a full-fledged implementation of CRI (v1alpha1 or v1 compliant) as they become available. ([#94624](https://github.com/kubernetes/kubernetes/pull/94624), [@dims](https://github.com/dims)) [SIG Node] -- Kubeadm: deprecate self-hosting support. The experimental command "kubeadm alpha self-hosting" is now deprecated and will be removed in a future release. ([#95125](https://github.com/kubernetes/kubernetes/pull/95125), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: graduate the "kubeadm alpha certs" command to a parent command "kubeadm certs". The command "kubeadm alpha certs" is deprecated and will be removed in a future release. Please migrate.
([#94938](https://github.com/kubernetes/kubernetes/pull/94938), [@yagonobre](https://github.com/yagonobre)) [SIG Cluster Lifecycle] -- Kubeadm: remove the deprecated "kubeadm alpha kubelet config enable-dynamic" command. To continue using the feature please defer to the guide for "Dynamic Kubelet Configuration" at k8s.io. This change also removes the parent command "kubeadm alpha kubelet" as there are no more sub-commands under it for the time being. ([#94668](https://github.com/kubernetes/kubernetes/pull/94668), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: remove the deprecated --kubelet-config flag for the command "kubeadm upgrade node" ([#94869](https://github.com/kubernetes/kubernetes/pull/94869), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubectl: deprecate --delete-local-data ([#95076](https://github.com/kubernetes/kubernetes/pull/95076), [@dougsland](https://github.com/dougsland)) [SIG CLI, Cloud Provider and Scalability] -- Kubelet's deprecated endpoint `metrics/resource/v1alpha1` has been removed, please adopt `metrics/resource`. ([#94272](https://github.com/kubernetes/kubernetes/pull/94272), [@RainbowMango](https://github.com/RainbowMango)) [SIG Instrumentation and Node] -- Removes deprecated scheduler metrics DeprecatedSchedulingDuration, DeprecatedSchedulingAlgorithmPredicateEvaluationSecondsDuration, DeprecatedSchedulingAlgorithmPriorityEvaluationSecondsDuration ([#94884](https://github.com/kubernetes/kubernetes/pull/94884), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling] -- Scheduler alpha metrics binding_duration_seconds and scheduling_algorithm_preemption_evaluation_seconds are deprecated, Both of those metrics are now covered as part of framework_extension_point_duration_seconds, the former as a PostFilter the latter and a Bind plugin. The plan is to remove both in 1.21 ([#95001](https://github.com/kubernetes/kubernetes/pull/95001), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling] -- Support 'controlplane' as a valid EgressSelection type in the EgressSelectorConfiguration API. 'Master' is deprecated and will be removed in v1.22. ([#95235](https://github.com/kubernetes/kubernetes/pull/95235), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] -- The v1alpha1 PodPreset API and admission plugin has been removed with no built-in replacement. Admission webhooks can be used to modify pods on creation. ([#94090](https://github.com/kubernetes/kubernetes/pull/94090), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Apps, CLI, Cloud Provider, Scalability and Testing] - +- Aborting the drain command partway through a list of nodes will be deprecated. The new behavior will make the drain command process all nodes even if one or more of them fails during the drain. For now, users can try this behavior by enabling the --ignore-errors flag. ([#98203](https://github.com/kubernetes/kubernetes/pull/98203), [@yuzhiquan](https://github.com/yuzhiquan)) +- Delete the deprecated `service.beta.kubernetes.io/azure-load-balancer-mixed-protocols` mixed protocol annotation in favor of the MixedProtocolLBService feature ([#97096](https://github.com/kubernetes/kubernetes/pull/97096), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Deprecate the `topologyKeys` field in Service.
This capability will be replaced with upcoming work around Topology Aware Subsetting and Service Internal Traffic Policy. ([#96736](https://github.com/kubernetes/kubernetes/pull/96736), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] +- Kube-proxy: remove the deprecated --cleanup-ipvs flag of kube-proxy, and make the --cleanup flag always flush IPVS ([#97336](https://github.com/kubernetes/kubernetes/pull/97336), [@maaoBit](https://github.com/maaoBit)) [SIG Network] +- Kubeadm: the deprecated command "alpha selfhosting pivot" is now removed. ([#97627](https://github.com/kubernetes/kubernetes/pull/97627), [@knight42](https://github.com/knight42)) +- Kubeadm: graduate the command `kubeadm alpha kubeconfig user` to `kubeadm kubeconfig user`. The `kubeadm alpha kubeconfig user` command is now deprecated. ([#97583](https://github.com/kubernetes/kubernetes/pull/97583), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] +- Kubeadm: the "kubeadm alpha certs" command is now removed; please use "kubeadm certs" instead. ([#97706](https://github.com/kubernetes/kubernetes/pull/97706), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] +- Kubeadm: the deprecated kube-dns is no longer supported as an option. If "ClusterConfiguration.dns.type" is set to "kube-dns", kubeadm will now throw an error. ([#99646](https://github.com/kubernetes/kubernetes/pull/99646), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Kubectl: The deprecated `kubectl alpha debug` command is removed. Use `kubectl debug` instead. ([#98111](https://github.com/kubernetes/kubernetes/pull/98111), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI] +- Official support for building Kubernetes with docker-machine / remote docker is removed. This change does not affect building Kubernetes with docker locally. ([#97935](https://github.com/kubernetes/kubernetes/pull/97935), [@adeniyistephen](https://github.com/adeniyistephen)) [SIG Release and Testing] +- Remove the deprecated `--generator, --replicas, --service-generator, --service-overrides, --schedule` flags from `kubectl run`. Deprecate `--serviceaccount, --hostport, --requests, --limits` in `kubectl run`. ([#99732](https://github.com/kubernetes/kubernetes/pull/99732), [@soltysh](https://github.com/soltysh)) +- Remove the deprecated metrics "scheduling_algorithm_preemption_evaluation_seconds" and "binding_duration_seconds"; use "scheduler_framework_extension_point_duration_seconds" instead. ([#96447](https://github.com/kubernetes/kubernetes/pull/96447), [@chendave](https://github.com/chendave)) [SIG Cluster Lifecycle, Instrumentation, Scheduling and Testing] +- Remove experimental Windows container Hyper-V support with Docker ([#97141](https://github.com/kubernetes/kubernetes/pull/97141), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] +- Rename the metric `etcd_object_counts` to `apiserver_storage_object_counts` and mark it as stable. The original `etcd_object_counts` metric name is marked as "Deprecated" and will be removed in the future. ([#99785](https://github.com/kubernetes/kubernetes/pull/99785), [@erain](https://github.com/erain)) [SIG API Machinery, Instrumentation and Testing] +- The GA TokenRequest and TokenRequestProjection feature gates have been removed and are unconditionally enabled.
Remove explicit use of those feature gates in CLI invocations. ([#97148](https://github.com/kubernetes/kubernetes/pull/97148), [@wawa0210](https://github.com/wawa0210)) [SIG Node] +- The PodSecurityPolicy API is deprecated in 1.21, and will no longer be served starting in 1.25. ([#97171](https://github.com/kubernetes/kubernetes/pull/97171), [@deads2k](https://github.com/deads2k)) [SIG Auth and CLI] +- The `batch/v2alpha1` CronJob type definitions and clients are deprecated and removed. ([#96987](https://github.com/kubernetes/kubernetes/pull/96987), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing] +- The `export` query parameter (inconsistently supported by API resources and deprecated in v1.14) is fully removed. Requests setting this query parameter will now receive a 400 status response. ([#98312](https://github.com/kubernetes/kubernetes/pull/98312), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth and Testing] +- `audit.k8s.io/v1beta1` and `audit.k8s.io/v1alpha1` audit policy configuration and audit events are deprecated in favor of `audit.k8s.io/v1`, available since v1.13. kube-apiserver invocations that specify alpha or beta policy configurations with `--audit-policy-file`, or explicitly request alpha or beta audit events with `--audit-log-version` / `--audit-webhook-version` must update to use `audit.k8s.io/v1` and accept `audit.k8s.io/v1` events prior to v1.24. ([#98858](https://github.com/kubernetes/kubernetes/pull/98858), [@carlory](https://github.com/carlory)) [SIG Auth] +- `discovery.k8s.io/v1beta1` EndpointSlices are deprecated in favor of `discovery.k8s.io/v1`, and will no longer be served in Kubernetes v1.25. ([#100472](https://github.com/kubernetes/kubernetes/pull/100472), [@liggitt](https://github.com/liggitt)) +- The `diskformat` StorageClass parameter for the in-tree vSphere volume plugin is deprecated as of the v1.21 release. Please consider updating your StorageClasses and removing the `diskformat` parameter; the vSphere CSI Driver does not support the `diskformat` StorageClass parameter (see the StorageClass sketch at the end of this section). + + vSphere releases older than 67u3 are deprecated as of v1.21. Please consider upgrading vSphere to 67u3 or above; the vSphere CSI Driver requires vSphere 67u3 at minimum. + + VM hardware versions below 15 are deprecated as of v1.21. Please consider upgrading the node VM hardware version to 15 or above; the vSphere CSI Driver recommends a node VM hardware version of at least vmx-15. + + Multi-vCenter support is deprecated as of v1.21. If you have a Kubernetes cluster spanning multiple vCenter servers, please consider moving all Kubernetes nodes to a single vCenter server; the vSphere CSI Driver does not support deployments that span multiple vCenter servers. + + Support for these deprecations will be available until Kubernetes v1.24. ([#98546](https://github.com/kubernetes/kubernetes/pull/98546), [@divyenpatel](https://github.com/divyenpatel)) ### API 변경 -- `TokenRequest` and `TokenRequestProjection` features have been promoted to GA. This feature allows generating service account tokens that are not visible in Secret objects and are tied to the lifetime of a Pod object. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection for details on configuring and using this feature. The `TokenRequest` and `TokenRequestProjection` feature gates will be removed in v1.21.
- - kubeadm's kube-apiserver Pod manifest now includes the following flags by default "--service-account-key-file", "--service-account-signing-key-file", "--service-account-issuer". ([#93258](https://github.com/kubernetes/kubernetes/pull/93258), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle, Storage and Testing] -- A new `nofuzz` go build tag now disables gofuzz support. Release binaries enable this. ([#92491](https://github.com/kubernetes/kubernetes/pull/92491), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery] -- Add WindowsContainerResources and Annotations to CRI-API UpdateContainerResourcesRequest ([#95741](https://github.com/kubernetes/kubernetes/pull/95741), [@katiewasnothere](https://github.com/katiewasnothere)) [SIG Node] -- Add a `serving` and `terminating` condition to the EndpointSlice API. - `serving` tracks the readiness of endpoints regardless of their terminating state. This is distinct from `ready` since `ready` is only true when pods are not terminating. - `terminating` is true when an endpoint is terminating. For pods this is any endpoint with a deletion timestamp. ([#92968](https://github.com/kubernetes/kubernetes/pull/92968), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps and Network] -- Add dual-stack Services (alpha). This is a BREAKING CHANGE to an alpha API. - It changes the dual-stack API wrt Service from a single ipFamily field to 3 - fields: ipFamilyPolicy (SingleStack, PreferDualStack, RequireDualStack), - ipFamilies (a list of families assigned), and clusterIPs (inclusive of - clusterIP). Most users do not need to set anything at all, defaulting will - handle it for them. Services are single-stack unless the user asks for - dual-stack. This is all gated by the "IPv6DualStack" feature gate. ([#91824](https://github.com/kubernetes/kubernetes/pull/91824), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- Add support for hugepages to downward API ([#86102](https://github.com/kubernetes/kubernetes/pull/86102), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- Adds kubelet alpha feature, `GracefulNodeShutdown` which makes kubelet aware of node system shutdowns and result in graceful termination of pods during a system shutdown. ([#96129](https://github.com/kubernetes/kubernetes/pull/96129), [@bobbypage](https://github.com/bobbypage)) [SIG Node] -- AppProtocol is now GA for Endpoints and Services. The ServiceAppProtocol feature gate will be deprecated in 1.21. ([#96327](https://github.com/kubernetes/kubernetes/pull/96327), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- Automatic allocation of NodePorts for services with type LoadBalancer can now be disabled by setting the (new) parameter - Service.spec.allocateLoadBalancerNodePorts=false. The default is to allocate NodePorts for services with type LoadBalancer which is the existing behavior. ([#92744](https://github.com/kubernetes/kubernetes/pull/92744), [@uablrek](https://github.com/uablrek)) [SIG Apps and Network] -- Certain fields on Service objects will be automatically cleared when changing the service's `type` to a mode that does not need those fields. 
For example, changing from type=LoadBalancer to type=ClusterIP will clear the NodePort assignments, rather than forcing the user to clear them. ([#95196](https://github.com/kubernetes/kubernetes/pull/95196), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, Network and Testing] -- Document that ServiceTopology feature is required to use `service.spec.topologyKeys`. ([#96528](https://github.com/kubernetes/kubernetes/pull/96528), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] -- EndpointSlice has a new NodeName field guarded by the EndpointSliceNodeName feature gate. - - EndpointSlice topology field will be deprecated in an upcoming release. - - EndpointSlice "IP" address type is formally removed after being deprecated in Kubernetes 1.17. - - The discovery.k8s.io/v1alpha1 API is deprecated and will be removed in Kubernetes 1.21. ([#96440](https://github.com/kubernetes/kubernetes/pull/96440), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps and Network] -- External facing API podresources is now available under k8s.io/kubelet/pkg/apis/ ([#92632](https://github.com/kubernetes/kubernetes/pull/92632), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node and Testing] -- Fewer candidates are enumerated for preemption to improve performance in large clusters. ([#94814](https://github.com/kubernetes/kubernetes/pull/94814), [@adtac](https://github.com/adtac)) -- Fix conversions for custom metrics. ([#94481](https://github.com/kubernetes/kubernetes/pull/94481), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation] -- GPU metrics provided by kubelet are now disabled by default. ([#95184](https://github.com/kubernetes/kubernetes/pull/95184), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) -- If BoundServiceAccountTokenVolume is enabled, cluster admins can use metric `serviceaccount_stale_tokens_total` to monitor workloads that are depending on the extended tokens. If there are no such workloads, turn off extended tokens by starting `kube-apiserver` with flag `--service-account-extend-token-expiration=false` ([#96273](https://github.com/kubernetes/kubernetes/pull/96273), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Auth] -- Introduce alpha support for exec-based container registry credential provider plugins in the kubelet. ([#94196](https://github.com/kubernetes/kubernetes/pull/94196), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Release] -- Introduces a metric source for HPAs which allows scaling based on container resource usage. ([#90691](https://github.com/kubernetes/kubernetes/pull/90691), [@arjunrn](https://github.com/arjunrn)) [SIG API Machinery, Apps, Autoscaling and CLI] -- Kube-apiserver now deletes expired kube-apiserver Lease objects: - - The feature is under feature gate `APIServerIdentity`. 
- - A flag is added to kube-apiserver: `identity-lease-garbage-collection-check-period-seconds` ([#95895](https://github.com/kubernetes/kubernetes/pull/95895), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Apps, Auth and Testing] -- Kube-controller-manager: volume plugins can be restricted from contacting local and loopback addresses by setting `--volume-host-allow-local-loopback=false`, or from contacting specific CIDR ranges by setting `--volume-host-cidr-denylist` (for example, `--volume-host-cidr-denylist=127.0.0.1/28,feed::/16`) ([#91785](https://github.com/kubernetes/kubernetes/pull/91785), [@mattcary](https://github.com/mattcary)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing] -- Migrate scheduler, controller-manager and cloud-controller-manager to use LeaseLock ([#94603](https://github.com/kubernetes/kubernetes/pull/94603), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery, Apps, Cloud Provider and Scheduling] -- Modify DNS-1123 error messages to indicate that RFC 1123 is not followed exactly ([#94182](https://github.com/kubernetes/kubernetes/pull/94182), [@mattfenwick](https://github.com/mattfenwick)) [SIG API Machinery, Apps, Auth, Network and Node] -- Move configurable fsgroup change policy for pods to beta ([#96376](https://github.com/kubernetes/kubernetes/pull/96376), [@gnufied](https://github.com/gnufied)) [SIG Apps and Storage] -- New flag is introduced, i.e. --topology-manager-scope=container|pod. - The default value is the "container" scope. ([#92967](https://github.com/kubernetes/kubernetes/pull/92967), [@cezaryzukowski](https://github.com/cezaryzukowski)) [SIG Instrumentation, Node and Testing] -- New parameter `defaultingType` for `PodTopologySpread` plugin allows to use k8s defined or user provided default constraints ([#95048](https://github.com/kubernetes/kubernetes/pull/95048), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- NodeAffinity plugin can be configured with AddedAffinity. ([#96202](https://github.com/kubernetes/kubernetes/pull/96202), [@alculquicondor](https://github.com/alculquicondor)) [SIG Node, Scheduling and Testing] -- Promote RuntimeClass feature to GA. - Promote node.k8s.io API groups from v1beta1 to v1. ([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing] -- Reminder: The labels "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" are deprecated in favor of "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" respectively. All users of the "failure-domain.beta..." labels should switch to the "topology..." equivalents. ([#96033](https://github.com/kubernetes/kubernetes/pull/96033), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, CLI, Cloud Provider, Network, Node, Scheduling, Storage and Testing] -- Server Side Apply now treats LabelSelector fields as atomic (meaning the entire selector is managed by a single writer and updated together), since they contain interrelated and inseparable fields that do not merge in intuitive ways. 
([#93901](https://github.com/kubernetes/kubernetes/pull/93901), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Storage and Testing] -- Services will now have a `clusterIPs` field to go with `clusterIP`. `clusterIPs[0]` is a synonym for `clusterIP` and will be syncronized on create and update operations. ([#95894](https://github.com/kubernetes/kubernetes/pull/95894), [@thockin](https://github.com/thockin)) [SIG Network] -- The ServiceAccountIssuerDiscovery feature gate is now Beta and enabled by default. ([#91921](https://github.com/kubernetes/kubernetes/pull/91921), [@mtaufen](https://github.com/mtaufen)) [SIG Auth] -- The status of v1beta1 CRDs without "preserveUnknownFields:false" now shows a violation, "spec.preserveUnknownFields: Invalid value: true: must be false". ([#93078](https://github.com/kubernetes/kubernetes/pull/93078), [@vareti](https://github.com/vareti)) -- The usage of mixed protocol values in the same LoadBalancer Service is possible if the new feature gate MixedProtocolLBService is enabled. The feature gate is disabled by default. The user has to enable it for the API Server. ([#94028](https://github.com/kubernetes/kubernetes/pull/94028), [@janosi](https://github.com/janosi)) [SIG API Machinery and Apps] -- This PR will introduce a feature gate CSIServiceAccountToken with two additional fields in `CSIDriverSpec`. ([#93130](https://github.com/kubernetes/kubernetes/pull/93130), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing] -- Users can try the cronjob controller v2 using the feature gate. This will be the default controller in future releases. ([#93370](https://github.com/kubernetes/kubernetes/pull/93370), [@alaypatel07](https://github.com/alaypatel07)) [SIG API Machinery, Apps, Auth and Testing] -- VolumeSnapshotDataSource moves to GA in 1.20 release ([#95282](https://github.com/kubernetes/kubernetes/pull/95282), [@xing-yang](https://github.com/xing-yang)) [SIG Apps] -- WinOverlay feature graduated to beta ([#94807](https://github.com/kubernetes/kubernetes/pull/94807), [@ksubrmnn](https://github.com/ksubrmnn)) [SIG Windows] - -### 기능(feature) - -- **Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**: -- A new metric `apiserver_request_filter_duration_seconds` has been introduced that - measures request filter latency in seconds. ([#95207](https://github.com/kubernetes/kubernetes/pull/95207), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Instrumentation] -- A new set of alpha metrics are reported by the Kubernetes scheduler under the `/metrics/resources` endpoint that allow administrators to easily see the resource consumption (requests and limits for all resources on the pods) and compare it to actual pod usage or node capacity. 
-- A new set of alpha metrics are reported by the Kubernetes scheduler under the `/metrics/resources` endpoint that allow administrators to easily see the resource consumption (requests and limits for all resources on the pods) and compare it to actual pod usage or node capacity.
-  ([#94866](https://github.com/kubernetes/kubernetes/pull/94866), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Instrumentation, Node and Scheduling]
-- Add an --experimental-logging-sanitization flag enabling runtime protection from leaking sensitive data in logs ([#96370](https://github.com/kubernetes/kubernetes/pull/96370), [@serathius](https://github.com/serathius)) [SIG API Machinery, Cluster Lifecycle and Instrumentation]
-- Add a StorageVersionAPI feature gate that makes the API server update storageversions before serving certain write requests.
-  This feature allows the storage migrator to manage storage migration for built-in resources.
-  Enabling the internal.apiserver.k8s.io/v1alpha1 API and the APIServerIdentity feature gate is required to use this feature. ([#93873](https://github.com/kubernetes/kubernetes/pull/93873), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Auth and Testing]
-- Add a metric for the time taken to perform a recursive permission change ([#95866](https://github.com/kubernetes/kubernetes/pull/95866), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage]
-- Add a new `vSphere` metric: `cloudprovider_vsphere_vcenter_versions`. Its content shows `vCenter` hostnames with the associated server version. ([#94526](https://github.com/kubernetes/kubernetes/pull/94526), [@Danil-Grigorev](https://github.com/Danil-Grigorev)) [SIG Cloud Provider and Instrumentation]
-- Add a new flag to set the priority of the kubelet on Windows nodes so that workloads cannot overwhelm the node, thereby disrupting the kubelet process. ([#96051](https://github.com/kubernetes/kubernetes/pull/96051), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node and Windows]
-- Add a feature to size memory-backed volumes (see the example sketch below) ([#94444](https://github.com/kubernetes/kubernetes/pull/94444), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Storage and Testing]
-- Add foreground cascading deletion to kubectl with the new `kubectl delete foreground|background|orphan` option. ([#93384](https://github.com/kubernetes/kubernetes/pull/93384), [@zhouya0](https://github.com/zhouya0))
-- Add metrics for Azure service operations (route and loadbalancer). ([#94124](https://github.com/kubernetes/kubernetes/pull/94124), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider and Instrumentation]
-- Add network rule support in Azure account creation. ([#94239](https://github.com/kubernetes/kubernetes/pull/94239), [@andyzhangx](https://github.com/andyzhangx))
-- Add a node_authorizer_actions_duration_seconds metric that can be used to estimate the load on the node authorizer. ([#92466](https://github.com/kubernetes/kubernetes/pull/92466), [@mborsz](https://github.com/mborsz)) [SIG API Machinery, Auth and Instrumentation]
-- Add `pod_`-based CPU and memory metrics to the Kubelet's /metrics/resource endpoint ([#95839](https://github.com/kubernetes/kubernetes/pull/95839), [@egernst](https://github.com/egernst)) [SIG Instrumentation, Node and Testing]
-- Added `get-users` and `delete-user` to the `kubectl config` subcommand ([#89840](https://github.com/kubernetes/kubernetes/pull/89840), [@eddiezane](https://github.com/eddiezane)) [SIG CLI]
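The memory-backed volume sizing noted above builds on the existing `emptyDir` API. A minimal sketch, assuming the SizeMemoryBackedVolumes behavior described in the linked PR; the pod name, image and size limit are illustrative.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: memory-volume-demo   # illustrative name
spec:
  containers:
    - name: app
      image: busybox
      command: ["sh", "-c", "sleep 3600"]
      volumeMounts:
        - name: scratch
          mountPath: /scratch
  volumes:
    - name: scratch
      emptyDir:
        medium: Memory     # tmpfs-backed volume
        sizeLimit: 128Mi   # upper bound for the tmpfs size
```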
-- Added a counter metric "apiserver_request_self" to count API server self-requests with labels for verb, resource, and subresource.
-  ([#94288](https://github.com/kubernetes/kubernetes/pull/94288), [@LogicalShark](https://github.com/LogicalShark)) [SIG API Machinery, Auth, Instrumentation and Scheduling]
-- Added a new k8s.io/component-helpers repository providing shared helper code for (core) components. ([#92507](https://github.com/kubernetes/kubernetes/pull/92507), [@ingvagabund](https://github.com/ingvagabund)) [SIG Apps, Node, Release and Scheduling]
-- Adds a `create ingress` command to `kubectl` ([#78153](https://github.com/kubernetes/kubernetes/pull/78153), [@amimof](https://github.com/amimof)) [SIG CLI and Network]
-- Adds a headless service to the node-local-cache addon. ([#88412](https://github.com/kubernetes/kubernetes/pull/88412), [@stafot](https://github.com/stafot)) [SIG Cloud Provider and Network]
-- Allow cross-compilation of Kubernetes on different platforms. ([#94403](https://github.com/kubernetes/kubernetes/pull/94403), [@bnrjee](https://github.com/bnrjee)) [SIG Release]
-- Azure: Support multiple services sharing one IP address ([#94991](https://github.com/kubernetes/kubernetes/pull/94991), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
-- CRDs: For structural schemas, non-nullable null map fields will now be dropped and defaulted if a default is available. Null items in lists will continue to be preserved, and fail validation if not nullable. ([#95423](https://github.com/kubernetes/kubernetes/pull/95423), [@apelisse](https://github.com/apelisse)) [SIG API Machinery]
-- Changed: a default "Accept: */*" header is now added to HTTP probes (see the example sketch below). See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes (https://github.com/kubernetes/website/pull/24756) ([#95641](https://github.com/kubernetes/kubernetes/pull/95641), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node]
-- Client-go credential plugins can now be passed the current cluster information via the KUBERNETES_EXEC_INFO environment variable. ([#95489](https://github.com/kubernetes/kubernetes/pull/95489), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery and Auth]
-- The command to start the network proxy changes from 'KUBE_ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE ./cluster/kube-up.sh' to 'KUBE_ENABLE_KONNECTIVITY_SERVICE=true ./hack/kube-up.sh' ([#92669](https://github.com/kubernetes/kubernetes/pull/92669), [@Jefftree](https://github.com/Jefftree)) [SIG Cloud Provider]
-- Configure the AWS LoadBalancer health check protocol via service annotations. ([#94546](https://github.com/kubernetes/kubernetes/pull/94546), [@kishorj](https://github.com/kishorj))
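For the HTTP probe header change above: the default `Accept: */*` can be overridden per probe with the long-standing `httpHeaders` field, as in this sketch (pod name, path, port and header value are illustrative).

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: probe-demo   # illustrative name
spec:
  containers:
    - name: app
      image: nginx
      livenessProbe:
        httpGet:
          path: /healthz
          port: 8080
          httpHeaders:
            - name: Accept           # overrides the new default "Accept: */*"
              value: application/json
```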
-- DefaultPodTopologySpread graduated to Beta. The feature gate is enabled by default.
-  ([#95631](https://github.com/kubernetes/kubernetes/pull/95631), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling and Testing]
-- E2e test for PodFsGroupChangePolicy ([#96247](https://github.com/kubernetes/kubernetes/pull/96247), [@saikat-royc](https://github.com/saikat-royc)) [SIG Storage and Testing]
-- Ephemeral containers now apply the same API defaults as initContainers and containers ([#94896](https://github.com/kubernetes/kubernetes/pull/94896), [@wawa0210](https://github.com/wawa0210)) [SIG Apps and CLI]
-- Graduate the Pod Resources API to GA.
-  Introduces the pod_resources_endpoint_requests_total metric which tracks the total number of requests to the pod resources API ([#92165](https://github.com/kubernetes/kubernetes/pull/92165), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Instrumentation, Node and Testing]
-- In dual-stack bare-metal clusters, you can now pass dual-stack IPs to `kubelet --node-ip`,
-  e.g.: `kubelet --node-ip 10.1.0.5,fd01::0005`. This is not yet supported for non-bare-metal
-  clusters.
-
-  In dual-stack clusters where nodes have dual-stack addresses, hostNetwork pods
-  will now get dual-stack PodIPs. ([#95239](https://github.com/kubernetes/kubernetes/pull/95239), [@danwinship](https://github.com/danwinship)) [SIG Network and Node]
-- Introduce an api-extensions category which will return: mutating admission configs, validating admission configs, CRDs and APIServices when used in `kubectl get`, for example. ([#95603](https://github.com/kubernetes/kubernetes/pull/95603), [@soltysh](https://github.com/soltysh)) [SIG API Machinery]
-- Introduces a new GCE-specific cluster creation variable KUBE_PROXY_DISABLE. When set to true, this will skip over the creation of kube-proxy (whether the daemonset or static pod). This can be used to control the lifecycle of kube-proxy separately from the lifecycle of the nodes. ([#91977](https://github.com/kubernetes/kubernetes/pull/91977), [@varunmar](https://github.com/varunmar)) [SIG Cloud Provider]
-- Kube-apiserver now maintains a Lease object to identify itself:
-  - The feature is under feature gate `APIServerIdentity`.
-  - Two flags are added to kube-apiserver: `identity-lease-duration-seconds`, `identity-lease-renew-interval-seconds` ([#95533](https://github.com/kubernetes/kubernetes/pull/95533), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery]
-- Kube-apiserver: The timeout used when making health check calls to etcd can now be configured with `--etcd-healthcheck-timeout`. The default timeout is 2 seconds, matching the previous behavior. ([#93244](https://github.com/kubernetes/kubernetes/pull/93244), [@Sh4d1](https://github.com/Sh4d1)) [SIG API Machinery]
-- Kube-apiserver: added support for compressing rotated audit log files with `--audit-log-compress` ([#94066](https://github.com/kubernetes/kubernetes/pull/94066), [@lojies](https://github.com/lojies)) [SIG API Machinery and Auth]
-- Kubeadm now prints warnings instead of throwing errors if the current system time is outside of the NotBefore and NotAfter bounds of a loaded certificate.
-  ([#94504](https://github.com/kubernetes/kubernetes/pull/94504), [@neolit123](https://github.com/neolit123))
-- Kubeadm: Add a preflight check that the control-plane node has at least 1700MB of RAM ([#93275](https://github.com/kubernetes/kubernetes/pull/93275), [@xlgao-zju](https://github.com/xlgao-zju)) [SIG Cluster Lifecycle]
-- Kubeadm: add the "--cluster-name" flag to the "kubeadm alpha kubeconfig user" command to allow configuring the cluster name in the generated kubeconfig file ([#93992](https://github.com/kubernetes/kubernetes/pull/93992), [@prabhu43](https://github.com/prabhu43)) [SIG Cluster Lifecycle]
-- Kubeadm: add the "--kubeconfig" flag to the "kubeadm init phase upload-certs" command to allow users to pass a custom location for a kubeconfig file. ([#94765](https://github.com/kubernetes/kubernetes/pull/94765), [@zhanw15](https://github.com/zhanw15)) [SIG Cluster Lifecycle]
-- Kubeadm: make the etcd pod request 100m CPU, 100Mi memory and 100Mi ephemeral_storage by default ([#94479](https://github.com/kubernetes/kubernetes/pull/94479), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle]
-- Kubeadm: make the command "kubeadm alpha kubeconfig user" accept a "--config" flag and remove the following flags (see the example sketch below):
-  - apiserver-advertise-address / apiserver-bind-port: use either localAPIEndpoint from InitConfiguration or controlPlaneEndpoint from ClusterConfiguration.
-  - cluster-name: use clusterName from ClusterConfiguration
-  - cert-dir: use certificatesDir from ClusterConfiguration ([#94879](https://github.com/kubernetes/kubernetes/pull/94879), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle]
-- Kubectl create now supports creating ingress objects. ([#94327](https://github.com/kubernetes/kubernetes/pull/94327), [@rikatz](https://github.com/rikatz)) [SIG CLI and Network]
-- `kubectl rollout history sts/sts-name --revision=some-revision` will start showing the detailed view of the StatefulSet at that specified revision ([#86506](https://github.com/kubernetes/kubernetes/pull/86506), [@dineshba](https://github.com/dineshba)) [SIG CLI]
-- Kubectl: Previously, users could not provide arguments to an external diff tool via the KUBECTL_EXTERNAL_DIFF env var. This release allows users to specify args in KUBECTL_EXTERNAL_DIFF. ([#95292](https://github.com/kubernetes/kubernetes/pull/95292), [@dougsland](https://github.com/dougsland)) [SIG CLI]
-- Kubemark now supports both real and hollow nodes in a single cluster. ([#93201](https://github.com/kubernetes/kubernetes/pull/93201), [@ellistarn](https://github.com/ellistarn)) [SIG Scalability]
-- Kubernetes E2E test image manifest lists now contain Windows images. ([#77398](https://github.com/kubernetes/kubernetes/pull/77398), [@claudiubelu](https://github.com/claudiubelu)) [SIG Testing and Windows]
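For the `kubeadm alpha kubeconfig user --config` change listed above, the removed flags map onto ClusterConfiguration fields roughly as in this sketch; the values are placeholders and the exact mapping should be checked against the kubeadm v1beta2 API.

```yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
clusterName: my-cluster                  # replaces the removed --cluster-name flag
controlPlaneEndpoint: "10.0.0.10:6443"   # replaces --apiserver-advertise-address / --apiserver-bind-port
certificatesDir: /etc/kubernetes/pki     # replaces --cert-dir
```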
-- Kubernetes is now built using go1.15.2
-  - build: Update to k/repo-infra@v0.1.1 (supports go1.15.2)
-  - build: Use go-runner:buster-v2.0.1 (built using go1.15.1)
-  - bazel: Replace --features with Starlark build settings flag
-  - hack/lib/util.sh: some bash cleanups
-    - switched one spot to use kube::logging
-    - make kube::util::find-binary return an error when it doesn't find
-      anything so that hack scripts fail fast instead of with '' binary not
-      found errors.
-    - this required deleting some genfeddoc stuff. the binary no longer
-      exists in k/k repo since we removed federation/, and I don't see it
-      in https://github.com/kubernetes-sigs/kubefed/ either. I'm assuming
-      that it's gone for good now.
-  - bazel: output go_binary rule directly from go_binary_conditional_pure
-
-    From: [@mikedanese](https://github.com/mikedanese):
-    Instead of aliasing. Aliases are annoying in a number of ways. This is
-    specifically bugging me now because they make the action graph harder to
-    analyze programmatically. By using aliases here, we would need to handle
-    potentially aliased go_binary targets and dereference to the effective
-    target.
-
-    The comment references an issue with `pure = select(...)` which appears
-    to be resolved considering this now builds.
-  - make kube::util::find-binary not dependent on bazel-out/ structure
-
-    Implement an aspect that outputs go_build_mode metadata for go binaries,
-    and use that during binary selection. ([#94449](https://github.com/kubernetes/kubernetes/pull/94449), [@justaugustus](https://github.com/justaugustus)) [SIG Architecture, CLI, Cluster Lifecycle, Node, Release and Testing]
-- Kubernetes is now built using go1.15.5
-  - build: Update to k/repo-infra@v0.1.2 (supports go1.15.5) ([#95776](https://github.com/kubernetes/kubernetes/pull/95776), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing]
-- The new default scheduling plugin order reduces scheduling and preemption latency when taints and node affinity are used ([#95539](https://github.com/kubernetes/kubernetes/pull/95539), [@soulxu](https://github.com/soulxu)) [SIG Scheduling]
-- Only update Azure data disks on attach/detach ([#94265](https://github.com/kubernetes/kubernetes/pull/94265), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
-- Promote SupportNodePidsLimit to GA to provide node-to-pod PID isolation.
-  Promote SupportPodPidsLimit to GA to provide the ability to limit PIDs per pod. ([#94140](https://github.com/kubernetes/kubernetes/pull/94140), [@derekwaynecarr](https://github.com/derekwaynecarr))
-- SCTP support in API objects (Pod, Service, NetworkPolicy) is now GA (see the example sketch below).
-  Note that this has no effect on whether SCTP is enabled on nodes at the kernel level,
-  and note that some cloud platforms and network plugins do not support SCTP traffic. ([#95566](https://github.com/kubernetes/kubernetes/pull/95566), [@danwinship](https://github.com/danwinship)) [SIG Apps and Network]
-- The scheduler now ignores Pod update events if the resourceVersion of the old and new Pods are identical. ([#96071](https://github.com/kubernetes/kubernetes/pull/96071), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling]
-- Scheduling Framework: expose Run[Pre]ScorePlugins functions to PreemptionHandle, which can be used in the PostFilter extension point. ([#93534](https://github.com/kubernetes/kubernetes/pull/93534), [@everpeace](https://github.com/everpeace)) [SIG Scheduling and Testing]
-- SelectorSpreadPriority maps to the PodTopologySpread plugin when the DefaultPodTopologySpread feature is enabled ([#95448](https://github.com/kubernetes/kubernetes/pull/95448), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling]
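With SCTP now GA as noted above, a Service can declare the protocol directly; this is a sketch with illustrative names and ports, and it still requires SCTP support in the node kernel and the network plugin.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: sctp-demo   # illustrative name
spec:
  selector:
    app: sctp-app
  ports:
    - protocol: SCTP   # now GA in the API; kernel/CNI support still required
      port: 9100
```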
-- Send GCE node startup script logs to the console and journal.
-  ([#95311](https://github.com/kubernetes/kubernetes/pull/95311), [@karan](https://github.com/karan))
-- SetHostnameAsFQDN has graduated to Beta and is therefore enabled by default. ([#95267](https://github.com/kubernetes/kubernetes/pull/95267), [@javidiaz](https://github.com/javidiaz)) [SIG Node]
-- Support [service.beta.kubernetes.io/azure-pip-ip-tags] annotations to allow customers to specify ip-tags to influence public-ip creation in Azure [Tag1=Value1, Tag2=Value2, etc.] (see the example sketch below) ([#94114](https://github.com/kubernetes/kubernetes/pull/94114), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider]
-- Support custom tags for cloud-provider-managed resources ([#96450](https://github.com/kubernetes/kubernetes/pull/96450), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
-- Support customizing the load balancer health probe protocol and request path ([#96338](https://github.com/kubernetes/kubernetes/pull/96338), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
-- Support for Windows container images (OS Versions: 1809, 1903, 1909, 2004) was added to the pause:3.4 image. ([#91452](https://github.com/kubernetes/kubernetes/pull/91452), [@claudiubelu](https://github.com/claudiubelu)) [SIG Node, Release and Windows]
-- Support multiple standard load balancers in one cluster ([#96111](https://github.com/kubernetes/kubernetes/pull/96111), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
-- The beta `RootCAConfigMap` feature gate is enabled by default and causes kube-controller-manager to publish a "kube-root-ca.crt" ConfigMap to every namespace. This ConfigMap contains a CA bundle used for verifying connections to the kube-apiserver. ([#96197](https://github.com/kubernetes/kubernetes/pull/96197), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth and Testing]
-- The kubelet_runtime_operations_duration_seconds metric buckets were set to 0.005 0.0125 0.03125 0.078125 0.1953125 0.48828125 1.220703125 3.0517578125 7.62939453125 19.073486328125 47.6837158203125 119.20928955078125 298.0232238769531 and 745.0580596923828 seconds ([#96054](https://github.com/kubernetes/kubernetes/pull/96054), [@alvaroaleman](https://github.com/alvaroaleman)) [SIG Instrumentation and Node]
-- There is a new pv_collector_total_pv_count metric that counts persistent volumes by the volume plugin name and volume mode. ([#95719](https://github.com/kubernetes/kubernetes/pull/95719), [@tsmetana](https://github.com/tsmetana)) [SIG Apps, Instrumentation, Storage and Testing]
-- Volume snapshot e2e test to validate PVC and VolumeSnapshotContent finalizers ([#95863](https://github.com/kubernetes/kubernetes/pull/95863), [@RaunakShah](https://github.com/RaunakShah)) [SIG Cloud Provider, Storage and Testing]
-- Warns the user when executing kubectl apply/diff against a resource currently being deleted. ([#95544](https://github.com/kubernetes/kubernetes/pull/95544), [@SaiHarshaK](https://github.com/SaiHarshaK)) [SIG CLI]
-- `kubectl alpha debug` has graduated to beta and is now `kubectl debug`. ([#96138](https://github.com/kubernetes/kubernetes/pull/96138), [@verb](https://github.com/verb)) [SIG CLI and Testing]
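A sketch of the `azure-pip-ip-tags` annotation mentioned above; the tag key/value shown is an assumption for illustration, not a value prescribed by the release note.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: web-lb   # illustrative name
  annotations:
    # Comma-separated Tag=Value pairs applied to the Azure public IP (assumed format).
    service.beta.kubernetes.io/azure-pip-ip-tags: "RoutingPreference=Internet"
spec:
  type: LoadBalancer
  selector:
    app: web
  ports:
    - port: 80
```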
-- `kubectl debug` gains support for changing container images when copying a pod for debugging, similar to how `kubectl set image` works. See `kubectl help debug` for more information.
-  ([#96058](https://github.com/kubernetes/kubernetes/pull/96058), [@verb](https://github.com/verb)) [SIG CLI]
+- 1. PodAffinityTerm includes a namespaceSelector field to allow selecting eligible namespaces based on their labels.
+  2. A new CrossNamespacePodAffinity quota scope API that allows restricting which namespaces are allowed to use PodAffinityTerm with cross-namespace references via the namespaceSelector or namespaces fields. ([#98582](https://github.com/kubernetes/kubernetes/pull/98582), [@ahg-g](https://github.com/ahg-g)) [SIG API Machinery, Apps, Auth and Testing]
+- Add a Probe-level terminationGracePeriodSeconds field ([#99375](https://github.com/kubernetes/kubernetes/pull/99375), [@ehashman](https://github.com/ehashman)) [SIG API Machinery, Apps, Node and Testing]
+- Added a `.spec.completionMode` field to Job, with accepted values `NonIndexed` (default) and `Indexed`. This is an alpha field and is only honored by servers with the `IndexedJob` feature gate enabled (see the example sketch below). ([#98441](https://github.com/kubernetes/kubernetes/pull/98441), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI]
+- Adds support for an endPort field in NetworkPolicy ([#97058](https://github.com/kubernetes/kubernetes/pull/97058), [@rikatz](https://github.com/rikatz)) [SIG Apps and Network]
+- CSIServiceAccountToken graduates to Beta and is enabled by default. ([#99298](https://github.com/kubernetes/kubernetes/pull/99298), [@zshihang](https://github.com/zshihang))
+- Cluster admins can now turn off the `/debug/pprof` and `/debug/flags/v` endpoints in the kubelet by setting `enableProfilingHandler` and `enableDebugFlagsHandler` to `false` in the kubelet configuration file. The options `enableProfilingHandler` and `enableDebugFlagsHandler` can be set to `true` only when `enableDebuggingHandlers` is also set to `true`. ([#98458](https://github.com/kubernetes/kubernetes/pull/98458), [@SaranBalaji90](https://github.com/SaranBalaji90))
+- DaemonSets accept a MaxSurge integer or percent on their rolling update strategy that will launch the updated pod on nodes and wait for those pods to go ready before marking the old out-of-date pods as deleted. This allows workloads to avoid downtime during upgrades when deployed using DaemonSets. This feature is alpha and is behind the DaemonSetUpdateSurge feature gate. ([#96441](https://github.com/kubernetes/kubernetes/pull/96441), [@smarterclayton](https://github.com/smarterclayton)) [SIG Apps and Testing]
+- Enable SPDY pings to keep connections alive, so that `kubectl exec` and `kubectl port-forward` won't be interrupted. ([#97083](https://github.com/kubernetes/kubernetes/pull/97083), [@knight42](https://github.com/knight42)) [SIG API Machinery and CLI]
+- FieldManager no longer owns fields that get reset before the object is persisted (e.g. "status wiping"). ([#99661](https://github.com/kubernetes/kubernetes/pull/99661), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery, Auth and Testing]
+- Fixes server-side apply for APIService resources. ([#98576](https://github.com/kubernetes/kubernetes/pull/98576), [@kevindelgado](https://github.com/kevindelgado))
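A minimal sketch of the Indexed Job alpha field described above, assuming a v1.21 server with the `IndexedJob` feature gate enabled; the name, image and use of the `JOB_COMPLETION_INDEX` environment variable are illustrative assumptions based on the Indexed Job design.

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: indexed-demo   # illustrative name
spec:
  completions: 5
  parallelism: 2
  completionMode: Indexed   # alpha; requires the IndexedJob feature gate
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: worker
          image: busybox
          # Each pod is expected to receive its index (0..4); the env var
          # name is an assumption, not a verbatim excerpt from this note.
          command: ["sh", "-c", "echo processing shard $JOB_COMPLETION_INDEX"]
```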
+- Generic ephemeral volumes are beta.
+  ([#99643](https://github.com/kubernetes/kubernetes/pull/99643), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, CLI, Node, Storage and Testing]
+- Hugepages request values are limited to integer multiples of the page size. ([#98515](https://github.com/kubernetes/kubernetes/pull/98515), [@lala123912](https://github.com/lala123912)) [SIG Apps]
+- Implement GetAvailableResources in the podresources API. ([#95734](https://github.com/kubernetes/kubernetes/pull/95734), [@fromanirh](https://github.com/fromanirh)) [SIG Instrumentation, Node and Testing]
+- The IngressClass resource can now reference a resource in a specific namespace
+  for implementation-specific configuration (previously only cluster-level resources were allowed).
+  This feature can be enabled using the IngressClassNamespacedParams feature gate. ([#99275](https://github.com/kubernetes/kubernetes/pull/99275), [@hbagdi](https://github.com/hbagdi))
+- The Jobs API has a new `.spec.suspend` field that can be used to suspend and resume Jobs. This is an alpha field which is only honored by servers with the `SuspendJob` feature gate enabled. ([#98727](https://github.com/kubernetes/kubernetes/pull/98727), [@adtac](https://github.com/adtac))
+- The Kubelet Graceful Node Shutdown feature graduates to Beta and is enabled by default. ([#99735](https://github.com/kubernetes/kubernetes/pull/99735), [@bobbypage](https://github.com/bobbypage))
+- Kubernetes is now built using go1.15.7 ([#98363](https://github.com/kubernetes/kubernetes/pull/98363), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Node, Release and Testing]
+- Namespace API objects now have a `kubernetes.io/metadata.name` label matching their metadata.name field to allow selecting any namespace by its name using a label selector. ([#96968](https://github.com/kubernetes/kubernetes/pull/96968), [@jayunit100](https://github.com/jayunit100)) [SIG API Machinery, Apps, Cloud Provider, Storage and Testing]
+- One new field, "InternalTrafficPolicy", is added to Service (see the example sketch below).
+  It specifies whether cluster-internal traffic should be routed to all endpoints or node-local endpoints only.
+  "Cluster" routes internal traffic to a Service to all endpoints.
+  "Local" routes traffic to node-local endpoints only, and traffic is dropped if no node-local endpoints are ready.
+  The default value is "Cluster". ([#96600](https://github.com/kubernetes/kubernetes/pull/96600), [@maplain](https://github.com/maplain)) [SIG API Machinery, Apps and Network]
+- PodDisruptionBudget API objects can now contain conditions in status. ([#98127](https://github.com/kubernetes/kubernetes/pull/98127), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation]
+- PodSecurityPolicy only stores "generic" as an allowed volume type if the GenericEphemeralVolume feature gate is enabled ([#98918](https://github.com/kubernetes/kubernetes/pull/98918), [@pohly](https://github.com/pohly)) [SIG Auth and Security]
+- Promote CronJobs to batch/v1 ([#99423](https://github.com/kubernetes/kubernetes/pull/99423), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing]
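A sketch of the new Service field above, assuming the corresponding `ServiceInternalTrafficPolicy` feature gate is enabled; the name, selector and port are illustrative.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: node-local-svc   # illustrative name
spec:
  selector:
    app: metrics-agent
  ports:
    - port: 9000
  # Route cluster-internal traffic only to endpoints on the same node;
  # traffic is dropped if no node-local endpoint is ready.
  internalTrafficPolicy: Local
```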
+- Promote the Immutable Secrets/ConfigMaps feature to Stable.
+  This allows setting the `immutable` field in a Secret or ConfigMap object to mark their contents as immutable (see the example sketch below). ([#97615](https://github.com/kubernetes/kubernetes/pull/97615), [@wojtek-t](https://github.com/wojtek-t)) [SIG Apps, Architecture, Node and Testing]
+- Remove support for building Kubernetes with bazel. ([#99561](https://github.com/kubernetes/kubernetes/pull/99561), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery, Apps, Architecture, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Release, Scalability, Scheduling, Storage, Testing and Windows]
+- The scheduler extender filter interface can now report unresolvable failed nodes in the new field `FailedAndUnresolvableNodes` of the `ExtenderFilterResult` struct. Nodes in this map will be skipped in the preemption phase. ([#92866](https://github.com/kubernetes/kubernetes/pull/92866), [@cofyc](https://github.com/cofyc)) [SIG Scheduling]
+- Services can specify loadBalancerClass to use a custom load balancer ([#98277](https://github.com/kubernetes/kubernetes/pull/98277), [@XudongLiuHarold](https://github.com/XudongLiuHarold))
+- Storage capacity tracking (= the CSIStorageCapacity feature) graduates to Beta and is enabled by default; the storage.k8s.io/v1alpha1/VolumeAttachment and storage.k8s.io/v1alpha1/CSIStorageCapacity objects are deprecated ([#99641](https://github.com/kubernetes/kubernetes/pull/99641), [@pohly](https://github.com/pohly))
+- Support for Indexed Jobs: a Job that is considered completed when Pods associated with indexes from 0 to (.spec.completions-1) have succeeded. ([#98812](https://github.com/kubernetes/kubernetes/pull/98812), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI]
+- The BoundServiceAccountTokenVolume feature has been promoted to beta, and is enabled by default.
+  - This changes the tokens provided to containers at `/var/run/secrets/kubernetes.io/serviceaccount/token` to be time-limited, auto-refreshed, and invalidated when the containing pod is deleted.
+  - Clients should reload the token from disk periodically (once per minute is recommended) to ensure they continue to use a valid token. `k8s.io/client-go` versions v11.0.0+ and v0.15.0+ reload tokens automatically.
+  - By default, injected tokens are given an extended lifetime so they remain valid even after a new refreshed token is provided. The metric `serviceaccount_stale_tokens_total` can be used to monitor for workloads that are depending on the extended lifetime and are continuing to use tokens even after a refreshed token is provided to the container. If that metric indicates no existing workloads are depending on extended lifetimes, injected token lifetime can be shortened to 1 hour by starting `kube-apiserver` with `--service-account-extend-token-expiration=false`. ([#95667](https://github.com/kubernetes/kubernetes/pull/95667), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle and Testing]
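A minimal sketch of an immutable ConfigMap using the now-stable field described above; once `immutable` is set to true, the only way to change the data is to delete and recreate the object (name and data are illustrative).

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-settings   # illustrative name
data:
  log-level: info
immutable: true   # contents can no longer be updated in place
```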
+- The EndpointSlice Controllers are now GA. The `EndpointSliceController` will not populate the `deprecatedTopology` field and will only provide topology information through the `zone` and `nodeName` fields.
+  ([#99870](https://github.com/kubernetes/kubernetes/pull/99870), [@swetharepakula](https://github.com/swetharepakula))
+- The Endpoints controller will now set the `endpoints.kubernetes.io/over-capacity` annotation to "warning" when an Endpoints resource contains more than 1000 addresses. In a future release, the controller will truncate Endpoints that exceed this limit. The EndpointSlice API can be used to support a significantly larger number of addresses. ([#99975](https://github.com/kubernetes/kubernetes/pull/99975), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
+- The PodDisruptionBudget API has been promoted to policy/v1 with no schema changes. The only functional change is that an empty selector (`{}`) written to a policy/v1 PodDisruptionBudget now selects all pods in the namespace. The behavior of the policy/v1beta1 API remains unchanged. The policy/v1beta1 PodDisruptionBudget API is deprecated and will no longer be served in 1.25+. ([#99290](https://github.com/kubernetes/kubernetes/pull/99290), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Scheduling and Testing]
+- The `EndpointSlice` API is now GA. The `EndpointSlice` topology field has been removed from the GA API and will be replaced by a new per-Endpoint Zone field. If the topology field was previously used, it will be converted into an annotation in the v1 Resource. The `discovery.k8s.io/v1alpha1` API is removed. ([#99662](https://github.com/kubernetes/kubernetes/pull/99662), [@swetharepakula](https://github.com/swetharepakula))
+- The `controller.kubernetes.io/pod-deletion-cost` annotation can be set to offer a hint on the cost of deleting a `Pod` compared to other pods belonging to the same ReplicaSet (see the example sketch below). Pods with lower deletion cost are deleted first. This is an alpha feature. ([#99163](https://github.com/kubernetes/kubernetes/pull/99163), [@ahg-g](https://github.com/ahg-g))
+- The kube-apiserver now resets `managedFields` that got corrupted by a mutating admission controller. ([#98074](https://github.com/kubernetes/kubernetes/pull/98074), [@kwiesmueller](https://github.com/kwiesmueller))
+- Topology Aware Hints are now available in alpha and can be enabled with the `TopologyAwareHints` feature gate. ([#99522](https://github.com/kubernetes/kubernetes/pull/99522), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, Auth, Instrumentation, Network and Testing]
+- Users might specify the `kubectl.kubernetes.io/default-exec-container` annotation in a Pod to preselect a container for kubectl commands. ([#97099](https://github.com/kubernetes/kubernetes/pull/97099), [@pacoxu](https://github.com/pacoxu)) [SIG CLI]
+
+### Feature
+
+- A client-go metric, rest_client_exec_plugin_call_total, has been added to track total calls to client-go credential plugins. ([#98892](https://github.com/kubernetes/kubernetes/pull/98892), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery, Auth, Cluster Lifecycle and Instrumentation]
+- A new histogram metric to track the time it takes the `TTLAfterFinished` controller to delete a job ([#98676](https://github.com/kubernetes/kubernetes/pull/98676), [@ahg-g](https://github.com/ahg-g))
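A sketch of the alpha pod-deletion-cost annotation described above; it assumes the corresponding alpha feature gate is enabled, and the pod name and cost value are illustrative.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: worker-1   # illustrative name
  annotations:
    # Lower cost means this pod is preferred for deletion when the
    # owning ReplicaSet is scaled down (alpha behavior).
    controller.kubernetes.io/pod-deletion-cost: "-100"
spec:
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]
```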
+- The AWS cloud provider supports auto-discovering subnets without any `kubernetes.io/cluster/<clusterName>` tags.
+  It also supports an additional service annotation `service.beta.kubernetes.io/aws-load-balancer-subnets` to manually configure the subnets. ([#97431](https://github.com/kubernetes/kubernetes/pull/97431), [@kishorj](https://github.com/kishorj))
+- Aborting the drain command in a list of nodes will be deprecated. The new behavior will make the drain command go through all nodes even if one or more nodes fail during the drain. For now, users can try this behavior by enabling the --ignore-errors flag. ([#98203](https://github.com/kubernetes/kubernetes/pull/98203), [@yuzhiquan](https://github.com/yuzhiquan))
+- Add a --permit-address-sharing flag to `kube-apiserver` to listen with `SO_REUSEADDR`. While allowing listening on wildcard IPs like 0.0.0.0 and specific IPs in parallel, it avoids waiting for the kernel to release sockets in `TIME_WAIT` state, and hence considerably reduces `kube-apiserver` restart times under certain conditions. ([#93861](https://github.com/kubernetes/kubernetes/pull/93861), [@sttts](https://github.com/sttts))
+- Add a `csi_operations_seconds` metric on the kubelet that exposes CSI operation duration and status for node CSI operations. ([#98979](https://github.com/kubernetes/kubernetes/pull/98979), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Instrumentation and Storage]
+- Add a `migrated` field to the `storage_operation_duration_seconds` metric ([#99050](https://github.com/kubernetes/kubernetes/pull/99050), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Instrumentation and Storage]
+- Add a flag --lease-reuse-duration-seconds for kube-apiserver to configure the etcd lease reuse duration. ([#97009](https://github.com/kubernetes/kubernetes/pull/97009), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery and Scalability]
+- Add a metric etcd_lease_object_counts for kube-apiserver to observe the max number of objects attached to a single etcd lease. ([#97480](https://github.com/kubernetes/kubernetes/pull/97480), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery, Instrumentation and Scalability]
+- Add support to generate client-side binaries for the new darwin/arm64 platform ([#97743](https://github.com/kubernetes/kubernetes/pull/97743), [@dims](https://github.com/dims)) [SIG Release and Testing]
+- Added `ephemeral_volume_controller_create[_failures]_total` counters to kube-controller-manager metrics ([#99115](https://github.com/kubernetes/kubernetes/pull/99115), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Cluster Lifecycle, Instrumentation and Storage]
+- Added support for installing `arm64` node artifacts. ([#99242](https://github.com/kubernetes/kubernetes/pull/99242), [@liu-cong](https://github.com/liu-cong))
+- Adds an alpha feature `VolumeCapacityPriority` which makes the scheduler prioritize nodes based on the best matching size of statically provisioned PVs across multiple topologies. ([#96347](https://github.com/kubernetes/kubernetes/pull/96347), [@cofyc](https://github.com/cofyc)) [SIG Apps, Network, Scheduling, Storage and Testing]
+- Adds the ability to pass --strict-transport-security-directives to the kube-apiserver to set the HSTS header appropriately. Be sure you understand the consequences to browsers before setting this field.
+  ([#96502](https://github.com/kubernetes/kubernetes/pull/96502), [@249043822](https://github.com/249043822)) [SIG Auth]
+- Adds two new metrics to cronjobs: a histogram to track the time difference between when a job is created and the expected time when it should be created, and a gauge for the missed schedules of a cronjob ([#99341](https://github.com/kubernetes/kubernetes/pull/99341), [@alaypatel07](https://github.com/alaypatel07))
+- Alpha implementation of Kubectl Command Headers: SIG CLI KEP 859, enabled when the KUBECTL_COMMAND_HEADERS environment variable is set on the client command line. ([#98952](https://github.com/kubernetes/kubernetes/pull/98952), [@seans3](https://github.com/seans3))
+- Base-images: Update to debian-iptables:buster-v1.4.0
+  - Uses iptables 1.8.5
+  - base-images: Update to debian-base:buster-v1.3.0
+  - cluster/images/etcd: Build etcd:3.4.13-2 image
+    - Uses debian-base:buster-v1.3.0 ([#98401](https://github.com/kubernetes/kubernetes/pull/98401), [@pacoxu](https://github.com/pacoxu)) [SIG Testing]
+- CRIContainerLogRotation graduates to GA and is unconditionally enabled. ([#99651](https://github.com/kubernetes/kubernetes/pull/99651), [@umohnani8](https://github.com/umohnani8))
+- Component owners can configure the allowlist of metric labels with the flag '--allow-metric-labels'. ([#99385](https://github.com/kubernetes/kubernetes/pull/99385), [@YoyinZyc](https://github.com/YoyinZyc)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Release]
+- Component owners can configure the allowlist of metric labels with the flag '--allow-metric-labels'. ([#99738](https://github.com/kubernetes/kubernetes/pull/99738), [@YoyinZyc](https://github.com/YoyinZyc)) [SIG API Machinery, Cluster Lifecycle and Instrumentation]
+- EmptyDir memory-backed volumes are sized as the minimum of pod allocatable memory on a host and an optional explicit user-provided value. ([#100319](https://github.com/kubernetes/kubernetes/pull/100319), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node]
+- Enables the kubelet to check volume condition and log events to corresponding pods. ([#99284](https://github.com/kubernetes/kubernetes/pull/99284), [@fengzixu](https://github.com/fengzixu)) [SIG Apps, Instrumentation, Node and Storage]
+- EndpointSliceNodeName graduates to GA and thus will be unconditionally enabled -- NodeName will always be available in the v1beta1 API. ([#99746](https://github.com/kubernetes/kubernetes/pull/99746), [@swetharepakula](https://github.com/swetharepakula))
+- Export the `NewDebuggingRoundTripper` function and `DebugLevel` options in the k8s.io/client-go/transport package. ([#98324](https://github.com/kubernetes/kubernetes/pull/98324), [@atosatto](https://github.com/atosatto))
+- Kube-proxy iptables: new metric sync_proxy_rules_iptables_total that exposes the number of rules programmed per table in each iteration ([#99653](https://github.com/kubernetes/kubernetes/pull/99653), [@aojea](https://github.com/aojea)) [SIG Instrumentation and Network]
+- Kube-scheduler now logs plugin scoring summaries at --v=4 ([#99411](https://github.com/kubernetes/kubernetes/pull/99411), [@damemi](https://github.com/damemi)) [SIG Scheduling]
+- Kubeadm now includes CoreDNS v1.8.0.
+  ([#96429](https://github.com/kubernetes/kubernetes/pull/96429), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle]
+- Kubeadm: the IPv6DualStack feature gate graduates to Beta and is enabled by default ([#99294](https://github.com/kubernetes/kubernetes/pull/99294), [@pacoxu](https://github.com/pacoxu))
+- Kubeadm: print a warning to the user, as ipv6 site-local addresses are deprecated ([#99574](https://github.com/kubernetes/kubernetes/pull/99574), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle and Network]
+- Kubeadm: add support for certificate chain validation. When using kubeadm in external CA mode, this allows an intermediate CA to be used to sign the certificates. The intermediate CA certificate must be appended to each signed certificate for this to work correctly. ([#97266](https://github.com/kubernetes/kubernetes/pull/97266), [@robbiemcmichael](https://github.com/robbiemcmichael)) [SIG Cluster Lifecycle]
+- Kubeadm: amend the node kernel validation to treat CGROUP_PIDS, FAIR_GROUP_SCHED as required and CFS_BANDWIDTH, CGROUP_HUGETLB as optional ([#96378](https://github.com/kubernetes/kubernetes/pull/96378), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle and Node]
+- Kubeadm: apply the "node.kubernetes.io/exclude-from-external-load-balancers" label on control plane nodes during "init", "join" and "upgrade" to preserve backwards compatibility with the legacy LB mode where nodes labeled as "master" were excluded. To opt out, you can remove the label from a node. See #97543 and the linked KEP for more details. ([#98269](https://github.com/kubernetes/kubernetes/pull/98269), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
+- Kubeadm: if the user has customized their image repository via the kubeadm configuration, pass the custom pause image repository and tag to the kubelet via --pod-infra-container-image not only for Docker but for all container runtimes. This flag tells the kubelet that it should not garbage collect the image. ([#99476](https://github.com/kubernetes/kubernetes/pull/99476), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
+- Kubeadm: perform pre-flight validation on host/node names upon `kubeadm init` and `kubeadm join`, showing warnings on non-compliant names ([#99194](https://github.com/kubernetes/kubernetes/pull/99194), [@pacoxu](https://github.com/pacoxu))
+- `kubectl version` now writes a warning message to stderr if the client and server version difference exceeds the supported version skew of +/-1 minor version. ([#98250](https://github.com/kubernetes/kubernetes/pull/98250), [@brianpursley](https://github.com/brianpursley)) [SIG CLI]
+- Kubectl: Add a `--use-protocol-buffers` flag to kubectl top pods and nodes. ([#96655](https://github.com/kubernetes/kubernetes/pull/96655), [@serathius](https://github.com/serathius))
+- Kubectl: `kubectl get` will omit managed fields by default now. Users can set `--show-managed-fields` to true to show managedFields when the output format is either `json` or `yaml`.
+  ([#96878](https://github.com/kubernetes/kubernetes/pull/96878), [@knight42](https://github.com/knight42)) [SIG CLI and Testing]
+- Kubectl: a container in a Pod can be preselected as the default using the `kubectl.kubernetes.io/default-container` annotation (see the example sketch below) ([#99833](https://github.com/kubernetes/kubernetes/pull/99833), [@mengjiao-liu](https://github.com/mengjiao-liu))
+- Kubectl: add bash-completion for comma-separated lists on `kubectl get` ([#98301](https://github.com/kubernetes/kubernetes/pull/98301), [@phil9909](https://github.com/phil9909))
+- Kubernetes is now built using go1.15.8 ([#98834](https://github.com/kubernetes/kubernetes/pull/98834), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Release and Testing]
+- Kubernetes is now built with Golang 1.16 ([#98572](https://github.com/kubernetes/kubernetes/pull/98572), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node, Release and Testing]
+- Kubernetes is now built with Golang 1.16.1 ([#100106](https://github.com/kubernetes/kubernetes/pull/100106), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing]
+- Metrics can now be disabled explicitly via a command line flag (i.e. '--disabled-metrics=metric1,metric2') ([#99217](https://github.com/kubernetes/kubernetes/pull/99217), [@logicalhan](https://github.com/logicalhan))
+- A new admission controller `DenyServiceExternalIPs` is available. Clusters which do not *need* the Service `externalIPs` feature should enable this controller to be more secure. ([#97395](https://github.com/kubernetes/kubernetes/pull/97395), [@thockin](https://github.com/thockin))
+- Overall, enabling the `PreferNominatedNode` feature will improve scheduling performance where preemption might frequently happen; but in theory, with `PreferNominatedNode` enabled, the pod might not be scheduled to the best candidate node in the cluster. ([#93179](https://github.com/kubernetes/kubernetes/pull/93179), [@chendave](https://github.com/chendave)) [SIG Scheduling and Testing]
+- Persistent Volumes formatted with the btrfs filesystem will now automatically resize when expanded. ([#99361](https://github.com/kubernetes/kubernetes/pull/99361), [@Novex](https://github.com/Novex)) [SIG Storage]
+- Port the devicemanager to Windows nodes to allow device plugins like directx ([#93285](https://github.com/kubernetes/kubernetes/pull/93285), [@aarnaud](https://github.com/aarnaud)) [SIG Node, Testing and Windows]
+- Removes cAdvisor JSON metrics (/stats/container, /stats/<podname>/<containername>, /stats/<namespace>/<podname>/<poduid>/<containername>) from the kubelet. ([#99236](https://github.com/kubernetes/kubernetes/pull/99236), [@pacoxu](https://github.com/pacoxu))
+- Rename the `etcd_object_counts` metric to `apiserver_storage_object_counts` and mark it as stable. The original `etcd_object_counts` metric name is marked as "Deprecated" and will be removed in the future. ([#99785](https://github.com/kubernetes/kubernetes/pull/99785), [@erain](https://github.com/erain)) [SIG API Machinery, Instrumentation and Testing]
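A sketch of the default-container annotation noted above; with it set, commands such as `kubectl exec` and `kubectl logs` should target the named container unless one is given explicitly (names and images are illustrative).

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: two-containers   # illustrative name
  annotations:
    kubectl.kubernetes.io/default-container: app
spec:
  containers:
    - name: app
      image: nginx
    - name: sidecar
      image: busybox
      command: ["sleep", "3600"]
```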
+- Sysctls graduates to General Availability and is thus unconditionally enabled.
+  ([#99158](https://github.com/kubernetes/kubernetes/pull/99158), [@wgahnagl](https://github.com/wgahnagl))
+- The Kubernetes pause image manifest list now contains an image for Windows Server 20H2. ([#97322](https://github.com/kubernetes/kubernetes/pull/97322), [@claudiubelu](https://github.com/claudiubelu)) [SIG Windows]
+- The NodeAffinity plugin implements the PreFilter extension, offering enhanced performance for Filter. ([#99213](https://github.com/kubernetes/kubernetes/pull/99213), [@AliceZhang2016](https://github.com/AliceZhang2016)) [SIG Scheduling]
+- The `CronJobControllerV2` feature flag graduates to Beta and is enabled by default. ([#98878](https://github.com/kubernetes/kubernetes/pull/98878), [@soltysh](https://github.com/soltysh))
+- The `EndpointSlice` mirroring controller mirrors endpoints annotations and labels to the generated endpoint slices, and it also ensures that updates on any of these fields are mirrored.
+  The well-known annotation `endpoints.kubernetes.io/last-change-trigger-time` is skipped and not mirrored. ([#98116](https://github.com/kubernetes/kubernetes/pull/98116), [@aojea](https://github.com/aojea))
+- The `RunAsGroup` feature has been promoted to GA in this release. ([#94641](https://github.com/kubernetes/kubernetes/pull/94641), [@krmayankk](https://github.com/krmayankk)) [SIG Auth and Node]
+- The `ServiceAccountIssuerDiscovery` feature has graduated to GA, and is unconditionally enabled. The `ServiceAccountIssuerDiscovery` feature-gate will be removed in 1.22. ([#98553](https://github.com/kubernetes/kubernetes/pull/98553), [@mtaufen](https://github.com/mtaufen)) [SIG API Machinery, Auth and Testing]
+- The `TTLAfterFinished` feature flag is now beta and enabled by default ([#98678](https://github.com/kubernetes/kubernetes/pull/98678), [@ahg-g](https://github.com/ahg-g))
+- The apimachinery util/net function used to detect the bind address, `ResolveBindAddress()`, now takes into consideration global IP addresses on loopback interfaces when 1) the host has default routes, or 2) there are no global IPs on those interfaces, in order to support more complex network scenarios like BGP Unnumbered per RFC 5549 ([#95790](https://github.com/kubernetes/kubernetes/pull/95790), [@aojea](https://github.com/aojea)) [SIG Network]
+- The feature gate `RootCAConfigMap` graduated to GA in v1.21 and therefore will be unconditionally enabled. The flag will be removed in the v1.22 release. ([#98033](https://github.com/kubernetes/kubernetes/pull/98033), [@zshihang](https://github.com/zshihang))
+- The pause image has been upgraded to `v3.4.1` in the kubelet and kubeadm for both Linux and Windows. ([#98205](https://github.com/kubernetes/kubernetes/pull/98205), [@pacoxu](https://github.com/pacoxu))
+- Update the pause container to run as pseudo user and group `65535:65535`. This implies the release of version 3.5 of the container images.
+  ([#97963](https://github.com/kubernetes/kubernetes/pull/97963), [@saschagrunert](https://github.com/saschagrunert)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Node, Release, Security and Testing]
+- Update the latest validated version of Docker to 20.10 ([#98977](https://github.com/kubernetes/kubernetes/pull/98977), [@neolit123](https://github.com/neolit123)) [SIG CLI, Cluster Lifecycle and Node]
+- Upgrade node local dns to 1.17.0 for better IPv6 support ([#99749](https://github.com/kubernetes/kubernetes/pull/99749), [@pacoxu](https://github.com/pacoxu)) [SIG Cloud Provider and Network]
+- Upgrades `IPv6Dualstack` to `Beta` and turns it on by default. New or existing clusters are not affected until an actor starts adding secondary Pod and service CIDR CLI flags as described here: [IPv4/IPv6 Dual-stack](https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/563-dual-stack) ([#98969](https://github.com/kubernetes/kubernetes/pull/98969), [@khenidak](https://github.com/khenidak))
+- Users might specify the `kubectl.kubernetes.io/default-container` annotation in a Pod to preselect a container for kubectl commands. ([#99581](https://github.com/kubernetes/kubernetes/pull/99581), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG CLI]
+- When downscaling ReplicaSets, ready and creation timestamps are compared on a logarithmic scale. ([#99212](https://github.com/kubernetes/kubernetes/pull/99212), [@damemi](https://github.com/damemi)) [SIG Apps and Testing]
+- When the kubelet is watching a ConfigMap or Secret purely in the context of setting environment variables
+  for containers, only hold that watch for a defined duration before cancelling it. This change reduces the CPU
+  and memory usage of the kube-apiserver in large clusters. ([#99393](https://github.com/kubernetes/kubernetes/pull/99393), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery, Node and Testing]
+- The WindowsEndpointSliceProxying feature gate has graduated to beta and is enabled by default. This means kube-proxy will read from EndpointSlices instead of Endpoints on Windows by default. ([#99794](https://github.com/kubernetes/kubernetes/pull/99794), [@robscott](https://github.com/robscott)) [SIG Network]
+- `kubectl wait` ensures that observedGeneration >= generation to prevent stale state reporting. An example scenario can be found with CRD updates. ([#97408](https://github.com/kubernetes/kubernetes/pull/97408), [@KnicKnic](https://github.com/KnicKnic))
 
 ### Documentation
 
-- Fake dynamic client: document that List does not preserve TypeMeta in UnstructuredList ([#95117](https://github.com/kubernetes/kubernetes/pull/95117), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery]
-- Kubelet: remove alpha warnings for CNI flags.
-  ([#94508](https://github.com/kubernetes/kubernetes/pull/94508), [@andrewsykim](https://github.com/andrewsykim)) [SIG Network and Node]
-- Updates docs and guidance on the cloud provider InstancesV2 and Zones interface for external cloud providers:
-  - removes the experimental warning for InstancesV2
-  - documents that implementation of InstancesV2 will disable calls to Zones
-  - deprecates Zones in favor of InstancesV2 ([#96397](https://github.com/kubernetes/kubernetes/pull/96397), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider]
+- Azure file migration graduates to beta, with the CSIMigrationAzureFile flag off by default
+  as it requires installation of the AzureFile CSI Driver. Users should enable the CSIMigration and
+  CSIMigrationAzureFile features and install the [AzureFile CSI Driver](https://github.com/kubernetes-sigs/azurefile-csi-driver)
+  to avoid disruption to existing Pod and PVC objects at that time. The Azure File CSI driver does not support using the same persistent
+  volume with different fsgroups. When CSI migration is enabled for the azurefile driver, such a case is not supported.
+  (There is a case we support where the volume is mounted with 0777 and is then readable/writable by everyone.) ([#96293](https://github.com/kubernetes/kubernetes/pull/96293), [@andyzhangx](https://github.com/andyzhangx))
+- Official support for building Kubernetes with docker-machine / remote docker is removed. This change does not affect building Kubernetes with docker locally. ([#97935](https://github.com/kubernetes/kubernetes/pull/97935), [@adeniyistephen](https://github.com/adeniyistephen)) [SIG Release and Testing]
+- Set the kubelet option `--volume-stats-agg-period` to a negative value to disable volume calculations (see the example sketch below). ([#96675](https://github.com/kubernetes/kubernetes/pull/96675), [@pacoxu](https://github.com/pacoxu)) [SIG Node]
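The `--volume-stats-agg-period` note above also has a config-file counterpart; this sketch assumes the KubeletConfiguration field `volumeStatsAggPeriod` mirrors the flag, which should be verified against the kubelet config API.

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Assumed config-file equivalent of --volume-stats-agg-period;
# a negative duration disables volume stats collection.
volumeStatsAggPeriod: "-1m"
```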
 
 ### Failing Test
 
-- Resolves an issue running Ingress conformance tests on clusters which use finalizers on Ingress objects to manage releasing load balancer resources ([#96742](https://github.com/kubernetes/kubernetes/pull/96742), [@spencerhance](https://github.com/spencerhance)) [SIG Network and Testing]
-- The Conformance test "validates that there is no conflict between pods with same hostPort but different hostIP and protocol" now validates the connectivity to each hostPort, in addition to the functionality. ([#96627](https://github.com/kubernetes/kubernetes/pull/96627), [@aojea](https://github.com/aojea)) [SIG Scheduling and Testing]
+- Escape special characters like `[`, `]` and ` ` that exist in vSphere Windows paths ([#98830](https://github.com/kubernetes/kubernetes/pull/98830), [@liyanhui1228](https://github.com/liyanhui1228)) [SIG Storage and Windows]
+- Kube-proxy: fix a bug on UDP `NodePort` Services where stale connection tracking entries may blackhole the traffic directed to the `NodePort` ([#98305](https://github.com/kubernetes/kubernetes/pull/98305), [@aojea](https://github.com/aojea))
+- Kubelet: fixes a bug in the HostPort dockershim implementation that caused the conformance test "HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol" to fail.
+  ([#98755](https://github.com/kubernetes/kubernetes/pull/98755), [@aojea](https://github.com/aojea)) [SIG Cloud Provider, Network and Node]
 
 ### Bug or Regression
 
-- Add a kubectl wait --ignore-not-found flag ([#90969](https://github.com/kubernetes/kubernetes/pull/90969), [@zhouya0](https://github.com/zhouya0)) [SIG CLI]
-- Added support to kube-proxy for the externalTrafficPolicy=Local setting via Direct Server Return (DSR) load balancers on Windows. ([#93166](https://github.com/kubernetes/kubernetes/pull/93166), [@elweb9858](https://github.com/elweb9858)) [SIG Network]
-- Alter wording to describe pods using a PVC ([#95635](https://github.com/kubernetes/kubernetes/pull/95635), [@RaunakShah](https://github.com/RaunakShah)) [SIG CLI]
-- An issue preventing the volume expand controller from annotating the PVC with `volume.kubernetes.io/storage-resizer` when the PVC StorageClass is already updated to the out-of-tree provisioner is now fixed. ([#94489](https://github.com/kubernetes/kubernetes/pull/94489), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG API Machinery, Apps and Storage]
-- Azure ARM client: don't segfault on empty response and HTTP error ([#94078](https://github.com/kubernetes/kubernetes/pull/94078), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider]
-- Azure armclient backoff step defaults to 1 (no retry). ([#94180](https://github.com/kubernetes/kubernetes/pull/94180), [@feiskyer](https://github.com/feiskyer))
-- Azure: fix a bug where kube-controller-manager would panic if a wrong Azure VMSS name is configured ([#94306](https://github.com/kubernetes/kubernetes/pull/94306), [@knight42](https://github.com/knight42)) [SIG Cloud Provider]
-- Both the apiserver_request_duration_seconds metrics and the RequestReceivedTimestamp field of an audit event now take into account the time a request spends in the apiserver request filters. ([#94903](https://github.com/kubernetes/kubernetes/pull/94903), [@tkashem](https://github.com/tkashem))
-- Build/lib/release: Explicitly use '--platform' in building server images
-
-  When we switched to go-runner for building the apiserver,
-  controller-manager, and scheduler server components, we no longer
-  reference the individual architectures in the image names, specifically
-  in the 'FROM' directive of the server image Dockerfiles.
-
-  As a result, server images for non-amd64 images copy in the go-runner
-  amd64 binary instead of the go-runner that matches that architecture.
-
-  This commit explicitly sets the '--platform=linux/${arch}' to ensure
-  we're pulling the correct go-runner arch from the manifest list.
-
-  Before:
-  `FROM ${base_image}`
-
-  After:
-  `FROM --platform=linux/${arch} ${base_image}` ([#94552](https://github.com/kubernetes/kubernetes/pull/94552), [@justaugustus](https://github.com/justaugustus)) [SIG Release]
-- Bump the node-problem-detector version to v0.8.5 to fix OOM detection with Linux kernels 5.1+ ([#96716](https://github.com/kubernetes/kubernetes/pull/96716), [@tosi3k](https://github.com/tosi3k)) [SIG Cloud Provider, Scalability and Testing]
-- A CSIDriver object can be deployed during volume attachment. ([#93710](https://github.com/kubernetes/kubernetes/pull/93710), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Node, Storage and Testing]
-- Ceph RBD volume expansion now works even when ceph.conf was not provided.
-- Added support to kube-proxy for the externalTrafficPolicy=Local setting via Direct Server Return (DSR) load balancers on Windows. ([#93166](https://github.com/kubernetes/kubernetes/pull/93166), [@elweb9858](https://github.com/elweb9858)) [SIG Network]
-- Alter wording to describe pods using a pvc ([#95635](https://github.com/kubernetes/kubernetes/pull/95635), [@RaunakShah](https://github.com/RaunakShah)) [SIG CLI]
-- An issue preventing the volume expand controller from annotating the PVC with `volume.kubernetes.io/storage-resizer` when the PVC StorageClass is already updated to the out-of-tree provisioner is now fixed. ([#94489](https://github.com/kubernetes/kubernetes/pull/94489), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG API Machinery, Apps and Storage]
-- Azure ARM client: don't segfault on empty response and http error ([#94078](https://github.com/kubernetes/kubernetes/pull/94078), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider]
-- Azure armclient backoff step defaults to 1 (no retry). ([#94180](https://github.com/kubernetes/kubernetes/pull/94180), [@feiskyer](https://github.com/feiskyer))
-- Azure: fix a bug where kube-controller-manager would panic if a wrong Azure VMSS name is configured ([#94306](https://github.com/kubernetes/kubernetes/pull/94306), [@knight42](https://github.com/knight42)) [SIG Cloud Provider]
-- Both the apiserver_request_duration_seconds metrics and the RequestReceivedTimestamp field of an audit event now take into account the time a request spends in the apiserver request filters. ([#94903](https://github.com/kubernetes/kubernetes/pull/94903), [@tkashem](https://github.com/tkashem))
-- Build/lib/release: Explicitly use '--platform' in building server images
-
-  When we switched to go-runner for building the apiserver,
-  controller-manager, and scheduler server components, we no longer
-  reference the individual architectures in the image names, specifically
-  in the 'FROM' directive of the server image Dockerfiles.
-
-  As a result, server images for non-amd64 architectures copy in the go-runner
-  amd64 binary instead of the go-runner that matches that architecture.
-
-  This commit explicitly sets the '--platform=linux/${arch}' to ensure
-  we're pulling the correct go-runner arch from the manifest list.
-
-  Before:
-  `FROM ${base_image}`
-
-  After:
-  `FROM --platform=linux/${arch} ${base_image}` ([#94552](https://github.com/kubernetes/kubernetes/pull/94552), [@justaugustus](https://github.com/justaugustus)) [SIG Release]
-- Bump node-problem-detector version to v0.8.5 to fix OOM detection with Linux kernels 5.1+ ([#96716](https://github.com/kubernetes/kubernetes/pull/96716), [@tosi3k](https://github.com/tosi3k)) [SIG Cloud Provider, Scalability and Testing]
-- CSIDriver object can be deployed during volume attachment. ([#93710](https://github.com/kubernetes/kubernetes/pull/93710), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Node, Storage and Testing]
-- Ceph RBD volume expansion now works even when ceph.conf was not provided. ([#92027](https://github.com/kubernetes/kubernetes/pull/92027), [@juliantaylor](https://github.com/juliantaylor))
-- Change plugin name in fsgroupapplymetrics of csi and flexvolume to distinguish different drivers ([#95892](https://github.com/kubernetes/kubernetes/pull/95892), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation, Storage and Testing]
-- Change the calculation of pod UIDs so that static pods get a unique value - this will cause all containers to be killed and recreated after an in-place upgrade. ([#87461](https://github.com/kubernetes/kubernetes/pull/87461), [@bboreham](https://github.com/bboreham)) [SIG Node]
-- Change the mount method from systemd to normal mount, except for the ceph and glusterfs in-tree volumes. ([#94916](https://github.com/kubernetes/kubernetes/pull/94916), [@smileusd](https://github.com/smileusd)) [SIG Apps, Cloud Provider, Network, Node, Storage and Testing]
-- Changes to timeout parameter handling in 1.20.0-beta.2 have been reverted to avoid breaking backwards compatibility with existing clients. ([#96727](https://github.com/kubernetes/kubernetes/pull/96727), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Testing]
-- Clear UDP conntrack entry on endpoint changes when using nodeport ([#71573](https://github.com/kubernetes/kubernetes/pull/71573), [@JacobTanenbaum](https://github.com/JacobTanenbaum)) [SIG Network]
-- Cloud node controller: handle empty providerID from getProviderID ([#95342](https://github.com/kubernetes/kubernetes/pull/95342), [@nicolehanjing](https://github.com/nicolehanjing)) [SIG Cloud Provider]
-- Disable watchcache for events ([#96052](https://github.com/kubernetes/kubernetes/pull/96052), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery]
-- Disabled `LocalStorageCapacityIsolation` feature gate is honored during scheduling. ([#96092](https://github.com/kubernetes/kubernetes/pull/96092), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling]
-- Do not fail sorting empty elements. ([#94666](https://github.com/kubernetes/kubernetes/pull/94666), [@soltysh](https://github.com/soltysh)) [SIG CLI]
-- Dual-stack: make nodeipam compatible with existing single-stack clusters when the dual-stack feature gate becomes enabled by default ([#90439](https://github.com/kubernetes/kubernetes/pull/90439), [@SataQiu](https://github.com/SataQiu)) [SIG API Machinery]
-- Duplicate owner reference entries in create/update/patch requests now get deduplicated by the API server. The client sending the request now receives a warning header in the API response. Clients should stop sending requests with duplicate owner references. The API server may reject such requests as early as 1.24. ([#96185](https://github.com/kubernetes/kubernetes/pull/96185), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery and Testing]
-- Endpoint slice controller now mirrors the parent service's label to its corresponding endpoint slices. ([#94443](https://github.com/kubernetes/kubernetes/pull/94443), [@aojea](https://github.com/aojea))
-- Ensure getPrimaryInterfaceID does not panic when network interfaces for Azure VMSS are null ([#94355](https://github.com/kubernetes/kubernetes/pull/94355), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider]
-- Exposes and sets a default timeout for the SubjectAccessReview client for DelegatingAuthorizationOptions ([#95725](https://github.com/kubernetes/kubernetes/pull/95725), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider]
-- Exposes and sets a default timeout for the TokenReview client for DelegatingAuthenticationOptions ([#96217](https://github.com/kubernetes/kubernetes/pull/96217), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider]
-- Fix CVE-2020-8555 for Quobyte client connections. ([#95206](https://github.com/kubernetes/kubernetes/pull/95206), [@misterikkit](https://github.com/misterikkit)) [SIG Storage]
-- Fix issues where IP fragmentation of UDP and TCP packets was not supported on LoadBalancer rules ([#96464](https://github.com/kubernetes/kubernetes/pull/96464), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
-- Fix a bug where the DefaultPreemption plugin is disabled when using the (legacy) scheduler policy. ([#96439](https://github.com/kubernetes/kubernetes/pull/96439), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing]
-- Fix a bug where loadbalancer deletion gets stuck because of a missing resource group. ([#93962](https://github.com/kubernetes/kubernetes/pull/93962), [@phiphi282](https://github.com/phiphi282))
-- Fix a concurrent map writes error in kubelet ([#93773](https://github.com/kubernetes/kubernetes/pull/93773), [@knight42](https://github.com/knight42)) [SIG Node]
-- Fix a panic in `kubectl debug` when a pod has multiple init or ephemeral containers. ([#94580](https://github.com/kubernetes/kubernetes/pull/94580), [@kiyoshim55](https://github.com/kiyoshim55))
-- Fix a regression where kubeadm bails out with a fatal error when an optional version command line argument is supplied to the "kubeadm upgrade plan" command ([#94421](https://github.com/kubernetes/kubernetes/pull/94421), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle]
-- Fix azure disk attach failure for disk size bigger than 4TB ([#95463](https://github.com/kubernetes/kubernetes/pull/95463), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
-- Fix azure disk data loss issue on Windows when unmounting a disk ([#95456](https://github.com/kubernetes/kubernetes/pull/95456), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
-- Fix azure file migration panic ([#94853](https://github.com/kubernetes/kubernetes/pull/94853), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
-- Fix bug in JSON path parser where an error occurs when a range is empty ([#95933](https://github.com/kubernetes/kubernetes/pull/95933), [@brianpursley](https://github.com/brianpursley)) [SIG API Machinery]
-- Fix client-go prometheus metrics to correctly present the API path accessed in some environments. ([#74363](https://github.com/kubernetes/kubernetes/pull/74363), [@aanm](https://github.com/aanm)) [SIG API Machinery]
-- Fix detach azure disk issue when the VM does not exist ([#95177](https://github.com/kubernetes/kubernetes/pull/95177), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
-- Fix etcd_object_counts metric reported by kube-apiserver ([#94773](https://github.com/kubernetes/kubernetes/pull/94773), [@tkashem](https://github.com/tkashem)) [SIG API Machinery]
-- Fix incorrectly reported verbs for kube-apiserver metrics for CRD objects ([#93523](https://github.com/kubernetes/kubernetes/pull/93523), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation]
-- Fix k8s.io/apimachinery/pkg/api/meta.SetStatusCondition to update ObservedGeneration ([#95961](https://github.com/kubernetes/kubernetes/pull/95961), [@KnicKnic](https://github.com/KnicKnic)) [SIG API Machinery]
-- Fix kubectl SchemaError on CRDs with schema using x-kubernetes-preserve-unknown-fields on array types. ([#94888](https://github.com/kubernetes/kubernetes/pull/94888), [@sttts](https://github.com/sttts)) [SIG API Machinery]
-- Fix memory leak in kube-apiserver when the underlying time goes back and forth. ([#96266](https://github.com/kubernetes/kubernetes/pull/96266), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery]
-- Fix missing csi annotations on node during parallel csinode update. ([#94389](https://github.com/kubernetes/kubernetes/pull/94389), [@pacoxu](https://github.com/pacoxu)) [SIG Storage]
-- Fix network_programming_latency metric reporting for Endpoints/EndpointSlice deletions, where we don't have a correct timestamp ([#95363](https://github.com/kubernetes/kubernetes/pull/95363), [@wojtek-t](https://github.com/wojtek-t)) [SIG Network and Scalability]
-- Fix paging issues when the Azure API returns empty values with a non-empty nextLink ([#96211](https://github.com/kubernetes/kubernetes/pull/96211), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider]
-- Fix pull image error from multiple ACRs using azure managed identity ([#96355](https://github.com/kubernetes/kubernetes/pull/96355), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
-- Fix race condition on timeCache locks. ([#94751](https://github.com/kubernetes/kubernetes/pull/94751), [@auxten](https://github.com/auxten))
-- Fix regression on `kubectl port-forward` when TCP and UDP services were configured on the same port. ([#94728](https://github.com/kubernetes/kubernetes/pull/94728), [@amorenoz](https://github.com/amorenoz))
-- Fix scheduler cache snapshot when a Node is deleted before its Pods ([#95130](https://github.com/kubernetes/kubernetes/pull/95130), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling]
-- Fix the `cloudprovider_azure_api_request_duration_seconds` metric buckets to correctly capture the latency metrics. Previously, the majority of the calls would fall in the "+Inf" bucket. ([#94873](https://github.com/kubernetes/kubernetes/pull/94873), [@marwanad](https://github.com/marwanad)) [SIG Cloud Provider and Instrumentation]
-- Fix vSphere volumes that could be erroneously attached to the wrong node ([#96224](https://github.com/kubernetes/kubernetes/pull/96224), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage]
-- Fix verb & scope reporting for kube-apiserver metrics (LIST reported instead of GET) ([#95562](https://github.com/kubernetes/kubernetes/pull/95562), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing]
-- Fix vsphere detach failure for static PVs ([#95447](https://github.com/kubernetes/kubernetes/pull/95447), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage]
-- Fix: azure disk resize error if source does not exist ([#93011](https://github.com/kubernetes/kubernetes/pull/93011), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
-- Fix: detach azure disk broken on Azure Stack ([#94885](https://github.com/kubernetes/kubernetes/pull/94885), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
-- Fix: resize Azure disk issue when it's in attached state ([#96705](https://github.com/kubernetes/kubernetes/pull/96705), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
-- Fix: smb valid path error ([#95583](https://github.com/kubernetes/kubernetes/pull/95583), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage]
-- Fix: use sensitiveOptions on Windows mount ([#94126](https://github.com/kubernetes/kubernetes/pull/94126), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
-- Fixed a bug causing incorrect formatting of `kubectl describe ingress`. ([#94985](https://github.com/kubernetes/kubernetes/pull/94985), [@howardjohn](https://github.com/howardjohn)) [SIG CLI and Network]
-- Fixed a bug in client-go where new clients with customized `Dial`, `Proxy`, `GetCert` config may get stale HTTP transports. ([#95427](https://github.com/kubernetes/kubernetes/pull/95427), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery]
-- Fixed a bug that prevented kubectl from validating CRDs with schema using x-kubernetes-preserve-unknown-fields on object fields. ([#96369](https://github.com/kubernetes/kubernetes/pull/96369), [@gautierdelorme](https://github.com/gautierdelorme)) [SIG API Machinery and Testing]
-- Fixed a bug that prevented the use of ephemeral containers in the presence of a validating admission webhook. ([#94685](https://github.com/kubernetes/kubernetes/pull/94685), [@verb](https://github.com/verb)) [SIG Node and Testing]
-- Fixed a bug where aggregator_unavailable_apiservice metrics were reported for deleted apiservices. ([#96421](https://github.com/kubernetes/kubernetes/pull/96421), [@dgrisonnet](https://github.com/dgrisonnet)) [SIG API Machinery and Instrumentation]
-- Fixed a bug where improper storage and comparison of endpoints led to excessive API traffic from the endpoints controller ([#94112](https://github.com/kubernetes/kubernetes/pull/94112), [@damemi](https://github.com/damemi)) [SIG Apps, Network and Testing]
-- Fixed a regression which prevented pods with `docker/default` seccomp annotations from being created in 1.19 if a PodSecurityPolicy was in place which did not allow `runtime/default` seccomp profiles. ([#95985](https://github.com/kubernetes/kubernetes/pull/95985), [@saschagrunert](https://github.com/saschagrunert)) [SIG Auth]
-- Fixed bug in reflector that couldn't recover from "Too large resource version" errors with API servers 1.17.0-1.18.5 ([#94316](https://github.com/kubernetes/kubernetes/pull/94316), [@janeczku](https://github.com/janeczku)) [SIG API Machinery]
-- Fixed bug where kubectl top pod output was not sorted when the --sort-by and --containers flags were used together ([#93692](https://github.com/kubernetes/kubernetes/pull/93692), [@brianpursley](https://github.com/brianpursley)) [SIG CLI]
-- Fixed kubelet creating extra sandbox for pods with RestartPolicyOnFailure after all containers succeeded ([#92614](https://github.com/kubernetes/kubernetes/pull/92614), [@tnqn](https://github.com/tnqn)) [SIG Node and Testing]
-- Fixes an issue proxying to ipv6 pods without specifying a port ([#94834](https://github.com/kubernetes/kubernetes/pull/94834), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Network]
-- Fixes code generation for non-namespaced create subresources fake client test. ([#96586](https://github.com/kubernetes/kubernetes/pull/96586), [@Doude](https://github.com/Doude)) [SIG API Machinery]
-- Fixes high CPU usage in kubectl drain ([#95260](https://github.com/kubernetes/kubernetes/pull/95260), [@amandahla](https://github.com/amandahla)) [SIG CLI]
-- For the vSphere Cloud Provider, if the VM of a worker node is deleted, the node will also be deleted by the node controller ([#92608](https://github.com/kubernetes/kubernetes/pull/92608), [@lubronzhan](https://github.com/lubronzhan)) [SIG Cloud Provider]
-- Gracefully delete nodes when their parent scale set goes missing ([#95289](https://github.com/kubernetes/kubernetes/pull/95289), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider]
-- HTTP/2 connection health check is enabled by default in all Kubernetes clients. The feature should work out-of-the-box. If needed, users can tune the feature via the HTTP2_READ_IDLE_TIMEOUT_SECONDS and HTTP2_PING_TIMEOUT_SECONDS environment variables. The feature is disabled if HTTP2_READ_IDLE_TIMEOUT_SECONDS is set to 0. ([#95981](https://github.com/kubernetes/kubernetes/pull/95981), [@caesarxuchao](https://github.com/caesarxuchao)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node]
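For illustration, the tuning knobs named in the HTTP/2 entry above are plain environment variables; the timeout values below are arbitrary, not recommendations:

```shell
# Sketch: tune the HTTP/2 connection health check for a Kubernetes client.
export HTTP2_READ_IDLE_TIMEOUT_SECONDS=30
export HTTP2_PING_TIMEOUT_SECONDS=10
kubectl get pods          # the client reads the settings from the environment

# Per the entry above, a read idle timeout of 0 disables the health check.
export HTTP2_READ_IDLE_TIMEOUT_SECONDS=0
```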
-- If the user specifies an invalid timeout in the request URL, the request will be aborted with an HTTP 400.
-  - If the user specifies a timeout in the request URL that exceeds the maximum request deadline allowed by the apiserver, the request will be aborted with an HTTP 400. ([#96061](https://github.com/kubernetes/kubernetes/pull/96061), [@tkashem](https://github.com/tkashem)) [SIG API Machinery, Network and Testing]
-- Fix a bug where the Horizontal Pod Autoscaler doesn't automatically scale the number of pods correctly when SelectPolicy MinPolicySelect is set on the scaleUp or scaleDown behavior ([#95647](https://github.com/kubernetes/kubernetes/pull/95647), [@JoshuaAndrew](https://github.com/JoshuaAndrew)) [SIG Apps and Autoscaling]
-- Ignore apparmor for non-linux operating systems ([#93220](https://github.com/kubernetes/kubernetes/pull/93220), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows]
-- Ignore root user check when windows pod starts ([#92355](https://github.com/kubernetes/kubernetes/pull/92355), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows]
-- Improve error messages related to conntrack entry cleanup on nodePort endpoint changes. ([#96251](https://github.com/kubernetes/kubernetes/pull/96251), [@ravens](https://github.com/ravens)) [SIG Network]
-- In dual-stack clusters, kubelet will now set up both IPv4 and IPv6 iptables rules, which may
-  fix some problems, e.g. with HostPorts. ([#94474](https://github.com/kubernetes/kubernetes/pull/94474), [@danwinship](https://github.com/danwinship)) [SIG Network and Node]
-- Increase maximum IOPS of AWS EBS io1 volume to the current maximum (64,000). ([#90014](https://github.com/kubernetes/kubernetes/pull/90014), [@jacobmarble](https://github.com/jacobmarble))
-- Ipvs: ensure selected scheduler kernel modules are loaded ([#93040](https://github.com/kubernetes/kubernetes/pull/93040), [@cmluciano](https://github.com/cmluciano)) [SIG Network]
-- K8s.io/apimachinery: runtime.DefaultUnstructuredConverter.FromUnstructured now handles converting integer fields to typed float values ([#93250](https://github.com/kubernetes/kubernetes/pull/93250), [@liggitt](https://github.com/liggitt)) [SIG API Machinery]
-- Kube-proxy now trims extra spaces found in loadBalancerSourceRanges to match Service validation. ([#94107](https://github.com/kubernetes/kubernetes/pull/94107), [@robscott](https://github.com/robscott)) [SIG Network]
-- Kubeadm ensures "kubeadm reset" does not unmount the root "/var/lib/kubelet" directory if it is mounted by the user. ([#93702](https://github.com/kubernetes/kubernetes/pull/93702), [@thtanaka](https://github.com/thtanaka))
-- Kubeadm now makes sure the etcd manifest is regenerated upon upgrade even when no etcd version change takes place ([#94395](https://github.com/kubernetes/kubernetes/pull/94395), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle]
-- Kubeadm now warns (instead of erroring out) on missing "ca.key" files for root CA, front-proxy CA and etcd CA, during "kubeadm join --control-plane" if the user has provided all certificates, keys and kubeconfig files which require signing with the given CA keys. ([#94988](https://github.com/kubernetes/kubernetes/pull/94988), [@neolit123](https://github.com/neolit123))
-- Kubeadm: add missing "--experimental-patches" flag to "kubeadm init phase control-plane" ([#95786](https://github.com/kubernetes/kubernetes/pull/95786), [@Sh4d1](https://github.com/Sh4d1)) [SIG Cluster Lifecycle]
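A minimal sketch of the flag from the entry above, assuming a directory of patch files for the control-plane static Pod manifests; the path is hypothetical:

```shell
# Sketch: regenerate control-plane manifests, applying patch files from an
# assumed directory; file naming follows kubeadm's patch conventions.
kubeadm init phase control-plane all --experimental-patches /etc/kubernetes/patches
```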
-- Kubeadm: avoid a panic when determining if the running version of CoreDNS is supported during upgrades ([#94299](https://github.com/kubernetes/kubernetes/pull/94299), [@zouyee](https://github.com/zouyee)) [SIG Cluster Lifecycle]
-- Kubeadm: ensure the etcd data directory is created with 0700 permissions during control-plane init and join ([#94102](https://github.com/kubernetes/kubernetes/pull/94102), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubeadm: fix CoreDNS migration so that it is triggered when there are new default configs during kubeadm upgrade ([#96907](https://github.com/kubernetes/kubernetes/pull/96907), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle]
-- Kubeadm: fix the bug that kubeadm tries to call 'docker info' even if the CRI socket was for another CRI ([#94555](https://github.com/kubernetes/kubernetes/pull/94555), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle]
-- Kubeadm: for Docker as the container runtime, make the "kubeadm reset" command stop containers before removing them ([#94586](https://github.com/kubernetes/kubernetes/pull/94586), [@BedivereZero](https://github.com/BedivereZero)) [SIG Cluster Lifecycle]
-- Kubeadm: make the kubeconfig files for the kube-controller-manager and kube-scheduler use the LocalAPIEndpoint instead of the ControlPlaneEndpoint. This makes kubeadm clusters more resilient to version skew problems during immutable upgrades: https://kubernetes.io/docs/setup/release/version-skew-policy/#kube-controller-manager-kube-scheduler-and-cloud-controller-manager ([#94398](https://github.com/kubernetes/kubernetes/pull/94398), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubeadm: relax the validation of kubeconfig server URLs. Allow the user to define custom kubeconfig server URLs without erroring out during validation of existing kubeconfig files (e.g. when using external CA mode). ([#94816](https://github.com/kubernetes/kubernetes/pull/94816), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubectl: print error if users place flags before plugin name ([#92343](https://github.com/kubernetes/kubernetes/pull/92343), [@knight42](https://github.com/knight42)) [SIG CLI]
-- Kubelet: assume that swap is disabled when `/proc/swaps` does not exist ([#93931](https://github.com/kubernetes/kubernetes/pull/93931), [@SataQiu](https://github.com/SataQiu)) [SIG Node]
-- New Azure instance types now have correct max data disk count information. ([#94340](https://github.com/kubernetes/kubernetes/pull/94340), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Cloud Provider and Storage]
-- Port mapping now allows mapping the same `containerPort` of different containers to different `hostPort` values without naming the mapping explicitly. ([#94494](https://github.com/kubernetes/kubernetes/pull/94494), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev))
-- Print go stack traces at -v=4 and not -v=2 ([#94663](https://github.com/kubernetes/kubernetes/pull/94663), [@soltysh](https://github.com/soltysh)) [SIG CLI]
-- Recreate EndpointSlices on rapid Service creation. ([#94730](https://github.com/kubernetes/kubernetes/pull/94730), [@robscott](https://github.com/robscott))
-- Reduce volume name length for vsphere volumes ([#96533](https://github.com/kubernetes/kubernetes/pull/96533), [@gnufied](https://github.com/gnufied)) [SIG Storage]
-- Remove the ready file and its directory (which is created during volume SetUp) during emptyDir volume TearDown. ([#95770](https://github.com/kubernetes/kubernetes/pull/95770), [@jingxu97](https://github.com/jingxu97)) [SIG Storage]
-- Reorganized iptables rules to fix a performance issue ([#95252](https://github.com/kubernetes/kubernetes/pull/95252), [@tssurya](https://github.com/tssurya)) [SIG Network]
-- Require feature flag CustomCPUCFSQuotaPeriod if setting a non-default cpuCFSQuotaPeriod in kubelet config. ([#94687](https://github.com/kubernetes/kubernetes/pull/94687), [@karan](https://github.com/karan)) [SIG Node]
-- Resolves a regression in 1.19+ with workloads targeting deprecated beta os/arch labels getting stuck in NodeAffinity status on node startup. ([#96810](https://github.com/kubernetes/kubernetes/pull/96810), [@liggitt](https://github.com/liggitt)) [SIG Node]
-- Resolves non-deterministic behavior of the garbage collection controller when ownerReferences with incorrect data are encountered. Events with a reason of `OwnerRefInvalidNamespace` are recorded when namespace mismatches between child and owner objects are detected. The [kubectl-check-ownerreferences](https://github.com/kubernetes-sigs/kubectl-check-ownerreferences) tool can be run prior to upgrading to locate existing objects with invalid ownerReferences.
-
-  A namespaced object with an ownerReference referencing a uid of a namespaced kind which does not exist in the same namespace is now consistently treated as though that owner does not exist, and the child object is deleted.
-
-  A cluster-scoped object with an ownerReference referencing a uid of a namespaced kind is now consistently treated as though that owner is not resolvable, and the child object is ignored by the garbage collector. ([#92743](https://github.com/kubernetes/kubernetes/pull/92743), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Testing]
-- Skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:162]: Driver azure-disk doesn't support snapshot type DynamicSnapshot -- skipping
-  skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:185]: Driver azure-disk doesn't support ntfs -- skipping ([#96144](https://github.com/kubernetes/kubernetes/pull/96144), [@qinpingli](https://github.com/qinpingli)) [SIG Storage and Testing]
-- StatefulSet Controller now waits for PersistentVolumeClaim deletion before creating pods. ([#93457](https://github.com/kubernetes/kubernetes/pull/93457), [@ymmt2005](https://github.com/ymmt2005))
-- StreamWatcher now calls HandleCrash in the appropriate sequence. ([#93108](https://github.com/kubernetes/kubernetes/pull/93108), [@lixiaobing1](https://github.com/lixiaobing1))
-- Support the node label `node.kubernetes.io/exclude-from-external-load-balancers` ([#95542](https://github.com/kubernetes/kubernetes/pull/95542), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
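For illustration, the label from the entry above applied with kubectl; the node name is made up:

```shell
# Sketch: exclude a node from external load balancer target pools.
kubectl label node worker-1 node.kubernetes.io/exclude-from-external-load-balancers=true
```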
-- The AWS network load balancer attributes can now be specified during service creation ([#95247](https://github.com/kubernetes/kubernetes/pull/95247), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider]
-- The `/debug/api_priority_and_fairness/dump_requests` path at an apiserver will no longer return a phantom line for each exempt priority level. ([#93406](https://github.com/kubernetes/kubernetes/pull/93406), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery]
-- The kube-apiserver will no longer serve APIs that should have been deleted in GA non-alpha levels. Alpha levels will continue to serve the removed APIs so that CI doesn't immediately break. ([#96525](https://github.com/kubernetes/kubernetes/pull/96525), [@deads2k](https://github.com/deads2k)) [SIG API Machinery]
-- The kubelet recognizes the --containerd-namespace flag to configure the namespace used by cadvisor. ([#87054](https://github.com/kubernetes/kubernetes/pull/87054), [@changyaowei](https://github.com/changyaowei)) [SIG Node]
-- Unhealthy pods covered by PDBs can be successfully evicted if enough healthy pods are available. ([#94381](https://github.com/kubernetes/kubernetes/pull/94381), [@michaelgugino](https://github.com/michaelgugino)) [SIG Apps]
-- Update Calico to v3.15.2 ([#94241](https://github.com/kubernetes/kubernetes/pull/94241), [@lmm](https://github.com/lmm)) [SIG Cloud Provider]
-- Update default etcd server version to 3.4.13 ([#94287](https://github.com/kubernetes/kubernetes/pull/94287), [@jingyih](https://github.com/jingyih)) [SIG API Machinery, Cloud Provider, Cluster Lifecycle and Testing]
-- Update max azure data disk count map ([#96308](https://github.com/kubernetes/kubernetes/pull/96308), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
-- Update the PIP when it is not in the Succeeded provisioning state during the LB update. ([#95748](https://github.com/kubernetes/kubernetes/pull/95748), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
-- Update the frontend IP config when the service's `pipName` annotation is changed ([#95813](https://github.com/kubernetes/kubernetes/pull/95813), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
-- Update the route table tag in the route reconcile loop ([#96545](https://github.com/kubernetes/kubernetes/pull/96545), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
-- Use NLB Subnet CIDRs instead of VPC CIDRs in Health Check SG Rules ([#93515](https://github.com/kubernetes/kubernetes/pull/93515), [@t0rr3sp3dr0](https://github.com/t0rr3sp3dr0)) [SIG Cloud Provider]
-- Users will see an increase in time for deletion of pods, with the guarantee that removal of a pod from the API server means all of its resources have been deleted from the container runtime. ([#92817](https://github.com/kubernetes/kubernetes/pull/92817), [@kmala](https://github.com/kmala)) [SIG Node]
-- Very large patches may now be specified to `kubectl patch` with the `--patch-file` flag instead of including them directly on the command line. The `--patch` and `--patch-file` flags are mutually exclusive. ([#93548](https://github.com/kubernetes/kubernetes/pull/93548), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI]
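A sketch of the `--patch-file` flag from the entry above; the deployment name and patch body are illustrative:

```shell
# Sketch: keep a large patch in a file instead of inlining it on the command line.
cat > patch.json <<'EOF'
{"spec": {"replicas": 3}}
EOF
kubectl patch deployment example-deployment --patch-file patch.json
```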
-- Volume binding: report UnschedulableAndUnresolvable status instead of an error when bound PVs are not found ([#95541](https://github.com/kubernetes/kubernetes/pull/95541), [@cofyc](https://github.com/cofyc)) [SIG Apps, Scheduling and Storage]
-- Warn instead of failing when creating Roles and ClusterRoles with custom verbs via kubectl ([#92492](https://github.com/kubernetes/kubernetes/pull/92492), [@eddiezane](https://github.com/eddiezane)) [SIG CLI]
-- When creating a PVC with the volume.beta.kubernetes.io/storage-provisioner annotation already set, the PV controller might have incorrectly deleted the newly provisioned PV instead of binding it to the PVC, depending on timing and system load. ([#95909](https://github.com/kubernetes/kubernetes/pull/95909), [@pohly](https://github.com/pohly)) [SIG Apps and Storage]
-- [kubectl] Fail when local source file doesn't exist ([#90333](https://github.com/kubernetes/kubernetes/pull/90333), [@bamarni](https://github.com/bamarni)) [SIG CLI]
+- AcceleratorStats will be available in the Summary API of kubelet when cri_stats_provider is used. ([#96873](https://github.com/kubernetes/kubernetes/pull/96873), [@ruiwen-zhao](https://github.com/ruiwen-zhao)) [SIG Node]
+- All data is no longer automatically deleted when a failure is detected during creation of the volume data file on a CSI volume. Now only the data file and volume path are removed. ([#96021](https://github.com/kubernetes/kubernetes/pull/96021), [@huffmanca](https://github.com/huffmanca))
+- Clean up ReplicaSets by revision instead of creation timestamp in the deployment controller ([#97407](https://github.com/kubernetes/kubernetes/pull/97407), [@waynepeking348](https://github.com/waynepeking348)) [SIG Apps]
+- Cleanup subnet in frontend IP configs to prevent huge subnet request bodies in some scenarios. ([#98133](https://github.com/kubernetes/kubernetes/pull/98133), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
+- Client-go exec credential plugins will pass stdin only when an interactive terminal is detected on stdin. This fixes a bug where previously it was checking if **stdout** is an interactive terminal. ([#99654](https://github.com/kubernetes/kubernetes/pull/99654), [@ankeesler](https://github.com/ankeesler))
+- Cloud-controller-manager: routes controller should not depend on --allocate-node-cidrs ([#97029](https://github.com/kubernetes/kubernetes/pull/97029), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider and Testing]
+- Cluster Autoscaler version bump to v1.20.0 ([#97011](https://github.com/kubernetes/kubernetes/pull/97011), [@towca](https://github.com/towca))
+- Creating a PVC with DataSource should fail for non-CSI plugins. ([#97086](https://github.com/kubernetes/kubernetes/pull/97086), [@xing-yang](https://github.com/xing-yang)) [SIG Apps and Storage]
+- EndpointSlice controller is now less likely to emit FailedToUpdateEndpointSlices events. ([#99345](https://github.com/kubernetes/kubernetes/pull/99345), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
+- EndpointSlice controllers are less likely to create duplicate EndpointSlices. ([#100103](https://github.com/kubernetes/kubernetes/pull/100103), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
+- EndpointSliceMirroring controller is now less likely to emit FailedToUpdateEndpointSlices events. ([#99756](https://github.com/kubernetes/kubernetes/pull/99756), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
+- Ensure all vSphere nodes are tracked by the volume attach-detach controller ([#96689](https://github.com/kubernetes/kubernetes/pull/96689), [@gnufied](https://github.com/gnufied))
+- Ensure empty string annotations are copied over in rollbacks. ([#94858](https://github.com/kubernetes/kubernetes/pull/94858), [@waynepeking348](https://github.com/waynepeking348))
+- Ensure only one LoadBalancer rule is created when HA mode is enabled ([#99825](https://github.com/kubernetes/kubernetes/pull/99825), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider]
+- Ensure that client-go's EventBroadcaster is safe (non-racy) during shutdown. ([#95664](https://github.com/kubernetes/kubernetes/pull/95664), [@DirectXMan12](https://github.com/DirectXMan12)) [SIG API Machinery]
+- Explicitly pass `KUBE_BUILD_CONFORMANCE=y` in `package-tarballs` to reenable building the conformance tarballs. ([#100571](https://github.com/kubernetes/kubernetes/pull/100571), [@puerco](https://github.com/puerco))
+- Fix Azure file migration e2e test failure when CSIMigration is turned on. ([#97877](https://github.com/kubernetes/kubernetes/pull/97877), [@andyzhangx](https://github.com/andyzhangx))
+- Fix CSI-migrated inline EBS volumes failing to mount if their volumeID is prefixed by aws:// ([#96821](https://github.com/kubernetes/kubernetes/pull/96821), [@wongma7](https://github.com/wongma7)) [SIG Storage]
+- Fix CVE-2020-8555 for Gluster client connections. ([#97922](https://github.com/kubernetes/kubernetes/pull/97922), [@liggitt](https://github.com/liggitt)) [SIG Storage]
+- Fix NPE in ephemeral storage eviction ([#98261](https://github.com/kubernetes/kubernetes/pull/98261), [@wzshiming](https://github.com/wzshiming)) [SIG Node]
+- Fix PermissionDenied issue on SMB mount for Windows ([#99550](https://github.com/kubernetes/kubernetes/pull/99550), [@andyzhangx](https://github.com/andyzhangx))
+- Fix a bug that would let the Horizontal Pod Autoscaler scale down despite at least one metric being unavailable/invalid ([#99514](https://github.com/kubernetes/kubernetes/pull/99514), [@mikkeloscar](https://github.com/mikkeloscar)) [SIG Apps and Autoscaling]
+- Fix cgroup handling for systemd with cgroup v2 ([#98365](https://github.com/kubernetes/kubernetes/pull/98365), [@odinuge](https://github.com/odinuge)) [SIG Node]
+- Fix counting error in service/nodeport/loadbalancer quota check ([#97451](https://github.com/kubernetes/kubernetes/pull/97451), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery, Network and Testing]
+- Fix errors when accessing Windows container stats for Dockershim ([#98510](https://github.com/kubernetes/kubernetes/pull/98510), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node and Windows]
+- Fix kube-proxy container image architecture for non-amd64 images. ([#98526](https://github.com/kubernetes/kubernetes/pull/98526), [@saschagrunert](https://github.com/saschagrunert))
+- Fix missing cadvisor machine metrics. ([#97006](https://github.com/kubernetes/kubernetes/pull/97006), [@lingsamuel](https://github.com/lingsamuel)) [SIG Node]
+- Fix nil VMSS name when setting service to auto mode ([#97366](https://github.com/kubernetes/kubernetes/pull/97366), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
+- Fix privileged config of Pod Sandbox which was previously ignored. ([#96877](https://github.com/kubernetes/kubernetes/pull/96877), [@xeniumlee](https://github.com/xeniumlee))
+- Fix the panic when kubelet registers if a node object already exists with no Status.Capacity or Status.Allocatable ([#95269](https://github.com/kubernetes/kubernetes/pull/95269), [@SataQiu](https://github.com/SataQiu)) [SIG Node]
+- Fix a regression that slowed down pod termination; before this fix, pods could take up to an additional minute to terminate. This reverses the change that ensured CNI resources were cleaned up when the pod was removed from the API server. ([#97980](https://github.com/kubernetes/kubernetes/pull/97980), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Node]
+- Fix to recover CSI volumes from certain dangling attachments ([#96617](https://github.com/kubernetes/kubernetes/pull/96617), [@yuga711](https://github.com/yuga711)) [SIG Apps and Storage]
+- Fix: azure file latency issue for metadata-heavy workloads ([#97082](https://github.com/kubernetes/kubernetes/pull/97082), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
+- Fixed Cinder volume IDs on OpenStack Train ([#96673](https://github.com/kubernetes/kubernetes/pull/96673), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider]
+- Fixed FibreChannel volume plugin corrupting filesystems on detach of multipath volumes. ([#97013](https://github.com/kubernetes/kubernetes/pull/97013), [@jsafrane](https://github.com/jsafrane)) [SIG Storage]
+- Fixed a bug in kubelet that would saturate CPU utilization after containerd was restarted. ([#97174](https://github.com/kubernetes/kubernetes/pull/97174), [@hanlins](https://github.com/hanlins)) [SIG Node]
+- Fixed a bug that caused a smaller conntrack-max value to be used under the static CPU policy. (#99225, @xh4n3) ([#99613](https://github.com/kubernetes/kubernetes/pull/99613), [@xh4n3](https://github.com/xh4n3)) [SIG Network]
+- Fixed a bug where, on k8s nodes, the healthcheck nodeport would not work when the policy of the INPUT chain in the filter table was not ACCEPT.
+  Added iptables rules to allow healthcheck nodeport traffic. ([#97824](https://github.com/kubernetes/kubernetes/pull/97824), [@hanlins](https://github.com/hanlins)) [SIG Network]
+- Fixed a bug where the kubelet cannot start on Btrfs. ([#98042](https://github.com/kubernetes/kubernetes/pull/98042), [@gjkim42](https://github.com/gjkim42)) [SIG Node]
+- Fixed a race condition on API server startup ensuring previously created webhook configurations are effective before the first write request is admitted. ([#95783](https://github.com/kubernetes/kubernetes/pull/95783), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery]
+- Fixed an issue with garbage collection failing to clean up namespaced children of an object also referenced incorrectly by cluster-scoped children ([#98068](https://github.com/kubernetes/kubernetes/pull/98068), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Apps]
+- Fixed authentication_duration_seconds metric scope. Previously, it included the whole apiserver request duration, which yielded inaccurate results. ([#99944](https://github.com/kubernetes/kubernetes/pull/99944), [@marseel](https://github.com/marseel))
+- Fixed bug in CPUManager with race on container map access ([#97427](https://github.com/kubernetes/kubernetes/pull/97427), [@klueska](https://github.com/klueska)) [SIG Node]
+- Fixed bug that caused cAdvisor to incorrectly detect single-socket multi-NUMA topology. ([#99315](https://github.com/kubernetes/kubernetes/pull/99315), [@iwankgb](https://github.com/iwankgb)) [SIG Node]
+- Fixed cleanup of block devices when /var/lib/kubelet is a symlink. ([#96889](https://github.com/kubernetes/kubernetes/pull/96889), [@jsafrane](https://github.com/jsafrane)) [SIG Storage]
+- Fixed the namespace having no effect when exposing a deployment with --dry-run=client. ([#97492](https://github.com/kubernetes/kubernetes/pull/97492), [@masap](https://github.com/masap)) [SIG CLI]
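For illustration, the scenario fixed by the `--dry-run=client` entry above; the deployment and namespace names are made up:

```shell
# Sketch: render the Service that exposing a deployment would create in a
# given namespace, without persisting anything on the server.
kubectl expose deployment example-deployment --port=80 \
  --namespace=staging --dry-run=client -o yaml
```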
+- Fixed provisioning of Cinder volumes migrated to CSI when StorageClass with AllowedTopologies was used. ([#98311](https://github.com/kubernetes/kubernetes/pull/98311), [@jsafrane](https://github.com/jsafrane)) [SIG Storage]
+- Fixes a bug in identifying the correct containerd process. ([#97888](https://github.com/kubernetes/kubernetes/pull/97888), [@pacoxu](https://github.com/pacoxu))
+- Fixes add-on manager leader election to use leases instead of endpoints, similar to what kube-controller-manager does in 1.20 ([#98968](https://github.com/kubernetes/kubernetes/pull/98968), [@liggitt](https://github.com/liggitt))
+- Fixes connection errors when using `--volume-host-cidr-denylist` or `--volume-host-allow-local-loopback` ([#98436](https://github.com/kubernetes/kubernetes/pull/98436), [@liggitt](https://github.com/liggitt)) [SIG Network and Storage]
+- Fixes a problem where an invalid selector on a `PodDisruptionBudget` leads to a nil pointer dereference that causes the controller manager to crash loop. ([#98750](https://github.com/kubernetes/kubernetes/pull/98750), [@mortent](https://github.com/mortent))
+- Fixes spurious errors about IPv6 in `kube-proxy` logs on nodes with IPv6 disabled. ([#99127](https://github.com/kubernetes/kubernetes/pull/99127), [@danwinship](https://github.com/danwinship))
+- Fixing a bug where a failed node may not have the NoExecute taint set correctly ([#96876](https://github.com/kubernetes/kubernetes/pull/96876), [@howieyuen](https://github.com/howieyuen)) [SIG Apps and Node]
+- GCE Internal LoadBalancer sync loop will now release the ILB IP address upon sync failure. An error in ILB forwarding rule creation will no longer leak IP addresses. ([#97740](https://github.com/kubernetes/kubernetes/pull/97740), [@prameshj](https://github.com/prameshj)) [SIG Cloud Provider and Network]
+- Ignore pod updates with no new images in the alwaysPullImages admission controller ([#96668](https://github.com/kubernetes/kubernetes/pull/96668), [@pacoxu](https://github.com/pacoxu)) [SIG Apps, Auth and Node]
+- Improve speed of vSphere PV provisioning and reduce number of API calls ([#100054](https://github.com/kubernetes/kubernetes/pull/100054), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage]
+- KUBECTL_EXTERNAL_DIFF now accepts the equal sign for additional parameters. ([#98158](https://github.com/kubernetes/kubernetes/pull/98158), [@dougsland](https://github.com/dougsland)) [SIG CLI]
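A sketch of the `KUBECTL_EXTERNAL_DIFF` fix above: parameters containing an equal sign can now be passed to the external differ; the `--unified=10` choice is arbitrary:

```shell
# Sketch: use GNU diff with an equal-sign parameter as the external differ.
KUBECTL_EXTERNAL_DIFF="diff --unified=10" kubectl diff -f deployment.yaml
```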
+- Kube-apiserver: an update of a pod with a generic ephemeral volume dropped that volume if the feature had been disabled since creating the pod with such a volume ([#99446](https://github.com/kubernetes/kubernetes/pull/99446), [@pohly](https://github.com/pohly)) [SIG Apps, Node and Storage]
+- Kube-proxy: remove the deprecated --cleanup-ipvs flag of kube-proxy, and make the --cleanup flag always flush IPVS ([#97336](https://github.com/kubernetes/kubernetes/pull/97336), [@maaoBit](https://github.com/maaoBit)) [SIG Network]
+- Kubeadm installs etcd v3.4.13 when creating a v1.19 cluster ([#97244](https://github.com/kubernetes/kubernetes/pull/97244), [@pacoxu](https://github.com/pacoxu))
+- Kubeadm: Fixes a kubeadm upgrade bug that could cause a custom CoreDNS configuration to be replaced with the default. ([#97016](https://github.com/kubernetes/kubernetes/pull/97016), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle]
+- Kubeadm: Some text in the `kubeadm upgrade plan` output has changed. If you have scripts or other automation that parses this output, please review these changes and update your scripts to account for the new output. ([#98728](https://github.com/kubernetes/kubernetes/pull/98728), [@stmcginnis](https://github.com/stmcginnis)) [SIG Cluster Lifecycle]
+- Kubeadm: fix a bug in the host memory detection code on 32bit Linux platforms ([#97403](https://github.com/kubernetes/kubernetes/pull/97403), [@abelbarrera15](https://github.com/abelbarrera15)) [SIG Cluster Lifecycle]
+- Kubeadm: fix a bug where "kubeadm join" would not properly handle missing names for existing etcd members. ([#97372](https://github.com/kubernetes/kubernetes/pull/97372), [@ihgann](https://github.com/ihgann)) [SIG Cluster Lifecycle]
+- Kubeadm: fix a bug where "kubeadm upgrade" commands can fail if CoreDNS v1.8.0 is installed. ([#97919](https://github.com/kubernetes/kubernetes/pull/97919), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
+- Kubeadm: fix a bug where external credentials in an existing admin.conf prevented the CA certificate from being written to the cluster-info ConfigMap. ([#98882](https://github.com/kubernetes/kubernetes/pull/98882), [@kvaps](https://github.com/kvaps)) [SIG Cluster Lifecycle]
+- Kubeadm: get k8s CI version markers from k8s infra bucket ([#98836](https://github.com/kubernetes/kubernetes/pull/98836), [@hasheddan](https://github.com/hasheddan)) [SIG Cluster Lifecycle and Release]
+- Kubeadm: skip validating pod subnet against node-cidr-mask when allocate-node-cidrs is set to false ([#98984](https://github.com/kubernetes/kubernetes/pull/98984), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle]
+- Kubectl logs: `--ignore-errors` is now honored by all containers, maintaining consistency with parallelConsumeRequest behavior. ([#97686](https://github.com/kubernetes/kubernetes/pull/97686), [@wzshiming](https://github.com/wzshiming))
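For illustration, `--ignore-errors` from the entry above combined with multi-container log streaming; the label is a made-up example:

```shell
# Sketch: stream logs from all containers of the matching pods, continuing
# past per-container failures instead of aborting.
kubectl logs --selector app=web --all-containers --ignore-errors
```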
+- Kubectl-convert: Fix `no kind "Ingress" is registered for version` error ([#97754](https://github.com/kubernetes/kubernetes/pull/97754), [@wzshiming](https://github.com/wzshiming))
+- Kubectl: Fixed panic when describing an ingress backend without an API Group ([#100505](https://github.com/kubernetes/kubernetes/pull/100505), [@lauchokyip](https://github.com/lauchokyip)) [SIG CLI]
+- Kubelet now cleans up orphaned volume directories automatically ([#95301](https://github.com/kubernetes/kubernetes/pull/95301), [@lorenz](https://github.com/lorenz)) [SIG Node and Storage]
+- Kubelet.exe on Windows now checks that the process is running as administrator and that the executing user account is listed in the built-in administrators group. This is the equivalent of checking that the process is running as uid 0. ([#96616](https://github.com/kubernetes/kubernetes/pull/96616), [@perithompson](https://github.com/perithompson)) [SIG Node and Windows]
+- Kubelet: Fix a kubelet panic after receiving the wrong signal ([#98200](https://github.com/kubernetes/kubernetes/pull/98200), [@wzshiming](https://github.com/wzshiming)) [SIG Node]
+- Kubelet: Fix repeatedly acquiring the inhibit lock ([#98088](https://github.com/kubernetes/kubernetes/pull/98088), [@wzshiming](https://github.com/wzshiming)) [SIG Node]
+- Kubelet: Fixed a bug in getting the number of CPUs on Windows when the number of logical processors is more than 64 ([#97378](https://github.com/kubernetes/kubernetes/pull/97378), [@hwdef](https://github.com/hwdef)) [SIG Node and Windows]
+- Limits a lease to a maximum of 1000 attached objects. ([#98257](https://github.com/kubernetes/kubernetes/pull/98257), [@lingsamuel](https://github.com/lingsamuel))
+- Mitigate CVE-2020-8555 for kube-up using GCE by preventing local loopback volume hosts. ([#97934](https://github.com/kubernetes/kubernetes/pull/97934), [@mattcary](https://github.com/mattcary)) [SIG Cloud Provider and Storage]
+- On single-stack configured (IPv4 or IPv6, but not both) clusters, Services which are both headless (no clusterIP) and selectorless (empty or undefined selector) will report `ipFamilyPolicy RequireDualStack` and will have entries in `ipFamilies[]` for both IPv4 and IPv6. This is a change from alpha, but does not have any impact on the manually-specified Endpoints and EndpointSlices for the Service. ([#99555](https://github.com/kubernetes/kubernetes/pull/99555), [@thockin](https://github.com/thockin)) [SIG Apps and Network]
+- Performance regression #97685 has been fixed. ([#97860](https://github.com/kubernetes/kubernetes/pull/97860), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery]
+- Pod log stats for Windows now report metrics ([#99221](https://github.com/kubernetes/kubernetes/pull/99221), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node, Storage, Testing and Windows]
+- Pod status updates faster when reacting to probe results. The first readiness probe will be called sooner once startup probes have succeeded, which marks the Pod status as ready sooner. ([#98376](https://github.com/kubernetes/kubernetes/pull/98376), [@matthyx](https://github.com/matthyx))
+- Readjust `kubelet_containers_per_pod_count` buckets to only show metrics greater than 1. ([#98169](https://github.com/kubernetes/kubernetes/pull/98169), [@wawa0210](https://github.com/wawa0210))
+- Remove CSI topology from migrated in-tree gcepd volume. ([#97823](https://github.com/kubernetes/kubernetes/pull/97823), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider and Storage]
+- Requests with invalid timeout parameters in the request URL now appear in the audit log correctly. ([#96901](https://github.com/kubernetes/kubernetes/pull/96901), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Testing]
+- Resolve a "concurrent map read and map write" crashing error in the kubelet ([#95111](https://github.com/kubernetes/kubernetes/pull/95111), [@choury](https://github.com/choury)) [SIG Node]
+- Resolves spurious `Failed to list *v1.Secret` or `Failed to list *v1.ConfigMap` messages in kubelet logs. ([#99538](https://github.com/kubernetes/kubernetes/pull/99538), [@liggitt](https://github.com/liggitt)) [SIG Auth and Node]
+- ResourceQuota of an entity now inclusively calculates Pod overhead ([#99600](https://github.com/kubernetes/kubernetes/pull/99600), [@gjkim42](https://github.com/gjkim42))
+- Return zero time (midnight on Jan. 1, 1970) instead of a negative number when reporting startedAt and finishedAt of a not-started or running Pod when using `dockershim` as a runtime. ([#99585](https://github.com/kubernetes/kubernetes/pull/99585), [@Iceber](https://github.com/Iceber))
+- Reverts breaking change to inline AzureFile volumes; referenced secrets are now searched for in the same namespace as the pod as in previous releases. ([#100563](https://github.com/kubernetes/kubernetes/pull/100563), [@msau42](https://github.com/msau42))
+- Scores from InterPodAffinity have stronger differentiation. ([#98096](https://github.com/kubernetes/kubernetes/pull/98096), [@leileiwan](https://github.com/leileiwan)) [SIG Scheduling]
+- Specifying the KUBE_TEST_REPO environment variable when e2e tests are executed will instruct the test infrastructure to load that image from a location within the specified repo, using a predefined pattern. ([#93510](https://github.com/kubernetes/kubernetes/pull/93510), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing]
+- Static pods will be deleted gracefully. ([#98103](https://github.com/kubernetes/kubernetes/pull/98103), [@gjkim42](https://github.com/gjkim42)) [SIG Node]
+- Sync node status during kubelet node shutdown.
+  Adds a pod admission handler that rejects new pods while the node is shutting down. ([#98005](https://github.com/kubernetes/kubernetes/pull/98005), [@wzshiming](https://github.com/wzshiming)) [SIG Node]
+- The calculation of pod UIDs for static pods has changed to ensure each static pod gets a unique value - this will cause all static pod containers to be recreated/restarted if an in-place kubelet upgrade from 1.20 to 1.21 is performed. Note that draining pods before upgrading the kubelet across minor versions is the supported upgrade path. ([#87461](https://github.com/kubernetes/kubernetes/pull/87461), [@bboreham](https://github.com/bboreham)) [SIG Node]
+- The maximum number of ports allowed in EndpointSlices has been increased from 100 to 20,000 ([#99795](https://github.com/kubernetes/kubernetes/pull/99795), [@robscott](https://github.com/robscott)) [SIG Network]
+- Truncates a message if it hits the `NoteLengthLimit` when the scheduler records an event for the pod that indicates the pod has failed to schedule. ([#98715](https://github.com/kubernetes/kubernetes/pull/98715), [@carlory](https://github.com/carlory))
+- Updated k8s.gcr.io/ingress-gce-404-server-with-metrics-amd64 to a version that serves the /metrics endpoint on a non-default port. ([#97621](https://github.com/kubernetes/kubernetes/pull/97621), [@vbannai](https://github.com/vbannai)) [SIG Cloud Provider]
+- Updates the commands `kubectl kustomize {arg}` and `kubectl apply -k {arg}`
+  to use the same code as kustomize CLI [v4.0.5](https://github.com/kubernetes-sigs/kustomize/releases/tag/kustomize%2Fv4.0.5) ([#98946](https://github.com/kubernetes/kubernetes/pull/98946), [@monopole](https://github.com/monopole))
+- Use force unmount for NFS volumes if regular mount fails after 1 minute timeout ([#96844](https://github.com/kubernetes/kubernetes/pull/96844), [@gnufied](https://github.com/gnufied)) [SIG Storage]
+- Use network.Interface.VirtualMachine.ID to get the bound VM
+  Skip standalone VM when reconciling LoadBalancer ([#97635](https://github.com/kubernetes/kubernetes/pull/97635), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
+- Using exec auth plugins with kubectl no longer results in warnings about constructing many client instances from the same exec auth config. ([#97857](https://github.com/kubernetes/kubernetes/pull/97857), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Auth]
+- When a CNI plugin returns dual-stack pod IPs, kubelet will now try to respect the
+  "primary IP family" of the cluster by picking a primary pod IP of the same family
+  as the (primary) node IP, rather than assuming that the CNI plugin returned the IPs
+  in the order the administrator wanted (since some CNI plugins don't allow
+  configuring this). ([#97979](https://github.com/kubernetes/kubernetes/pull/97979), [@danwinship](https://github.com/danwinship)) [SIG Network and Node]
+- When dynamically provisioning Azure File volumes for a premium account, the requested size will be set to 100GB if the request is initially lower than this value to accommodate Azure File requirements. ([#99122](https://github.com/kubernetes/kubernetes/pull/99122), [@huffmanca](https://github.com/huffmanca)) [SIG Cloud Provider and Storage]
+- When using `Containerd` on Windows, the `C:\Windows\System32\drivers\etc\hosts` file will now be managed by kubelet. ([#83730](https://github.com/kubernetes/kubernetes/pull/83730), [@claudiubelu](https://github.com/claudiubelu))
+- `VolumeBindingArgs` now allows `BindTimeoutSeconds` to be set to zero, where zero indicates no waiting for the check of the volume binding operation. ([#99835](https://github.com/kubernetes/kubernetes/pull/99835), [@chendave](https://github.com/chendave)) [SIG Scheduling and Storage]
+- `kubectl exec` and `kubectl attach` now honor the `--quiet` flag which suppresses output from the local binary that could be confused by a script with the remote command output (all non-failure output is hidden). In addition, print inline with exec and attach the list of alternate containers when we default to the first spec.container. ([#99004](https://github.com/kubernetes/kubernetes/pull/99004), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI]
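A hedged sketch of the `--quiet` behavior described in the entry above; the pod name and command are illustrative:

```shell
# Sketch: capture only the remote command's output; kubectl's own local
# informational output is suppressed by --quiet.
output=$(kubectl exec --quiet example-pod -- cat /etc/hostname)
echo "$output"
```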
The flag will be removed in v1.22 ([#95935](https://github.com/kubernetes/kubernetes/pull/95935), [@tallclair](https://github.com/tallclair)) [SIG Node] -- A new metric `requestAbortsTotal` has been introduced that counts aborted requests for each `group`, `version`, `verb`, `resource`, `subresource` and `scope`. ([#95002](https://github.com/kubernetes/kubernetes/pull/95002), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery, Cloud Provider, Instrumentation and Scheduling] -- API priority and fairness metrics use snake_case in label names ([#96236](https://github.com/kubernetes/kubernetes/pull/96236), [@adtac](https://github.com/adtac)) [SIG API Machinery, Cluster Lifecycle, Instrumentation and Testing] -- Add fine-grained debugging to the intra-pod conformance test to troubleshoot networking issues for potentially unhealthy nodes when running conformance or sonobuoy tests. ([#93837](https://github.com/kubernetes/kubernetes/pull/93837), [@jayunit100](https://github.com/jayunit100)) -- Add the following metrics: - - network_plugin_operations_total - - network_plugin_operations_errors_total ([#93066](https://github.com/kubernetes/kubernetes/pull/93066), [@AnishShah](https://github.com/AnishShah)) -- Adds a bootstrapping ClusterRole, ClusterRoleBinding and group for /metrics, /livez/*, /readyz/*, & /healthz/- endpoints. ([#93311](https://github.com/kubernetes/kubernetes/pull/93311), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Auth, Cloud Provider and Instrumentation] -- AdmissionReview objects sent for the creation of Namespace API objects now populate the `namespace` attribute consistently (previously the `namespace` attribute was empty for Namespace creation via POST requests, and populated for Namespace creation via server-side-apply PATCH requests) ([#95012](https://github.com/kubernetes/kubernetes/pull/95012), [@nodo](https://github.com/nodo)) [SIG API Machinery and Testing] -- Applies translations on all command descriptions ([#95439](https://github.com/kubernetes/kubernetes/pull/95439), [@HerrNaN](https://github.com/HerrNaN)) [SIG CLI] -- Base-images: Update to debian-iptables:buster-v1.3.0 - - Uses iptables 1.8.5 - - base-images: Update to debian-base:buster-v1.2.0 - - cluster/images/etcd: Build etcd:3.4.13-1 image - - Uses debian-base:buster-v1.2.0 ([#94733](https://github.com/kubernetes/kubernetes/pull/94733), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing] -- Changed: default "Accept-Encoding" header removed from HTTP probes. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes ([#96127](https://github.com/kubernetes/kubernetes/pull/96127), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node] -- Client-go header logging (at verbosity levels >= 9) now masks `Authorization` header contents ([#95316](https://github.com/kubernetes/kubernetes/pull/95316), [@sfowl](https://github.com/sfowl)) [SIG API Machinery] -- Decrease warning message frequency on setting volume ownership for configmap/secret. 
([#92878](https://github.com/kubernetes/kubernetes/pull/92878), [@jvanz](https://github.com/jvanz)) -- Enhance the log output of verifyRunAsNonRoot by adding pod and container information ([#94911](https://github.com/kubernetes/kubernetes/pull/94911), [@wawa0210](https://github.com/wawa0210)) [SIG Node] -- Fix func name NewCreateCreateDeploymentOptions ([#91931](https://github.com/kubernetes/kubernetes/pull/91931), [@lixiaobing1](https://github.com/lixiaobing1)) [SIG CLI] -- Fix kubelet to properly log when a container is started. Previously, kubelet might log that a container was dead and restarted when it was actually started for the first time. This behavior only happened on pods with initContainers and regular containers. ([#91469](https://github.com/kubernetes/kubernetes/pull/91469), [@rata](https://github.com/rata)) -- Fixes the message about no auth for metrics in scheduler. ([#94035](https://github.com/kubernetes/kubernetes/pull/94035), [@zhouya0](https://github.com/zhouya0)) [SIG Scheduling] -- Generators for services are removed from kubectl ([#95256](https://github.com/kubernetes/kubernetes/pull/95256), [@Git-Jiro](https://github.com/Git-Jiro)) [SIG CLI] -- Introduce kubectl-convert plugin. ([#96190](https://github.com/kubernetes/kubernetes/pull/96190), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] -- Kube-scheduler now logs processed component config at startup ([#96426](https://github.com/kubernetes/kubernetes/pull/96426), [@damemi](https://github.com/damemi)) [SIG Scheduling] -- Kubeadm: Separate argument key/value in log msg ([#94016](https://github.com/kubernetes/kubernetes/pull/94016), [@mrueg](https://github.com/mrueg)) [SIG Cluster Lifecycle] -- Kubeadm: remove the CoreDNS check for known image digests when applying the addon ([#94506](https://github.com/kubernetes/kubernetes/pull/94506), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: update the default pause image version to 1.4.0 on Windows. With this update, the image supports Windows versions 1809 (2019LTS), 1903, 1909, 2004 ([#95419](https://github.com/kubernetes/kubernetes/pull/95419), [@jsturtevant](https://github.com/jsturtevant)) [SIG Cluster Lifecycle and Windows] -- Kubectl: the `generator` flag of `kubectl autoscale` has been deprecated and has no effect; it will be removed in a future release ([#92998](https://github.com/kubernetes/kubernetes/pull/92998), [@SataQiu](https://github.com/SataQiu)) [SIG CLI] -- Lock ExternalPolicyForExternalIP to default; this feature gate will be removed in 1.22. ([#94581](https://github.com/kubernetes/kubernetes/pull/94581), [@knabben](https://github.com/knabben)) [SIG Network] -- Mask ceph RBD adminSecrets in logs when logLevel >= 4. ([#95245](https://github.com/kubernetes/kubernetes/pull/95245), [@sfowl](https://github.com/sfowl)) -- Remove offensive words from kubectl cluster-info command. ([#95202](https://github.com/kubernetes/kubernetes/pull/95202), [@rikatz](https://github.com/rikatz)) -- Remove support for "ci/k8s-master" version label in kubeadm, use "ci/latest" instead. See [kubernetes/test-infra#18517](https://github.com/kubernetes/test-infra/pull/18517). 
([#93626](https://github.com/kubernetes/kubernetes/pull/93626), [@vikkyomkar](https://github.com/vikkyomkar)) -- Remove the dependency of the csi-translation-lib module on apiserver/cloud-provider/controller-manager ([#95543](https://github.com/kubernetes/kubernetes/pull/95543), [@wawa0210](https://github.com/wawa0210)) [SIG Release] -- Scheduler framework interface moved from pkg/scheduler/framework/v1alpha to pkg/scheduler/framework ([#95069](https://github.com/kubernetes/kubernetes/pull/95069), [@farah](https://github.com/farah)) [SIG Scheduling, Storage and Testing] -- Service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset is removed. All Standard load balancers will always enable tcp resets. ([#94297](https://github.com/kubernetes/kubernetes/pull/94297), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider] -- Stop propagating SelfLink (deprecated in 1.16) in kube-apiserver ([#94397](https://github.com/kubernetes/kubernetes/pull/94397), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Strip unnecessary security contexts on Windows ([#93475](https://github.com/kubernetes/kubernetes/pull/93475), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node, Testing and Windows] -- To harden the code, add a unit test for GetAddressAndDialer ([#93180](https://github.com/kubernetes/kubernetes/pull/93180), [@FreeZhang61](https://github.com/FreeZhang61)) [SIG Node] -- UDP and SCTP protocols can leave stale connections that need to be cleared to avoid service disruption, but they can cause problems that are hard to debug. - Kubernetes components using a log level greater than or equal to 4 will log the conntrack operations and their output, to show the entries that were deleted. ([#95694](https://github.com/kubernetes/kubernetes/pull/95694), [@aojea](https://github.com/aojea)) [SIG Network] -- Update CNI plugins to v0.8.7 ([#94367](https://github.com/kubernetes/kubernetes/pull/94367), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Network, Node, Release and Testing] -- Update cri-tools to [v1.19.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.19.0) ([#94307](https://github.com/kubernetes/kubernetes/pull/94307), [@xmudrii](https://github.com/xmudrii)) [SIG Cloud Provider] -- Update etcd client side to v3.4.13 ([#94259](https://github.com/kubernetes/kubernetes/pull/94259), [@jingyih](https://github.com/jingyih)) [SIG API Machinery and Cloud Provider] -- Users will now be able to configure all supported values for AWS NLB health check interval and thresholds for new resources. 
([#96312](https://github.com/kubernetes/kubernetes/pull/96312), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] -- V1helpers.MatchNodeSelectorTerms now accepts just a Node and a list of Terms ([#95871](https://github.com/kubernetes/kubernetes/pull/95871), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage] -- Vsphere: improve logging message on node cache refresh event ([#95236](https://github.com/kubernetes/kubernetes/pull/95236), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider] -- `MatchNodeSelectorTerms` function moved to `k8s.io/component-helpers` ([#95531](https://github.com/kubernetes/kubernetes/pull/95531), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage] -- `kubectl api-resources` now prints the API version (as 'API group/version', same as the output of `kubectl api-versions`). The column APIGROUP is now APIVERSION ([#95253](https://github.com/kubernetes/kubernetes/pull/95253), [@sallyom](https://github.com/sallyom)) [SIG CLI] -- `kubectl get ingress` now prefers `networking.k8s.io/v1` over `extensions/v1beta1` (deprecated since v1.14). To explicitly request the deprecated version, use `kubectl get ingress.v1beta1.extensions`. ([#94309](https://github.com/kubernetes/kubernetes/pull/94309), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and CLI] +- APIs for kubelet annotations and labels from `k8s.io/kubernetes/pkg/kubelet/apis` are now moved under `k8s.io/kubelet/pkg/apis/` ([#98931](https://github.com/kubernetes/kubernetes/pull/98931), [@michaelbeaumont](https://github.com/michaelbeaumont)) +- `apiserver_request_duration_seconds` is promoted to stable status. ([#99925](https://github.com/kubernetes/kubernetes/pull/99925), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing] +- Bump github.com/Azure/go-autorest/autorest to v0.11.12 ([#97033](https://github.com/kubernetes/kubernetes/pull/97033), [@patrickshan](https://github.com/patrickshan)) [SIG API Machinery, CLI, Cloud Provider and Cluster Lifecycle] +- Clients are required to use go1.15.8+ or go1.16+ if kube-apiserver has the goaway feature enabled, to avoid an unexpected data race condition. ([#98809](https://github.com/kubernetes/kubernetes/pull/98809), [@answer1991](https://github.com/answer1991)) +- Delete deprecated `service.beta.kubernetes.io/azure-load-balancer-mixed-protocols` mixed protocol annotation in favor of the MixedProtocolLBService feature ([#97096](https://github.com/kubernetes/kubernetes/pull/97096), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- EndpointSlice generation is now incremented when labels change. ([#99750](https://github.com/kubernetes/kubernetes/pull/99750), [@robscott](https://github.com/robscott)) [SIG Network] +- Featuregate AllowInsecureBackendProxy graduates to GA and is unconditionally enabled. ([#99658](https://github.com/kubernetes/kubernetes/pull/99658), [@deads2k](https://github.com/deads2k)) +- Increase timeout for pod lifecycle test to reach pod status=ready ([#96691](https://github.com/kubernetes/kubernetes/pull/96691), [@hh](https://github.com/hh)) +- Increased `CSINodeIDMaxLength` from 128 bytes to 192 bytes. 
([#98753](https://github.com/kubernetes/kubernetes/pull/98753), [@Jiawei0227](https://github.com/Jiawei0227)) +- Kube-apiserver: The OIDC authenticator no longer waits 10 seconds before attempting to fetch the metadata required to verify tokens. ([#97693](https://github.com/kubernetes/kubernetes/pull/97693), [@enj](https://github.com/enj)) [SIG API Machinery and Auth] +- Kube-proxy: Traffic from the cluster directed to ExternalIPs is always sent directly to the Service. ([#96296](https://github.com/kubernetes/kubernetes/pull/96296), [@aojea](https://github.com/aojea)) [SIG Network and Testing] +- Kubeadm: change the default image repository for CI images from 'gcr.io/kubernetes-ci-images' to 'gcr.io/k8s-staging-ci-images' ([#97087](https://github.com/kubernetes/kubernetes/pull/97087), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubectl: The deprecated `kubectl alpha debug` command is removed. Use `kubectl debug` instead. ([#98111](https://github.com/kubernetes/kubernetes/pull/98111), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI] +- Kubelet command line flags related to dockershim now show a deprecation message, as they will be removed along with dockershim in a future release. ([#98730](https://github.com/kubernetes/kubernetes/pull/98730), [@dims](https://github.com/dims)) +- Official support for building kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97618](https://github.com/kubernetes/kubernetes/pull/97618), [@jherrera123](https://github.com/jherrera123)) [SIG Release and Testing] +- Process start time on Windows now uses current process information ([#97491](https://github.com/kubernetes/kubernetes/pull/97491), [@jsturtevant](https://github.com/jsturtevant)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Windows] +- Resolves flakes in the Ingress conformance tests due to conflicts with controllers updating the Ingress object ([#98430](https://github.com/kubernetes/kubernetes/pull/98430), [@liggitt](https://github.com/liggitt)) [SIG Network and Testing] +- The `AttachVolumeLimit` feature gate (GA since v1.17) has been removed and the behavior is now unconditionally enabled. ([#96539](https://github.com/kubernetes/kubernetes/pull/96539), [@ialidzhikov](https://github.com/ialidzhikov)) +- The `CSINodeInfo` feature gate, GA since v1.17, is unconditionally enabled and can no longer be specified via the `--feature-gates` argument. ([#96561](https://github.com/kubernetes/kubernetes/pull/96561), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Apps, Auth, Scheduling, Storage and Testing] +- The `apiserver_request_total` metric is promoted to stable status and no longer has a content-type dimension, so any alerts/charts which presume the existence of this dimension will fail. This is, however, unlikely to be the case since it was effectively an unbounded dimension in the first place. ([#99788](https://github.com/kubernetes/kubernetes/pull/99788), [@logicalhan](https://github.com/logicalhan)) +- The default delegating authorization options now allow unauthenticated access to healthz, readyz, and livez. A system:masters user connecting to an authz delegator will not perform an authz check. 
([#98325](https://github.com/kubernetes/kubernetes/pull/98325), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth, Cloud Provider and Scheduling] +- The deprecated feature gates `CSIDriverRegistry`, `BlockVolume` and `CSIBlockVolume` are now unconditionally enabled and can no longer be specified in component invocations. ([#98021](https://github.com/kubernetes/kubernetes/pull/98021), [@gavinfish](https://github.com/gavinfish)) [SIG Storage] +- The deprecated feature gates `RotateKubeletClientCertificate`, `AttachVolumeLimit`, `VolumePVCDataSource` and `EvenPodsSpread` are now unconditionally enabled and can no longer be specified in component invocations. ([#97306](https://github.com/kubernetes/kubernetes/pull/97306), [@gavinfish](https://github.com/gavinfish)) [SIG Node, Scheduling and Storage] +- The e2e suite can be instructed not to wait for pods in kube-system to be ready or for all nodes to be ready by passing `--allowed-not-ready-nodes=-1` when invoking the e2e.test program. This allows callers to run subsets of the e2e suite in scenarios other than perfectly healthy clusters. ([#98781](https://github.com/kubernetes/kubernetes/pull/98781), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing] +- The feature gates `WindowsGMSA` and `WindowsRunAsUserName`, GA since v1.18, are now removed. ([#96531](https://github.com/kubernetes/kubernetes/pull/96531), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Node and Windows] +- The new `-gce-zones` flag on the `e2e.test` binary instructs tests that check for information about how the cluster interacts with the cloud to limit their queries to the provided zone list. If not specified, the current behavior of asking the cloud provider for all available zones in multi-zone clusters is preserved. ([#98787](https://github.com/kubernetes/kubernetes/pull/98787), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Cluster Lifecycle and Testing] +- Update cri-tools to [v1.20.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.20.0) ([#97967](https://github.com/kubernetes/kubernetes/pull/97967), [@rajibmitra](https://github.com/rajibmitra)) [SIG Cloud Provider] +- Windows nodes on GCE will take longer to start due to dependencies installed at node creation time. ([#98284](https://github.com/kubernetes/kubernetes/pull/98284), [@pjh](https://github.com/pjh)) [SIG Cloud Provider] +- `apiserver_storage_objects` (a newer version of `etcd_object_counts`) is promoted and marked as stable. ([#100082](https://github.com/kubernetes/kubernetes/pull/100082), [@logicalhan](https://github.com/logicalhan)) + +### Uncategorized + +- GCE L4 Loadbalancers now handle > 5 ports in the service spec correctly. ([#99595](https://github.com/kubernetes/kubernetes/pull/99595), [@prameshj](https://github.com/prameshj)) [SIG Cloud Provider] +- The DownwardAPIHugePages feature is beta. Users may use the feature if all workers in their cluster run version 1.20 or newer. The feature will be enabled by default in all installations in 1.22. 
([#99610](https://github.com/kubernetes/kubernetes/pull/99610), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node] ## Dependencies ### Added -- cloud.google.com/go/firestore: v1.1.0 -- github.com/Azure/go-autorest: [v14.2.0+incompatible](https://github.com/Azure/go-autorest/tree/v14.2.0) -- github.com/armon/go-metrics: [f0300d1](https://github.com/armon/go-metrics/tree/f0300d1) -- github.com/armon/go-radix: [7fddfc3](https://github.com/armon/go-radix/tree/7fddfc3) -- github.com/bketelsen/crypt: [5cbc8cc](https://github.com/bketelsen/crypt/tree/5cbc8cc) -- github.com/form3tech-oss/jwt-go: [v3.2.2+incompatible](https://github.com/form3tech-oss/jwt-go/tree/v3.2.2) -- github.com/fvbommel/sortorder: [v1.0.1](https://github.com/fvbommel/sortorder/tree/v1.0.1) -- github.com/hashicorp/consul/api: [v1.1.0](https://github.com/hashicorp/consul/api/tree/v1.1.0) -- github.com/hashicorp/consul/sdk: [v0.1.1](https://github.com/hashicorp/consul/sdk/tree/v0.1.1) -- github.com/hashicorp/errwrap: [v1.0.0](https://github.com/hashicorp/errwrap/tree/v1.0.0) -- github.com/hashicorp/go-cleanhttp: [v0.5.1](https://github.com/hashicorp/go-cleanhttp/tree/v0.5.1) -- github.com/hashicorp/go-immutable-radix: [v1.0.0](https://github.com/hashicorp/go-immutable-radix/tree/v1.0.0) -- github.com/hashicorp/go-msgpack: [v0.5.3](https://github.com/hashicorp/go-msgpack/tree/v0.5.3) -- github.com/hashicorp/go-multierror: [v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) -- github.com/hashicorp/go-rootcerts: [v1.0.0](https://github.com/hashicorp/go-rootcerts/tree/v1.0.0) -- github.com/hashicorp/go-sockaddr: [v1.0.0](https://github.com/hashicorp/go-sockaddr/tree/v1.0.0) -- github.com/hashicorp/go-uuid: [v1.0.1](https://github.com/hashicorp/go-uuid/tree/v1.0.1) -- github.com/hashicorp/go.net: [v0.0.1](https://github.com/hashicorp/go.net/tree/v0.0.1) -- github.com/hashicorp/logutils: [v1.0.0](https://github.com/hashicorp/logutils/tree/v1.0.0) -- github.com/hashicorp/mdns: [v1.0.0](https://github.com/hashicorp/mdns/tree/v1.0.0) -- github.com/hashicorp/memberlist: [v0.1.3](https://github.com/hashicorp/memberlist/tree/v0.1.3) -- github.com/hashicorp/serf: [v0.8.2](https://github.com/hashicorp/serf/tree/v0.8.2) -- github.com/jmespath/go-jmespath/internal/testify: [v1.5.1](https://github.com/jmespath/go-jmespath/internal/testify/tree/v1.5.1) -- github.com/mitchellh/cli: [v1.0.0](https://github.com/mitchellh/cli/tree/v1.0.0) -- github.com/mitchellh/go-testing-interface: [v1.0.0](https://github.com/mitchellh/go-testing-interface/tree/v1.0.0) -- github.com/mitchellh/gox: [v0.4.0](https://github.com/mitchellh/gox/tree/v0.4.0) -- github.com/mitchellh/iochan: [v1.0.0](https://github.com/mitchellh/iochan/tree/v1.0.0) -- github.com/pascaldekloe/goe: [57f6aae](https://github.com/pascaldekloe/goe/tree/57f6aae) -- github.com/posener/complete: 
[v1.1.1](https://github.com/posener/complete/tree/v1.1.1) -- github.com/ryanuber/columnize: [9b3edd6](https://github.com/ryanuber/columnize/tree/9b3edd6) -- github.com/sean-/seed: [e2103e2](https://github.com/sean-/seed/tree/e2103e2) -- github.com/subosito/gotenv: [v1.2.0](https://github.com/subosito/gotenv/tree/v1.2.0) -- github.com/willf/bitset: [d5bec33](https://github.com/willf/bitset/tree/d5bec33) -- gopkg.in/ini.v1: v1.51.0 -- gopkg.in/yaml.v3: 9f266ea -- rsc.io/quote/v3: v3.1.0 -- rsc.io/sampler: v1.3.0 +- github.com/go-errors/errors: [v1.0.1](https://github.com/go-errors/errors/tree/v1.0.1) +- github.com/gobuffalo/here: [v0.6.0](https://github.com/gobuffalo/here/tree/v0.6.0) +- github.com/google/shlex: [e7afc7f](https://github.com/google/shlex/tree/e7afc7f) +- github.com/markbates/pkger: [v0.17.1](https://github.com/markbates/pkger/tree/v0.17.1) +- github.com/moby/spdystream: [v0.2.0](https://github.com/moby/spdystream/tree/v0.2.0) +- github.com/monochromegane/go-gitignore: [205db1a](https://github.com/monochromegane/go-gitignore/tree/205db1a) +- github.com/niemeyer/pretty: [a10e7ca](https://github.com/niemeyer/pretty/tree/a10e7ca) +- github.com/xlab/treeprint: [a009c39](https://github.com/xlab/treeprint/tree/a009c39) +- go.starlark.net: 8dd3e2e +- golang.org/x/term: 6a3ed07 +- sigs.k8s.io/kustomize/api: v0.8.5 +- sigs.k8s.io/kustomize/cmd/config: v0.9.7 +- sigs.k8s.io/kustomize/kustomize/v4: v4.0.5 +- sigs.k8s.io/kustomize/kyaml: v0.10.15 ### 변경 -- cloud.google.com/go/bigquery: v1.0.1 → v1.4.0 -- cloud.google.com/go/datastore: v1.0.0 → v1.1.0 -- cloud.google.com/go/pubsub: v1.0.1 → v1.2.0 -- cloud.google.com/go/storage: v1.0.0 → v1.6.0 -- cloud.google.com/go: v0.51.0 → v0.54.0 -- github.com/Azure/go-autorest/autorest/adal: [v0.8.2 → v0.9.5](https://github.com/Azure/go-autorest/autorest/adal/compare/v0.8.2...v0.9.5) -- github.com/Azure/go-autorest/autorest/date: [v0.2.0 → v0.3.0](https://github.com/Azure/go-autorest/autorest/date/compare/v0.2.0...v0.3.0) -- github.com/Azure/go-autorest/autorest/mocks: [v0.3.0 → v0.4.1](https://github.com/Azure/go-autorest/autorest/mocks/compare/v0.3.0...v0.4.1) -- github.com/Azure/go-autorest/autorest: [v0.9.6 → v0.11.1](https://github.com/Azure/go-autorest/autorest/compare/v0.9.6...v0.11.1) -- github.com/Azure/go-autorest/logger: [v0.1.0 → v0.2.0](https://github.com/Azure/go-autorest/logger/compare/v0.1.0...v0.2.0) -- github.com/Azure/go-autorest/tracing: [v0.5.0 → v0.6.0](https://github.com/Azure/go-autorest/tracing/compare/v0.5.0...v0.6.0) -- github.com/Microsoft/go-winio: [fc70bd9 → v0.4.15](https://github.com/Microsoft/go-winio/compare/fc70bd9...v0.4.15) -- github.com/aws/aws-sdk-go: [v1.28.2 → v1.35.24](https://github.com/aws/aws-sdk-go/compare/v1.28.2...v1.35.24) -- github.com/blang/semver: [v3.5.0+incompatible → v3.5.1+incompatible](https://github.com/blang/semver/compare/v3.5.0...v3.5.1) -- github.com/checkpoint-restore/go-criu/v4: [v4.0.2 → v4.1.0](https://github.com/checkpoint-restore/go-criu/v4/compare/v4.0.2...v4.1.0) -- github.com/containerd/containerd: [v1.3.3 → v1.4.1](https://github.com/containerd/containerd/compare/v1.3.3...v1.4.1) -- 
github.com/containerd/ttrpc: [v1.0.0 → v1.0.2](https://github.com/containerd/ttrpc/compare/v1.0.0...v1.0.2) -- github.com/containerd/typeurl: [v1.0.0 → v1.0.1](https://github.com/containerd/typeurl/compare/v1.0.0...v1.0.1) -- github.com/coreos/etcd: [v3.3.10+incompatible → v3.3.13+incompatible](https://github.com/coreos/etcd/compare/v3.3.10...v3.3.13) -- github.com/docker/docker: [aa6a989 → bd33bbf](https://github.com/docker/docker/compare/aa6a989...bd33bbf) -- github.com/go-gl/glfw/v3.3/glfw: [12ad95a → 6f7a984](https://github.com/go-gl/glfw/v3.3/glfw/compare/12ad95a...6f7a984) -- github.com/golang/groupcache: [215e871 → 8c9f03a](https://github.com/golang/groupcache/compare/215e871...8c9f03a) -- github.com/golang/mock: [v1.3.1 → v1.4.1](https://github.com/golang/mock/compare/v1.3.1...v1.4.1) -- github.com/golang/protobuf: [v1.4.2 → v1.4.3](https://github.com/golang/protobuf/compare/v1.4.2...v1.4.3) -- github.com/google/cadvisor: [v0.37.0 → v0.38.5](https://github.com/google/cadvisor/compare/v0.37.0...v0.38.5) -- github.com/google/go-cmp: [v0.4.0 → v0.5.2](https://github.com/google/go-cmp/compare/v0.4.0...v0.5.2) -- github.com/google/pprof: [d4f498a → 1ebb73c](https://github.com/google/pprof/compare/d4f498a...1ebb73c) -- github.com/google/uuid: [v1.1.1 → v1.1.2](https://github.com/google/uuid/compare/v1.1.1...v1.1.2) -- github.com/gorilla/mux: [v1.7.3 → v1.8.0](https://github.com/gorilla/mux/compare/v1.7.3...v1.8.0) -- github.com/gorilla/websocket: [v1.4.0 → v1.4.2](https://github.com/gorilla/websocket/compare/v1.4.0...v1.4.2) -- github.com/jmespath/go-jmespath: [c2b33e8 → v0.4.0](https://github.com/jmespath/go-jmespath/compare/c2b33e8...v0.4.0) -- github.com/karrick/godirwalk: [v1.7.5 → v1.16.1](https://github.com/karrick/godirwalk/compare/v1.7.5...v1.16.1) -- github.com/opencontainers/go-digest: [v1.0.0-rc1 → v1.0.0](https://github.com/opencontainers/go-digest/compare/v1.0.0-rc1...v1.0.0) -- github.com/opencontainers/runc: [819fcc6 → v1.0.0-rc92](https://github.com/opencontainers/runc/compare/819fcc6...v1.0.0-rc92) -- github.com/opencontainers/runtime-spec: [237cc4f → 4d89ac9](https://github.com/opencontainers/runtime-spec/compare/237cc4f...4d89ac9) -- github.com/opencontainers/selinux: [v1.5.2 → v1.6.0](https://github.com/opencontainers/selinux/compare/v1.5.2...v1.6.0) -- github.com/prometheus/procfs: [v0.1.3 → v0.2.0](https://github.com/prometheus/procfs/compare/v0.1.3...v0.2.0) -- github.com/quobyte/api: [v0.1.2 → v0.1.8](https://github.com/quobyte/api/compare/v0.1.2...v0.1.8) -- github.com/spf13/cobra: [v1.0.0 → v1.1.1](https://github.com/spf13/cobra/compare/v1.0.0...v1.1.1) -- github.com/spf13/viper: [v1.4.0 → v1.7.0](https://github.com/spf13/viper/compare/v1.4.0...v1.7.0) -- github.com/storageos/go-api: [343b3ef → v2.2.0+incompatible](https://github.com/storageos/go-api/compare/343b3ef...v2.2.0) -- github.com/stretchr/testify: [v1.4.0 → v1.6.1](https://github.com/stretchr/testify/compare/v1.4.0...v1.6.1) -- github.com/vishvananda/netns: [52d707b → db3c7e5](https://github.com/vishvananda/netns/compare/52d707b...db3c7e5) -- go.etcd.io/etcd: 17cef6e → dd1b699 
-- go.opencensus.io: v0.22.2 → v0.22.3 -- golang.org/x/crypto: 75b2880 → 7f63de1 -- golang.org/x/exp: da58074 → 6cc2880 -- golang.org/x/lint: fdd1cda → 738671d -- golang.org/x/net: ab34263 → 69a7880 -- golang.org/x/oauth2: 858c2ad → bf48bf1 -- golang.org/x/sys: ed371f2 → 5cba982 -- golang.org/x/text: v0.3.3 → v0.3.4 -- golang.org/x/time: 555d28b → 3af7569 -- golang.org/x/xerrors: 9bdfabe → 5ec99f8 -- google.golang.org/api: v0.15.1 → v0.20.0 -- google.golang.org/genproto: cb27e3a → 8816d57 -- google.golang.org/grpc: v1.27.0 → v1.27.1 -- google.golang.org/protobuf: v1.24.0 → v1.25.0 -- honnef.co/go/tools: v0.0.1-2019.2.3 → v0.0.1-2020.1.3 -- k8s.io/gengo: 8167cfd → 83324d8 -- k8s.io/klog/v2: v2.2.0 → v2.4.0 -- k8s.io/kube-openapi: 6aeccd4 → d219536 -- k8s.io/system-validators: v1.1.2 → v1.2.0 -- k8s.io/utils: d5654de → 67b214c -- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.9 → v0.0.14 -- sigs.k8s.io/structured-merge-diff/v4: v4.0.1 → v4.0.2 +- dmitri.shuralyov.com/gpu/mtl: 666a987 → 28db891 +- github.com/Azure/go-autorest/autorest: [v0.11.1 → v0.11.12](https://github.com/Azure/go-autorest/autorest/compare/v0.11.1...v0.11.12) +- github.com/NYTimes/gziphandler: [56545f4 → v1.1.1](https://github.com/NYTimes/gziphandler/compare/56545f4...v1.1.1) +- github.com/cilium/ebpf: [1c8d4c9 → v0.2.0](https://github.com/cilium/ebpf/compare/1c8d4c9...v0.2.0) +- github.com/container-storage-interface/spec: [v1.2.0 → v1.3.0](https://github.com/container-storage-interface/spec/compare/v1.2.0...v1.3.0) +- github.com/containerd/console: [v1.0.0 → v1.0.1](https://github.com/containerd/console/compare/v1.0.0...v1.0.1) +- github.com/containerd/containerd: [v1.4.1 → v1.4.4](https://github.com/containerd/containerd/compare/v1.4.1...v1.4.4) +- github.com/coredns/corefile-migration: [v1.0.10 → v1.0.11](https://github.com/coredns/corefile-migration/compare/v1.0.10...v1.0.11) +- github.com/creack/pty: [v1.1.7 → v1.1.11](https://github.com/creack/pty/compare/v1.1.7...v1.1.11) +- github.com/docker/docker: [bd33bbf → v20.10.2+incompatible](https://github.com/docker/docker/compare/bd33bbf...v20.10.2) +- github.com/go-logr/logr: [v0.2.0 → v0.4.0](https://github.com/go-logr/logr/compare/v0.2.0...v0.4.0) +- github.com/go-openapi/spec: [v0.19.3 → v0.19.5](https://github.com/go-openapi/spec/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/strfmt: [v0.19.3 → v0.19.5](https://github.com/go-openapi/strfmt/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/validate: [v0.19.5 → v0.19.8](https://github.com/go-openapi/validate/compare/v0.19.5...v0.19.8) +- github.com/gogo/protobuf: [v1.3.1 → v1.3.2](https://github.com/gogo/protobuf/compare/v1.3.1...v1.3.2) +- github.com/golang/mock: [v1.4.1 → v1.4.4](https://github.com/golang/mock/compare/v1.4.1...v1.4.4) +- github.com/google/cadvisor: [v0.38.5 → v0.39.0](https://github.com/google/cadvisor/compare/v0.38.5...v0.39.0) +- github.com/heketi/heketi: [c2e2a4a → v10.2.0+incompatible](https://github.com/heketi/heketi/compare/c2e2a4a...v10.2.0) +- github.com/kisielk/errcheck: [v1.2.0 → v1.5.0](https://github.com/kisielk/errcheck/compare/v1.2.0...v1.5.0) +- github.com/konsorten/go-windows-terminal-sequences: [v1.0.3 → 
v1.0.2](https://github.com/konsorten/go-windows-terminal-sequences/compare/v1.0.3...v1.0.2) +- github.com/kr/text: [v0.1.0 → v0.2.0](https://github.com/kr/text/compare/v0.1.0...v0.2.0) +- github.com/mattn/go-runewidth: [v0.0.2 → v0.0.7](https://github.com/mattn/go-runewidth/compare/v0.0.2...v0.0.7) +- github.com/miekg/dns: [v1.1.4 → v1.1.35](https://github.com/miekg/dns/compare/v1.1.4...v1.1.35) +- github.com/moby/sys/mountinfo: [v0.1.3 → v0.4.0](https://github.com/moby/sys/mountinfo/compare/v0.1.3...v0.4.0) +- github.com/moby/term: [672ec06 → df9cb8a](https://github.com/moby/term/compare/672ec06...df9cb8a) +- github.com/mrunalp/fileutils: [abd8a0e → v0.5.0](https://github.com/mrunalp/fileutils/compare/abd8a0e...v0.5.0) +- github.com/olekukonko/tablewriter: [a0225b3 → v0.0.4](https://github.com/olekukonko/tablewriter/compare/a0225b3...v0.0.4) +- github.com/opencontainers/runc: [v1.0.0-rc92 → v1.0.0-rc93](https://github.com/opencontainers/runc/compare/v1.0.0-rc92...v1.0.0-rc93) +- github.com/opencontainers/runtime-spec: [4d89ac9 → e6143ca](https://github.com/opencontainers/runtime-spec/compare/4d89ac9...e6143ca) +- github.com/opencontainers/selinux: [v1.6.0 → v1.8.0](https://github.com/opencontainers/selinux/compare/v1.6.0...v1.8.0) +- github.com/sergi/go-diff: [v1.0.0 → v1.1.0](https://github.com/sergi/go-diff/compare/v1.0.0...v1.1.0) +- github.com/sirupsen/logrus: [v1.6.0 → v1.7.0](https://github.com/sirupsen/logrus/compare/v1.6.0...v1.7.0) +- github.com/syndtr/gocapability: [d983527 → 42c35b4](https://github.com/syndtr/gocapability/compare/d983527...42c35b4) +- github.com/willf/bitset: [d5bec33 → v1.1.11](https://github.com/willf/bitset/compare/d5bec33...v1.1.11) +- github.com/yuin/goldmark: [v1.1.27 → v1.2.1](https://github.com/yuin/goldmark/compare/v1.1.27...v1.2.1) +- golang.org/x/crypto: 7f63de1 → 5ea612d +- golang.org/x/exp: 6cc2880 → 85be41e +- golang.org/x/mobile: d2bd2a2 → e6ae53a +- golang.org/x/mod: v0.3.0 → ce943fd +- golang.org/x/net: 69a7880 → 3d97a24 +- golang.org/x/sync: cd5d95a → 67f06af +- golang.org/x/sys: 5cba982 → a50acf3 +- golang.org/x/time: 3af7569 → f8bda1e +- golang.org/x/tools: c1934b7 → v0.1.0 +- gopkg.in/check.v1: 41f04d3 → 8fa4692 +- gopkg.in/yaml.v2: v2.2.8 → v2.4.0 +- gotest.tools/v3: v3.0.2 → v3.0.3 +- k8s.io/gengo: 83324d8 → b6c5ce2 +- k8s.io/klog/v2: v2.4.0 → v2.8.0 +- k8s.io/kube-openapi: d219536 → 591a79e +- k8s.io/system-validators: v1.2.0 → v1.4.0 +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.14 → v0.0.15 +- sigs.k8s.io/structured-merge-diff/v4: v4.0.2 → v4.1.0 ### 제거 -- github.com/armon/consul-api: [eb2c6b5](https://github.com/armon/consul-api/tree/eb2c6b5) -- github.com/go-ini/ini: [v1.9.0](https://github.com/go-ini/ini/tree/v1.9.0) -- github.com/ugorji/go: [v1.1.4](https://github.com/ugorji/go/tree/v1.1.4) -- github.com/xlab/handysort: [fb3537e](https://github.com/xlab/handysort/tree/fb3537e) -- github.com/xordataexchange/crypt: [b2862e3](https://github.com/xordataexchange/crypt/tree/b2862e3) -- vbom.ml/util: db5cfe1 +- github.com/codegangsta/negroni: [v1.0.0](https://github.com/codegangsta/negroni/tree/v1.0.0) +- github.com/docker/spdystream: 
[449fdfc](https://github.com/docker/spdystream/tree/449fdfc) +- github.com/golangplus/bytes: [45c989f](https://github.com/golangplus/bytes/tree/45c989f) +- github.com/golangplus/fmt: [2a5d6d7](https://github.com/golangplus/fmt/tree/2a5d6d7) +- github.com/gorilla/context: [v1.1.1](https://github.com/gorilla/context/tree/v1.1.1) +- github.com/kr/pty: [v1.1.5](https://github.com/kr/pty/tree/v1.1.5) +- rsc.io/quote/v3: v3.1.0 +- rsc.io/sampler: v1.3.0 +- sigs.k8s.io/kustomize: v2.0.3+incompatible -# v1.20.0-rc.0 +# v1.21.0-rc.0 -## Downloads for v1.20.0-rc.0 +## Downloads for v1.21.0-rc.0 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes.tar.gz) | acfee8658831f9503fccda0904798405434f17be7064a361a9f34c6ed04f1c0f685e79ca40cef5fcf34e3193bacbf467665e8dc277e0562ebdc929170034b5ae -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-src.tar.gz) | 9d962f8845e1fa221649cf0c0e178f0f03808486c49ea15ab5ec67861ec5aa948cf18bc0ee9b2067643c8332227973dd592e6a4457456a9d9d80e8ef28d5f7c3 +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes.tar.gz) | ef53a41955d6f8a8d2a94636af98b55d633fb8a5081517559039e019b3dd65c9d10d4e7fa297ab88a7865d772f3eecf72e7b0eeba5e87accb4000c91da33e148 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-src.tar.gz) | 9335a01b50d351776d3b8d00c07a5233844c51d307e361fa7e55a0620c1cb8b699e43eacf45ae9cafd8cbc44752e6987450c528a5bede8204706b7673000b5fc ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-darwin-amd64.tar.gz) | 062b57f1a450fe01d6184f104d81d376bdf5720010412821e315fd9b1b622a400ac91f996540daa66cee172006f3efade4eccc19265494f1a1d7cc9450f0b50a -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-386.tar.gz) | 86e96d2c2046c5e62e02bef30a6643f25e01f1b3eba256cab7dd61252908540c26cb058490e9cecc5a9bad97d2b577f5968884e9f1a90237e302419f39e068bc -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-amd64.tar.gz) | 619d3afb9ce902368390e71633396010e88e87c5fd848e3adc71571d1d4a25be002588415e5f83afee82460f8a7c9e0bd968335277cb8f8cb51e58d4bb43e64e -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-arm.tar.gz) | 60965150a60ab3d05a248339786e0c7da4b89a04539c3719737b13d71302bac1dd9bcaa427d8a1f84a7b42d0c67801dce2de0005e9e47d21122868b32ac3d40f -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-arm64.tar.gz) | 688e064f4ef6a17189dbb5af468c279b9de35e215c40500fb97b1d46692d222747023f9e07a7f7ba006400f9532a8912e69d7c5143f956b1dadca144c67ee711 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-ppc64le.tar.gz) | 47b8abc02b42b3b1de67da184921b5801d7e3cb09befac840c85913193fc5ac4e5e3ecfcb57da6b686ff21af9a3bd42ae6949d4744dbe6ad976794340e328b83 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-s390x.tar.gz) | 971b41d3169f30e6c412e0254c180636abb7ccc8dcee6641b0e9877b69752fc61aa30b76c19c108969df654fe385da3cb3a44dd59d3c28dc45561392d7e08874 -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-windows-386.tar.gz) | 2d34e8387e31531d9aca5655f2f0d18e75b01825dc1c39b7beb73a7b7b610e2ba429e5ca97d5c41a71b67e75e7096c86ab63fda9baab4c0878c1ccb3a1aefac8 
-[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-windows-amd64.tar.gz) | f909640f4140693bb871936f10a40e79b43502105d0adb318b35bb7a64a770ad9d05a3a732368ccd3d15d496d75454789165bd1f5c2571da9a00569b3e6c007c +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-darwin-amd64.tar.gz) | 964135e43234cee275c452f5f06fb6d2bcd3cff3211a0d50fa35fff1cc4446bc5a0ac5125405dadcfb6596cb152afe29fabf7aad5b35b100e1288db890b70f8e +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-darwin-arm64.tar.gz) | 50d782abaa4ded5e706b3192d87effa953ceabbd7d91e3d48b0c1fa2206a1963a909c14b923560f5d09cac2c7392edc5f38a13fbf1e9a40bc94e3afe8de10622 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-386.tar.gz) | 72af5562f24184a2d7c27f95fa260470da979fbdcacce39a372f8f3add2991d7af8bc78f4e1dbe7a0f97e3f559b149b72a51491d3b13008da81872ee50f02f37 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-amd64.tar.gz) | 1eddb8f6b51e005bc6f7b519d036cbe3d2f6d97dbf7d212dd933fb56354c29f222d050519115a9bcf94555aef095db7cf763469e47bb4ae3c6c07f97edf437cb +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-arm.tar.gz) | 670f8ca60ea3cf0bb3262a772715e0ea735fccda6a92f3186299361dc455b304ae177d4017e0b67bbfa4a95e36f4cc3f7eb335e2a5130c93ac3fba2aff4519bf +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-arm64.tar.gz) | a69a47907cff138ba393d8c87044fd95d97f3ca8f35d301b50742e2801ad7c229d99d6667971091f65825eb51854d585be0dd7421670110b1aa567e67e7ab4b3 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-ppc64le.tar.gz) | b929feade94b71c81908abdcd4343b1e1e20098fd65e10d4d02585ad649d292d06f52c7ddc349efa188ce5b093e703c7aa9582c6ae5a69699adb87bbf5350243 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-s390x.tar.gz) | 899d1470e412282cf289d8e24806d1a08c62ec0151f345ae3c9e497cc7bc0feab76498de4dd897d6adcdfa0c422e6b1a37e25d928669030f53457fd69d6e7df7 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-windows-386.tar.gz) | 9f0bc90a269eabd06fe4f637b5172a3a6a7d3de26de0d66504c2e1f2093083c584ea39031db6075a7da7a86b98c48bed25aa88d4ac09060b38692c6a5b637078 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-windows-amd64.tar.gz) | 05c8cc10188a1294b0d51d052942742a9b26411a08ec73494bf0e728a8a167e0a7863bdfc8864e76a371b584380098381805341e18b4b283b5d0cf298d5f7c7c ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-amd64.tar.gz) | 0ea4458ae34108c633b4d48f1f128c6274dbc82b613492e78b3e0a2f656ac0df0bb9a75124e15d67c8e81850adcecf19f4ab0234c17247ee7ddf84f2df3e5eaa -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-arm.tar.gz) | aef6a4d457faa29936603370f29a8523bb274211c3cb5101bd31aaf469c91ba6bd149ea99a4ccdd83352cf37e4d6508c5ee475ec10292bccd2f77ceea31e1c28 -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-arm64.tar.gz) | 4829f473e9d60f9929ad17c70fdc2b6b6509ed75418be0b23a75b28580949736cb5b0bd6382070f93aa0a2a8863f0b1596daf965186ca749996c29d03ef7d8b8 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-ppc64le.tar.gz) | 
9ab0790d382a3e28df1c013762c09da0085449cfd09d176d80be932806c24a715ea829be0075c3e221a2ad9cf06e726b4c39ab41987c1fb0fee2563e48206763 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-s390x.tar.gz) | 98670b587e299856dd9821b7517a35f9a65835b915b153de08b66c54d82160438b66f774bf5306c07bc956d70ff709860bc23162225da5e89f995d3fdc1f0122 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-amd64.tar.gz) | 355f278728ef7ac7eb2f5568c99c1429543c6302bbd0ed3bd0378c08116075e56ae850a49241313f078e2392702672ec6c9b70c8d97b4f2f5f4bee36828a63ba +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-arm.tar.gz) | 9ac02c2825e2fd4e92f0c0f67180c67c24e32841ccbabc82284bf6293727ffecfae65e8a42b527c2a7ca482752384928eb65c2a1706144ae7819a6b3a1ab291c +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-arm64.tar.gz) | eb412453da03c82a9248412c8ccf4d4baa1fbfa81edd8d4f81d28969b40a3727e18934accc68f643d253446c58ffd2623292402495480b3d4b2a837b5318b957 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-ppc64le.tar.gz) | 07da2812c35bbc427ee5b4a0b601c3ae271e0d50ab0dd4c5c25399f43506fa2a187642eb9d4d2085df7b90264d48ea2f31088af87d9efa7eb2e87f91e1fdbde4 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-s390x.tar.gz) | 3b79442a3d6e389c4ff105922a8e49994c0b6c088d2c501bd8c78d9f9e814902f5bb72c8f9c89380b750fda9b3a336759b9b68f11d70bef4f0e984564a95c29e ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-amd64.tar.gz) | 699e9c8d1837198312eade8eb6fec390f6a2fea9e08207d2f58e8bb6e3e799028aca69e4670aac0a4ba7cf0af683aee2c158bf78cc520c80edc876c8d94d521a -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-arm.tar.gz) | f3b5eab0669490e3cd7e802693daf3555d08323dfff6e73a881fce00fed4690e8bdaf1610278d9de74036ca37631016075e5695a02158b7d3e7582b20ef7fa35 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-arm64.tar.gz) | e5012f77363561a609aaf791baaa17d09009819c4085a57132e5feb5366275a54640094e6ed1cba527f42b586c6d62999c2a5435edf5665ff0e114db4423c2ae -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-ppc64le.tar.gz) | 2a6d6501620b1a9838dff05c66a40260cc22154a28027813346eb16e18c386bc3865298a46a0f08da71cd55149c5e7d07c4c4c431b4fd231486dd9d716548adb -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-s390x.tar.gz) | 5eca02777519e31428a1e5842fe540b813fb8c929c341bbc71dcfd60d98deb89060f8f37352e8977020e21e053379eead6478eb2d54ced66fb9d38d5f3142bf0 -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-windows-amd64.tar.gz) | 8ace02e7623dff894e863a2e0fa7dfb916368431d1723170713fe82e334c0ae0481b370855b71e2561de0fb64fed124281be604761ec08607230b66fb9ed1c03 +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-amd64.tar.gz) | f12edf1faf5f07de1ebc5a8626601c12927902e10aca3f11e398637382fdf55365dbd9a0ef38858553fb7569495ae2cf68f155dd2e49b85b27d76fb599bb92e4 +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-arm.tar.gz) | 4fba8fc4e2102f07fb778aab597ec7231ea65c35e1aa618fe98b707b64a931237bd842c173e9120326e4d9deb983bb3917176762bba2212612bbc09d6e2105c4 
+[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-arm64.tar.gz) | a2e1be5459a8346839970faf4e7ebdb8ab9f3273e02babf1f3199b06bdb67434a2d18fcd1628cf1b989756e99d8dad6624a455b9db11d50f51f509f4df5c27da +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-ppc64le.tar.gz) | 16d2c1cc295474fc49fe9a827ddd73e81bdd6b76af7074987b90250023f99b6d70bf474e204c7d556802111984fcb3a330740b150bdc7970d0e3634eb94a1665 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-s390x.tar.gz) | 9dc6faa6cd007b13dfce703f3e271f80adcc4e029c90a4a9b4f2f143b9756f2893f8af3d7c2cf813f2bd6731cffd87d15d4229456c1685939f65bf467820ec6e +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-windows-amd64.tar.gz) | f8bac2974c9142bfb80cd5eadeda79f79f27b78899a4e6e71809b795c708824ba442be83fdbadb98e01c3823dd8350776358258a205e851ed045572923cacba7 -## Changelog since v1.20.0-beta.2 +## Changelog since v1.21.0-beta.1 +## Urgent Upgrade Notes + +### (No, really, you MUST read this before you upgrade) + + - Migrated pkg/kubelet/cm/cpuset/cpuset.go to structured logging. Exit code changed from 255 to 1. ([#100007](https://github.com/kubernetes/kubernetes/pull/100007), [@utsavoza](https://github.com/utsavoza)) [SIG Instrumentation and Node] + ## Changes by Kind -### Feature +### API Change -- Kubernetes is now built using go1.15.5 - - build: Update to k/repo-infra@v0.1.2 (supports go1.15.5) ([#95776](https://github.com/kubernetes/kubernetes/pull/95776), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing] +- Add Probe-level terminationGracePeriodSeconds field (see the probe sketch below) ([#99375](https://github.com/kubernetes/kubernetes/pull/99375), [@ehashman](https://github.com/ehashman)) [SIG API Machinery, Apps, Node and Testing] +- CSIServiceAccountToken is now beta ([#99298](https://github.com/kubernetes/kubernetes/pull/99298), [@zshihang](https://github.com/zshihang)) [SIG Auth, Storage and Testing] +- Discovery.k8s.io/v1beta1 EndpointSlices are deprecated in favor of discovery.k8s.io/v1, and will no longer be served in Kubernetes v1.25. ([#100472](https://github.com/kubernetes/kubernetes/pull/100472), [@liggitt](https://github.com/liggitt)) [SIG Network] +- FieldManager no longer owns fields that get reset before the object is persisted (e.g. "status wiping"). ([#99661](https://github.com/kubernetes/kubernetes/pull/99661), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery, Auth and Testing] +- Generic ephemeral volumes are beta. ([#99643](https://github.com/kubernetes/kubernetes/pull/99643), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, CLI, Node, Storage and Testing] +- Implement GetAvailableResources in the podresources API. ([#95734](https://github.com/kubernetes/kubernetes/pull/95734), [@fromanirh](https://github.com/fromanirh)) [SIG Instrumentation, Node and Testing] +- The Endpoints controller will now set the `endpoints.kubernetes.io/over-capacity` annotation to "warning" when an Endpoints resource contains more than 1000 addresses. In a future release, the controller will truncate Endpoints that exceed this limit. The EndpointSlice API can be used to support a significantly larger number of addresses. ([#99975](https://github.com/kubernetes/kubernetes/pull/99975), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
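As a minimal, hypothetical sketch of the probe-level field noted above (the pod name, image, and values are invented, not taken from the PR), the probe-level value caps the grace period the kubelet uses when it kills a container because its liveness probe failed; in this release the field sits behind a feature gate:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: probe-grace-demo            # hypothetical name
spec:
  terminationGracePeriodSeconds: 3600   # pod-level default, e.g. slow cleanup
  containers:
  - name: app
    image: nginx                    # placeholder image
    livenessProbe:
      httpGet:
        path: /healthz
        port: 8080
      periodSeconds: 10
      failureThreshold: 3
      # New probe-level field: use a much shorter grace period when this
      # probe fails, instead of the pod-level 3600 seconds.
      terminationGracePeriodSeconds: 30
```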
+- The PodDisruptionBudget API has been promoted to policy/v1 with no schema changes. The only functional change is that an empty selector (`{}`) written to a policy/v1 PodDisruptionBudget now selects all pods in the namespace (see the PodDisruptionBudget sketch below). The behavior of the policy/v1beta1 API remains unchanged. The policy/v1beta1 PodDisruptionBudget API is deprecated and will no longer be served in 1.25+. ([#99290](https://github.com/kubernetes/kubernetes/pull/99290), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Scheduling and Testing] +- Topology Aware Hints are now available in alpha and can be enabled with the `TopologyAwareHints` feature gate. ([#99522](https://github.com/kubernetes/kubernetes/pull/99522), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, Auth, Instrumentation, Network and Testing] -### Failing Test +### Feature -- Resolves an issue running Ingress conformance tests on clusters which use finalizers on Ingress objects to manage releasing load balancer resources ([#96742](https://github.com/kubernetes/kubernetes/pull/96742), [@spencerhance](https://github.com/spencerhance)) [SIG Network and Testing] -- The Conformance test "validates that there is no conflict between pods with same hostPort but different hostIP and protocol" now validates the connectivity to each hostPort, in addition to the functionality. ([#96627](https://github.com/kubernetes/kubernetes/pull/96627), [@aojea](https://github.com/aojea)) [SIG Scheduling and Testing] +- Add e2e test to validate performance metrics of volume lifecycle operations ([#94334](https://github.com/kubernetes/kubernetes/pull/94334), [@RaunakShah](https://github.com/RaunakShah)) [SIG Storage and Testing] +- EmptyDir memory backed volumes are sized as the minimum of pod allocatable memory on a host and an optional explicit user-provided value. ([#100319](https://github.com/kubernetes/kubernetes/pull/100319), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node] +- Enables Kubelet to check volume condition and log events to corresponding pods. ([#99284](https://github.com/kubernetes/kubernetes/pull/99284), [@fengzixu](https://github.com/fengzixu)) [SIG Apps, Instrumentation, Node and Storage] +- Introduce a churn operator to the scheduler perf testing framework. ([#98900](https://github.com/kubernetes/kubernetes/pull/98900), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing]
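To make the policy/v1 selector change concrete, here is a minimal, hypothetical manifest (the name and `minAvailable` value are illustrative); under policy/v1beta1 the same empty selector matched no pods:

```yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: namespace-wide-pdb   # hypothetical name
spec:
  minAvailable: 1
  # In policy/v1 an empty selector selects every pod in the namespace;
  # policy/v1beta1 behavior is unchanged.
  selector: {}
```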
+- Kubernetes is now built with Golang 1.16.1 ([#100106](https://github.com/kubernetes/kubernetes/pull/100106), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing] +- Migrated pkg/kubelet/cm/devicemanager to structured logging ([#99976](https://github.com/kubernetes/kubernetes/pull/99976), [@knabben](https://github.com/knabben)) [SIG Instrumentation and Node] +- Migrated pkg/kubelet/cm/memorymanager to structured logging ([#99974](https://github.com/kubernetes/kubernetes/pull/99974), [@knabben](https://github.com/knabben)) [SIG Instrumentation and Node] +- Migrated pkg/kubelet/cm/topologymanager to structured logging ([#99969](https://github.com/kubernetes/kubernetes/pull/99969), [@knabben](https://github.com/knabben)) [SIG Instrumentation and Node] +- Rename metrics `etcd_object_counts` to `apiserver_storage_object_counts` and mark it as stable. The original `etcd_object_counts` metric name is marked as "Deprecated" and will be removed in the future. ([#99785](https://github.com/kubernetes/kubernetes/pull/99785), [@erain](https://github.com/erain)) [SIG API Machinery, Instrumentation and Testing] +- Update pause container to run as pseudo user and group `65535:65535`. This implies the release of version 3.5 of the container images. ([#97963](https://github.com/kubernetes/kubernetes/pull/97963), [@saschagrunert](https://github.com/saschagrunert)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Node, Release, Security and Testing] +- Users might specify the `kubectl.kubernetes.io/default-exec-container` annotation in a Pod to preselect a container for kubectl commands (see the annotation sketch below). ([#99833](https://github.com/kubernetes/kubernetes/pull/99833), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG CLI] ### Bug or Regression -- Bump node-problem-detector version to v0.8.5 to fix OOM detection in with Linux kernels 5.1+ ([#96716](https://github.com/kubernetes/kubernetes/pull/96716), [@tosi3k](https://github.com/tosi3k)) [SIG Cloud Provider, Scalability and Testing] -- Changes to timeout parameter handling in 1.20.0-beta.2 have been reverted to avoid breaking backwards compatibility with existing clients. ([#96727](https://github.com/kubernetes/kubernetes/pull/96727), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Testing] -- Duplicate owner reference entries in create/update/patch requests now get deduplicated by the API server. The client sending the request now receives a warning header in the API response. Clients should stop sending requests with duplicate owner references. The API server may reject such requests as early as 1.24. ([#96185](https://github.com/kubernetes/kubernetes/pull/96185), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery and Testing] -- Fix: resize Azure disk issue when it's in attached state ([#96705](https://github.com/kubernetes/kubernetes/pull/96705), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fixed a bug where aggregator_unavailable_apiservice metrics were reported for deleted apiservices. ([#96421](https://github.com/kubernetes/kubernetes/pull/96421), [@dgrisonnet](https://github.com/dgrisonnet)) [SIG API Machinery and Instrumentation]
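A minimal, hypothetical sketch of the annotation mentioned above (pod and container names are invented); with it in place, kubectl commands such as `kubectl exec` target the `app` container by default rather than the first one listed:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: web-with-sidecar   # hypothetical name
  annotations:
    # Preselects the container that kubectl commands target by default.
    kubectl.kubernetes.io/default-exec-container: app
spec:
  containers:
  - name: app
    image: nginx
  - name: logshipper       # hypothetical sidecar
    image: busybox
    command: ["sh", "-c", "sleep 3600"]
```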
-- Fixes code generation for non-namespaced create subresources fake client test. ([#96586](https://github.com/kubernetes/kubernetes/pull/96586), [@Doude](https://github.com/Doude)) [SIG API Machinery] -- HTTP/2 connection health check is enabled by default in all Kubernetes clients. The feature should work out-of-the-box. If needed, users can tune the feature via the HTTP2_READ_IDLE_TIMEOUT_SECONDS and HTTP2_PING_TIMEOUT_SECONDS environment variables. The feature is disabled if HTTP2_READ_IDLE_TIMEOUT_SECONDS is set to 0. ([#95981](https://github.com/kubernetes/kubernetes/pull/95981), [@caesarxuchao](https://github.com/caesarxuchao)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] -- Kubeadm: fix coredns migration should be triggered when there are newdefault configs during kubeadm upgrade ([#96907](https://github.com/kubernetes/kubernetes/pull/96907), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] -- Reduce volume name length for vsphere volumes ([#96533](https://github.com/kubernetes/kubernetes/pull/96533), [@gnufied](https://github.com/gnufied)) [SIG Storage] -- Resolves a regression in 1.19+ with workloads targeting deprecated beta os/arch labels getting stuck in NodeAffinity status on node startup. ([#96810](https://github.com/kubernetes/kubernetes/pull/96810), [@liggitt](https://github.com/liggitt)) [SIG Node] +- Add ability to skip OpenAPI handler installation to the GenericAPIServer ([#100341](https://github.com/kubernetes/kubernetes/pull/100341), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery] +- Count pod overhead against an entity's ResourceQuota (see the RuntimeClass sketch below) ([#99600](https://github.com/kubernetes/kubernetes/pull/99600), [@gjkim42](https://github.com/gjkim42)) [SIG API Machinery and Node] +- EndpointSlice controllers are less likely to create duplicate EndpointSlices. ([#100103](https://github.com/kubernetes/kubernetes/pull/100103), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- Ensure only one LoadBalancer rule is created when HA mode is enabled ([#99825](https://github.com/kubernetes/kubernetes/pull/99825), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- Fixed a race condition on API server startup ensuring previously created webhook configurations are effective before the first write request is admitted. ([#95783](https://github.com/kubernetes/kubernetes/pull/95783), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] +- Fixed authentication_duration_seconds metric. Previously it included the whole apiserver request duration. ([#99944](https://github.com/kubernetes/kubernetes/pull/99944), [@marseel](https://github.com/marseel)) [SIG API Machinery, Instrumentation and Scalability] +- Fixes an issue where inline AzureFile secrets could not be accessed from the pod's namespace. ([#100563](https://github.com/kubernetes/kubernetes/pull/100563), [@msau42](https://github.com/msau42)) [SIG Storage]
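For context on the pod overhead item above: overhead comes from a RuntimeClass, as in this hypothetical sketch (name, handler, and quantities are invented); with the fix, the `podFixed` amounts are charged against the namespace's ResourceQuota together with the pod's own requests:

```yaml
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: sandboxed   # hypothetical name
handler: runsc      # example handler; depends on the node's runtime setup
overhead:
  podFixed:
    cpu: 250m       # added to every pod that uses this RuntimeClass
    memory: 64Mi
```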
+- Improve speed of vSphere PV provisioning and reduce the number of API calls ([#100054](https://github.com/kubernetes/kubernetes/pull/100054), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] +- Kubectl: Fixed panic when describing an ingress backend without an API Group ([#100505](https://github.com/kubernetes/kubernetes/pull/100505), [@lauchokyip](https://github.com/lauchokyip)) [SIG CLI] +- Kubectl: fix case of age column in describe node (#96963, @bl-ue) ([#96963](https://github.com/kubernetes/kubernetes/pull/96963), [@bl-ue](https://github.com/bl-ue)) [SIG CLI] +- Kubelet.exe on Windows now checks that the process is running as administrator and the executing user account is listed in the built-in administrators group. This is equivalent to checking that the process is running as uid 0. ([#96616](https://github.com/kubernetes/kubernetes/pull/96616), [@perithompson](https://github.com/perithompson)) [SIG Node and Windows] +- Kubelet: Fixed a bug in getting the number of CPUs when the number of logical processors is more than 64 on Windows ([#97378](https://github.com/kubernetes/kubernetes/pull/97378), [@hwdef](https://github.com/hwdef)) [SIG Node and Windows] +- Pass `KUBE_BUILD_CONFORMANCE=y` to the package-tarballs to re-enable building the conformance tarballs. ([#100571](https://github.com/kubernetes/kubernetes/pull/100571), [@puerco](https://github.com/puerco)) [SIG Release] +- Pod log stats for Windows now report metrics ([#99221](https://github.com/kubernetes/kubernetes/pull/99221), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node, Storage, Testing and Windows] + +### Other (Cleanup or Flake) + +- A new storage E2E testsuite covers CSIStorageCapacity publishing if a driver opts into the test. ([#100537](https://github.com/kubernetes/kubernetes/pull/100537), [@pohly](https://github.com/pohly)) [SIG Storage and Testing] +- Convert cmd/kubelet/app/server.go to structured logging ([#98334](https://github.com/kubernetes/kubernetes/pull/98334), [@wawa0210](https://github.com/wawa0210)) [SIG Node] +- If the kube-apiserver has the goaway feature enabled, clients require golang 1.15.8 or 1.16+ to avoid an unexpected data race. ([#98809](https://github.com/kubernetes/kubernetes/pull/98809), [@answer1991](https://github.com/answer1991)) [SIG API Machinery] +- Increased CSINodeIDMaxLength from 128 bytes to 192 bytes.
([#98753](https://github.com/kubernetes/kubernetes/pull/98753), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps and Storage] +- Migrate `pkg/kubelet/pluginmanager` to structured logging ([#99885](https://github.com/kubernetes/kubernetes/pull/99885), [@qingwave](https://github.com/qingwave)) [SIG Node] +- Migrate `pkg/kubelet/preemption/preemption.go` and `pkg/kubelet/logs/container_log_manager.go` to structured logging ([#99848](https://github.com/kubernetes/kubernetes/pull/99848), [@qingwave](https://github.com/qingwave)) [SIG Node] +- Migrate `pkg/kubelet/(cri)` to structured logging ([#99006](https://github.com/kubernetes/kubernetes/pull/99006), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node] +- Migrate `pkg/kubelet/(node, pod)` to structured logging ([#98847](https://github.com/kubernetes/kubernetes/pull/98847), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node] +- Migrate `pkg/kubelet/(volume,container)` to structured logging ([#98850](https://github.com/kubernetes/kubernetes/pull/98850), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node] +- Migrate `pkg/kubelet/kubelet_node_status.go` to structured logging ([#98154](https://github.com/kubernetes/kubernetes/pull/98154), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node and Release] +- Migrate `pkg/kubelet/lifecycle,oom` to structured logging ([#99479](https://github.com/kubernetes/kubernetes/pull/99479), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG Instrumentation and Node] +- Migrate cmd/kubelet/+ pkg/kubelet/cadvisor/cadvisor_linux.go + pkg/kubelet/cri/remote/util/util_unix.go + pkg/kubelet/images/image_manager.go to structured logging ([#99994](https://github.com/kubernetes/kubernetes/pull/99994), [@AfrouzMashayekhi](https://github.com/AfrouzMashayekhi)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/cm/container_manager_linux.go and pkg/kubelet/cm/container_manager_stub.go to structured logging ([#100001](https://github.com/kubernetes/kubernetes/pull/100001), [@shiyajuan123](https://github.com/shiyajuan123)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/cm/cpumanager/{topology/topology.go, policy_none.go, cpu_assignment.go} to structured logging ([#100163](https://github.com/kubernetes/kubernetes/pull/100163), [@lala123912](https://github.com/lala123912)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/cm/cpumanager/state to structured logging ([#99563](https://github.com/kubernetes/kubernetes/pull/99563), [@jmguzik](https://github.com/jmguzik)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/config to structured logging ([#100002](https://github.com/kubernetes/kubernetes/pull/100002), [@AfrouzMashayekhi](https://github.com/AfrouzMashayekhi)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/kubelet.go to structured logging ([#99861](https://github.com/kubernetes/kubernetes/pull/99861), [@navidshaikh](https://github.com/navidshaikh)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/kubeletconfig to structured logging ([#100265](https://github.com/kubernetes/kubernetes/pull/100265), [@ehashman](https://github.com/ehashman)) [SIG Node] +- Migrate pkg/kubelet/kuberuntime to structured logging
([#99970](https://github.com/kubernetes/kubernetes/pull/99970), [@krzysiekg](https://github.com/krzysiekg)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/prober to structured logging ([#99830](https://github.com/kubernetes/kubernetes/pull/99830), [@krzysiekg](https://github.com/krzysiekg)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/winstats to structured logging ([#99855](https://github.com/kubernetes/kubernetes/pull/99855), [@hexxdump](https://github.com/hexxdump)) [SIG Instrumentation and Node] +- Migrate probe log messages to structured logging ([#97093](https://github.com/kubernetes/kubernetes/pull/97093), [@aldudko](https://github.com/aldudko)) [SIG Instrumentation and Node] +- Migrate remaining kubelet files to structured logging ([#100196](https://github.com/kubernetes/kubernetes/pull/100196), [@ehashman](https://github.com/ehashman)) [SIG Instrumentation and Node] +- `apiserver_storage_objects` (a newer version of `etcd_object_counts`) is promoted and marked as stable. ([#100082](https://github.com/kubernetes/kubernetes/pull/100082), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing] ## Dependencies @@ -967,411 +774,411 @@ filename | sha512 hash _Nothing has changed._ ### Changed -- github.com/google/cadvisor: [v0.38.4 → v0.38.5](https://github.com/google/cadvisor/compare/v0.38.4...v0.38.5) +- github.com/cilium/ebpf: [1c8d4c9 → v0.2.0](https://github.com/cilium/ebpf/compare/1c8d4c9...v0.2.0) +- github.com/containerd/console: [v1.0.0 → v1.0.1](https://github.com/containerd/console/compare/v1.0.0...v1.0.1) +- github.com/containerd/containerd: [v1.4.1 → v1.4.4](https://github.com/containerd/containerd/compare/v1.4.1...v1.4.4) +- github.com/creack/pty: [v1.1.9 → v1.1.11](https://github.com/creack/pty/compare/v1.1.9...v1.1.11) +- github.com/docker/docker: [bd33bbf → v20.10.2+incompatible](https://github.com/docker/docker/compare/bd33bbf...v20.10.2) +- github.com/google/cadvisor: [v0.38.8 → v0.39.0](https://github.com/google/cadvisor/compare/v0.38.8...v0.39.0) +- github.com/konsorten/go-windows-terminal-sequences: [v1.0.3 → v1.0.2](https://github.com/konsorten/go-windows-terminal-sequences/compare/v1.0.3...v1.0.2) +- github.com/moby/sys/mountinfo: [v0.1.3 → v0.4.0](https://github.com/moby/sys/mountinfo/compare/v0.1.3...v0.4.0) +- github.com/moby/term: [672ec06 → df9cb8a](https://github.com/moby/term/compare/672ec06...df9cb8a) +- github.com/mrunalp/fileutils: [abd8a0e → v0.5.0](https://github.com/mrunalp/fileutils/compare/abd8a0e...v0.5.0) +- github.com/opencontainers/runc: [v1.0.0-rc92 → v1.0.0-rc93](https://github.com/opencontainers/runc/compare/v1.0.0-rc92...v1.0.0-rc93) +- github.com/opencontainers/runtime-spec: [4d89ac9 → e6143ca](https://github.com/opencontainers/runtime-spec/compare/4d89ac9...e6143ca) +- github.com/opencontainers/selinux: [v1.6.0 → v1.8.0](https://github.com/opencontainers/selinux/compare/v1.6.0...v1.8.0) +- github.com/sirupsen/logrus: [v1.6.0 → v1.7.0](https://github.com/sirupsen/logrus/compare/v1.6.0...v1.7.0) +- github.com/syndtr/gocapability: [d983527 → 42c35b4](https://github.com/syndtr/gocapability/compare/d983527...42c35b4) +- 
github.com/willf/bitset: [d5bec33 → v1.1.11](https://github.com/willf/bitset/compare/d5bec33...v1.1.11) +- gotest.tools/v3: v3.0.2 → v3.0.3 +- k8s.io/klog/v2: v2.5.0 → v2.8.0 +- sigs.k8s.io/structured-merge-diff/v4: v4.0.3 → v4.1.0 ### Removed _Nothing has changed._ -# v1.20.0-beta.2 +# v1.21.0-beta.1 -## Downloads for v1.20.0-beta.2 +## Downloads for v1.21.0-beta.1 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes.tar.gz) | fe769280aa623802a949b6a35fbddadbba1d6f9933a54132a35625683719595ecf58096a9aa0f7456f8d4931774df21bfa98e148bc3d85913f1da915134f77bd -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-src.tar.gz) | ce1c8d97c52e5189af335d673bd7e99c564816f6adebf249838f7e3f0e920f323b4e398a5d163ea767091497012ec38843c59ff14e6fdd07683b682135eed645 +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes.tar.gz) | c9f4f25242e319e5d90f49d26f239a930aad69677c0f3c2387c56bb13482648a26ed234be2bfe2352508f35010e3eb6d3b127c31a9f24fa1e53ac99c38520fe4 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-src.tar.gz) | 255357db8fa160cab2187658906b674a8b0d9b9a5b5f688cc7b69dc124f5da00362c6cc18ae9b80f7ddb3da6f64c2ab2f12fb9b63a4e063c7366a5375b175cda ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-darwin-amd64.tar.gz) | d6c14bd0f6702f4bbdf14a6abdfa4e5936de5b4efee38aa86c2bd7272967ec6d7868b88fc00ad4a7c3a20717a35e6be2b84e56dec04154fd702315f641409f7c -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-386.tar.gz) | b923c44cb0acb91a8f6fd442c2168aa6166c848f5d037ce50a7cb11502be3698db65836b373c916f75b648d6ac8d9158807a050eecc4e1c77cffa25b386c8cdb -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-amd64.tar.gz) | 8cae14146a9034dcd4e9d69d5d700f195a77aac35f629a148960ae028ed8b4fe12213993fe3e6e464b4b3e111adebe6f3dd7ca0accc70c738ed5cfd8993edd7c -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-arm.tar.gz) | 1f54e5262a0432945ead57fcb924e6bfedd9ea76db1dd9ebd946787a2923c247cf16e10505307b47e365905a1b398678dac5af0f433c439c158a33e08362d97b -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-arm64.tar.gz) | 31cf79c01e4878a231b4881fe3ed5ef790bd5fb5419388438d3f8c6a2129e655aba9e00b8e1d77e0bc5d05ecc75cf4ae02cf8266788822d0306c49c85ee584ed -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-ppc64le.tar.gz) | 2527948c40be2e16724d939316ad5363f15aa22ebf42d59359d8b6f757d30cfef6447434cc93bc5caa5a23a6a00a2da8d8191b6441e06bba469d9d4375989a97 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-s390x.tar.gz) | b777ad764b3a46651ecb0846e5b7f860bb2c1c4bd4d0fcc468c6ccffb7d3b8dcb6dcdd73b13c16ded7219f91bba9f1e92f9258527fd3bb162b54d7901ac303ff -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-windows-386.tar.gz) | 8a2f58aaab01be9fe298e4d01456536047cbdd39a37d3e325c1f69ceab3a0504998be41a9f41a894735dfc4ed22bed02591eea5f3c75ce12d9e95ba134e72ec5 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-windows-amd64.tar.gz) | 2f69cda177a178df149f5de66b7dba7f5ce14c1ffeb7c8d7dc4130c701b47d89bb2fbe74e7a262f573e4d21dee2c92414d050d7829e7c6fc3637a9d6b0b9c5c1 
+[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-darwin-amd64.tar.gz) | 02efd389c8126456416fd2c7ea25c3cc30f612649ad91f631f068d6c0e5e539484d3763cb9a8645ad6b8077e4fcd1552a659d7516ebc4ce6828cf823b65c3016 +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-darwin-arm64.tar.gz) | ac90dcd1699d1d7ff9c8342d481f6d0d97ccdc3ec501a56dc7c9e1898a8f77f712bf66942d304bfe581b5494f13e3efa211865de88f89749780e9e26e673dbdb +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-386.tar.gz) | cce5fb84cc7a1ee664f89d8ad3064307c51c044e9ddd2ae5a004939b69d3b3ef6f29acc5782e27d0c8f0d6d3d9c96e922f5d1b99d210ca3e754666d775df9f0c +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-amd64.tar.gz) | 2e93bbd2e60ad7cd8fe495115e96c55b1dc8facd100a827ef9c197a732679b60cceb9ea7bf92a1f5e328c3b8adfa8d3922cbc5d8370e374f3381b83f5b877b4f +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-arm.tar.gz) | 23f03b6a8fa9decce9b89a2c1bd3dae6d0b2f9e533e35a79e2c5a29326a165259677594ae83c877219a21bdb95557a284e55f4eec12954742794579c89a7d7e5 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-arm64.tar.gz) | 3acf3101b46568b0ded6b90f13df0e918870d6812dc1a584903ddb8ba146484a204b9e442f863df47c7d4dab043fd9f7294c5510d3eb09004993d6d3b1e9e13c +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-ppc64le.tar.gz) | f749198df69577f62872d3096138a1b8969ec6b1636eb68eb56640bf33cf5f97a11df4363462749a1c0dc3ccbb8ae76c5d66864bf1c5cf7e52599caaf498e504 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-s390x.tar.gz) | 3f6c0189d59fca22cdded3a02c672ef703d17e6ab0831e173a870e14ccec436c142600e9fc35b403571b6906f2be8d18d38d33330f7caada971bbe1187b388f6 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-windows-386.tar.gz) | 03d92371c425cf331c80807c0ac56f953be304fc6719057258a363d527d186d610e1d4b4d401b34128062983265c2e21f2d2389231aa66a6f5787eee78142cf6 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-windows-amd64.tar.gz) | 489ece0c886a025ca3a25d28518637a5a824ea6544e7ef8778321036f13c8909a978ad4ceca966cec1e1cda99f25ca78bfd37460d1231c77436d216d43c872ad ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-amd64.tar.gz) | 3ecaac0213d369eab691ac55376821a80df5013cb12e1263f18d1c236a9e49d42b3cea422175556d8f929cdf3109b22c0b6212ac0f2e80cc7a5f4afa3aba5f24 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-arm.tar.gz) | 580030b57ff207e177208fec0801a43389cae10cc2c9306327d354e7be6a055390184531d54b6742e0983550b7a76693cc4a705c2d2f4ac30495cf63cef26b9b -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-arm64.tar.gz) | 3e3286bd54671549fbef0dfdaaf1da99bc5c3efb32cc8d1e1985d9926520cea0c43bcf7cbcbbc8b1c1a95eab961255693008af3bb1ba743362998b5f0017d6d7 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-ppc64le.tar.gz) | 9fa051e7e97648e97e26b09ab6d26be247b41b1a5938d2189204c9e6688e455afe76612bbcdd994ed5692935d0d960bd96dc222bce4b83f61d62557752b9d75b 
-[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-s390x.tar.gz) | fa85d432eff586f30975c95664ac130b9f5ae02dc52b97613ed7a41324496631ea11d1a267daba564cf2485a9e49707814d86bbd3175486c7efc8b58a9314af5 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-amd64.tar.gz) | 2e95cb31d5afcb6842c41d25b7d0c18dd7e65693b2d93c8aa44e5275f9c6201e1a67685c7a8ddefa334babb04cb559d26e39b6a18497695a07dc270568cae108 +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-arm.tar.gz) | 2927e82b98404c077196ce3968f3afd51a7576aa56d516019bd3976771c0213ba01e78da5b77478528e770da0d334e9457995fafb98820ed68b2ee34beb68856 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-arm64.tar.gz) | e0f7aea3ea598214a9817bc04949389cb7e4e7b9503141a590ef48c0b681fe44a4243ebc6280752fa41aa1093149b3ee1bcef7664edb746097a342281825430b +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-ppc64le.tar.gz) | c011f7eb01294e9ba5d5ced719068466f88ed595dcb8d554a36a4dd5118fb6b3d6bafe8bf89aa2d42988e69793ed777ba77b8876c6ec74f898a43cfce1f61bf4 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-s390x.tar.gz) | 15f6683e7f16caab7eebead2b7c15799460abbf035a43de0b75f96b0be19908f58add98a777a0cca916230d60cf6bfe3fee92b9dcff50274b1e37c243c157969 ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-amd64.tar.gz) | 86e631f95fe670b467ead2b88d34e0364eaa275935af433d27cc378d82dcaa22041ccce40f5fa9561b9656dadaa578dc018ad458a59b1690d35f86dca4776b5c -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-arm.tar.gz) | a8754ff58a0e902397056b8615ab49af07aca347ba7cc4a812c238e3812234862270f25106b6a94753b157bb153b8eae8b39a01ed67384774d798598c243583b -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-arm64.tar.gz) | 28d727d7d08e2c856c9b4a574ef2dbf9e37236a0555f7ec5258b4284fa0582fb94b06783aaf50bf661f7503d101fbd70808aba6de02a2f0af94db7d065d25947 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-ppc64le.tar.gz) | a1283449f1a0b155c11449275e9371add544d0bdd4609d6dc737ed5f7dd228e84e24ff249613a2a153691627368dd894ad64f4e6c0010eecc6efd2c13d4fb133 -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-s390x.tar.gz) | 5806028ba15a6a9c54a34f90117bc3181428dbb0e7ced30874c9f4a953ea5a0e9b2c73e6b1e2545e1b4e5253e9c7691588538b44cdfa666ce6865964b92d2fa8 -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-windows-amd64.tar.gz) | d5327e3b7916c78777b9b69ba0f3758c3a8645c67af80114a0ae52babd7af27bb504febbaf51b1bfe5bd2d74c8c5c573471e1cb449f2429453f4b1be9d5e682a +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-amd64.tar.gz) | ed58679561197110f366b9109f7afd62c227bfc271918ccf3eea203bb2ab6428eb5db4dd6c965f202a8a636f66da199470269b863815809b99d53d2fa47af2ea +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-arm.tar.gz) | 7e6c7f1957fcdecec8fef689c5019edbc0d0c11d22dafbfef0a07121d10d8f6273644f73511bd06a9a88b04d81a940bd6645ffb5711422af64af547a45c76273 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-arm64.tar.gz) | 
a3618f29967e7a1574917a67f0296e65780321eda484b99aa32bfd4dc9b35acdefce33da952ac52dfb509fbac5bf700cf177431fad2ab4adcab0544538939faa +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-ppc64le.tar.gz) | 326d3eb521b41bdf489912177f70b8cdd7cd828bb9b3d847ed3694eb27e457f24e0a88b8e51b726eee39800a3c5a40c1b30e3a8ec4a34d8041b3d8ef05d1b749 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-s390x.tar.gz) | 022d05ebaa66a0332c4fe18cdaf23d14c2c7e4d1f2af7f27baaf1eb042e6890dc3434b4ac8ba58c35d590717956f8c3458112685aff4938b94b18e263c3f4256 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-windows-amd64.tar.gz) | fa691ed93f07af6bc1cf57e20a30580d6c528f88e5fea3c14f39c1820969dc5a0eb476c5b87b288593d0c086c4dd93aff6165082393283c3f46c210f9bb66d61 -## Changelog since v1.20.0-beta.1 +## Changelog since v1.21.0-beta.0 ## Urgent Upgrade Notes ### (No, really, you MUST read this before you upgrade) - - A bug was fixed in kubelet where exec probe timeouts were not respected. Ensure that pods relying on this behavior are updated to correctly handle probe timeouts. - - This change in behavior may be unexpected for some clusters and can be disabled by turning off the ExecProbeTimeout feature gate. This gate will be locked and removed in future releases so that exec probe timeouts are always respected. ([#94115](https://github.com/kubernetes/kubernetes/pull/94115), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Testing] - - For CSI drivers, kubelet no longer creates the target_path for NodePublishVolume in accordance with the CSI spec. Kubelet also no longer checks if staging and target paths are mounts or corrupted. CSI drivers need to be idempotent and do any necessary mount verification. ([#88759](https://github.com/kubernetes/kubernetes/pull/88759), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] - - Kubeadm: - - The label applied to control-plane nodes "node-role.kubernetes.io/master" is now deprecated and will be removed in a future release after a GA deprecation period. - - Introduce a new label "node-role.kubernetes.io/control-plane" that will be applied in parallel to "node-role.kubernetes.io/master" until the removal of the "node-role.kubernetes.io/master" label. - - Make "kubeadm upgrade apply" add the "node-role.kubernetes.io/control-plane" label on existing nodes that only have the "node-role.kubernetes.io/master" label during upgrade. - - Please adapt your tooling built on top of kubeadm to use the "node-role.kubernetes.io/control-plane" label. - - - The taint applied to control-plane nodes "node-role.kubernetes.io/master:NoSchedule" is now deprecated and will be removed in a future release after a GA deprecation period. - - Apply toleration for a new, future taint "node-role.kubernetes.io/control-plane:NoSchedule" to the kubeadm CoreDNS / kube-dns managed manifests. Note that this taint is not yet applied to kubeadm control-plane nodes. - - Please adapt your workloads to tolerate the same future taint preemptively. 
- - For more details see: http://git.k8s.io/enhancements/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md ([#95382](https://github.com/kubernetes/kubernetes/pull/95382), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] - + - Kubeadm: during "init" an empty cgroupDriver value in the KubeletConfiguration is now always set to "systemd" unless the user is explicit about it. This requires existing machine setups to configure the container runtime to use the "systemd" driver. Documentation on this topic can be found here: https://kubernetes.io/docs/setup/production-environment/container-runtimes/. When upgrading existing clusters / nodes using "kubeadm upgrade" the old cgroupDriver value is preserved, but in 1.22 this change will also apply to "upgrade". For more information on migrating to the "systemd" driver or remaining on the "cgroupfs" driver see: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/. (A minimal KubeletConfiguration sketch appears just below.) ([#99471](https://github.com/kubernetes/kubernetes/pull/99471), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] + - Migrate `pkg/kubelet/(dockershim, network)` to structured logging + Exit code changed from 255 to 1 ([#98939](https://github.com/kubernetes/kubernetes/pull/98939), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Network and Node] + - Migrate `pkg/kubelet/certificate` to structured logging + Exit code changed from 255 to 1 ([#98993](https://github.com/kubernetes/kubernetes/pull/98993), [@SataQiu](https://github.com/SataQiu)) [SIG Auth and Node] + - Newly provisioned PVs by EBS plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels. They will use "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead. ([#99130](https://github.com/kubernetes/kubernetes/pull/99130), [@ayberk](https://github.com/ayberk)) [SIG Cloud Provider, Storage and Testing] + - Newly provisioned PVs by OpenStack Cinder plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels. They will use "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead. ([#99719](https://github.com/kubernetes/kubernetes/pull/99719), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider and Storage] + - OpenStack Cinder CSI migration is on by default; the Cinder CSI driver must be installed on clusters on OpenStack for Cinder volumes to work. ([#98538](https://github.com/kubernetes/kubernetes/pull/98538), [@dims](https://github.com/dims)) [SIG Storage] + - Package pkg/kubelet/server migrated to structured logging + Exit code changed from 255 to 1 ([#99838](https://github.com/kubernetes/kubernetes/pull/99838), [@adisky](https://github.com/adisky)) [SIG Node] + - Pkg/kubelet/kuberuntime/kuberuntime_manager.go migrated to structured logging + Exit code changed from 255 to 1 ([#99841](https://github.com/kubernetes/kubernetes/pull/99841), [@adisky](https://github.com/adisky)) [SIG Instrumentation and Node] + ## Changes by Kind ### Deprecation -- Docker support in the kubelet is now deprecated and will be removed in a future release. The kubelet uses a module called "dockershim" which implements CRI support for Docker and it has seen maintenance issues in the Kubernetes community. We encourage you to evaluate moving to a container runtime that is a full-fledged implementation of CRI (v1alpha1 or v1 compliant) as they become available. ([#94624](https://github.com/kubernetes/kubernetes/pull/94624), [@dims](https://github.com/dims)) [SIG Node]
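A minimal sketch of the kubeadm change referenced in the first item of the upgrade notes above: an explicit cgroupDriver in the KubeletConfiguration document passed to `kubeadm init` (the file name is hypothetical; all other fields are omitted):

```yaml
# kubeadm-config.yaml (hypothetical file name), used as:
#   kubeadm init --config kubeadm-config.yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# If this field is left empty, kubeadm now defaults it to "systemd";
# set it explicitly to stay on the "cgroupfs" driver instead.
cgroupDriver: systemd
```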
-- Kubectl: deprecate --delete-local-data ([#95076](https://github.com/kubernetes/kubernetes/pull/95076), [@dougsland](https://github.com/dougsland)) [SIG CLI, Cloud Provider and Scalability] +- Kubeadm: the deprecated kube-dns is no longer supported as an option. If "ClusterConfiguration.dns.type" is set to "kube-dns" kubeadm will now throw an error. ([#99646](https://github.com/kubernetes/kubernetes/pull/99646), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Remove deprecated --generator --replicas --service-generator --service-overrides --schedule from kubectl run + Deprecate --serviceaccount --hostport --requests --limits in kubectl run ([#99732](https://github.com/kubernetes/kubernetes/pull/99732), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] +- `audit.k8s.io/v1beta1` and `audit.k8s.io/v1alpha1` audit policy configuration and audit events are deprecated in favor of `audit.k8s.io/v1`, available since v1.13. kube-apiserver invocations that specify alpha or beta policy configurations with `--audit-policy-file`, or explicitly request alpha or beta audit events with `--audit-log-version` / `--audit-webhook-version` must update to use `audit.k8s.io/v1` and accept `audit.k8s.io/v1` events prior to v1.24 (a minimal `audit.k8s.io/v1` policy appears below). ([#98858](https://github.com/kubernetes/kubernetes/pull/98858), [@carlory](https://github.com/carlory)) [SIG Auth] +- `diskformat` storage class parameter for the in-tree vSphere volume plugin is deprecated as of the v1.21 release. Please consider updating the StorageClass and removing the `diskformat` parameter. vSphere CSI Driver does not support the `diskformat` StorageClass parameter. + + vSphere releases older than 67u3 are deprecated as of v1.21. Please consider upgrading vSphere to 67u3 or above. vSphere CSI Driver requires minimum vSphere 67u3. + + VM hardware versions below 15 are deprecated as of v1.21. Please consider upgrading the Node VM hardware version to 15 or above. vSphere CSI Driver recommends the Node VM's hardware version be set to at least vmx-15. + + Multi vCenter support is deprecated as of v1.21. If you have a Kubernetes cluster spanning across multiple vCenter servers, please consider moving all k8s nodes to a single vCenter Server. vSphere CSI Driver does not support Kubernetes deployments spanning multiple vCenter servers. + + Support for these deprecations will be available until Kubernetes v1.24. ([#98546](https://github.com/kubernetes/kubernetes/pull/98546), [@divyenpatel](https://github.com/divyenpatel)) [SIG Cloud Provider and Storage] ### API Change -- API priority and fairness graduated to beta - 1.19 servers with APF turned on should not be run in a multi-server cluster with 1.20+ servers. ([#96527](https://github.com/kubernetes/kubernetes/pull/96527), [@adtac](https://github.com/adtac)) [SIG API Machinery and Testing]
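For the audit deprecation above, migrating is mostly a matter of bumping the apiVersion in the policy file; a minimal `audit.k8s.io/v1` policy might look like this (the single Metadata-level rule is illustrative):

```yaml
apiVersion: audit.k8s.io/v1   # previously audit.k8s.io/v1beta1 or v1alpha1
kind: Policy
rules:
  # Log request metadata (user, verb, resource) for all requests.
  - level: Metadata
```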
-- Add LoadBalancerIPMode feature gate ([#92312](https://github.com/kubernetes/kubernetes/pull/92312), [@Sh4d1](https://github.com/Sh4d1)) [SIG Apps, CLI, Cloud Provider and Network] -- Add WindowsContainerResources and Annotations to CRI-API UpdateContainerResourcesRequest ([#95741](https://github.com/kubernetes/kubernetes/pull/95741), [@katiewasnothere](https://github.com/katiewasnothere)) [SIG Node] -- Add a 'serving' and `terminating` condition to the EndpointSlice API. - - `serving` tracks the readiness of endpoints regardless of their terminating state. This is distinct from `ready` since `ready` is only true when pods are not terminating. - `terminating` is true when an endpoint is terminating. For pods this is any endpoint with a deletion timestamp. ([#92968](https://github.com/kubernetes/kubernetes/pull/92968), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps and Network] -- Add support for hugepages to downward API ([#86102](https://github.com/kubernetes/kubernetes/pull/86102), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- Adds kubelet alpha feature, `GracefulNodeShutdown` which makes kubelet aware of node system shutdowns and result in graceful termination of pods during a system shutdown. ([#96129](https://github.com/kubernetes/kubernetes/pull/96129), [@bobbypage](https://github.com/bobbypage)) [SIG Node] -- AppProtocol is now GA for Endpoints and Services. The ServiceAppProtocol feature gate will be deprecated in 1.21. ([#96327](https://github.com/kubernetes/kubernetes/pull/96327), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- Automatic allocation of NodePorts for services with type LoadBalancer can now be disabled by setting the (new) parameter - Service.spec.allocateLoadBalancerNodePorts=false. The default is to allocate NodePorts for services with type LoadBalancer which is the existing behavior. ([#92744](https://github.com/kubernetes/kubernetes/pull/92744), [@uablrek](https://github.com/uablrek)) [SIG Apps and Network] -- Document that ServiceTopology feature is required to use `service.spec.topologyKeys`. ([#96528](https://github.com/kubernetes/kubernetes/pull/96528), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] -- EndpointSlice has a new NodeName field guarded by the EndpointSliceNodeName feature gate. - - EndpointSlice topology field will be deprecated in an upcoming release. - - EndpointSlice "IP" address type is formally removed after being deprecated in Kubernetes 1.17. - - The discovery.k8s.io/v1alpha1 API is deprecated and will be removed in Kubernetes 1.21. ([#96440](https://github.com/kubernetes/kubernetes/pull/96440), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps and Network] -- Fewer candidates are enumerated for preemption to improve performance in large clusters ([#94814](https://github.com/kubernetes/kubernetes/pull/94814), [@adtac](https://github.com/adtac)) [SIG Scheduling] -- If BoundServiceAccountTokenVolume is enabled, cluster admins can use metric `serviceaccount_stale_tokens_total` to monitor workloads that are depending on the extended tokens.
If there are no such workloads, turn off extended tokens by starting `kube-apiserver` with flag `--service-account-extend-token-expiration=false` ([#96273](https://github.com/kubernetes/kubernetes/pull/96273), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Auth] -- Introduce alpha support for exec-based container registry credential provider plugins in the kubelet. ([#94196](https://github.com/kubernetes/kubernetes/pull/94196), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Release] -- Kube-apiserver now deletes expired kube-apiserver Lease objects: - - The feature is under feature gate `APIServerIdentity`. - - A flag is added to kube-apiserver: `identity-lease-garbage-collection-check-period-seconds` ([#95895](https://github.com/kubernetes/kubernetes/pull/95895), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Apps, Auth and Testing] -- Move configurable fsgroup change policy for pods to beta ([#96376](https://github.com/kubernetes/kubernetes/pull/96376), [@gnufied](https://github.com/gnufied)) [SIG Apps and Storage] -- New flag is introduced, i.e. --topology-manager-scope=container|pod. - The default value is the "container" scope. ([#92967](https://github.com/kubernetes/kubernetes/pull/92967), [@cezaryzukowski](https://github.com/cezaryzukowski)) [SIG Instrumentation, Node and Testing] -- NodeAffinity plugin can be configured with AddedAffinity. ([#96202](https://github.com/kubernetes/kubernetes/pull/96202), [@alculquicondor](https://github.com/alculquicondor)) [SIG Node, Scheduling and Testing] -- Promote RuntimeClass feature to GA. - Promote node.k8s.io API groups from v1beta1 to v1. ([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing] -- Reminder: The labels "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" are deprecated in favor of "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" respectively. All users of the "failure-domain.beta..." labels should switch to the "topology..." equivalents. ([#96033](https://github.com/kubernetes/kubernetes/pull/96033), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, CLI, Cloud Provider, Network, Node, Scheduling, Storage and Testing] -- The usage of mixed protocol values in the same LoadBalancer Service is possible if the new feature gate MixedProtocolLBSVC is enabled. - "action required" - The feature gate is disabled by default. The user has to enable it for the API Server. ([#94028](https://github.com/kubernetes/kubernetes/pull/94028), [@janosi](https://github.com/janosi)) [SIG API Machinery and Apps] -- This PR will introduce a feature gate CSIServiceAccountToken with two additional fields in `CSIDriverSpec`. ([#93130](https://github.com/kubernetes/kubernetes/pull/93130), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing] -- Users can try the cronjob controller v2 using the feature gate. This will be the default controller in future releases. 
([#93370](https://github.com/kubernetes/kubernetes/pull/93370), [@alaypatel07](https://github.com/alaypatel07)) [SIG API Machinery, Apps, Auth and Testing] -- VolumeSnapshotDataSource moves to GA in 1.20 release ([#95282](https://github.com/kubernetes/kubernetes/pull/95282), [@xing-yang](https://github.com/xing-yang)) [SIG Apps] +- 1. PodAffinityTerm includes a namespaceSelector field to allow selecting eligible namespaces based on their labels. + 2. A new CrossNamespacePodAffinity quota scope API that allows restricting which namespaces are allowed to use PodAffinityTerm with cross-namespace references via the namespaceSelector or namespaces fields. ([#98582](https://github.com/kubernetes/kubernetes/pull/98582), [@ahg-g](https://github.com/ahg-g)) [SIG API Machinery, Apps, Auth and Testing] +- Add a default metadata name label for selecting any namespace by its name. ([#96968](https://github.com/kubernetes/kubernetes/pull/96968), [@jayunit100](https://github.com/jayunit100)) [SIG API Machinery, Apps, Cloud Provider, Storage and Testing] +- Added `.spec.completionMode` field to Job, with accepted values `NonIndexed` (default) and `Indexed` ([#98441](https://github.com/kubernetes/kubernetes/pull/98441), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI] +- Clarified NetworkPolicy policyTypes documentation ([#97216](https://github.com/kubernetes/kubernetes/pull/97216), [@joejulian](https://github.com/joejulian)) [SIG Network] +- DaemonSets accept a MaxSurge integer or percent on their rolling update strategy that will launch the updated pod on nodes and wait for those pods to go ready before marking the old out-of-date pods as deleted. This allows workloads to avoid downtime during upgrades when deployed using DaemonSets. This feature is alpha and is behind the DaemonSetUpdateSurge feature gate. ([#96441](https://github.com/kubernetes/kubernetes/pull/96441), [@smarterclayton](https://github.com/smarterclayton)) [SIG Apps and Testing] +- EndpointSlice API is now GA. The EndpointSlice topology field has been removed from the GA API and will be replaced by a new per Endpoint Zone field. If the topology field was previously used, it will be converted into an annotation in the v1 Resource. The discovery.k8s.io/v1alpha1 API is removed. ([#99662](https://github.com/kubernetes/kubernetes/pull/99662), [@swetharepakula](https://github.com/swetharepakula)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network and Testing] +- EndpointSlice Controllers are now GA. The EndpointSlice Controller will not populate the `deprecatedTopology` field and will only provide topology information through the `zone` and `nodeName` fields. ([#99870](https://github.com/kubernetes/kubernetes/pull/99870), [@swetharepakula](https://github.com/swetharepakula)) [SIG API Machinery, Apps, Auth, Network and Testing] +- IngressClass resource can now reference a resource in a specific namespace + for implementation-specific configuration (previously only Cluster-level resources were allowed). + This feature can be enabled using the IngressClassNamespacedParams feature gate. ([#99275](https://github.com/kubernetes/kubernetes/pull/99275), [@hbagdi](https://github.com/hbagdi)) [SIG API Machinery, CLI and Network]
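A hypothetical sketch of the namespaced IngressClass parameters described just above (the parameter group, kind, and names are invented, and the `IngressClassNamespacedParams` feature gate must be enabled):

```yaml
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  name: example-class            # hypothetical name
spec:
  controller: example.com/ingress-controller
  parameters:
    apiGroup: config.example.com # hypothetical CRD group
    kind: IngressParameters      # hypothetical CRD kind
    name: tuning
    scope: Namespace             # new: reference a namespaced resource
    namespace: ingress-config    # namespace holding the parameters object
```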
+- Introduce conditions for PodDisruptionBudget ([#98127](https://github.com/kubernetes/kubernetes/pull/98127), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation] +- Jobs API has a new .spec.suspend field that can be used to suspend and resume Jobs (see the Job example below) ([#98727](https://github.com/kubernetes/kubernetes/pull/98727), [@adtac](https://github.com/adtac)) [SIG API Machinery, Apps, Node, Scheduling and Testing] +- Kubelet Graceful Node Shutdown feature is now beta. ([#99735](https://github.com/kubernetes/kubernetes/pull/99735), [@bobbypage](https://github.com/bobbypage)) [SIG Node] +- Limit the request value of hugepages to an integer multiple of the page size. ([#98515](https://github.com/kubernetes/kubernetes/pull/98515), [@lala123912](https://github.com/lala123912)) [SIG Apps] +- One new field, `internalTrafficPolicy`, is added to Service. + It specifies if the cluster internal traffic should be routed to all endpoints or node-local endpoints only. + "Cluster" routes internal traffic to a Service to all endpoints. + "Local" routes traffic to node-local endpoints only, and traffic is dropped if no node-local endpoints are ready. + The default value is "Cluster". ([#96600](https://github.com/kubernetes/kubernetes/pull/96600), [@maplain](https://github.com/maplain)) [SIG API Machinery, Apps and Network] +- PodSecurityPolicy only stores "generic" as allowed volume type if the GenericEphemeralVolume feature gate is enabled ([#98918](https://github.com/kubernetes/kubernetes/pull/98918), [@pohly](https://github.com/pohly)) [SIG Auth and Security] +- Promote CronJobs to batch/v1 ([#99423](https://github.com/kubernetes/kubernetes/pull/99423), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing] +- Remove support for building Kubernetes with bazel. ([#99561](https://github.com/kubernetes/kubernetes/pull/99561), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery, Apps, Architecture, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Release, Scalability, Scheduling, Storage, Testing and Windows] +- Setting loadBalancerClass in a Service of type LoadBalancer is now supported. + Users who want to use a custom load balancer can specify loadBalancerClass to select it (see the Service example below). ([#98277](https://github.com/kubernetes/kubernetes/pull/98277), [@XudongLiuHarold](https://github.com/XudongLiuHarold)) [SIG API Machinery, Apps, Cloud Provider and Network] +- Storage capacity tracking (= the CSIStorageCapacity feature) is beta, storage.k8s.io/v1alpha1/VolumeAttachment and storage.k8s.io/v1alpha1/CSIStorageCapacity objects are deprecated ([#99641](https://github.com/kubernetes/kubernetes/pull/99641), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, Scheduling, Storage and Testing] +- Support for Indexed Job: a Job that is considered completed when Pods associated to indexes from 0 to (.spec.completions-1) have succeeded. ([#98812](https://github.com/kubernetes/kubernetes/pull/98812), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI]
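The Job entries above (`.spec.completionMode` and `.spec.suspend`) combine naturally; below is a minimal sketch of an Indexed Job that starts suspended. The name, image, and command are hypothetical, and both fields sit behind feature gates in this release. The controller exposes each Pod's index via the `batch.kubernetes.io/job-completion-index` annotation and the `JOB_COMPLETION_INDEX` environment variable:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: indexed-demo
spec:
  completions: 5
  parallelism: 2
  completionMode: Indexed   # new: each Pod gets a unique completion index
  suspend: true             # new: create the Job paused; set to false to start it
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: worker
        image: busybox
        command: ["sh", "-c", "echo processing index $JOB_COMPLETION_INDEX"]
```

Similarly, a hedged sketch of the new Service fields from the internalTrafficPolicy and loadBalancerClass entries above; the selector, ports, and the `example.com/custom-lb` class name are hypothetical, and both fields are feature-gated (ServiceInternalTrafficPolicy and ServiceLoadBalancerClass) in this release:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: demo-lb
spec:
  type: LoadBalancer
  selector:
    app: demo
  ports:
  - port: 80
    targetPort: 8080
  loadBalancerClass: example.com/custom-lb   # hand this Service off to a custom LB implementation
  internalTrafficPolicy: Local               # route in-cluster traffic to node-local endpoints only
```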
+- The apiserver now resets managedFields that got corrupted by a mutating admission controller. ([#98074](https://github.com/kubernetes/kubernetes/pull/98074), [@kwiesmueller](https://github.com/kwiesmueller)) [SIG API Machinery and Testing] +- `controller.kubernetes.io/pod-deletion-cost` annotation can be set to offer a hint on the cost of deleting a pod compared to other pods belonging to the same ReplicaSet. Pods with lower deletion cost are deleted first. This is an alpha feature. ([#99163](https://github.com/kubernetes/kubernetes/pull/99163), [@ahg-g](https://github.com/ahg-g)) [SIG Apps] ### Feature -- **Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**: ([#95896](https://github.com/kubernetes/kubernetes/pull/95896), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Cluster Lifecycle] -- A new set of alpha metrics are reported by the Kubernetes scheduler under the `/metrics/resources` endpoint that allow administrators to easily see the resource consumption (requests and limits for all resources on the pods) and compare it to actual pod usage or node capacity. ([#94866](https://github.com/kubernetes/kubernetes/pull/94866), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Instrumentation, Node and Scheduling] -- Add --experimental-logging-sanitization flag enabling runtime protection from leaking sensitive data in logs ([#96370](https://github.com/kubernetes/kubernetes/pull/96370), [@serathius](https://github.com/serathius)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] -- Add a StorageVersionAPI feature gate that makes API server update storageversions before serving certain write requests. - This feature allows the storage migrator to manage storage migration for built-in resources. - Enabling internal.apiserver.k8s.io/v1alpha1 API and APIServerIdentity feature gate are required to use this feature. ([#93873](https://github.com/kubernetes/kubernetes/pull/93873), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Auth and Testing] -- Add a new `vSphere` metric: `cloudprovider_vsphere_vcenter_versions`. It's content show `vCenter` hostnames with the associated server version. ([#94526](https://github.com/kubernetes/kubernetes/pull/94526), [@Danil-Grigorev](https://github.com/Danil-Grigorev)) [SIG Cloud Provider and Instrumentation] -- Add feature to size memory backed volumes ([#94444](https://github.com/kubernetes/kubernetes/pull/94444), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Storage and Testing] -- Add node_authorizer_actions_duration_seconds metric that can be used to estimate load to node authorizer. ([#92466](https://github.com/kubernetes/kubernetes/pull/92466), [@mborsz](https://github.com/mborsz)) [SIG API Machinery, Auth and Instrumentation] -- Add pod_ based CPU and memory metrics to Kubelet's /metrics/resource endpoint ([#95839](https://github.com/kubernetes/kubernetes/pull/95839), [@egernst](https://github.com/egernst)) [SIG Instrumentation, Node and Testing] -- Adds a headless service on node-local-cache addon.
([#88412](https://github.com/kubernetes/kubernetes/pull/88412), [@stafot](https://github.com/stafot)) [SIG Cloud Provider and Network] -- CRDs: For structural schemas, non-nullable null map fields will now be dropped and defaulted if a default is available. null items in list will continue being preserved, and fail validation if not nullable. ([#95423](https://github.com/kubernetes/kubernetes/pull/95423), [@apelisse](https://github.com/apelisse)) [SIG API Machinery] -- E2e test for PodFsGroupChangePolicy ([#96247](https://github.com/kubernetes/kubernetes/pull/96247), [@saikat-royc](https://github.com/saikat-royc)) [SIG Storage and Testing] -- Gradudate the Pod Resources API to G.A - Introduces the pod_resources_endpoint_requests_total metric which tracks the total number of requests to the pod resources API ([#92165](https://github.com/kubernetes/kubernetes/pull/92165), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Instrumentation, Node and Testing] -- Introduce api-extensions category which will return: mutating admission configs, validating admission configs, CRDs and APIServices when used in kubectl get, for example. ([#95603](https://github.com/kubernetes/kubernetes/pull/95603), [@soltysh](https://github.com/soltysh)) [SIG API Machinery] -- Kube-apiserver now maintains a Lease object to identify itself: - - The feature is under feature gate `APIServerIdentity`. - - Two flags are added to kube-apiserver: `identity-lease-duration-seconds`, `identity-lease-renew-interval-seconds` ([#95533](https://github.com/kubernetes/kubernetes/pull/95533), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] -- Kube-apiserver: The timeout used when making health check calls to etcd can now be configured with `--etcd-healthcheck-timeout`. The default timeout is 2 seconds, matching the previous behavior. ([#93244](https://github.com/kubernetes/kubernetes/pull/93244), [@Sh4d1](https://github.com/Sh4d1)) [SIG API Machinery] -- Kubectl: Previously users cannot provide arguments to a external diff tool via KUBECTL_EXTERNAL_DIFF env. This release now allow users to specify args to KUBECTL_EXTERNAL_DIFF env. ([#95292](https://github.com/kubernetes/kubernetes/pull/95292), [@dougsland](https://github.com/dougsland)) [SIG CLI] -- Scheduler now ignores Pod update events if the resourceVersion of old and new Pods are identical. ([#96071](https://github.com/kubernetes/kubernetes/pull/96071), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] -- Support custom tags for cloud provider managed resources ([#96450](https://github.com/kubernetes/kubernetes/pull/96450), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Support customize load balancer health probe protocol and request path ([#96338](https://github.com/kubernetes/kubernetes/pull/96338), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Support multiple standard load balancers in one cluster ([#96111](https://github.com/kubernetes/kubernetes/pull/96111), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- The beta `RootCAConfigMap` feature gate is enabled by default and causes kube-controller-manager to publish a "kube-root-ca.crt" ConfigMap to every namespace. This ConfigMap contains a CA bundle used for verifying connections to the kube-apiserver. 
([#96197](https://github.com/kubernetes/kubernetes/pull/96197), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth and Testing] -- The kubelet_runtime_operations_duration_seconds metric got additional buckets of 60, 300, 600, 900 and 1200 seconds ([#96054](https://github.com/kubernetes/kubernetes/pull/96054), [@alvaroaleman](https://github.com/alvaroaleman)) [SIG Instrumentation and Node] -- There is a new pv_collector_total_pv_count metric that counts persistent volumes by the volume plugin name and volume mode. ([#95719](https://github.com/kubernetes/kubernetes/pull/95719), [@tsmetana](https://github.com/tsmetana)) [SIG Apps, Instrumentation, Storage and Testing] -- Volume snapshot e2e test to validate PVC and VolumeSnapshotContent finalizer ([#95863](https://github.com/kubernetes/kubernetes/pull/95863), [@RaunakShah](https://github.com/RaunakShah)) [SIG Cloud Provider, Storage and Testing] -- Warns user when executing kubectl apply/diff to resource currently being deleted. ([#95544](https://github.com/kubernetes/kubernetes/pull/95544), [@SaiHarshaK](https://github.com/SaiHarshaK)) [SIG CLI] -- `kubectl alpha debug` has graduated to beta and is now `kubectl debug`. ([#96138](https://github.com/kubernetes/kubernetes/pull/96138), [@verb](https://github.com/verb)) [SIG CLI and Testing] -- `kubectl debug` gains support for changing container images when copying a pod for debugging, similar to how `kubectl set image` works. See `kubectl help debug` for more information. ([#96058](https://github.com/kubernetes/kubernetes/pull/96058), [@verb](https://github.com/verb)) [SIG CLI] - -### Documentation - -- Updates docs and guidance on cloud provider InstancesV2 and Zones interface for external cloud providers: - - removes experimental warning for InstancesV2 - - document that implementation of InstancesV2 will disable calls to Zones - - deprecate Zones in favor of InstancesV2 ([#96397](https://github.com/kubernetes/kubernetes/pull/96397), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider] +- A client-go metric, rest_client_exec_plugin_call_total, has been added to track total calls to client-go credential plugins. ([#98892](https://github.com/kubernetes/kubernetes/pull/98892), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery, Auth, Cluster Lifecycle and Instrumentation] +- Add --use-protocol-buffers flag to kubectl top pods and nodes ([#96655](https://github.com/kubernetes/kubernetes/pull/96655), [@serathius](https://github.com/serathius)) [SIG CLI] +- Add support to generate client-side binaries for new darwin/arm64 platform ([#97743](https://github.com/kubernetes/kubernetes/pull/97743), [@dims](https://github.com/dims)) [SIG Release and Testing] +- Added `ephemeral_volume_controller_create[_failures]_total` counters to kube-controller-manager metrics ([#99115](https://github.com/kubernetes/kubernetes/pull/99115), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Cluster Lifecycle, Instrumentation and Storage] +- Adds alpha feature `VolumeCapacityPriority` which makes the scheduler prioritize nodes based on the best matching size of statically provisioned PVs across multiple topologies. 
([#96347](https://github.com/kubernetes/kubernetes/pull/96347), [@cofyc](https://github.com/cofyc)) [SIG Apps, Network, Scheduling, Storage and Testing] +- Adds two new metrics to cronjobs: a histogram to track the time difference between when a job is created and the expected time when it should be created, and a gauge for the missed schedules of a cronjob ([#99341](https://github.com/kubernetes/kubernetes/pull/99341), [@alaypatel07](https://github.com/alaypatel07)) [SIG Apps and Instrumentation] +- Alpha implementation of Kubectl Command Headers: SIG CLI KEP 859, enabled when the KUBECTL_COMMAND_HEADERS environment variable is set on the client command line. + - To enable: export KUBECTL_COMMAND_HEADERS=1; kubectl ... ([#98952](https://github.com/kubernetes/kubernetes/pull/98952), [@seans3](https://github.com/seans3)) [SIG API Machinery and CLI] +- Component owners can configure an allowlist of metric labels with the flag '--allow-metric-labels'. ([#99738](https://github.com/kubernetes/kubernetes/pull/99738), [@YoyinZyc](https://github.com/YoyinZyc)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] +- Disruption controller only sends one event per PodDisruptionBudget if scale can't be computed ([#98128](https://github.com/kubernetes/kubernetes/pull/98128), [@mortent](https://github.com/mortent)) [SIG Apps] +- EndpointSliceNodeName will always be enabled, so NodeName will always be available in the v1beta1 API. ([#99746](https://github.com/kubernetes/kubernetes/pull/99746), [@swetharepakula](https://github.com/swetharepakula)) [SIG Apps and Network] +- Graduate CRIContainerLogRotation feature gate to GA. ([#99651](https://github.com/kubernetes/kubernetes/pull/99651), [@umohnani8](https://github.com/umohnani8)) [SIG Node and Testing] +- Kube-proxy iptables: new metric sync_proxy_rules_iptables_total that exposes the number of rules programmed per table in each iteration ([#99653](https://github.com/kubernetes/kubernetes/pull/99653), [@aojea](https://github.com/aojea)) [SIG Instrumentation and Network] +- Kube-scheduler now logs plugin scoring summaries at --v=4 ([#99411](https://github.com/kubernetes/kubernetes/pull/99411), [@damemi](https://github.com/damemi)) [SIG Scheduling] +- Kubeadm: warn the user that IPv6 site-local addresses are deprecated ([#99574](https://github.com/kubernetes/kubernetes/pull/99574), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle and Network] +- Kubeadm: apply the "node.kubernetes.io/exclude-from-external-load-balancers" label on control plane nodes during "init", "join" and "upgrade" to preserve backwards compatibility with the legacy LB mode where nodes labeled as "master" were excluded. To opt out you can remove the label from a node. See #97543 and the linked KEP for more details. ([#98269](https://github.com/kubernetes/kubernetes/pull/98269), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: if the user has customized their image repository via the kubeadm configuration, pass the custom pause image repository and tag to the kubelet via --pod-infra-container-image not only for Docker but for all container runtimes. This flag tells the kubelet that it should not garbage collect the image. ([#99476](https://github.com/kubernetes/kubernetes/pull/99476), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
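As a rough illustration of the kubeadm entry above: a custom image repository is set in the kubeadm configuration, and kubeadm derives the pause image from it and passes it to the kubelet via --pod-infra-container-image for every container runtime. The registry host below is hypothetical:

```yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
# Hypothetical private registry; kubeadm derives the sandbox (pause) image
# from this repository and forwards it to the kubelet so the image is not
# garbage collected.
imageRepository: registry.example.internal/k8s
```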
+- Kubeadm: promote IPv6DualStack feature gate to Beta ([#99294](https://github.com/kubernetes/kubernetes/pull/99294), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubectl version changed to write a warning message to stderr if the client and server version difference exceeds the supported version skew of +/-1 minor version. ([#98250](https://github.com/kubernetes/kubernetes/pull/98250), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] +- Kubernetes is now built with Golang 1.16 ([#98572](https://github.com/kubernetes/kubernetes/pull/98572), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node, Release and Testing] +- Persistent Volumes formatted with the btrfs filesystem will now automatically resize when expanded. ([#99361](https://github.com/kubernetes/kubernetes/pull/99361), [@Novex](https://github.com/Novex)) [SIG Storage] +- Remove cAdvisor json metrics api collected by Kubelet ([#99236](https://github.com/kubernetes/kubernetes/pull/99236), [@pacoxu](https://github.com/pacoxu)) [SIG Node] +- Sysctls is now GA and the feature gate is locked to its default ([#99158](https://github.com/kubernetes/kubernetes/pull/99158), [@wgahnagl](https://github.com/wgahnagl)) [SIG Node] +- The NodeAffinity plugin implements the PreFilter extension, offering enhanced performance for Filter. ([#99213](https://github.com/kubernetes/kubernetes/pull/99213), [@AliceZhang2016](https://github.com/AliceZhang2016)) [SIG Scheduling] +- The endpointslice mirroring controller mirrors endpoints annotations and labels to the generated endpoint slices, and it also ensures that updates on any of these fields are mirrored. + The well-known annotation endpoints.kubernetes.io/last-change-trigger-time is skipped and not mirrored. ([#98116](https://github.com/kubernetes/kubernetes/pull/98116), [@aojea](https://github.com/aojea)) [SIG Apps, Network and Testing] +- Update the latest validated version of Docker to 20.10 ([#98977](https://github.com/kubernetes/kubernetes/pull/98977), [@neolit123](https://github.com/neolit123)) [SIG CLI, Cluster Lifecycle and Node] +- Upgrade node local dns to 1.17.0 for better IPv6 support ([#99749](https://github.com/kubernetes/kubernetes/pull/99749), [@pacoxu](https://github.com/pacoxu)) [SIG Cloud Provider and Network] +- Users may specify the `kubectl.kubernetes.io/default-exec-container` annotation in a Pod to preselect a container for kubectl commands. ([#99581](https://github.com/kubernetes/kubernetes/pull/99581), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG CLI] +- When downscaling ReplicaSets, ready and creation timestamps are compared in a logarithmic scale. ([#99212](https://github.com/kubernetes/kubernetes/pull/99212), [@damemi](https://github.com/damemi)) [SIG Apps and Testing] +- When the kubelet is watching a ConfigMap or Secret purely in the context of setting environment variables + for containers, it now holds that watch only for a defined duration before cancelling it. This change reduces the CPU + and memory usage of the kube-apiserver in large clusters.
([#99393](https://github.com/kubernetes/kubernetes/pull/99393), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery, Node and Testing] +- WindowsEndpointSliceProxying feature gate has graduated to beta and is enabled by default. This means kube-proxy will read from EndpointSlices instead of Endpoints on Windows by default. ([#99794](https://github.com/kubernetes/kubernetes/pull/99794), [@robscott](https://github.com/robscott)) [SIG Network] ### Bug or Regression -- Change plugin name in fsgroupapplymetrics of csi and flexvolume to distinguish different driver ([#95892](https://github.com/kubernetes/kubernetes/pull/95892), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation, Storage and Testing] -- Clear UDP conntrack entry on endpoint changes when using nodeport ([#71573](https://github.com/kubernetes/kubernetes/pull/71573), [@JacobTanenbaum](https://github.com/JacobTanenbaum)) [SIG Network] -- Exposes and sets a default timeout for the TokenReview client for DelegatingAuthenticationOptions ([#96217](https://github.com/kubernetes/kubernetes/pull/96217), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider] -- Fix CVE-2020-8555 for Quobyte client connections. ([#95206](https://github.com/kubernetes/kubernetes/pull/95206), [@misterikkit](https://github.com/misterikkit)) [SIG Storage] -- Fix IP fragmentation of UDP and TCP packets not supported issues on LoadBalancer rules ([#96464](https://github.com/kubernetes/kubernetes/pull/96464), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Fix a bug that DefaultPreemption plugin is disabled when using (legacy) scheduler policy. ([#96439](https://github.com/kubernetes/kubernetes/pull/96439), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing] -- Fix bug in JSON path parser where an error occurs when a range is empty ([#95933](https://github.com/kubernetes/kubernetes/pull/95933), [@brianpursley](https://github.com/brianpursley)) [SIG API Machinery] -- Fix client-go prometheus metrics to correctly present the API path accessed in some environments. ([#74363](https://github.com/kubernetes/kubernetes/pull/74363), [@aanm](https://github.com/aanm)) [SIG API Machinery] -- Fix memory leak in kube-apiserver when underlying time goes forth and back. ([#96266](https://github.com/kubernetes/kubernetes/pull/96266), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery] -- Fix paging issues when Azure API returns empty values with non-empty nextLink ([#96211](https://github.com/kubernetes/kubernetes/pull/96211), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fix pull image error from multiple ACRs using azure managed identity ([#96355](https://github.com/kubernetes/kubernetes/pull/96355), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix vSphere volumes that could be erroneously attached to wrong node ([#96224](https://github.com/kubernetes/kubernetes/pull/96224), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fixed a bug that prevents kubectl to validate CRDs with schema using x-kubernetes-preserve-unknown-fields on object fields. 
([#96369](https://github.com/kubernetes/kubernetes/pull/96369), [@gautierdelorme](https://github.com/gautierdelorme)) [SIG API Machinery and Testing] -- For vSphere Cloud Provider, If VM of worker node is deleted, the node will also be deleted by node controller ([#92608](https://github.com/kubernetes/kubernetes/pull/92608), [@lubronzhan](https://github.com/lubronzhan)) [SIG Cloud Provider] -- HTTP/2 connection health check is enabled by default in all Kubernetes clients. The feature should work out-of-the-box. If needed, users can tune the feature via the HTTP2_READ_IDLE_TIMEOUT_SECONDS and HTTP2_PING_TIMEOUT_SECONDS environment variables. The feature is disabled if HTTP2_READ_IDLE_TIMEOUT_SECONDS is set to 0. ([#95981](https://github.com/kubernetes/kubernetes/pull/95981), [@caesarxuchao](https://github.com/caesarxuchao)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] -- If the user specifies an invalid timeout in the request URL, the request will be aborted with an HTTP 400. - - If the user specifies a timeout in the request URL that exceeds the maximum request deadline allowed by the apiserver, the request will be aborted with an HTTP 400. ([#96061](https://github.com/kubernetes/kubernetes/pull/96061), [@tkashem](https://github.com/tkashem)) [SIG API Machinery, Network and Testing] -- Improve error messages related to nodePort endpoint changes conntrack entries cleanup. ([#96251](https://github.com/kubernetes/kubernetes/pull/96251), [@ravens](https://github.com/ravens)) [SIG Network] -- Print go stack traces at -v=4 and not -v=2 ([#94663](https://github.com/kubernetes/kubernetes/pull/94663), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- Remove ready file and its directory (which is created during volume SetUp) during emptyDir volume TearDown. ([#95770](https://github.com/kubernetes/kubernetes/pull/95770), [@jingxu97](https://github.com/jingxu97)) [SIG Storage] -- Resolves non-deterministic behavior of the garbage collection controller when ownerReferences with incorrect data are encountered. Events with a reason of `OwnerRefInvalidNamespace` are recorded when namespace mismatches between child and owner objects are detected. - - A namespaced object with an ownerReference referencing a uid of a namespaced kind which does not exist in the same namespace is now consistently treated as though that owner does not exist, and the child object is deleted. - - A cluster-scoped object with an ownerReference referencing a uid of a namespaced kind is now consistently treated as though that owner is not resolvable, and the child object is ignored by the garbage collector. 
([#92743](https://github.com/kubernetes/kubernetes/pull/92743), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Testing] -- Skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:162]: Driver azure-disk doesn't support snapshot type DynamicSnapshot -- skipping - skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:185]: Driver azure-disk doesn't support ntfs -- skipping ([#96144](https://github.com/kubernetes/kubernetes/pull/96144), [@qinpingli](https://github.com/qinpingli)) [SIG Storage and Testing] -- The AWS network load balancer attributes can now be specified during service creation ([#95247](https://github.com/kubernetes/kubernetes/pull/95247), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] -- The kube-apiserver will no longer serve APIs that should have been deleted in GA non-alpha levels. Alpha levels will continue to serve the removed APIs so that CI doesn't immediately break. ([#96525](https://github.com/kubernetes/kubernetes/pull/96525), [@deads2k](https://github.com/deads2k)) [SIG API Machinery] -- Update max azure data disk count map ([#96308](https://github.com/kubernetes/kubernetes/pull/96308), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Update the route table tag in the route reconcile loop ([#96545](https://github.com/kubernetes/kubernetes/pull/96545), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Volume binding: report UnschedulableAndUnresolvable status instead of an error when bound PVs not found ([#95541](https://github.com/kubernetes/kubernetes/pull/95541), [@cofyc](https://github.com/cofyc)) [SIG Apps, Scheduling and Storage] -- [kubectl] Fail when local source file doesn't exist ([#90333](https://github.com/kubernetes/kubernetes/pull/90333), [@bamarni](https://github.com/bamarni)) [SIG CLI] +- Creating a PVC with DataSource should fail for non-CSI plugins. ([#97086](https://github.com/kubernetes/kubernetes/pull/97086), [@xing-yang](https://github.com/xing-yang)) [SIG Apps and Storage] +- EndpointSlice controller is now less likely to emit FailedToUpdateEndpointSlices events. ([#99345](https://github.com/kubernetes/kubernetes/pull/99345), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- EndpointSliceMirroring controller is now less likely to emit FailedToUpdateEndpointSlices events. 
([#99756](https://github.com/kubernetes/kubernetes/pull/99756), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- Fix a bug where --ignore-errors did not take effect when multiple logs were printed and not followed ([#97686](https://github.com/kubernetes/kubernetes/pull/97686), [@wzshiming](https://github.com/wzshiming)) [SIG CLI] +- Fix bug that would let the Horizontal Pod Autoscaler scale down despite at least one metric being unavailable/invalid ([#99514](https://github.com/kubernetes/kubernetes/pull/99514), [@mikkeloscar](https://github.com/mikkeloscar)) [SIG Apps and Autoscaling] +- Fix cgroup handling for systemd with cgroup v2 ([#98365](https://github.com/kubernetes/kubernetes/pull/98365), [@odinuge](https://github.com/odinuge)) [SIG Node] +- Fix smb mount PermissionDenied issue on Windows ([#99550](https://github.com/kubernetes/kubernetes/pull/99550), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider, Storage and Windows] +- Fixed a bug that caused a smaller conntrack-max value to be used under the CPU static policy. (#99225, @xh4n3) ([#99613](https://github.com/kubernetes/kubernetes/pull/99613), [@xh4n3](https://github.com/xh4n3)) [SIG Network] +- Fixed bug that caused cAdvisor to incorrectly detect single-socket multi-NUMA topology. ([#99315](https://github.com/kubernetes/kubernetes/pull/99315), [@iwankgb](https://github.com/iwankgb)) [SIG Node] +- Fixes add-on manager leader election ([#98968](https://github.com/kubernetes/kubernetes/pull/98968), [@liggitt](https://github.com/liggitt)) [SIG Cloud Provider] +- Improved update time of pod statuses following new probe results. ([#98376](https://github.com/kubernetes/kubernetes/pull/98376), [@matthyx](https://github.com/matthyx)) [SIG Node and Testing] +- Kube-apiserver: an update of a pod with a generic ephemeral volume dropped that volume if the feature had been disabled since creating the pod with such a volume ([#99446](https://github.com/kubernetes/kubernetes/pull/99446), [@pohly](https://github.com/pohly)) [SIG Apps, Node and Storage] +- Kubeadm: skip validating pod subnet against node-cidr-mask when allocate-node-cidrs is set to false ([#98984](https://github.com/kubernetes/kubernetes/pull/98984), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- On single-stack configured (IPv4 or IPv6, but not both) clusters, Services which are both headless (no clusterIP) and selectorless (empty or undefined selector) will report `ipFamilyPolicy RequireDualStack` and will have entries in `ipFamilies[]` for both IPv4 and IPv6. This is a change from alpha, but does not have any impact on the manually-specified Endpoints and EndpointSlices for the Service. ([#99555](https://github.com/kubernetes/kubernetes/pull/99555), [@thockin](https://github.com/thockin)) [SIG Apps and Network] +- Resolves spurious `Failed to list *v1.Secret` or `Failed to list *v1.ConfigMap` messages in kubelet logs. ([#99538](https://github.com/kubernetes/kubernetes/pull/99538), [@liggitt](https://github.com/liggitt)) [SIG Auth and Node] +- Return the zero time (midnight on Jan. 1, 1970) instead of a negative number when reporting the startedAt and finishedAt of a not-yet-started or running Pod when using dockershim as a runtime.
([#99585](https://github.com/kubernetes/kubernetes/pull/99585), [@Iceber](https://github.com/Iceber)) [SIG Node] +- Stdin is now only passed to client-go exec credential plugins when it is detected to be an interactive terminal. Previously, it was passed to client-go exec plugins when **stdout** was detected to be an interactive terminal. ([#99654](https://github.com/kubernetes/kubernetes/pull/99654), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery and Auth] +- The maximum number of ports allowed in EndpointSlices has been increased from 100 to 20,000 ([#99795](https://github.com/kubernetes/kubernetes/pull/99795), [@robscott](https://github.com/robscott)) [SIG Network] +- Updates the commands + - kubectl kustomize {arg} + - kubectl apply -k {arg} + to use the same code as kustomize CLI v4.0.5 + - [v4.0.5]: https://github.com/kubernetes-sigs/kustomize/releases/tag/kustomize%2Fv4.0.5 ([#98946](https://github.com/kubernetes/kubernetes/pull/98946), [@monopole](https://github.com/monopole)) [SIG API Machinery, Architecture, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node and Storage] +- When a CNI plugin returns dual-stack pod IPs, kubelet will now try to respect the + "primary IP family" of the cluster by picking a primary pod IP of the same family + as the (primary) node IP, rather than assuming that the CNI plugin returned the IPs + in the order the administrator wanted (since some CNI plugins don't allow + configuring this). ([#97979](https://github.com/kubernetes/kubernetes/pull/97979), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] +- When using Containerd on Windows, the "C:\Windows\System32\drivers\etc\hosts" file will now be managed by kubelet. ([#83730](https://github.com/kubernetes/kubernetes/pull/83730), [@claudiubelu](https://github.com/claudiubelu)) [SIG Node and Windows] +- `VolumeBindingArgs` now allows `BindTimeoutSeconds` to be set to zero; a value of zero indicates no waiting for the volume binding check. ([#99835](https://github.com/kubernetes/kubernetes/pull/99835), [@chendave](https://github.com/chendave)) [SIG Scheduling and Storage] +- `kubectl exec` and `kubectl attach` now honor the `--quiet` flag which suppresses output from the local binary that could be confused by a script with the remote command output (all non-failure output is hidden). In addition, exec and attach now print inline the list of alternate containers when defaulting to the first spec.container. ([#99004](https://github.com/kubernetes/kubernetes/pull/99004), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI] ### Other (Cleanup or Flake) -- Handle slow cronjob lister in cronjob controller v2 and improve memory footprint. ([#96443](https://github.com/kubernetes/kubernetes/pull/96443), [@alaypatel07](https://github.com/alaypatel07)) [SIG Apps] -- --redirect-container-streaming is no longer functional. The flag will be removed in v1.22 ([#95935](https://github.com/kubernetes/kubernetes/pull/95935), [@tallclair](https://github.com/tallclair)) [SIG Node] -- A new metric `requestAbortsTotal` has been introduced that counts aborted requests for each `group`, `version`, `verb`, `resource`, `subresource` and `scope`.
([#95002](https://github.com/kubernetes/kubernetes/pull/95002), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery, Cloud Provider, Instrumentation and Scheduling] -- API priority and fairness metrics use snake_case in label names ([#96236](https://github.com/kubernetes/kubernetes/pull/96236), [@adtac](https://github.com/adtac)) [SIG API Machinery, Cluster Lifecycle, Instrumentation and Testing] -- Applies translations on all command descriptions ([#95439](https://github.com/kubernetes/kubernetes/pull/95439), [@HerrNaN](https://github.com/HerrNaN)) [SIG CLI] -- Changed: default "Accept-Encoding" header removed from HTTP probes. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes ([#96127](https://github.com/kubernetes/kubernetes/pull/96127), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node] -- Generators for services are removed from kubectl ([#95256](https://github.com/kubernetes/kubernetes/pull/95256), [@Git-Jiro](https://github.com/Git-Jiro)) [SIG CLI] -- Introduce kubectl-convert plugin. ([#96190](https://github.com/kubernetes/kubernetes/pull/96190), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] -- Kube-scheduler now logs processed component config at startup ([#96426](https://github.com/kubernetes/kubernetes/pull/96426), [@damemi](https://github.com/damemi)) [SIG Scheduling] -- NONE ([#96179](https://github.com/kubernetes/kubernetes/pull/96179), [@bbyrne5](https://github.com/bbyrne5)) [SIG Network] -- Users will now be able to configure all supported values for AWS NLB health check interval and thresholds for new resources. ([#96312](https://github.com/kubernetes/kubernetes/pull/96312), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] +- Apiserver_request_duration_seconds is promoted to stable status. ([#99925](https://github.com/kubernetes/kubernetes/pull/99925), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing] +- Apiserver_request_total is promoted to stable status and no longer has a content-type dimension, so any alerts/charts which presume the existence of this will fail. This is, however, unlikely to be the case since it was effectively an unbounded dimension in the first place. ([#99788](https://github.com/kubernetes/kubernetes/pull/99788), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing] +- EndpointSlice generation is now incremented when labels change.
([#99750](https://github.com/kubernetes/kubernetes/pull/99750), [@robscott](https://github.com/robscott)) [SIG Network] +- The AllowInsecureBackendProxy feature gate is promoted to GA ([#99658](https://github.com/kubernetes/kubernetes/pull/99658), [@deads2k](https://github.com/deads2k)) [SIG API Machinery] +- Migrate `pkg/kubelet/(eviction)` to structured logging ([#99032](https://github.com/kubernetes/kubernetes/pull/99032), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node] +- Migrate deployment controller log messages to structured logging ([#97507](https://github.com/kubernetes/kubernetes/pull/97507), [@aldudko](https://github.com/aldudko)) [SIG Apps] +- Migrate pkg/kubelet/cloudresource to structured logging ([#98999](https://github.com/kubernetes/kubernetes/pull/98999), [@sladyn98](https://github.com/sladyn98)) [SIG Node] +- Migrate pkg/kubelet/cri/remote logs to structured logging ([#98589](https://github.com/kubernetes/kubernetes/pull/98589), [@chenyw1990](https://github.com/chenyw1990)) [SIG Node] +- Migrate pkg/kubelet/kuberuntime/kuberuntime_container.go logs to structured logging ([#96973](https://github.com/kubernetes/kubernetes/pull/96973), [@chenyw1990](https://github.com/chenyw1990)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/status to structured logging ([#99836](https://github.com/kubernetes/kubernetes/pull/99836), [@navidshaikh](https://github.com/navidshaikh)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/token to structured logging ([#99264](https://github.com/kubernetes/kubernetes/pull/99264), [@palnabarun](https://github.com/palnabarun)) [SIG Auth, Instrumentation and Node] +- Migrate pkg/kubelet/util to structured logging ([#99823](https://github.com/kubernetes/kubernetes/pull/99823), [@navidshaikh](https://github.com/navidshaikh)) [SIG Instrumentation and Node] +- Migrate proxy/userspace/proxier.go logs to structured logging ([#97837](https://github.com/kubernetes/kubernetes/pull/97837), [@JornShen](https://github.com/JornShen)) [SIG Network] +- Migrate some kubelet/metrics log messages to structured logging ([#98627](https://github.com/kubernetes/kubernetes/pull/98627), [@jialaijun](https://github.com/jialaijun)) [SIG Instrumentation and Node] +- Process start time on Windows now uses current process information ([#97491](https://github.com/kubernetes/kubernetes/pull/97491), [@jsturtevant](https://github.com/jsturtevant)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Windows] + +### Uncategorized + +- Migrate pkg/kubelet/stats to structured logging ([#99607](https://github.com/kubernetes/kubernetes/pull/99607), [@krzysiekg](https://github.com/krzysiekg)) [SIG Node] +- The DownwardAPIHugePages feature is beta. Users may use the feature if all workers in their cluster are at least version 1.20. The feature will be enabled by default in all installations in 1.22. ([#99610](https://github.com/kubernetes/kubernetes/pull/99610), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node]
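A hedged sketch of the DownwardAPIHugePages entry above: with the feature gate enabled, hugepages requests and limits can be exposed through the downward API just like cpu and memory. The container name, image, command, and the 2Mi page size are assumptions:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: hugepages-demo
spec:
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "echo limit=$HUGEPAGES_LIMIT && sleep 3600"]
    resources:
      limits:
        hugepages-2Mi: 100Mi   # hugepages requests must equal limits
        memory: 100Mi
    env:
    - name: HUGEPAGES_LIMIT
      valueFrom:
        resourceFieldRef:
          containerName: app
          resource: limits.hugepages-2Mi   # newly allowed with DownwardAPIHugePages
```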
## Dependencies ### Added -- cloud.google.com/go/firestore: v1.1.0 -- github.com/armon/go-metrics: [f0300d1](https://github.com/armon/go-metrics/tree/f0300d1) -- github.com/armon/go-radix: [7fddfc3](https://github.com/armon/go-radix/tree/7fddfc3) -- github.com/bketelsen/crypt: [5cbc8cc](https://github.com/bketelsen/crypt/tree/5cbc8cc) -- github.com/hashicorp/consul/api: [v1.1.0](https://github.com/hashicorp/consul/api/tree/v1.1.0) -- github.com/hashicorp/consul/sdk: [v0.1.1](https://github.com/hashicorp/consul/sdk/tree/v0.1.1) -- github.com/hashicorp/errwrap: [v1.0.0](https://github.com/hashicorp/errwrap/tree/v1.0.0) -- github.com/hashicorp/go-cleanhttp: [v0.5.1](https://github.com/hashicorp/go-cleanhttp/tree/v0.5.1) -- github.com/hashicorp/go-immutable-radix: [v1.0.0](https://github.com/hashicorp/go-immutable-radix/tree/v1.0.0) -- github.com/hashicorp/go-msgpack: [v0.5.3](https://github.com/hashicorp/go-msgpack/tree/v0.5.3) -- github.com/hashicorp/go-multierror: [v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) -- github.com/hashicorp/go-rootcerts: [v1.0.0](https://github.com/hashicorp/go-rootcerts/tree/v1.0.0) -- github.com/hashicorp/go-sockaddr: [v1.0.0](https://github.com/hashicorp/go-sockaddr/tree/v1.0.0) -- github.com/hashicorp/go-uuid: [v1.0.1](https://github.com/hashicorp/go-uuid/tree/v1.0.1) -- github.com/hashicorp/go.net: [v0.0.1](https://github.com/hashicorp/go.net/tree/v0.0.1) -- github.com/hashicorp/logutils: [v1.0.0](https://github.com/hashicorp/logutils/tree/v1.0.0) -- github.com/hashicorp/mdns: [v1.0.0](https://github.com/hashicorp/mdns/tree/v1.0.0) -- github.com/hashicorp/memberlist: [v0.1.3](https://github.com/hashicorp/memberlist/tree/v0.1.3) -- github.com/hashicorp/serf: [v0.8.2](https://github.com/hashicorp/serf/tree/v0.8.2) -- github.com/mitchellh/cli: [v1.0.0](https://github.com/mitchellh/cli/tree/v1.0.0) -- github.com/mitchellh/go-testing-interface: [v1.0.0](https://github.com/mitchellh/go-testing-interface/tree/v1.0.0) -- github.com/mitchellh/gox: [v0.4.0](https://github.com/mitchellh/gox/tree/v0.4.0) -- github.com/mitchellh/iochan: [v1.0.0](https://github.com/mitchellh/iochan/tree/v1.0.0) -- github.com/pascaldekloe/goe: [57f6aae](https://github.com/pascaldekloe/goe/tree/57f6aae) -- github.com/posener/complete: [v1.1.1](https://github.com/posener/complete/tree/v1.1.1) -- github.com/ryanuber/columnize: [9b3edd6](https://github.com/ryanuber/columnize/tree/9b3edd6) -- github.com/sean-/seed: [e2103e2](https://github.com/sean-/seed/tree/e2103e2) -- github.com/subosito/gotenv: [v1.2.0](https://github.com/subosito/gotenv/tree/v1.2.0) -- github.com/willf/bitset: [d5bec33](https://github.com/willf/bitset/tree/d5bec33) -- gopkg.in/ini.v1: v1.51.0 -- gopkg.in/yaml.v3: 9f266ea -- rsc.io/quote/v3: v3.1.0 -- rsc.io/sampler: v1.3.0 +- github.com/go-errors/errors: [v1.0.1](https://github.com/go-errors/errors/tree/v1.0.1) +- github.com/gobuffalo/here:
[v0.6.0](https://github.com/gobuffalo/here/tree/v0.6.0) +- github.com/google/shlex: [e7afc7f](https://github.com/google/shlex/tree/e7afc7f) +- github.com/markbates/pkger: [v0.17.1](https://github.com/markbates/pkger/tree/v0.17.1) +- github.com/monochromegane/go-gitignore: [205db1a](https://github.com/monochromegane/go-gitignore/tree/205db1a) +- github.com/niemeyer/pretty: [a10e7ca](https://github.com/niemeyer/pretty/tree/a10e7ca) +- github.com/xlab/treeprint: [a009c39](https://github.com/xlab/treeprint/tree/a009c39) +- go.starlark.net: 8dd3e2e +- golang.org/x/term: 6a3ed07 +- sigs.k8s.io/kustomize/api: v0.8.5 +- sigs.k8s.io/kustomize/cmd/config: v0.9.7 +- sigs.k8s.io/kustomize/kustomize/v4: v4.0.5 +- sigs.k8s.io/kustomize/kyaml: v0.10.15 ### Changed -- cloud.google.com/go/bigquery: v1.0.1 → v1.4.0 -- cloud.google.com/go/datastore: v1.0.0 → v1.1.0 -- cloud.google.com/go/pubsub: v1.0.1 → v1.2.0 -- cloud.google.com/go/storage: v1.0.0 → v1.6.0 -- cloud.google.com/go: v0.51.0 → v0.54.0 -- github.com/Microsoft/go-winio: [fc70bd9 → v0.4.15](https://github.com/Microsoft/go-winio/compare/fc70bd9...v0.4.15) -- github.com/aws/aws-sdk-go: [v1.35.5 → v1.35.24](https://github.com/aws/aws-sdk-go/compare/v1.35.5...v1.35.24) -- github.com/blang/semver: [v3.5.0+incompatible → v3.5.1+incompatible](https://github.com/blang/semver/compare/v3.5.0...v3.5.1) -- github.com/checkpoint-restore/go-criu/v4: [v4.0.2 → v4.1.0](https://github.com/checkpoint-restore/go-criu/v4/compare/v4.0.2...v4.1.0) -- github.com/containerd/containerd: [v1.3.3 → v1.4.1](https://github.com/containerd/containerd/compare/v1.3.3...v1.4.1) -- github.com/containerd/ttrpc: [v1.0.0 → v1.0.2](https://github.com/containerd/ttrpc/compare/v1.0.0...v1.0.2) -- github.com/containerd/typeurl: [v1.0.0 → v1.0.1](https://github.com/containerd/typeurl/compare/v1.0.0...v1.0.1) -- github.com/coreos/etcd: [v3.3.10+incompatible → v3.3.13+incompatible](https://github.com/coreos/etcd/compare/v3.3.10...v3.3.13) -- github.com/docker/docker: [aa6a989 → bd33bbf](https://github.com/docker/docker/compare/aa6a989...bd33bbf) -- github.com/go-gl/glfw/v3.3/glfw: [12ad95a → 6f7a984](https://github.com/go-gl/glfw/v3.3/glfw/compare/12ad95a...6f7a984) -- github.com/golang/groupcache: [215e871 → 8c9f03a](https://github.com/golang/groupcache/compare/215e871...8c9f03a) -- github.com/golang/mock: [v1.3.1 → v1.4.1](https://github.com/golang/mock/compare/v1.3.1...v1.4.1) -- github.com/golang/protobuf: [v1.4.2 → v1.4.3](https://github.com/golang/protobuf/compare/v1.4.2...v1.4.3) -- github.com/google/cadvisor: [v0.37.0 → v0.38.4](https://github.com/google/cadvisor/compare/v0.37.0...v0.38.4) -- github.com/google/go-cmp: [v0.4.0 → v0.5.2](https://github.com/google/go-cmp/compare/v0.4.0...v0.5.2) -- github.com/google/pprof: [d4f498a → 1ebb73c](https://github.com/google/pprof/compare/d4f498a...1ebb73c) -- github.com/google/uuid: [v1.1.1 → v1.1.2](https://github.com/google/uuid/compare/v1.1.1...v1.1.2) -- github.com/gorilla/mux: [v1.7.3 → v1.8.0](https://github.com/gorilla/mux/compare/v1.7.3...v1.8.0) -- github.com/gorilla/websocket: [v1.4.0 → 
v1.4.2](https://github.com/gorilla/websocket/compare/v1.4.0...v1.4.2) -- github.com/karrick/godirwalk: [v1.7.5 → v1.16.1](https://github.com/karrick/godirwalk/compare/v1.7.5...v1.16.1) -- github.com/opencontainers/runc: [819fcc6 → v1.0.0-rc92](https://github.com/opencontainers/runc/compare/819fcc6...v1.0.0-rc92) -- github.com/opencontainers/runtime-spec: [237cc4f → 4d89ac9](https://github.com/opencontainers/runtime-spec/compare/237cc4f...4d89ac9) -- github.com/opencontainers/selinux: [v1.5.2 → v1.6.0](https://github.com/opencontainers/selinux/compare/v1.5.2...v1.6.0) -- github.com/prometheus/procfs: [v0.1.3 → v0.2.0](https://github.com/prometheus/procfs/compare/v0.1.3...v0.2.0) -- github.com/quobyte/api: [v0.1.2 → v0.1.8](https://github.com/quobyte/api/compare/v0.1.2...v0.1.8) -- github.com/spf13/cobra: [v1.0.0 → v1.1.1](https://github.com/spf13/cobra/compare/v1.0.0...v1.1.1) -- github.com/spf13/viper: [v1.4.0 → v1.7.0](https://github.com/spf13/viper/compare/v1.4.0...v1.7.0) -- github.com/stretchr/testify: [v1.4.0 → v1.6.1](https://github.com/stretchr/testify/compare/v1.4.0...v1.6.1) -- github.com/vishvananda/netns: [52d707b → db3c7e5](https://github.com/vishvananda/netns/compare/52d707b...db3c7e5) -- go.opencensus.io: v0.22.2 → v0.22.3 -- golang.org/x/exp: da58074 → 6cc2880 -- golang.org/x/lint: fdd1cda → 738671d -- golang.org/x/net: ab34263 → 69a7880 -- golang.org/x/oauth2: 858c2ad → bf48bf1 -- golang.org/x/sys: ed371f2 → 5cba982 -- golang.org/x/text: v0.3.3 → v0.3.4 -- golang.org/x/time: 555d28b → 3af7569 -- golang.org/x/xerrors: 9bdfabe → 5ec99f8 -- google.golang.org/api: v0.15.1 → v0.20.0 -- google.golang.org/genproto: cb27e3a → 8816d57 -- google.golang.org/grpc: v1.27.0 → v1.27.1 -- google.golang.org/protobuf: v1.24.0 → v1.25.0 -- honnef.co/go/tools: v0.0.1-2019.2.3 → v0.0.1-2020.1.3 -- k8s.io/gengo: 8167cfd → 83324d8 -- k8s.io/klog/v2: v2.2.0 → v2.4.0 -- k8s.io/kube-openapi: 8b50664 → d219536 -- k8s.io/utils: d5654de → 67b214c -- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.12 → v0.0.14 -- sigs.k8s.io/structured-merge-diff/v4: b3cf1e8 → v4.0.2 +- dmitri.shuralyov.com/gpu/mtl: 666a987 → 28db891 +- github.com/creack/pty: [v1.1.7 → v1.1.9](https://github.com/creack/pty/compare/v1.1.7...v1.1.9) +- github.com/go-openapi/spec: [v0.19.3 → v0.19.5](https://github.com/go-openapi/spec/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/strfmt: [v0.19.3 → v0.19.5](https://github.com/go-openapi/strfmt/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/validate: [v0.19.5 → v0.19.8](https://github.com/go-openapi/validate/compare/v0.19.5...v0.19.8) +- github.com/google/cadvisor: [v0.38.7 → v0.38.8](https://github.com/google/cadvisor/compare/v0.38.7...v0.38.8) +- github.com/kr/text: [v0.1.0 → v0.2.0](https://github.com/kr/text/compare/v0.1.0...v0.2.0) +- github.com/mattn/go-runewidth: [v0.0.2 → v0.0.7](https://github.com/mattn/go-runewidth/compare/v0.0.2...v0.0.7) +- github.com/olekukonko/tablewriter: [a0225b3 → v0.0.4](https://github.com/olekukonko/tablewriter/compare/a0225b3...v0.0.4) +- github.com/sergi/go-diff: [v1.0.0 → v1.1.0](https://github.com/sergi/go-diff/compare/v1.0.0...v1.1.0) +- golang.org/x/crypto: 7f63de1 → 5ea612d +- golang.org/x/exp: 6cc2880 → 85be41e +- 
golang.org/x/mobile: d2bd2a2 → e6ae53a +- golang.org/x/mod: v0.3.0 → ce943fd +- golang.org/x/net: 69a7880 → 3d97a24 +- golang.org/x/sys: 5cba982 → a50acf3 +- golang.org/x/time: 3af7569 → f8bda1e +- golang.org/x/tools: 113979e → v0.1.0 +- gopkg.in/check.v1: 41f04d3 → 8fa4692 +- gopkg.in/yaml.v2: v2.2.8 → v2.4.0 +- k8s.io/kube-openapi: d219536 → 591a79e +- k8s.io/system-validators: v1.3.0 → v1.4.0 ### Removed -- github.com/armon/consul-api: [eb2c6b5](https://github.com/armon/consul-api/tree/eb2c6b5) -- github.com/go-ini/ini: [v1.9.0](https://github.com/go-ini/ini/tree/v1.9.0) -- github.com/ugorji/go: [v1.1.4](https://github.com/ugorji/go/tree/v1.1.4) -- github.com/xordataexchange/crypt: [b2862e3](https://github.com/xordataexchange/crypt/tree/b2862e3) +- github.com/codegangsta/negroni: [v1.0.0](https://github.com/codegangsta/negroni/tree/v1.0.0) +- github.com/golangplus/bytes: [45c989f](https://github.com/golangplus/bytes/tree/45c989f) +- github.com/golangplus/fmt: [2a5d6d7](https://github.com/golangplus/fmt/tree/2a5d6d7) +- github.com/gorilla/context: [v1.1.1](https://github.com/gorilla/context/tree/v1.1.1) +- github.com/kr/pty: [v1.1.5](https://github.com/kr/pty/tree/v1.1.5) +- sigs.k8s.io/kustomize: v2.0.3+incompatible -# v1.20.0-beta.1 +# v1.21.0-beta.0 -## Downloads for v1.20.0-beta.1 +## Downloads for v1.21.0-beta.0 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes.tar.gz) | 4eddf4850c2d57751696f352d0667309339090aeb30ff93e8db8a22c6cdebf74cb2d5dc78d4ae384c4e25491efc39413e2e420a804b76b421a9ad934e56b0667 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-src.tar.gz) | 59de5221162e9b6d88f5abbdb99765cb2b2e501498ea853fb65f2abe390211e28d9f21e0d87be3ade550a5ea6395d04552cf093d2ce2f99fd45ad46545dd13cb +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes.tar.gz) | 69b73a03b70b0ed006e9fef3f5b9bc68f0eb8dc40db6cc04777c03a2cb83a008c783012ca186b1c48357fb192403dbcf6960f120924785e2076e215b9012d546 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-src.tar.gz) | 9620fb6d37634271bdd423c09f33f3bd29e74298aa82c47dffc8cb6bd2ff44fa8987a53c53bc529db4ca96ec41503aa81cc8d0c3ac106f3b06c4720de933a8e6 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-darwin-amd64.tar.gz) | d69ffed19b034a4221fc084e43ac293cf392e98febf5bf580f8d92307a8421d8b3aab18f9ca70608937e836b42c7a34e829f88eba6e040218a4486986e2fca21 -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-386.tar.gz) | 1b542e165860c4adcd4550adc19b86c3db8cd75d2a1b8db17becc752da78b730ee48f1b0aaf8068d7bfbb1d8e023741ec293543bc3dd0f4037172a6917db8169 -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-amd64.tar.gz) | 90ad52785eecb43a6f9035b92b6ba39fc84e67f8bc91cf098e70f8cfdd405c4b9d5c02dccb21022f21bb5b6ce92fdef304def1da0a7255c308e2c5fb3a9cdaab -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-arm.tar.gz) | d0cb3322b056e1821679afa70728ffc0d3375e8f3326dabbe8185be2e60f665ab8985b13a1a432e10281b84a929e0f036960253ac0dd6e0b44677d539e98e61b -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-arm64.tar.gz) | 
3aecc8197e0aa368408624add28a2dd5e73f0d8a48e5e33c19edf91d5323071d16a27353a6f3e22df4f66ed7bfbae8e56e0a9050f7bbdf927ce6aeb29bba6374 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-ppc64le.tar.gz) | 6ff145058f62d478b98f1e418e272555bfb5c7861834fbbf10a8fb334cc7ff09b32f2666a54b230932ba71d2fc7d3b1c1f5e99e6fe6d6ec83926a9b931cd2474 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-s390x.tar.gz) | ff7b8bb894076e05a3524f6327a4a6353b990466f3292e84c92826cb64b5c82b3855f48b8e297ccadc8bcc15552bc056419ff6ff8725fc4e640828af9cc1331b -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-windows-386.tar.gz) | 6c6dcac9c725605763a130b5a975f2b560aa976a5c809d4e0887900701b707baccb9ca1aebc10a03cfa7338a6f42922bbf838ccf6800fc2a3e231686a72568b6 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-windows-amd64.tar.gz) | d12e3a29c960f0ddd1b9aabf5426ac1259863ac6c8f2be1736ebeb57ddca6b1c747ee2c363be19e059e38cf71488c5ea3509ad4d0e67fd5087282a5ad0ae9a48 +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-darwin-amd64.tar.gz) | 2a6f3fcd6b571f5ccde56b91e6e179a01899244be496dae16a2a16e0405c9437b75c6dc853b56f9a4876a7c0a60ec624ccd28400bf8fb960258263172f6860ba +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-386.tar.gz) | 78fe9ad9f9a9bc043293327223f0038a2c087ca65e87187a6dcae7a24aef9565fe498d295a4639b0b90524469a04930022fcecd815d0afc742eb87ddd8eb7ef5 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-amd64.tar.gz) | c025f5e5bd132355e7dd1296cf2ec752264e7f754c4d95fc34b076bd75bef2f571d30872bcb3d138ce95c592111353d275a80eb31f82c07000874b4c56282dbd +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-arm.tar.gz) | 9975cd2f08fbc202575fb15ba6fc51dab23155ca4d294ebb48516a81efa51f58bab3a87d41c865103756189b554c020371d729ad42880ba788f25047ffc46910 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-arm64.tar.gz) | 56a6836e24471e42e9d9a8488453f2d55598d70c8aca0a307d5116139c930c25c469fd0d1ab5060fbe88dad75a9b5209a08dc11d644af5f3ebebfbcb6c16266c +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-ppc64le.tar.gz) | b6a6cc9baad0ad85ed079ee80e6d6acc905095cfb440998bbc0f553b94fa80077bd58b8692754de477517663d51161705e6e89a1b6d04aa74819800db3517722 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-s390x.tar.gz) | 7b743481b340f510bf9ae28ea8ea91150aa1e8c37fe104b66d7b3aff62f5e6db3c590d2c13d14dbb5c928de31c7613372def2496075853611d10d6b5fa5b60bd +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-windows-386.tar.gz) | df06c7a524ce84c1f8d7836aa960c550c88dbca0ec4854df4dd0a85b3c84b8ecbc41b54e8c4669ce28ac670659ff0fad795deb1bc539f3c3b3aa885381265f5a +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-windows-amd64.tar.gz) | 4568497b684564f2a94fbea6cbfd778b891231470d9a6956c3b7a3268643d13b855c0fc5ebea5f769300cc0c7719c2c331c387f468816f182f63e515adeaa7a0 ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-amd64.tar.gz) | 
904e8c049179e071c6caa65f525f465260bb4d4318a6dd9cc05be2172f39f7cfc69d1672736e01d926045764fe8872e806444e3af77ffef823ede769537b7d20 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-arm.tar.gz) | 5934959374868aed8d4294de84411972660bca7b2e952201a9403f37e40c60a5c53eaea8001344d0bf4a00c8cd27de6324d88161388de27f263a5761357cb82b -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-arm64.tar.gz) | 4c884585970f80dc5462d9a734d7d5be9558b36c6e326a8a3139423efbd7284fa9f53fb077983647e17e19f03f5cb9bf26201450c78daecf10afa5a1ab5f9efc -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-ppc64le.tar.gz) | 235b78b08440350dcb9f13b63f7722bd090c672d8e724ca5d409256e5a5d4f46d431652a1aa908c3affc5b1e162318471de443d38b93286113e79e7f90501a9b -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-s390x.tar.gz) | 220fc9351702b3ecdcf79089892ceb26753a8a1deaf46922ffb3d3b62b999c93fef89440e779ca6043372b963081891b3a966d1a5df0cf261bdd44395fd28dce +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-amd64.tar.gz) | 42883cca2d312153baf693fc6024a295359a421e74fd70eefc927413be4e0353debe634e7cca6b9a8f7d8a0cee3717e03ba5d29a306e93139b1c2f3027535a6d +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-arm.tar.gz) | e0042215e84c769ba4fc4d159ccf67b2c4a26206bfffb0ec5152723dc813ff9c1426aa0e9b963d7bfa2efb266ca43561b596b459152882ebb42102ccf60bd8eb +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-arm64.tar.gz) | bfad29d43e14152cb9bc7c4df6aa77929c6eca64a294bb832215bdba9fa0ee2195a2b709c0267dc7426bb371b547ee80bb8461a8c678c9bffa0819aa7db96289 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-ppc64le.tar.gz) | ca67674c01c6cebdc8160c85b449eab1a23bb0557418665246e0208543fa2eaaf97679685c7b49bee3a4300904c0399c3d762ae34dc3e279fd69ce792c4b07ff +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-s390x.tar.gz) | 285352b628ec754b01b8ad4ef1427223a142d58ebcb46f6861df14d68643133b32330460b213b1ba5bc5362ff2b6dacd8e0c2d20cce6e760fa1954af8a60df8b ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-amd64.tar.gz) | fe59d3a1f21c47bab126f689687657f77fbcb46a2caeef48eecd073b2b22879f997a466911b5c5c829e9cf27e68a36ecdf18686d42714839d4b97d6c7281578d -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-arm.tar.gz) | 93e545aa963cfd11e0b2c6d47669b5ef70c5a86ef80c3353c1a074396bff1e8e7371dda25c39d78c7a9e761f2607b8b5ab843fa0c10b8ff9663098fae8d25725 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-arm64.tar.gz) | 5e0f177f9bec406a668d4b37e69b191208551fdf289c82b5ec898959da4f8a00a2b0695cbf1d2de5acb809321c6e5604f5483d33556543d92b96dcf80e814dd3 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-ppc64le.tar.gz) | 574412059e4d257eb904cd4892a075b6a2cde27adfa4976ee64c46d6768facece338475f1b652ad94c8df7cfcbb70ebdf0113be109c7099ab76ffdb6f023eefd -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-s390x.tar.gz) | 
b1ffaa6d7f77d89885c642663cb14a86f3e2ec2afd223e3bb2000962758cf0f15320969ffc4be93b5826ff22d54fdbae0dbea09f9d8228eda6da50b6fdc88758 -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-windows-amd64.tar.gz) | 388983765213cf3bdc1f8b27103ed79e39028767e5f1571e35ed1f91ed100e49f3027f7b7ff19b53fab7fbb6d723c0439f21fc6ed62be64532c25f5bfa7ee265 +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-amd64.tar.gz) | d92d9b30e7e44134a0cd9db4c01924d365991ea16b3131200b02a82cff89c8701f618cd90e7f1c65427bd4bb5f78b10d540b2262de2c143b401fa44e5b25627b +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-arm.tar.gz) | 551092f23c27fdea4bb2d0547f6075892534892a96fc2be7786f82b58c93bffdb5e1c20f8f11beb8bed46c24f36d4c18ec5ac9755435489efa28e6ae775739bd +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-arm64.tar.gz) | 26ae7f4163e527349b8818ee38b9ee062314ab417f307afa49c146df8f5a2bd689509b128bd4a1efd3896fd89571149a9955ada91f8ca0c2f599cd863d613c86 +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-ppc64le.tar.gz) | 821fa953f6cebc69d2d481e489f3e90899813d20e2eefbabbcadd019d004108e7540f741fabe60e8e7c6adbb1053ac97898bbdddec3ca19f34a71aa3312e0d4e +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-s390x.tar.gz) | 22197d4f66205d5aa9de83dfddcc4f2bb3195fd7067cdb5c21e61dbeae217bc112fb7ecff8a539579b60ad92298c2b4c87b9b7c7e6ec1ee1ffa0c6e4bc4412c1 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-windows-amd64.tar.gz) | 7e22e0d9603562a04dee16a513579f06b1ff6354d97d669bd68f8777ec7f89f6ef027fb23ab0445d7bba0bb689352f0cc748ce90e3f597c6ebe495464a96b860 -## Changelog since v1.20.0-beta.0 +## Changelog since v1.21.0-alpha.3 + +## Urgent Upgrade Notes +### (No, really, you MUST read this before you upgrade) + + - The metric `storage_operation_errors_total` is not removed, but is marked deprecated, and the metric `storage_operation_status_count` is marked deprecated. In both cases, the `storage_operation_duration_seconds` metric can be used to recover equivalent counts (using `status=fail-unknown` in the case of `storage_operation_errors_total`). ([#99045](https://github.com/kubernetes/kubernetes/pull/99045), [@mattcary](https://github.com/mattcary)) [SIG Instrumentation and Storage] + ## Changes by Kind ### Deprecation -- ACTION REQUIRED: The kube-apiserver ability to serve on an insecure port, deprecated since v1.10, has been removed. The insecure address flags `--address` and `--insecure-bind-address` have no effect in kube-apiserver and will be removed in v1.24. The insecure port flags `--port` and `--insecure-port` may only be set to 0 and will be removed in v1.24. ([#95856](https://github.com/kubernetes/kubernetes/pull/95856), [@knight42](https://github.com/knight42)) [SIG API Machinery, Node and Testing] +- The `batch/v2alpha1` CronJob type definitions and clients are deprecated and removed; the `batch/v1beta1` API remains available (see the sketch after this list). ([#96987](https://github.com/kubernetes/kubernetes/pull/96987), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing]
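For code that still constructed CronJobs through the removed `batch/v2alpha1` clients, a minimal client-go sketch of the `batch/v1beta1` equivalent referenced above; the kubeconfig loading, namespace, and job contents are illustrative assumptions, not part of this release.

```go
package main

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	batchv1beta1 "k8s.io/api/batch/v1beta1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from the default kubeconfig (illustrative setup).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	cj := &batchv1beta1.CronJob{
		ObjectMeta: metav1.ObjectMeta{Name: "hello", Namespace: "default"},
		Spec: batchv1beta1.CronJobSpec{
			Schedule: "*/5 * * * *",
			JobTemplate: batchv1beta1.JobTemplateSpec{
				Spec: batchv1.JobSpec{
					Template: corev1.PodTemplateSpec{
						Spec: corev1.PodSpec{
							RestartPolicy: corev1.RestartPolicyOnFailure,
							Containers: []corev1.Container{
								{Name: "hello", Image: "busybox", Args: []string{"date"}},
							},
						},
					},
				},
			},
		},
	}

	// The typed client lives under BatchV1beta1(); BatchV2alpha1() is gone.
	if _, err := cs.BatchV1beta1().CronJobs("default").Create(context.TODO(), cj, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}
```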
### API Change -- + `TokenRequest` and `TokenRequestProjection` features have been promoted to GA. This feature allows generating service account tokens that are not visible in Secret objects and are tied to the lifetime of a Pod object. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection for details on configuring and using this feature. The `TokenRequest` and `TokenRequestProjection` feature gates will be removed in v1.21. - + kubeadm's kube-apiserver Pod manifest now includes the following flags by default "--service-account-key-file", "--service-account-signing-key-file", "--service-account-issuer". ([#93258](https://github.com/kubernetes/kubernetes/pull/93258), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle, Storage and Testing] -- Certain fields on Service objects will be automatically cleared when changing the service's `type` to a mode that does not need those fields. For example, changing from type=LoadBalancer to type=ClusterIP will clear the NodePort assignments, rather than forcing the user to clear them. ([#95196](https://github.com/kubernetes/kubernetes/pull/95196), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, Network and Testing] -- Services will now have a `clusterIPs` field to go with `clusterIP`. `clusterIPs[0]` is a synonym for `clusterIP` and will be syncronized on create and update operations. ([#95894](https://github.com/kubernetes/kubernetes/pull/95894), [@thockin](https://github.com/thockin)) [SIG Network] +- Cluster admins can now turn off the /debug/pprof and /debug/flags/v endpoints in the kubelet by setting enableProfilingHandler and enableDebugFlagsHandler to false in their kubelet configuration file. enableProfilingHandler and enableDebugFlagsHandler can be set to true only when enableDebuggingHandlers is also set to true. ([#98458](https://github.com/kubernetes/kubernetes/pull/98458), [@SaranBalaji90](https://github.com/SaranBalaji90)) [SIG Node] +- The BoundServiceAccountTokenVolume feature has been promoted to beta, and enabled by default. + - This changes the tokens provided to containers at `/var/run/secrets/kubernetes.io/serviceaccount/token` to be time-limited, auto-refreshed, and invalidated when the containing pod is deleted. + - Clients should reload the token from disk periodically (once per minute is recommended) to ensure they continue to use a valid token (see the sketch after this list). `k8s.io/client-go` versions v11.0.0+ and v0.15.0+ reload tokens automatically. + - By default, injected tokens are given an extended lifetime so they remain valid even after a new refreshed token is provided. The metric `serviceaccount_stale_tokens_total` can be used to monitor for workloads that are depending on the extended lifetime and are continuing to use tokens even after a refreshed token is provided to the container. If that metric indicates no existing workloads are depending on extended lifetimes, injected token lifetime can be shortened to 1 hour by starting `kube-apiserver` with `--service-account-extend-token-expiration=false`. ([#95667](https://github.com/kubernetes/kubernetes/pull/95667), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle and Testing]
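For clients that cannot rely on a recent `k8s.io/client-go`, a minimal Go sketch of the once-per-minute reload recommended above; the token path and interval come from the entry itself, while the callback and logging are assumptions.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"time"
)

const tokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token"

// watchToken re-reads the projected service account token once per minute,
// as recommended, and passes each changed value to onChange.
func watchToken(onChange func(token string)) {
	var last string
	for {
		b, err := ioutil.ReadFile(tokenPath)
		if err != nil {
			fmt.Fprintf(os.Stderr, "token reload failed: %v\n", err)
		} else if tok := string(b); tok != last {
			last = tok
			onChange(tok)
		}
		time.Sleep(time.Minute)
	}
}

func main() {
	// Illustrative: swap the bearer token on your API client in the callback.
	watchToken(func(token string) {
		fmt.Println("service account token rotated; updating client credentials")
	})
}
```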
### Feature -- A new metric `apiserver_request_filter_duration_seconds` has been introduced that - measures request filter latency in seconds. ([#95207](https://github.com/kubernetes/kubernetes/pull/95207), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Instrumentation] -- Add a new flag to set priority for the kubelet on Windows nodes so that workloads cannot overwhelm the node there by disrupting kubelet process. ([#96051](https://github.com/kubernetes/kubernetes/pull/96051), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node and Windows] -- Changed: default "Accept: */*" header added to HTTP probes. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes (https://github.com/kubernetes/website/pull/24756) ([#95641](https://github.com/kubernetes/kubernetes/pull/95641), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node] -- Client-go credential plugins can now be passed in the current cluster information via the KUBERNETES_EXEC_INFO environment variable. ([#95489](https://github.com/kubernetes/kubernetes/pull/95489), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery and Auth] -- Kube-apiserver: added support for compressing rotated audit log files with `--audit-log-compress` ([#94066](https://github.com/kubernetes/kubernetes/pull/94066), [@lojies](https://github.com/lojies)) [SIG API Machinery and Auth] +- A new histogram metric tracks the time the ttl-after-finished controller takes to delete a Job ([#98676](https://github.com/kubernetes/kubernetes/pull/98676), [@ahg-g](https://github.com/ahg-g)) [SIG Apps and Instrumentation] +- The AWS cloud provider supports auto-discovering subnets without any kubernetes.io/cluster/ tags. It also supports an additional service annotation, service.beta.kubernetes.io/aws-load-balancer-subnets, to configure the subnets manually. ([#97431](https://github.com/kubernetes/kubernetes/pull/97431), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] +- Add a --permit-address-sharing flag to kube-apiserver to listen with SO_REUSEADDR. Besides allowing listening on wildcard IPs like 0.0.0.0 and on specific IPs in parallel, this avoids waiting for the kernel to release sockets in the TIME_WAIT state, and hence considerably reduces kube-apiserver restart times under certain conditions (see the sketch after this list). ([#93861](https://github.com/kubernetes/kubernetes/pull/93861), [@sttts](https://github.com/sttts)) [SIG API Machinery] +- Add `csi_operations_seconds` metric on kubelet that exposes CSI operations duration and status for node CSI operations. ([#98979](https://github.com/kubernetes/kubernetes/pull/98979), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Instrumentation and Storage] +- Add `migrated` field into `storage_operation_duration_seconds` metric ([#99050](https://github.com/kubernetes/kubernetes/pull/99050), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Instrumentation and Storage] +- Add bash-completion for comma-separated lists on `kubectl get` ([#98301](https://github.com/kubernetes/kubernetes/pull/98301), [@phil9909](https://github.com/phil9909)) [SIG CLI] +- Added support for installing arm64 node artifacts. ([#99242](https://github.com/kubernetes/kubernetes/pull/99242), [@liu-cong](https://github.com/liu-cong)) [SIG Cloud Provider] +- The feature gate RootCAConfigMap has graduated to GA in 1.21 and will be removed in 1.22.
([#98033](https://github.com/kubernetes/kubernetes/pull/98033), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Auth] +- Kubeadm: during "init" and "join", perform preflight validation on the host/node name and warn if the name is not compliant ([#99194](https://github.com/kubernetes/kubernetes/pull/99194), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubectl: `kubectl get` now omits managed fields by default. Users can set `--show-managed-fields` to true to show managedFields when the output format is either `json` or `yaml`. ([#96878](https://github.com/kubernetes/kubernetes/pull/96878), [@knight42](https://github.com/knight42)) [SIG CLI and Testing] +- Metrics can now be disabled explicitly via a command line flag (e.g. '--disabled-metrics=bad_metric1,bad_metric2') ([#99217](https://github.com/kubernetes/kubernetes/pull/99217), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] +- TTLAfterFinished is now beta and enabled by default ([#98678](https://github.com/kubernetes/kubernetes/pull/98678), [@ahg-g](https://github.com/ahg-g)) [SIG Apps and Auth] +- The `RunAsGroup` feature has been promoted to GA in this release. ([#94641](https://github.com/kubernetes/kubernetes/pull/94641), [@krmayankk](https://github.com/krmayankk)) [SIG Auth and Node] +- Turn CronJobControllerV2 on by default. ([#98878](https://github.com/kubernetes/kubernetes/pull/98878), [@soltysh](https://github.com/soltysh)) [SIG Apps] +- UDP protocol support for the Agnhost connect subcommand ([#98639](https://github.com/kubernetes/kubernetes/pull/98639), [@knabben](https://github.com/knabben)) [SIG Testing] +- Upgrades `IPv6Dualstack` to `Beta` and turns it on by default. New and existing clusters are not affected until users start adding the secondary Pod and Service CIDR CLI flags described here: https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/563-dual-stack ([#98969](https://github.com/kubernetes/kubernetes/pull/98969), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, Cloud Provider, Network and Node]
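To illustrate the `--permit-address-sharing` entry above, a minimal Go sketch of the underlying SO_REUSEADDR socket option; this is not the kube-apiserver's actual listener code, and the address is a placeholder.

```go
package main

import (
	"context"
	"fmt"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

// listenReuseAddr opens a TCP listener with SO_REUSEADDR set, the socket
// option behind --permit-address-sharing: binds succeed even while old
// sockets for the same address still linger in TIME_WAIT.
func listenReuseAddr(addr string) (net.Listener, error) {
	lc := net.ListenConfig{
		Control: func(network, address string, c syscall.RawConn) error {
			var serr error
			err := c.Control(func(fd uintptr) {
				serr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR, 1)
			})
			if err != nil {
				return err
			}
			return serr
		},
	}
	return lc.Listen(context.Background(), "tcp", addr)
}

func main() {
	l, err := listenReuseAddr("0.0.0.0:6443") // placeholder address
	if err != nil {
		panic(err)
	}
	fmt.Println("listening on", l.Addr())
	l.Close()
}
```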
### Documentation -- Fake dynamic client: document that List does not preserve TypeMeta in UnstructuredList ([#95117](https://github.com/kubernetes/kubernetes/pull/95117), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] +- Fix ALPHA stability level reference link ([#98641](https://github.com/kubernetes/kubernetes/pull/98641), [@Jeffwan](https://github.com/Jeffwan)) [SIG Auth, Cloud Provider, Instrumentation and Storage] + +### Failing Test + +- Escape the special characters like `[`, `]` and ` ` that exist in vsphere windows path ([#98830](https://github.com/kubernetes/kubernetes/pull/98830), [@liyanhui1228](https://github.com/liyanhui1228)) [SIG Storage and Windows] +- Kube-proxy: fix a bug on UDP NodePort Services where stale conntrack entries may blackhole the traffic directed to the NodePort. ([#98305](https://github.com/kubernetes/kubernetes/pull/98305), [@aojea](https://github.com/aojea)) [SIG Network] ### Bug or Regression -- Added support to kube-proxy for externalTrafficPolicy=Local setting via Direct Server Return (DSR) load balancers on Windows. ([#93166](https://github.com/kubernetes/kubernetes/pull/93166), [@elweb9858](https://github.com/elweb9858)) [SIG Network] -- Disable watchcache for events ([#96052](https://github.com/kubernetes/kubernetes/pull/96052), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery] -- Disabled `LocalStorageCapacityIsolation` feature gate is honored during scheduling. ([#96092](https://github.com/kubernetes/kubernetes/pull/96092), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] -- Fix bug in JSON path parser where an error occurs when a range is empty ([#95933](https://github.com/kubernetes/kubernetes/pull/95933), [@brianpursley](https://github.com/brianpursley)) [SIG API Machinery] -- Fix k8s.io/apimachinery/pkg/api/meta.SetStatusCondition to update ObservedGeneration ([#95961](https://github.com/kubernetes/kubernetes/pull/95961), [@KnicKnic](https://github.com/KnicKnic)) [SIG API Machinery] -- Fixed a regression which prevented pods with `docker/default` seccomp annotations from being created in 1.19 if a PodSecurityPolicy was in place which did not allow `runtime/default` seccomp profiles. ([#95985](https://github.com/kubernetes/kubernetes/pull/95985), [@saschagrunert](https://github.com/saschagrunert)) [SIG Auth] -- Kubectl: print error if users place flags before plugin name ([#92343](https://github.com/kubernetes/kubernetes/pull/92343), [@knight42](https://github.com/knight42)) [SIG CLI] -- When creating a PVC with the volume.beta.kubernetes.io/storage-provisioner annotation already set, the PV controller might have incorrectly deleted the newly provisioned PV instead of binding it to the PVC, depending on timing and system load. ([#95909](https://github.com/kubernetes/kubernetes/pull/95909), [@pohly](https://github.com/pohly)) [SIG Apps and Storage] +- Add missing --kube-api-content-type in kubemark hollow template ([#98911](https://github.com/kubernetes/kubernetes/pull/98911), [@Jeffwan](https://github.com/Jeffwan)) [SIG Scalability and Testing] +- Avoid duplicate error messages when running kubectl edit quota ([#98201](https://github.com/kubernetes/kubernetes/pull/98201), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery and Apps] +- Cleanup subnet in frontend IP configs to prevent huge subnet request bodies in some scenarios. ([#98133](https://github.com/kubernetes/kubernetes/pull/98133), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Fix errors when accessing Windows container stats for Dockershim ([#98510](https://github.com/kubernetes/kubernetes/pull/98510), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node and Windows] +- Fixes spurious errors about IPv6 in kube-proxy logs on nodes with IPv6 disabled. ([#99127](https://github.com/kubernetes/kubernetes/pull/99127), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] +- Fixed a bug in identifying the containerd process, in the method that ensures that docker and containerd are in the correct containers with the proper OOM score set up.
([#97888](https://github.com/kubernetes/kubernetes/pull/97888), [@pacoxu](https://github.com/pacoxu)) [SIG Node] +- Kubelet now cleans up orphaned volume directories automatically ([#95301](https://github.com/kubernetes/kubernetes/pull/95301), [@lorenz](https://github.com/lorenz)) [SIG Node and Storage] +- When dynamically provisioning Azure File volumes for a premium account, the requested size will be set to 100GB if the request is initially lower than this value to accommodate Azure File requirements. ([#99122](https://github.com/kubernetes/kubernetes/pull/99122), [@huffmanca](https://github.com/huffmanca)) [SIG Cloud Provider and Storage] ### Other (Cleanup or Flake) -- Kubectl: the `generator` flag of `kubectl autoscale` has been deprecated and has no effect, it will be removed in a feature release ([#92998](https://github.com/kubernetes/kubernetes/pull/92998), [@SataQiu](https://github.com/SataQiu)) [SIG CLI] -- V1helpers.MatchNodeSelectorTerms now accepts just a Node and a list of Terms ([#95871](https://github.com/kubernetes/kubernetes/pull/95871), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage] -- `MatchNodeSelectorTerms` function moved to `k8s.io/component-helpers` ([#95531](https://github.com/kubernetes/kubernetes/pull/95531), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage] +- APIs for kubelet annotations and labels from k8s.io/kubernetes/pkg/kubelet/apis are now available under k8s.io/kubelet/pkg/apis/ ([#98931](https://github.com/kubernetes/kubernetes/pull/98931), [@michaelbeaumont](https://github.com/michaelbeaumont)) [SIG Apps, Auth and Node] +- Migrate `pkg/kubelet/(pod, pleg)` to structured logging ([#98990](https://github.com/kubernetes/kubernetes/pull/98990), [@gjkim42](https://github.com/gjkim42)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/nodestatus to structured logging ([#99001](https://github.com/kubernetes/kubernetes/pull/99001), [@QiWang19](https://github.com/QiWang19)) [SIG Node] +- Migrate pkg/kubelet/server logs to structured logging ([#98643](https://github.com/kubernetes/kubernetes/pull/98643), [@chenyw1990](https://github.com/chenyw1990)) [SIG Node] +- Migrate proxy/winkernel/proxier.go logs to structured logging ([#98001](https://github.com/kubernetes/kubernetes/pull/98001), [@JornShen](https://github.com/JornShen)) [SIG Network and Windows] +- Migrate scheduling_queue.go to structured logging ([#98358](https://github.com/kubernetes/kubernetes/pull/98358), [@tanjing2020](https://github.com/tanjing2020)) [SIG Scheduling] +- Several flags related to the deprecated dockershim which are present in the kubelet command line are now deprecated. ([#98730](https://github.com/kubernetes/kubernetes/pull/98730), [@dims](https://github.com/dims)) [SIG Node] +- The deprecated feature gates `CSIDriverRegistry`, `BlockVolume` and `CSIBlockVolume` are now unconditionally enabled and can no longer be specified in component invocations. 
([#98021](https://github.com/kubernetes/kubernetes/pull/98021), [@gavinfish](https://github.com/gavinfish)) [SIG Storage] ## Dependencies @@ -1379,763 +1186,530 @@ filename | sha512 hash _Nothing has changed._ ### Changed -_Nothing has changed._ +- sigs.k8s.io/structured-merge-diff/v4: v4.0.2 → v4.0.3 ### Removed _Nothing has changed._ -# v1.20.0-beta.0 +# v1.21.0-alpha.3 -## Downloads for v1.20.0-beta.0 +## Downloads for v1.21.0-alpha.3 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes.tar.gz) | 385e49e32bbd6996f07bcadbf42285755b8a8ef9826ee1ba42bd82c65827cf13f63e5634b834451b263a93b708299cbb4b4b0b8ddbc688433deaf6bec240aa67 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-src.tar.gz) | 842e80f6dcad461426fb699de8a55fde8621d76a94e54288fe9939cc1a3bbd0f4799abadac2c59bcf3f91d743726dbd17e1755312ae7fec482ef560f336dbcbb +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes.tar.gz) | 704ec916a1dbd134c54184d2652671f80ae09274f9d23dbbed312944ebeccbc173e2e6b6949b38bdbbfdaf8aa032844deead5efeda1b3150f9751386d9184bc8 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-src.tar.gz) | 57db9e7560cfc9c10e7059cb5faf9c4bd5eb8f9b7964f44f000a417021cf80873184b774e7c66c80d4aba84c14080c6bc335618db3d2e5f276436ae065e25408 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-darwin-amd64.tar.gz) | bde5e7d9ee3e79d1e69465a3ddb4bb36819a4f281b5c01a7976816d7c784410812dde133cdf941c47e5434e9520701b9c5e8b94d61dca77c172f87488dfaeb26 -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-386.tar.gz) | 721bb8444c9e0d7a9f8461e3f5428882d76fcb3def6eb11b8e8e08fae7f7383630699248660d69d4f6a774124d6437888666e1fa81298d5b5518bc4a6a6b2c92 -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-amd64.tar.gz) | 71e4edc41afbd65f813e7ecbc22b27c95f248446f005e288d758138dc4cc708735be7218af51bcf15e8b9893a3598c45d6a685f605b46f50af3762b02c32ed76 -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-arm.tar.gz) | bbefc749156f63898973f2f7c7a6f1467481329fb430d641fe659b497e64d679886482d557ebdddb95932b93de8d1e3e365c91d4bf9f110b68bd94b0ba702ded -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-arm64.tar.gz) | 9803190685058b4b64d002c2fbfb313308bcea4734ed53a8c340cfdae4894d8cb13b3e819ae64051bafe0fbf8b6ecab53a6c1dcf661c57640c75b0eb60041113 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-ppc64le.tar.gz) | bcdceea64cba1ae38ea2bab50d8fd77c53f6d673de12566050b0e3c204334610e6c19e4ace763e68b5e48ab9e811521208b852b1741627be30a2b17324fc1daf -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-s390x.tar.gz) | 41e36d00867e90012d5d5adfabfaae8d9f5a9fd32f290811e3c368e11822916b973afaaf43961081197f2cbab234090d97d89774e674aeadc1da61f7a64708a9 -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-windows-386.tar.gz) | c50fec5aec2d0e742f851f25c236cb73e76f8fc73b0908049a10ae736c0205b8fff83eb3d29b1748412edd942da00dd738195d9003f25b577d6af8359d84fb2f -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-windows-amd64.tar.gz) | 
0fd6777c349908b6d627e849ea2d34c048b8de41f7df8a19898623f597e6debd35b7bcbf8e1d43a1be3a9abb45e4810bc498a0963cf780b109e93211659e9c7e +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-darwin-amd64.tar.gz) | e2706efda92d5cf4f8b69503bb2f7703a8754407eff7f199bb77847838070e720e5f572126c14daa4c0c03b59bb1a63c1dfdeb6e936a40eff1d5497e871e3409 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-386.tar.gz) | 007bb23c576356ed0890bdfd25a0f98d552599e0ffec19fb982591183c7c1f216d8a3ffa3abf15216be12ae5c4b91fdcd48a7306a2d26b007b86a6abd553fc61 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-amd64.tar.gz) | 39504b0c610348beba60e8866fff265bad58034f74504951cd894c151a248db718d10f77ebc83f2c38b2d517f8513a46325b38889eefa261ca6dbffeceba50ff +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-arm.tar.gz) | 30bc2c40d0c759365422ad1651a6fb35909be771f463c5b971caf401f9209525d05256ab70c807e88628dd357c2896745eecf13eda0b748464da97d0a5ef2066 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-arm64.tar.gz) | 085cdf574dc8fd33ece667130b8c45830b522a07860e03a2384283b1adea73a9652ef3dfaa566e69ee00aea1a6461608814b3ce7a3f703e4a934304f7ae12f97 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-ppc64le.tar.gz) | b34b845037d83ea7b3e2d80a9ede4f889b71b17b93b1445f0d936a36e98c13ed6ada125630a68d9243a5fcd311ee37cdcc0c05da484da8488ea5060bc529dbfc +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-s390x.tar.gz) | c4758adc7a404b776556efaa79655db2a70777c562145d6ea6887f3335988367a0c2fcd4383e469340f2a768b22e786951de212805ca1cb91104d41c21e0c9ce +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-windows-386.tar.gz) | f51edc79702bbd1d9cb3a672852a405e11b20feeab64c5411a7e85c9af304960663eb6b23ef96e0f8c44a722fecf58cb6d700ea2c42c05b3269d8efd5ad803f2 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-windows-amd64.tar.gz) | 6a3507ce4ac40a0dc7e4720538863fa15f8faf025085a032f34b8fa0f6fa4e8c26849baf649b5b32829b9182e04f82721b13950d31cf218c35be6bf1c05d6abf ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-amd64.tar.gz) | 30d982424ca64bf0923503ae8195b2e2a59497096b2d9e58dfd491cd6639633027acfa9750bc7bccf34e1dc116d29d2f87cbd7ae713db4210ce9ac16182f0576 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-arm.tar.gz) | f08b62be9bc6f0745f820b0083c7a31eedb2ce370a037c768459a59192107b944c8f4345d0bb88fc975f2e7a803ac692c9ac3e16d4a659249d4600e84ff75d9e -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-arm64.tar.gz) | e3472b5b3dfae0a56e5363d52062b1e4a9fc227a05e0cf5ece38233b2c442f427970aab94a52377fb87e583663c120760d154bc1c4ac22dca1f4d0d1ebb96088 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-ppc64le.tar.gz) | 06c254e0a62f755d31bc40093d86c44974f0a60308716cc3214a6b3c249a4d74534d909b82f8a3dd3a3c9720e61465b45d2bb3a327ef85d3caba865750020dfb -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-s390x.tar.gz) | 
2edeb4411c26a0de057a66787091ab1044f71774a464aed898ffee26634a40127181c2edddb38e786b6757cca878fd0c3a885880eec6c3448b93c645770abb12 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-amd64.tar.gz) | 19181d162dfb0b30236e2bf1111000e037eece87c037ca2b24622ca94cb88db86aa4da4ca533522518b209bc9983bbfd6b880a7898e0da96b33f3f6c4690539b +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-arm.tar.gz) | 42a02f9e08a78ad5da6e5fa1ab12bf1e3c967c472fdbdadbd8746586da74dc8093682ba8513ff2a5301393c47ee9021b860e88ada56b13da386ef485708e46ca +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-arm64.tar.gz) | 3c8ba8eb02f70061689bd7fab7813542005efe2edc6cfc6b7aecd03ffedf0b81819ad91d69fff588e83023d595eefbfe636aa55e1856add8733bf42fff3c748f +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-ppc64le.tar.gz) | cd9e6537450411c39a06fd0b5819db3d16b668d403fb3627ec32c0e32dd1c4860e942934578ca0e1d1b8e6f21f450ff81e37e0cd46ff5c5faf7847ab074aefc5 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-s390x.tar.gz) | ada3f65e53bc0e0c0229694dd48c425388089d6d77111a62476d1b08f6ad1d8ab3d60b9ed7d95ac1b42c2c6be8dc0618f40679717160769743c43583d8452362 ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-amd64.tar.gz) | cc1d5b94b86070b5e7746d7aaeaeac3b3a5e5ebbff1ec33885f7eeab270a6177d593cb1975b2e56f4430b7859ad42da76f266629f9313e0f688571691ac448ed -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-arm.tar.gz) | 75e82c7c9122add3b24695b94dcb0723c52420c3956abf47511e37785aa48a1fa8257db090c6601010c4475a325ccfff13eb3352b65e3aa1774f104b09b766b0 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-arm64.tar.gz) | 16ef27c40bf4d678a55fcd3d3f7d09f1597eec2cc58f9950946f0901e52b82287be397ad7f65e8d162d8a9cdb4a34a610b6db8b5d0462be8e27c4b6eb5d6e5e7 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-ppc64le.tar.gz) | 939865f2c4cb6a8934f22a06223e416dec5f768ffc1010314586149470420a1d62aef97527c34d8a636621c9669d6489908ce1caf96f109e8d073cee1c030b50 -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-s390x.tar.gz) | bbfdd844075fb816079af7b73d99bc1a78f41717cdbadb043f6f5872b4dc47bc619f7f95e2680d4b516146db492c630c17424e36879edb45e40c91bc2ae4493c -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-windows-amd64.tar.gz) | a2b3ea40086fd71aed71a4858fd3fc79fd1907bc9ea8048ff3c82ec56477b0a791b724e5a52d79b3b36338c7fbd93dfd3d03b00ccea9042bda0d270fc891e4ec +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-amd64.tar.gz) | ae0fec6aa59e49624b55d9a11c12fdf717ddfe04bdfd4f69965d03004a34e52ee4a3e83f7b61d0c6a86f43b72c99f3decb195b39ae529ef30526d18ec5f58f83 +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-arm.tar.gz) | 9a48c140ab53b7ed8ecec6903988a1a474efc16d2538e5974bc9a12f0c9190be78c4f9e326bf4e982d0b7045a80b99dd0fda7e9b650663be5b89bfd991596746 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-arm64.tar.gz) | 6912adbc9300344bea470d6435f7b387bfce59767078c11728ce59faf47cd3f72b41b9604fcc5cda45e9816fe939fbe2fb33e52a773e6ff2dfa9a615b4df6141 
+[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-ppc64le.tar.gz) | d66dccfe3e6ed6d81567c70703f15375a53992b3a5e2814b98c32e581b861ad95912e03ed2562415d087624c008038bb4a816611fa255442ae752968ea15856b +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-s390x.tar.gz) | ad8c69a28f1fbafa3f1cb54909bfd3fc22b104bed63d7ca2b296208c9d43eb5f2943a0ff267da4c185186cdd9f7f77b315cd7f5f1bf9858c0bf42eceb9ac3c58 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-windows-amd64.tar.gz) | 91d723aa848a9cb028f5bcb41090ca346fb973961521d025c4399164de2c8029b57ca2c4daca560d3c782c05265d2eb0edb0abcce6f23d3efbecf2316a54d650 -## Changelog since v1.20.0-alpha.3 +## Changelog since v1.21.0-alpha.2 ## Urgent Upgrade Notes ### (No, really, you MUST read this before you upgrade) - - Kubeadm: improve the validation of serviceSubnet and podSubnet. - ServiceSubnet has to be limited in size, due to implementation details, and the mask can not allocate more than 20 bits. - PodSubnet validates against the corresponding cluster "--node-cidr-mask-size" of the kube-controller-manager, it fail if the values are not compatible. - kubeadm no longer sets the node-mask automatically on IPv6 deployments, you must check that your IPv6 service subnet mask is compatible with the default node mask /64 or set it accordenly. - Previously, for IPv6, if the podSubnet had a mask lower than /112, kubeadm calculated a node-mask to be multiple of eight and splitting the available bits to maximise the number used for nodes. ([#95723](https://github.com/kubernetes/kubernetes/pull/95723), [@aojea](https://github.com/aojea)) [SIG Cluster Lifecycle] - - Windows hyper-v container featuregate is deprecated in 1.20 and will be removed in 1.21 ([#95505](https://github.com/kubernetes/kubernetes/pull/95505), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] - + - PVs newly provisioned by gce-pd will no longer have the beta FailureDomain label; the gce-pd volume plugin will use the GA topology label instead. ([#98700](https://github.com/kubernetes/kubernetes/pull/98700), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider, Storage and Testing] + - Remove the alpha CSIMigrationXXComplete flag and add an alpha InTreePluginXXUnregister flag. Deprecate the CSIMigrationvSphereComplete flag; it will be removed in 1.22. ([#98243](https://github.com/kubernetes/kubernetes/pull/98243), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Node and Storage] + ## Changes by Kind -### Deprecation - -- Support 'controlplane' as a valid EgressSelection type in the EgressSelectorConfiguration API. 'Master' is deprecated and will be removed in v1.22. ([#95235](https://github.com/kubernetes/kubernetes/pull/95235), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] - ### API Change -- Add dual-stack Services (alpha). This is a BREAKING CHANGE to an alpha API. - It changes the dual-stack API wrt Service from a single ipFamily field to 3 - fields: ipFamilyPolicy (SingleStack, PreferDualStack, RequireDualStack), - ipFamilies (a list of families assigned), and clusterIPs (inclusive of - clusterIP). Most users do not need to set anything at all, defaulting will - handle it for them. Services are single-stack unless the user asks for - dual-stack. This is all gated by the "IPv6DualStack" feature gate.
([#91824](https://github.com/kubernetes/kubernetes/pull/91824), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- Introduces a metric source for HPAs which allows scaling based on container resource usage. ([#90691](https://github.com/kubernetes/kubernetes/pull/90691), [@arjunrn](https://github.com/arjunrn)) [SIG API Machinery, Apps, Autoscaling and CLI] +- Adds support for portRange / EndPort in Network Policy ([#97058](https://github.com/kubernetes/kubernetes/pull/97058), [@rikatz](https://github.com/rikatz)) [SIG Apps and Network] +- Fixes using server-side apply with APIService resources ([#98576](https://github.com/kubernetes/kubernetes/pull/98576), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery, Apps and Testing] +- Kubernetes is now built using go1.15.7 ([#98363](https://github.com/kubernetes/kubernetes/pull/98363), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Node, Release and Testing] +- The scheduler extender filter interface can now report unresolvable failed nodes in the new field `FailedAndUnresolvableNodes` of the `ExtenderFilterResult` struct. Nodes in this map will be skipped in the preemption phase (see the sketch after this list). ([#92866](https://github.com/kubernetes/kubernetes/pull/92866), [@cofyc](https://github.com/cofyc)) [SIG Scheduling]
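To illustrate the extender change above, a minimal, hypothetical scheduler-extender webhook; the node name, reason string, and port are placeholders, and a real extender would partition the received nodes instead of passing them all through.

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"

	extenderv1 "k8s.io/kube-scheduler/extender/v1"
)

// filter decodes the scheduler's ExtenderArgs and answers with an
// ExtenderFilterResult. Nodes reported in FailedAndUnresolvableNodes are
// treated as failed and, unlike those in FailedNodes, skipped during preemption.
func filter(w http.ResponseWriter, r *http.Request) {
	var args extenderv1.ExtenderArgs
	if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	result := extenderv1.ExtenderFilterResult{
		// This sketch passes every candidate node through unchanged.
		Nodes:       args.Nodes,
		FailedNodes: extenderv1.FailedNodesMap{},
		// Hypothetical: a node whose failure preemption cannot resolve.
		FailedAndUnresolvableNodes: extenderv1.FailedNodesMap{
			"node-with-hardware-fault": "unresolvable; preempting pods will not help",
		},
	}
	if err := json.NewEncoder(w).Encode(&result); err != nil {
		log.Printf("encoding filter result: %v", err)
	}
}

func main() {
	http.HandleFunc("/filter", filter)
	log.Fatal(http.ListenAndServe(":8888", nil)) // placeholder port
}
```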
### Feature -- Add a metric for time taken to perform recursive permission change ([#95866](https://github.com/kubernetes/kubernetes/pull/95866), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage] -- Allow cross compilation of kubernetes on different platforms. ([#94403](https://github.com/kubernetes/kubernetes/pull/94403), [@bnrjee](https://github.com/bnrjee)) [SIG Release] -- Command to start network proxy changes from 'KUBE_ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE ./cluster/kube-up.sh' to 'KUBE_ENABLE_KONNECTIVITY_SERVICE=true ./hack/kube-up.sh' ([#92669](https://github.com/kubernetes/kubernetes/pull/92669), [@Jefftree](https://github.com/Jefftree)) [SIG Cloud Provider] -- DefaultPodTopologySpread graduated to Beta. The feature gate is enabled by default. ([#95631](https://github.com/kubernetes/kubernetes/pull/95631), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling and Testing] -- Kubernetes E2E test image manifest lists now contain Windows images. ([#77398](https://github.com/kubernetes/kubernetes/pull/77398), [@claudiubelu](https://github.com/claudiubelu)) [SIG Testing and Windows] -- Support for Windows container images (OS Versions: 1809, 1903, 1909, 2004) was added the pause:3.4 image. ([#91452](https://github.com/kubernetes/kubernetes/pull/91452), [@claudiubelu](https://github.com/claudiubelu)) [SIG Node, Release and Windows] +- A lease can only attach up to 10k objects. ([#98257](https://github.com/kubernetes/kubernetes/pull/98257), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery] +- Add an ignore-errors flag for drain, so that a drain across a group of nodes can continue past errors ([#98203](https://github.com/kubernetes/kubernetes/pull/98203), [@yuzhiquan](https://github.com/yuzhiquan)) [SIG CLI] +- Base-images: Update to debian-iptables:buster-v1.4.0 + - Uses iptables 1.8.5 + - base-images: Update to debian-base:buster-v1.3.0 + - cluster/images/etcd: Build etcd:3.4.13-2 image + - Uses debian-base:buster-v1.3.0 ([#98401](https://github.com/kubernetes/kubernetes/pull/98401), [@pacoxu](https://github.com/pacoxu)) [SIG Testing] +- Export the NewDebuggingRoundTripper function and DebugLevel options in the k8s.io/client-go/transport package. ([#98324](https://github.com/kubernetes/kubernetes/pull/98324), [@atosatto](https://github.com/atosatto)) [SIG API Machinery] +- Kubectl wait ensures that observedGeneration >= generation if applicable ([#97408](https://github.com/kubernetes/kubernetes/pull/97408), [@KnicKnic](https://github.com/KnicKnic)) [SIG CLI] +- Kubernetes is now built using go1.15.8 ([#98834](https://github.com/kubernetes/kubernetes/pull/98834), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Release and Testing] +- A new admission controller "denyserviceexternalips" is available. Clusters which do not *need* the Service "externalIPs" feature should enable this controller and be more secure. ([#97395](https://github.com/kubernetes/kubernetes/pull/97395), [@thockin](https://github.com/thockin)) [SIG API Machinery] +- Overall, enabling the `PreferNominatedNode` feature improves the performance of scheduling where preemption might frequently happen; in theory, though, the pod might not be scheduled to the best candidate node in the cluster. ([#93179](https://github.com/kubernetes/kubernetes/pull/93179), [@chendave](https://github.com/chendave)) [SIG Scheduling and Testing] +- Pause image upgraded to 3.4.1 in kubelet and kubeadm for both Linux and Windows. ([#98205](https://github.com/kubernetes/kubernetes/pull/98205), [@pacoxu](https://github.com/pacoxu)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Node, Testing and Windows] +- The `ServiceAccountIssuerDiscovery` feature has graduated to GA, and is unconditionally enabled. The `ServiceAccountIssuerDiscovery` feature-gate will be removed in 1.22. ([#98553](https://github.com/kubernetes/kubernetes/pull/98553), [@mtaufen](https://github.com/mtaufen)) [SIG API Machinery, Auth and Testing] ### Documentation -- Fake dynamic client: document that List does not preserve TypeMeta in UnstructuredList ([#95117](https://github.com/kubernetes/kubernetes/pull/95117), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] +- Azure file migration goes beta in 1.21. The CSIMigration feature gate goes to Beta (on by default) and CSIMigrationAzureFile goes to Beta (off by default since it requires installation of the AzureFile CSI Driver) + The in-tree AzureFile plugin "kubernetes.io/azure-file" is now deprecated and will be removed in 1.23. Users should enable CSIMigration + CSIMigrationAzureFile features and install the AzureFile CSI Driver (https://github.com/kubernetes-sigs/azurefile-csi-driver) to avoid disruption to existing Pod and PVC objects at that time.
+ Users should start using the AzureFile CSI Driver directly for any new volumes. ([#96293](https://github.com/kubernetes/kubernetes/pull/96293), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -### Bug or Regression - -- Exposes and sets a default timeout for the SubjectAccessReview client for DelegatingAuthorizationOptions. ([#95725](https://github.com/kubernetes/kubernetes/pull/95725), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider] -- Alter wording to describe pods using a pvc ([#95635](https://github.com/kubernetes/kubernetes/pull/95635), [@RaunakShah](https://github.com/RaunakShah)) [SIG CLI] -- If we set SelectPolicy MinPolicySelect on scaleUp behavior or scaleDown behavior,Horizontal Pod Autoscaler doesn`t automatically scale the number of pods correctly ([#95647](https://github.com/kubernetes/kubernetes/pull/95647), [@JoshuaAndrew](https://github.com/JoshuaAndrew)) [SIG Apps and Autoscaling] -- Ignore apparmor for non-linux operating systems ([#93220](https://github.com/kubernetes/kubernetes/pull/93220), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- Ipvs: ensure selected scheduler kernel modules are loaded ([#93040](https://github.com/kubernetes/kubernetes/pull/93040), [@cmluciano](https://github.com/cmluciano)) [SIG Network] -- Kubeadm: add missing "--experimental-patches" flag to "kubeadm init phase control-plane" ([#95786](https://github.com/kubernetes/kubernetes/pull/95786), [@Sh4d1](https://github.com/Sh4d1)) [SIG Cluster Lifecycle] -- Reorganized iptables rules to fix a performance issue ([#95252](https://github.com/kubernetes/kubernetes/pull/95252), [@tssurya](https://github.com/tssurya)) [SIG Network] -- Unhealthy pods covered by PDBs can be successfully evicted if enough healthy pods are available. ([#94381](https://github.com/kubernetes/kubernetes/pull/94381), [@michaelgugino](https://github.com/michaelgugino)) [SIG Apps] -- Update the PIP when it is not in the Succeeded provisioning state during the LB update. 
([#95748](https://github.com/kubernetes/kubernetes/pull/95748), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Update the frontend IP config when the service's `pipName` annotation is changed ([#95813](https://github.com/kubernetes/kubernetes/pull/95813), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] - -### Other (Cleanup or Flake) - -- NO ([#95690](https://github.com/kubernetes/kubernetes/pull/95690), [@nikhita](https://github.com/nikhita)) [SIG Release] - -## Dependencies - -### Added -- github.com/form3tech-oss/jwt-go: [v3.2.2+incompatible](https://github.com/form3tech-oss/jwt-go/tree/v3.2.2) - -### Changed -- github.com/Azure/go-autorest/autorest/adal: [v0.9.0 → v0.9.5](https://github.com/Azure/go-autorest/autorest/adal/compare/v0.9.0...v0.9.5) -- github.com/Azure/go-autorest/autorest/mocks: [v0.4.0 → v0.4.1](https://github.com/Azure/go-autorest/autorest/mocks/compare/v0.4.0...v0.4.1) -- golang.org/x/crypto: 75b2880 → 7f63de1 - -### Removed -_Nothing has changed._ - - - -# v1.20.0-alpha.3 - - -## Downloads for v1.20.0-alpha.3 - -### Source Code - -filename | sha512 hash --------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes.tar.gz) | 542cc9e0cd97732020491456402b6e2b4f54f2714007ee1374a7d363663a1b41e82b50886176a5313aaccfbfd4df2bc611d6b32d19961cdc98b5821b75d6b17c -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-src.tar.gz) | 5e5d725294e552fd1d14fd6716d013222827ac2d4e2d11a7a1fdefb77b3459bbeb69931f38e1597de205dd32a1c9763ab524c2af1551faef4f502ef0890f7fbf - -### Client binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-darwin-amd64.tar.gz) | 60004939727c75d0f06adc4449e16b43303941937c0e9ea9aca7d947e93a5aed5d11e53d1fc94caeb988be66d39acab118d406dc2d6cead61181e1ced6d2be1a -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-386.tar.gz) | 7edba9c4f1bf38fdf1fa5bff2856c05c0e127333ce19b17edf3119dc9b80462c027404a1f58a5eabf1de73a8f2f20aced043dda1fafd893619db1a188cda550c -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-amd64.tar.gz) | db1818aa82d072cb3e32a2a988e66d76ecf7cebc6b8a29845fa2d6ec27f14a36e4b9839b1b7ed8c43d2da9cde00215eb672a7e8ee235d2e3107bc93c22e58d38 -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-arm.tar.gz) | d2922e70d22364b1f5a1e94a0c115f849fe2575b231b1ba268f73a9d86fc0a9fbb78dc713446839a2593acf1341cb5a115992f350870f13c1a472bb107b75af7 -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-arm64.tar.gz) | 2e3ae20e554c7d4fc3a8afdfcafe6bbc81d4c5e9aea036357baac7a3fdc2e8098aa8a8c3dded3951667d57f667ce3fbf37ec5ae5ceb2009a569dc9002d3a92f9 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-ppc64le.tar.gz) | b54a34e572e6a86221577de376e6f7f9fcd82327f7fe94f2fc8d21f35d302db8a0f3d51e60dc89693999f5df37c96d0c3649a29f07f095efcdd59923ae285c95 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-s390x.tar.gz) | 5be1b70dc437d3ba88cb0b89cd1bc555f79896c3f5b5f4fa0fb046a0d09d758b994d622ebe5cef8e65bba938c5ae945b81dc297f9dfa0d98f82ea75f344a3a0d -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-windows-386.tar.gz) | 
88cf3f66168ef3bf9a5d3d2275b7f33799406e8205f2c202997ebec23d449aa4bb48b010356ab1cf52ff7b527b8df7c8b9947a43a82ebe060df83c3d21b7223a -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-windows-amd64.tar.gz) | 87d2d4ea1829da8cfa1a705a03ea26c759a03bd1c4d8b96f2c93264c4d172bb63a91d9ddda65cdc5478b627c30ae8993db5baf8be262c157d83bffcebe85474e - -### Server binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-amd64.tar.gz) | 7af691fc0b13a937797912374e3b3eeb88d5262e4eb7d4ebe92a3b64b3c226cb049aedfd7e39f639f6990444f7bcf2fe58699cf0c29039daebe100d7eebf60de -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-arm.tar.gz) | 557c47870ecf5c2090b2694c8f0c8e3b4ca23df5455a37945bd037bc6fb5b8f417bf737bb66e6336b285112cb52de0345240fdb2f3ce1c4fb335ca7ef1197f99 -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-arm64.tar.gz) | 981de6cf7679d743cdeef1e894314357b68090133814801870504ef30564e32b5675e270db20961e9a731e35241ad9b037bdaf749da87b6c4ce8889eeb1c5855 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-ppc64le.tar.gz) | 506578a21601ccff609ae757a55e68634c15cbfecbf13de972c96b32a155ded29bd71aee069c77f5f721416672c7a7ac0b8274de22bfd28e1ecae306313d96c5 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-s390x.tar.gz) | af0cdcd4a77a7cc8060a076641615730a802f1f02dab084e41926023489efec6102d37681c70ab0dbe7440cd3e72ea0443719a365467985360152b9aae657375 - -### Node binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-amd64.tar.gz) | 2d92c61596296279de1efae23b2b707415565d9d50cd61a7231b8d10325732b059bcb90f3afb36bef2575d203938c265572721e38df408e8792d3949523bd5d9 -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-arm.tar.gz) | c298de9b5ac1b8778729a2d8e2793ff86743033254fbc27014333880b03c519de81691caf03aa418c729297ee8942ce9ec89d11b0e34a80576b9936015dc1519 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-arm64.tar.gz) | daa3c65afda6d7aff206c1494390bbcc205c2c6f8db04c10ca967a690578a01c49d49c6902b85e7158f79fd4d2a87c5d397d56524a75991c9d7db85ac53059a7 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-ppc64le.tar.gz) | 05661908bb73bfcaf9c2eae96e9a6a793db5a7a100bce6df9e057985dd53a7a5248d72e81b6d13496bd38b9326c17cdb2edaf0e982b6437507245fb846e1efc6 -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-s390x.tar.gz) | 845e518e2c4ef0cef2c3b58f0b9ea5b5fe9b8a249717f789607752484c424c26ae854b263b7c0a004a8426feb9aa3683c177a9ed2567e6c3521f4835ea08c24a -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-windows-amd64.tar.gz) | 530e536574ed2c3e5973d3c0f0fdd2b4d48ef681a7a7c02db13e605001669eeb4f4b8a856fc08fc21436658c27b377f5d04dbcb3aae438098abc953b6eaf5712 - -## Changelog since v1.20.0-alpha.2 - -## Changes by Kind - -### API Change - -- New parameter `defaultingType` for `PodTopologySpread` plugin allows to use k8s defined or user provided default constraints ([#95048](https://github.com/kubernetes/kubernetes/pull/95048), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] - -### Feature +### 
Failing Test -- Added new k8s.io/component-helpers repository providing shared helper code for (core) components. ([#92507](https://github.com/kubernetes/kubernetes/pull/92507), [@ingvagabund](https://github.com/ingvagabund)) [SIG Apps, Node, Release and Scheduling] -- Adds `create ingress` command to `kubectl` ([#78153](https://github.com/kubernetes/kubernetes/pull/78153), [@amimof](https://github.com/amimof)) [SIG CLI and Network] -- Kubectl create now supports creating ingress objects. ([#94327](https://github.com/kubernetes/kubernetes/pull/94327), [@rikatz](https://github.com/rikatz)) [SIG CLI and Network] -- New default scheduling plugins order reduces scheduling and preemption latency when taints and node affinity are used ([#95539](https://github.com/kubernetes/kubernetes/pull/95539), [@soulxu](https://github.com/soulxu)) [SIG Scheduling] -- SCTP support in API objects (Pod, Service, NetworkPolicy) is now GA. - Note that this has no effect on whether SCTP is enabled on nodes at the kernel level, - and note that some cloud platforms and network plugins do not support SCTP traffic. ([#95566](https://github.com/kubernetes/kubernetes/pull/95566), [@danwinship](https://github.com/danwinship)) [SIG Apps and Network] -- Scheduling Framework: expose Run[Pre]ScorePlugins functions to PreemptionHandle which can be used in PostFilter extention point. ([#93534](https://github.com/kubernetes/kubernetes/pull/93534), [@everpeace](https://github.com/everpeace)) [SIG Scheduling and Testing] -- SelectorSpreadPriority maps to PodTopologySpread plugin when DefaultPodTopologySpread feature is enabled ([#95448](https://github.com/kubernetes/kubernetes/pull/95448), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- SetHostnameAsFQDN has been graduated to Beta and therefore it is enabled by default. ([#95267](https://github.com/kubernetes/kubernetes/pull/95267), [@javidiaz](https://github.com/javidiaz)) [SIG Node] +- Kubelet: the HostPort implementation in dockershim did not take the HostIP field into consideration, so the same HostPort could not be used with different IP addresses. + This bug caused the conformance test "HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol" to fail. ([#98755](https://github.com/kubernetes/kubernetes/pull/98755), [@aojea](https://github.com/aojea)) [SIG Cloud Provider, Network and Node] ### Bug or Regression -- An issues preventing volume expand controller to annotate the PVC with `volume.kubernetes.io/storage-resizer` when the PVC StorageClass is already updated to the out-of-tree provisioner is now fixed. ([#94489](https://github.com/kubernetes/kubernetes/pull/94489), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG API Machinery, Apps and Storage] -- Change the mount way from systemd to normal mount except ceph and glusterfs intree-volume.
([#94916](https://github.com/kubernetes/kubernetes/pull/94916), [@smileusd](https://github.com/smileusd)) [SIG Apps, Cloud Provider, Network, Node, Storage and Testing] -- Fix azure disk attach failure for disk size bigger than 4TB ([#95463](https://github.com/kubernetes/kubernetes/pull/95463), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix azure disk data loss issue on Windows when unmount disk ([#95456](https://github.com/kubernetes/kubernetes/pull/95456), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix verb & scope reporting for kube-apiserver metrics (LIST reported instead of GET) ([#95562](https://github.com/kubernetes/kubernetes/pull/95562), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Fix vsphere detach failure for static PVs ([#95447](https://github.com/kubernetes/kubernetes/pull/95447), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fix: smb valid path error ([#95583](https://github.com/kubernetes/kubernetes/pull/95583), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] -- Fixed a bug causing incorrect formatting of `kubectl describe ingress`. ([#94985](https://github.com/kubernetes/kubernetes/pull/94985), [@howardjohn](https://github.com/howardjohn)) [SIG CLI and Network] -- Fixed a bug in client-go where new clients with customized `Dial`, `Proxy`, `GetCert` config may get stale HTTP transports. ([#95427](https://github.com/kubernetes/kubernetes/pull/95427), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] -- Fixes high CPU usage in kubectl drain ([#95260](https://github.com/kubernetes/kubernetes/pull/95260), [@amandahla](https://github.com/amandahla)) [SIG CLI] -- Support the node label `node.kubernetes.io/exclude-from-external-load-balancers` ([#95542](https://github.com/kubernetes/kubernetes/pull/95542), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Fix NPE in ephemeral storage eviction ([#98261](https://github.com/kubernetes/kubernetes/pull/98261), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Fixed a bug on k8s nodes where, when the policy of the INPUT chain in the filter table is not ACCEPT, the healthcheck nodeport would not work. + Added iptables rules to allow healthcheck nodeport traffic. ([#97824](https://github.com/kubernetes/kubernetes/pull/97824), [@hanlins](https://github.com/hanlins)) [SIG Network] +- Fixed kube-proxy container image architecture for non-amd64 images. ([#98526](https://github.com/kubernetes/kubernetes/pull/98526), [@saschagrunert](https://github.com/saschagrunert)) [SIG API Machinery, Release and Testing] +- Fixed provisioning of Cinder volumes migrated to CSI when StorageClass with AllowedTopologies was used.
([#98311](https://github.com/kubernetes/kubernetes/pull/98311), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- Fixes a panic in the disruption budget controller for PDB objects with invalid selectors ([#98750](https://github.com/kubernetes/kubernetes/pull/98750), [@mortent](https://github.com/mortent)) [SIG Apps] +- Fixes connection errors when using `--volume-host-cidr-denylist` or `--volume-host-allow-local-loopback` ([#98436](https://github.com/kubernetes/kubernetes/pull/98436), [@liggitt](https://github.com/liggitt)) [SIG Network and Storage] +- If the user specifies an invalid timeout in the request URL, the request will be aborted with an HTTP 400 (see the example below). + - In cases where the client specifies a timeout in the request URL, the overall request deadline is now shortened, since the deadline is set up as soon as the request is received by the apiserver. ([#96901](https://github.com/kubernetes/kubernetes/pull/96901), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Testing] +- Kubeadm: Some text in the `kubeadm upgrade plan` output has changed. If you have scripts or other automation that parses this output, please review these changes and update your scripts to account for the new output. ([#98728](https://github.com/kubernetes/kubernetes/pull/98728), [@stmcginnis](https://github.com/stmcginnis)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug where external credentials in an existing admin.conf prevented the CA certificate from being written to the cluster-info ConfigMap. ([#98882](https://github.com/kubernetes/kubernetes/pull/98882), [@kvaps](https://github.com/kvaps)) [SIG Cluster Lifecycle] +- Kubeadm: fix bad token placeholder text in "config print *-defaults --help" ([#98839](https://github.com/kubernetes/kubernetes/pull/98839), [@Mattias-](https://github.com/Mattias-)) [SIG Cluster Lifecycle] +- Kubeadm: get k8s CI version markers from k8s infra bucket ([#98836](https://github.com/kubernetes/kubernetes/pull/98836), [@hasheddan](https://github.com/hasheddan)) [SIG Cluster Lifecycle and Release] +- Mitigate CVE-2020-8555 for kube-up using GCE by preventing local loopback volume hosts. ([#97934](https://github.com/kubernetes/kubernetes/pull/97934), [@mattcary](https://github.com/mattcary)) [SIG Cloud Provider and Storage] +- Remove CSI topology from migrated in-tree gcepd volume. ([#97823](https://github.com/kubernetes/kubernetes/pull/97823), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider and Storage] +- Sync node status during kubelet node shutdown. + Adds a pod admission handler that rejects new pods when the node is in the process of shutting down. ([#98005](https://github.com/kubernetes/kubernetes/pull/98005), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Truncates a message if it hits the NoteLengthLimit when the scheduler records an event for the pod that indicates the pod has failed to schedule. ([#98715](https://github.com/kubernetes/kubernetes/pull/98715), [@carlory](https://github.com/carlory)) [SIG Scheduling] +- We will no longer automatically delete all data when a failure is detected during creation of the volume data file on a CSI volume. Now we will only remove the data file and volume path. ([#96021](https://github.com/kubernetes/kubernetes/pull/96021), [@huffmanca](https://github.com/huffmanca)) [SIG Storage]
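To make the new timeout validation above concrete, a quick hedged check; the namespace and values are hypothetical:

```bash
# An unparseable ?timeout= value is now rejected up front with HTTP 400.
kubectl get --raw "/api/v1/namespaces/default/pods?timeout=invalid"
# A valid value still works, and the deadline now starts as soon as the
# apiserver receives the request.
kubectl get --raw "/api/v1/namespaces/default/pods?timeout=10s"
```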
### Other (Cleanup or Flake) -- Fix func name NewCreateCreateDeploymentOptions ([#91931](https://github.com/kubernetes/kubernetes/pull/91931), [@lixiaobing1](https://github.com/lixiaobing1)) [SIG CLI] -- Kubeadm: update the default pause image version to 1.4.0 on Windows. With this update the image supports Windows versions 1809 (2019LTS), 1903, 1909, 2004 ([#95419](https://github.com/kubernetes/kubernetes/pull/95419), [@jsturtevant](https://github.com/jsturtevant)) [SIG Cluster Lifecycle and Windows] -- Upgrade snapshot controller to 3.0.0 ([#95412](https://github.com/kubernetes/kubernetes/pull/95412), [@saikat-royc](https://github.com/saikat-royc)) [SIG Cloud Provider] -- Remove the dependency of csi-translation-lib module on apiserver/cloud-provider/controller-manager ([#95543](https://github.com/kubernetes/kubernetes/pull/95543), [@wawa0210](https://github.com/wawa0210)) [SIG Release] -- Scheduler framework interface moved from pkg/scheduler/framework/v1alpha to pkg/scheduler/framework ([#95069](https://github.com/kubernetes/kubernetes/pull/95069), [@farah](https://github.com/farah)) [SIG Scheduling, Storage and Testing] -- UDP and SCTP protocols can left stale connections that need to be cleared to avoid services disruption, but they can cause problems that are hard to debug. - Kubernetes components using a loglevel greater or equal than 4 will log the conntrack operations and its output, to show the entries that were deleted. ([#95694](https://github.com/kubernetes/kubernetes/pull/95694), [@aojea](https://github.com/aojea)) [SIG Network] +- Fix the description of command line flags that can override --config ([#98254](https://github.com/kubernetes/kubernetes/pull/98254), [@changshuchao](https://github.com/changshuchao)) [SIG Scheduling] +- Migrate scheduler/taint_manager.go to structured logging ([#98259](https://github.com/kubernetes/kubernetes/pull/98259), [@tanjing2020](https://github.com/tanjing2020)) [SIG Apps] +- Migrate staging/src/k8s.io/apiserver/pkg/admission logs to structured logging ([#98138](https://github.com/kubernetes/kubernetes/pull/98138), [@lala123912](https://github.com/lala123912)) [SIG API Machinery] +- Resolves flakes in the Ingress conformance tests due to conflicts with controllers updating the Ingress object ([#98430](https://github.com/kubernetes/kubernetes/pull/98430), [@liggitt](https://github.com/liggitt)) [SIG Network and Testing] +- The default delegating authorization options now allow unauthenticated access to healthz, readyz, and livez. A system:masters user connecting to an authz delegator will not perform an authz check. ([#98325](https://github.com/kubernetes/kubernetes/pull/98325), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth, Cloud Provider and Scheduling] +- The e2e suite can be instructed not to wait for pods in kube-system to be ready or for all nodes to be ready by passing `--allowed-not-ready-nodes=-1` when invoking the e2e.test program. This allows callers to run subsets of the e2e suite in scenarios other than perfectly healthy clusters.
([#98781](https://github.com/kubernetes/kubernetes/pull/98781), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing] +- The feature gates `WindowsGMSA` and `WindowsRunAsUserName` that are GA since v1.18 are now removed. ([#96531](https://github.com/kubernetes/kubernetes/pull/96531), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Node and Windows] +- The new `-gce-zones` flag on the `e2e.test` binary instructs tests that check for information about how the cluster interacts with the cloud to limit their queries to the provided zone list. If not specified, the current behavior of asking the cloud provider for all available zones in multi zone clusters is preserved. ([#98787](https://github.com/kubernetes/kubernetes/pull/98787), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Cluster Lifecycle and Testing] ## Dependencies ### Added -_Nothing has changed._ +- github.com/moby/spdystream: [v0.2.0](https://github.com/moby/spdystream/tree/v0.2.0) ### Changed -_Nothing has changed._ +- github.com/NYTimes/gziphandler: [56545f4 → v1.1.1](https://github.com/NYTimes/gziphandler/compare/56545f4...v1.1.1) +- github.com/container-storage-interface/spec: [v1.2.0 → v1.3.0](https://github.com/container-storage-interface/spec/compare/v1.2.0...v1.3.0) +- github.com/go-logr/logr: [v0.2.0 → v0.4.0](https://github.com/go-logr/logr/compare/v0.2.0...v0.4.0) +- github.com/gogo/protobuf: [v1.3.1 → v1.3.2](https://github.com/gogo/protobuf/compare/v1.3.1...v1.3.2) +- github.com/kisielk/errcheck: [v1.2.0 → v1.5.0](https://github.com/kisielk/errcheck/compare/v1.2.0...v1.5.0) +- github.com/yuin/goldmark: [v1.1.27 → v1.2.1](https://github.com/yuin/goldmark/compare/v1.1.27...v1.2.1) +- golang.org/x/sync: cd5d95a → 67f06af +- golang.org/x/tools: c1934b7 → 113979e +- k8s.io/klog/v2: v2.4.0 → v2.5.0 +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.14 → v0.0.15 ### Removed -_Nothing has changed._ +- github.com/docker/spdystream: [449fdfc](https://github.com/docker/spdystream/tree/449fdfc) -# v1.20.0-alpha.2 +# v1.21.0-alpha.2 -## Downloads for v1.20.0-alpha.2 +## Downloads for v1.21.0-alpha.2 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes.tar.gz) | 45089a4d26d56a5d613ecbea64e356869ac738eca3cc71d16b74ea8ae1b4527bcc32f1dc35ff7aa8927e138083c7936603faf063121d965a2f0f8ba28fa128d8 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-src.tar.gz) | 646edd890d6df5858b90aaf68cc6e1b4589b8db09396ae921b5c400f2188234999e6c9633906692add08c6e8b4b09f12b2099132b0a7533443fb2a01cfc2bf81 +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes.tar.gz) | 6836f6c8514253fe0831fd171fc4ed92eb6d9a773491c8dc82b90d171a1b10076bd6bfaea56ec1e199c5f46c273265bdb9f174f0b2d99c5af1de4c99b862329e +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-src.tar.gz) | d137694804741a05ab09e5f9a418448b66aba0146c028eafce61bcd9d7c276521e345ce9223ffbc703e8172041d58dfc56a3242a4df3686f24905a4541fcd306 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-darwin-amd64.tar.gz) | c136273883e24a2a50b5093b9654f01cdfe57b97461d34885af4a68c2c4d108c07583c02b1cdf7f57f82e91306e542ce8f3bddb12fcce72b744458bc4796f8eb 
-[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-386.tar.gz) | 6ec59f1ed30569fa64ddb2d0de32b1ae04cda4ffe13f339050a7c9d7c63d425ee6f6d963dcf82c17281c4474da3eaf32c08117669052872a8c81bdce2c8a5415 -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-amd64.tar.gz) | 7b40a4c087e2ea7f8d055f297fcd39a3f1cb6c866e7a3981a9408c3c3eb5363c648613491aad11bc7d44d5530b20832f8f96f6ceff43deede911fb74aafad35f -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-arm.tar.gz) | cda9955feebea5acb8f2b5b87895d24894bbbbde47041453b1f926ebdf47a258ce0496aa27d06bcbf365b5615ce68a20d659b64410c54227216726e2ee432fca -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-arm64.tar.gz) | f65bd9241c7eb88a4886a285330f732448570aea4ededaebeabcf70d17ea185f51bf8a7218f146ee09fb1adceca7ee71fb3c3683834f2c415163add820fba96e -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-ppc64le.tar.gz) | 1e377599af100a81d027d9199365fb8208d443a8e0a97affff1a79dc18796e14b78cb53d6e245c1c1e8defd0e050e37bf5f2a23c8a3ff45a6d18d03619709bf5 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-s390x.tar.gz) | 1cdee81478246aa7e7b80ae4efc7f070a5b058083ae278f59fad088b75a8052761b0e15ab261a6e667ddafd6a69fb424fc307072ed47941cad89a85af7aee93d -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-windows-386.tar.gz) | d8774167c87b6844c348aa15e92d5033c528d6ab9e95d08a7cb22da68bafd8e46d442cf57a5f6affad62f674c10ae6947d524b94108b5e450ca78f92656d63c0 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-windows-amd64.tar.gz) | f664b47d8daa6036f8154c1dc1f881bfe683bf57c39d9b491de3848c03d051c50c6644d681baf7f9685eae45f9ce62e4c6dfea2853763cfe8256a61bdd59d894 +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-darwin-amd64.tar.gz) | 9478b047a97717953f365c13a098feb7e3cb30a3df22e1b82aa945f2208dcc5cb90afc441ba059a3ae7aafb4ee000ec3a52dc65a8c043a5ac7255a391c875330 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-386.tar.gz) | 44c8dd4b1ddfc256d35786c8abf45b0eb5f0794f5e310d2efc865748adddc50e8bf38aa71295ae8a82884cb65f2e0b9b0737b000f96fd8f2d5c19971d7c4d8e8 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-amd64.tar.gz) | e1291989892769de6b978c17b8612b94da6f3b735a4d895100af622ca9ebb968c75548afea7ab00445869625dd0da3afec979e333afbb445805f5d31c1c13cc7 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-arm.tar.gz) | 3c4bcb8cbe73822d68a2f62553a364e20bec56b638c71d0f58679b4f4b277d809142346f18506914e694f6122a3e0f767eab20b7b1c4dbb79e4c5089981ae0f1 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-arm64.tar.gz) | 9389974a790268522e187f5ba5237f3ee4684118c7db76bc3d4164de71d8208702747ec333b204c7a78073ab42553cbbce13a1883fab4fec617e093b05fab332 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-ppc64le.tar.gz) | 63399e53a083b5af3816c28ff162c9de6b64c75da4647f0d6bbaf97afdf896823cb1e556f2abac75c6516072293026d3ff9f30676fd75143ac6ca3f4d21f4327 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-s390x.tar.gz) | 
50898f197a9d923971ff9046c9f02779b57f7b3cea7da02f3ea9bab8c08d65a9c4a7531a2470fa14783460f52111a52b96ebf916c0a1d8215b4070e4e861c1b0 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-windows-386.tar.gz) | a7743e839e1aa19f5ee20b6ee5000ac8ef9e624ac5be63bb574fad6992e4b9167193ed07e03c9bc524e88bfeed66c95341a38a03bff1b10bc9910345f33019f0 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-windows-amd64.tar.gz) | 5f1d19c230bd3542866d16051808d184e9dd3e2f8c001ed4cee7b5df91f872380c2bf56a3add8c9413ead9d8c369efce2bcab4412174df9b823d3592677bf74e ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-amd64.tar.gz) | d6fcb4600be0beb9de222a8da64c35fe22798a0da82d41401d34d0f0fc7e2817512169524c281423d8f4a007cd77452d966317d5a1b67d2717a05ff346e8aa7d -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-arm.tar.gz) | 022a76cf10801f8afbabb509572479b68fdb4e683526fa0799cdbd9bab4d3f6ecb76d1d63d0eafee93e3edf6c12892d84b9c771ef2325663b95347728fa3d6c0 -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-arm64.tar.gz) | 0679aadd60bbf6f607e5befad74b5267eb2d4c1b55985cc25a97e0f4c5efb7acbb3ede91bfa6a5a5713dae4d7a302f6faaf678fd6b359284c33d9a6aca2a08bb -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-ppc64le.tar.gz) | 9f2cfeed543b515eafb60d9765a3afff4f3d323c0a5c8a0d75e3de25985b2627817bfcbe59a9a61d969e026e2b861adb974a09eae75b58372ed736ceaaed2a82 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-s390x.tar.gz) | 937258704d7b9dcd91f35f2d34ee9dd38c18d9d4e867408c05281bfbbb919ad012c95880bee84d2674761aa44cc617fb2fae1124cf63b689289286d6eac1c407 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-amd64.tar.gz) | ef2cac10febde231aeb6f131e589450c560eeaab8046b49504127a091cddc17bc518c2ad56894a6a033033ab6fc6e121b1cc23691683bc36f45fe6b1dd8e0510 +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-arm.tar.gz) | d11c9730307f08e80b2b8a7c64c3e9a9e43c622002e377dfe3a386f4541e24adc79a199a6f280f40298bb36793194fd44ed45defe8a3ee54a9cb1386bc26e905 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-arm64.tar.gz) | 28f8c32bf98ee1add7edf5d341c3bac1afc0085f90dcbbfb8b27a92087f13e2b53c327c8935ee29bf1dc3160655b32bbe3e29d5741a8124a3848a777e7d42933 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-ppc64le.tar.gz) | 99ae8d44b0de3518c27fa8bbddd2ecf053dfb789fb9d65f8a4ecf4c8331cf63d2f09a41c2bcd5573247d5f66a1b2e51944379df1715017d920d521b98589508a +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-s390x.tar.gz) | f8c0e954a2dfc6845614488dadeed069cc7f3f08e33c351d7a77c6ef97867af590932e8576d12998a820a0e4d35d2eee797c764e2810f09ab1e90a5acaeaad33 ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-amd64.tar.gz) | 076165d745d47879de68f4404eaf432920884be48277eb409e84bf2c61759633bf3575f46b0995f1fc693023d76c0921ed22a01432e756d7f8d9e246a243b126 -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-arm.tar.gz) | 
1ff2e2e3e43af41118cdfb70c778e15035bbb1aca833ffd2db83c4bcd44f55693e956deb9e65017ebf3c553f2820ad5cd05f5baa33f3d63f3e00ed980ea4dfed -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-arm64.tar.gz) | b232c7359b8c635126899beee76998078eec7a1ef6758d92bcdebe8013b0b1e4d7b33ecbf35e3f82824fe29493400845257e70ed63c1635bfa36c8b3b4969f6f -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-ppc64le.tar.gz) | 51d415a068f554840f4c78d11a4fedebd7cb03c686b0ec864509b24f7a8667ebf54bb0a25debcf2b70f38be1e345e743f520695b11806539a55a3620ce21946f -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-s390x.tar.gz) | b51c082d8af358233a088b632cf2f6c8cfe5421471c27f5dc9ba4839ae6ea75df25d84298f2042770097554c01742bb7686694b331ad9bafc93c86317b867728 -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-windows-amd64.tar.gz) | 91b9d26620a2dde67a0edead0039814efccbdfd54594dda3597aaced6d89140dc92612ed0727bc21d63468efeef77c845e640153b09e39d8b736062e6eee0c76 +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-amd64.tar.gz) | c5456d50bfbe0d75fb150b3662ed7468a0abd3970792c447824f326894382c47bbd3a2cc5a290f691c8c09585ff6fe505ab86b4aff2b7e5ccee11b5e6354ae6c +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-arm.tar.gz) | 335b5cd8672e053302fd94d932fb2fa2e48eeeb1799650b3f93acdfa635e03a8453637569ab710c46885c8317759f4c60aaaf24dca9817d9fa47500fe4a3ca53 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-arm64.tar.gz) | 3ee87dbeed8ace9351ac89bdaf7274dd10b4faec3ceba0825f690ec7a2bb7eb7c634274a1065a0939eec8ff3e43f72385f058f4ec141841550109e775bc5eff9 +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-ppc64le.tar.gz) | 6956f965b8d719b164214ec9195fdb2c776b907fe6d2c524082f00c27872a73475927fd7d2a994045ce78f6ad2aa5aeaf1eb5514df1810d2cfe342fd4e5ce4a1 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-s390x.tar.gz) | 3b643aa905c709c57083c28dd9e8ffd88cb64466cda1499da7fc54176b775003e08b9c7a07b0964064df67c8142f6f1e6c13bfc261bd65fb064049920bfa57d0 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-windows-amd64.tar.gz) | b2e6d6fb0091f2541f9925018c2bdbb0138a95bab06b4c6b38abf4b7144b2575422263b78fb3c6fd09e76d90a25a8d35a6d4720dc169794d42c95aa22ecc6d5f + +## Changelog since v1.21.0-alpha.1 -## Changelog since v1.20.0-alpha.1 +## Urgent Upgrade Notes + +### (No, really, you MUST read this before you upgrade) + - Remove the storage metric `storage_operation_errors_total`, since we already have `storage_operation_status_count`. Additionally, add a new field `status` to `storage_operation_duration_seconds`, so that storage operation latency can be tracked for every status (see the query sketch below). ([#98332](https://github.com/kubernetes/kubernetes/pull/98332), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage] + ## Changes by Kind ### Deprecation -- Action-required: kubeadm: graduate the "kubeadm alpha certs" command to a parent command "kubeadm certs". The command "kubeadm alpha certs" is deprecated and will be removed in a future release. Please migrate. ([#94938](https://github.com/kubernetes/kubernetes/pull/94938), [@yagonobre](https://github.com/yagonobre)) [SIG Cluster Lifecycle]
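A hedged way to observe the relabeled metric from the urgent note above; the node name is hypothetical, and this assumes the metric is exposed by a component performing volume operations, such as the kubelet:

```bash
# Confirm storage_operation_duration_seconds now carries a status label.
kubectl get --raw "/api/v1/nodes/node-1/proxy/metrics" \
  | grep '^storage_operation_duration_seconds'
```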
-- Action-required: kubeadm: remove the deprecated feature --experimental-kustomize from kubeadm commands. The feature was replaced with --experimental-patches in 1.19. To migrate see the --help description for the --experimental-patches flag. ([#94871](https://github.com/kubernetes/kubernetes/pull/94871), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: deprecate self-hosting support. The experimental command "kubeadm alpha self-hosting" is now deprecated and will be removed in a future release. ([#95125](https://github.com/kubernetes/kubernetes/pull/95125), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Removes deprecated scheduler metrics DeprecatedSchedulingDuration, DeprecatedSchedulingAlgorithmPredicateEvaluationSecondsDuration, DeprecatedSchedulingAlgorithmPriorityEvaluationSecondsDuration ([#94884](https://github.com/kubernetes/kubernetes/pull/94884), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling] -- Scheduler alpha metrics binding_duration_seconds and scheduling_algorithm_preemption_evaluation_seconds are deprecated, Both of those metrics are now covered as part of framework_extension_point_duration_seconds, the former as a PostFilter the latter and a Bind plugin. The plan is to remove both in 1.21 ([#95001](https://github.com/kubernetes/kubernetes/pull/95001), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling] +- Remove the TokenRequest and TokenRequestProjection feature gates ([#97148](https://github.com/kubernetes/kubernetes/pull/97148), [@wawa0210](https://github.com/wawa0210)) [SIG Node] +- Remove experimental Windows container Hyper-V support with Docker ([#97141](https://github.com/kubernetes/kubernetes/pull/97141), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] +- The `export` query parameter (inconsistently supported by API resources and deprecated in v1.14) is fully removed. Requests setting this query parameter will now receive a 400 status response (see the example below). ([#98312](https://github.com/kubernetes/kubernetes/pull/98312), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth and Testing] ### API Change -- GPU metrics provided by kubelet are now disabled by default ([#95184](https://github.com/kubernetes/kubernetes/pull/95184), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node] -- New parameter `defaultingType` for `PodTopologySpread` plugin allows to use k8s defined or user provided default constraints ([#95048](https://github.com/kubernetes/kubernetes/pull/95048), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Server Side Apply now treats LabelSelector fields as atomic (meaning the entire selector is managed by a single writer and updated together), since they contain interrelated and inseparable fields that do not merge in intuitive ways. ([#93901](https://github.com/kubernetes/kubernetes/pull/93901), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Storage and Testing]
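A quick illustration of the `export` removal noted above; the resource names are hypothetical:

```bash
# Requests that still set the removed ?export parameter now fail fast with
# an HTTP 400 status response.
kubectl get --raw "/api/v1/namespaces/default/pods/my-pod?export=true"
```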
-- Status of v1beta1 CRDs without "preserveUnknownFields:false" will show violation "spec.preserveUnknownFields: Invalid value: true: must be false" ([#93078](https://github.com/kubernetes/kubernetes/pull/93078), [@vareti](https://github.com/vareti)) [SIG API Machinery] +- Enable SPDY pings to keep connections alive, so that `kubectl exec` and `kubectl port-forward` won't be interrupted. ([#97083](https://github.com/kubernetes/kubernetes/pull/97083), [@knight42](https://github.com/knight42)) [SIG API Machinery and CLI] -### Feature +### Documentation -- Added `get-users` and `delete-user` to the `kubectl config` subcommand ([#89840](https://github.com/kubernetes/kubernetes/pull/89840), [@eddiezane](https://github.com/eddiezane)) [SIG CLI] -- Added counter metric "apiserver_request_self" to count API server self-requests with labels for verb, resource, and subresource. ([#94288](https://github.com/kubernetes/kubernetes/pull/94288), [@LogicalShark](https://github.com/LogicalShark)) [SIG API Machinery, Auth, Instrumentation and Scheduling] -- Added new k8s.io/component-helpers repository providing shared helper code for (core) components. ([#92507](https://github.com/kubernetes/kubernetes/pull/92507), [@ingvagabund](https://github.com/ingvagabund)) [SIG Apps, Node, Release and Scheduling] -- Adds `create ingress` command to `kubectl` ([#78153](https://github.com/kubernetes/kubernetes/pull/78153), [@amimof](https://github.com/amimof)) [SIG CLI and Network] -- Allow configuring AWS LoadBalancer health check protocol via service annotations ([#94546](https://github.com/kubernetes/kubernetes/pull/94546), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] -- Azure: Support multiple services sharing one IP address ([#94991](https://github.com/kubernetes/kubernetes/pull/94991), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Ephemeral containers now apply the same API defaults as initContainers and containers ([#94896](https://github.com/kubernetes/kubernetes/pull/94896), [@wawa0210](https://github.com/wawa0210)) [SIG Apps and CLI] -- In dual-stack bare-metal clusters, you can now pass dual-stack IPs to `kubelet --node-ip`. - eg: `kubelet --node-ip 10.1.0.5,fd01::0005`. This is not yet supported for non-bare-metal - clusters. - - In dual-stack clusters where nodes have dual-stack addresses, hostNetwork pods - will now get dual-stack PodIPs. ([#95239](https://github.com/kubernetes/kubernetes/pull/95239), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] -- Introduces a new GCE specific cluster creation variable KUBE_PROXY_DISABLE. When set to true, this will skip over the creation of kube-proxy (whether the daemonset or static pod). This can be used to control the lifecycle of kube-proxy separately from the lifecycle of the nodes. ([#91977](https://github.com/kubernetes/kubernetes/pull/91977), [@varunmar](https://github.com/varunmar)) [SIG Cloud Provider] -- Kubeadm: do not throw errors if the current system time is outside of the NotBefore and NotAfter bounds of a loaded certificate. Print warnings instead.
([#94504](https://github.com/kubernetes/kubernetes/pull/94504), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: make the command "kubeadm alpha kubeconfig user" accept a "--config" flag and remove the following flags: - - apiserver-advertise-address / apiserver-bind-port: use either localAPIEndpoint from InitConfiguration or controlPlaneEndpoint from ClusterConfiguration. - - cluster-name: use clusterName from ClusterConfiguration - - cert-dir: use certificatesDir from ClusterConfiguration ([#94879](https://github.com/kubernetes/kubernetes/pull/94879), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Kubectl rollout history sts/sts-name --revision=some-revision will start showing the detailed view of the sts on that specified revision ([#86506](https://github.com/kubernetes/kubernetes/pull/86506), [@dineshba](https://github.com/dineshba)) [SIG CLI] -- Scheduling Framework: expose Run[Pre]ScorePlugins functions to PreemptionHandle which can be used in PostFilter extention point. ([#93534](https://github.com/kubernetes/kubernetes/pull/93534), [@everpeace](https://github.com/everpeace)) [SIG Scheduling and Testing] -- Send gce node startup scripts logs to console and journal ([#95311](https://github.com/kubernetes/kubernetes/pull/95311), [@karan](https://github.com/karan)) [SIG Cloud Provider and Node] -- Support kubectl delete orphan/foreground/background options ([#93384](https://github.com/kubernetes/kubernetes/pull/93384), [@zhouya0](https://github.com/zhouya0)) [SIG CLI and Testing] +- Official support to build kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97935](https://github.com/kubernetes/kubernetes/pull/97935), [@adeniyistephen](https://github.com/adeniyistephen)) [SIG Release and Testing] +- Set the kubelet option `--volume-stats-agg-period` to a negative value to disable volume calculations (see the sketch below). ([#96675](https://github.com/kubernetes/kubernetes/pull/96675), [@pacoxu](https://github.com/pacoxu)) [SIG Node] ### Bug or Regression -- Change the mount way from systemd to normal mount except ceph and glusterfs intree-volume. ([#94916](https://github.com/kubernetes/kubernetes/pull/94916), [@smileusd](https://github.com/smileusd)) [SIG Apps, Cloud Provider, Network, Node, Storage and Testing]
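A minimal sketch of the kubelet flag mentioned above; other required kubelet flags are elided, and the exact negative duration chosen is arbitrary:

```bash
# Any negative duration disables the kubelet's volume usage calculation.
kubelet --volume-stats-agg-period=-1m
```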
-- Cloud node controller: handle empty providerID from getProviderID ([#95342](https://github.com/kubernetes/kubernetes/pull/95342), [@nicolehanjing](https://github.com/nicolehanjing)) [SIG Cloud Provider] -- Fix a bug where the endpoint slice controller was not mirroring the parent service labels to its corresponding endpoint slices ([#94443](https://github.com/kubernetes/kubernetes/pull/94443), [@aojea](https://github.com/aojea)) [SIG Apps and Network] -- Fix azure disk attach failure for disk size bigger than 4TB ([#95463](https://github.com/kubernetes/kubernetes/pull/95463), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix azure disk data loss issue on Windows when unmount disk ([#95456](https://github.com/kubernetes/kubernetes/pull/95456), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix detach azure disk issue when vm not exist ([#95177](https://github.com/kubernetes/kubernetes/pull/95177), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix network_programming_latency metric reporting for Endpoints/EndpointSlice deletions, where we don't have correct timestamp ([#95363](https://github.com/kubernetes/kubernetes/pull/95363), [@wojtek-t](https://github.com/wojtek-t)) [SIG Network and Scalability] -- Fix scheduler cache snapshot when a Node is deleted before its Pods ([#95130](https://github.com/kubernetes/kubernetes/pull/95130), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Fix vsphere detach failure for static PVs ([#95447](https://github.com/kubernetes/kubernetes/pull/95447), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fixed a bug that prevents the use of ephemeral containers in the presence of a validating admission webhook. ([#94685](https://github.com/kubernetes/kubernetes/pull/94685), [@verb](https://github.com/verb)) [SIG Node and Testing] -- Gracefully delete nodes when their parent scale set went missing ([#95289](https://github.com/kubernetes/kubernetes/pull/95289), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- In dual-stack clusters, kubelet will now set up both IPv4 and IPv6 iptables rules, which may - fix some problems, eg with HostPorts. ([#94474](https://github.com/kubernetes/kubernetes/pull/94474), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] -- Kubeadm: for Docker as the container runtime, make the "kubeadm reset" command stop containers before removing them ([#94586](https://github.com/kubernetes/kubernetes/pull/94586), [@BedivereZero](https://github.com/BedivereZero)) [SIG Cluster Lifecycle] -- Kubeadm: warn but do not error out on missing "ca.key" files for root CA, front-proxy CA and etcd CA, during "kubeadm join --control-plane" if the user has provided all certificates, keys and kubeconfig files which require signing with the given CA keys. ([#94988](https://github.com/kubernetes/kubernetes/pull/94988), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Port mapping allows to map the same `containerPort` to multiple `hostPort` without naming the mapping explicitly.
([#94494](https://github.com/kubernetes/kubernetes/pull/94494), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Network and Node] -- Warn instead of fail when creating Roles and ClusterRoles with custom verbs via kubectl ([#92492](https://github.com/kubernetes/kubernetes/pull/92492), [@eddiezane](https://github.com/eddiezane)) [SIG CLI] +- Clean ReplicaSet by revision instead of creation timestamp in deployment controller ([#97407](https://github.com/kubernetes/kubernetes/pull/97407), [@waynepeking348](https://github.com/waynepeking348)) [SIG Apps] +- Ensure that client-go's EventBroadcaster is safe (non-racy) during shutdown. ([#95664](https://github.com/kubernetes/kubernetes/pull/95664), [@DirectXMan12](https://github.com/DirectXMan12)) [SIG API Machinery] +- Fix azure file migration issue ([#97877](https://github.com/kubernetes/kubernetes/pull/97877), [@andyzhangx](https://github.com/andyzhangx)) [SIG Auth, Cloud Provider and Storage] +- Fix kubelet panic after getting the wrong signal ([#98200](https://github.com/kubernetes/kubernetes/pull/98200), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Fix repeatedly acquiring the inhibit lock ([#98088](https://github.com/kubernetes/kubernetes/pull/98088), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Fixed a bug where the kubelet could not start on Btrfs. ([#98042](https://github.com/kubernetes/kubernetes/pull/98042), [@gjkim42](https://github.com/gjkim42)) [SIG Node] +- Fixed an issue with garbage collection failing to clean up namespaced children of an object also referenced incorrectly by cluster-scoped children ([#98068](https://github.com/kubernetes/kubernetes/pull/98068), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Apps] +- Fixed the namespace not taking effect when exposing a deployment with --dry-run=client. ([#97492](https://github.com/kubernetes/kubernetes/pull/97492), [@masap](https://github.com/masap)) [SIG CLI] +- Fixed a bug where a failed node may not have the NoExecute taint set correctly ([#96876](https://github.com/kubernetes/kubernetes/pull/96876), [@howieyuen](https://github.com/howieyuen)) [SIG Apps and Node] +- Indentation of the `Resource Quota` block in `kubectl describe namespaces` output is now correct. ([#97946](https://github.com/kubernetes/kubernetes/pull/97946), [@dty1er](https://github.com/dty1er)) [SIG CLI] +- KUBECTL_EXTERNAL_DIFF now accepts an equals sign for additional parameters (see the example below). ([#98158](https://github.com/kubernetes/kubernetes/pull/98158), [@dougsland](https://github.com/dougsland)) [SIG CLI] +- Kubeadm: fix a bug where "kubeadm join" would not properly handle missing names for existing etcd members. ([#97372](https://github.com/kubernetes/kubernetes/pull/97372), [@ihgann](https://github.com/ihgann)) [SIG Cluster Lifecycle] +- Kubelet should ignore the cgroup driver check on Windows nodes. ([#97764](https://github.com/kubernetes/kubernetes/pull/97764), [@pacoxu](https://github.com/pacoxu)) [SIG Node and Windows]
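A hedged example of the KUBECTL_EXTERNAL_DIFF fix above; the manifest path is hypothetical:

```bash
# Flag arguments containing '=' can now ride along with the external
# diff program.
KUBECTL_EXTERNAL_DIFF="diff --unified=5" kubectl diff -f deployment.yaml
```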
+- Make podTopologyHints protected by lock ([#95111](https://github.com/kubernetes/kubernetes/pull/95111), [@choury](https://github.com/choury)) [SIG Node] +- Readjust kubelet_containers_per_pod_count bucket ([#98169](https://github.com/kubernetes/kubernetes/pull/98169), [@wawa0210](https://github.com/wawa0210)) [SIG Instrumentation and Node] +- Scores from InterPodAffinity have stronger differentiation. ([#98096](https://github.com/kubernetes/kubernetes/pull/98096), [@leileiwan](https://github.com/leileiwan)) [SIG Scheduling] +- Specifying the KUBE_TEST_REPO environment variable when e2e tests are executed will instruct the test infrastructure to load that image from a location within the specified repo, using a predefined pattern. ([#93510](https://github.com/kubernetes/kubernetes/pull/93510), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing] +- Static pods will be deleted gracefully. ([#98103](https://github.com/kubernetes/kubernetes/pull/98103), [@gjkim42](https://github.com/gjkim42)) [SIG Node] +- Use network.Interface.VirtualMachine.ID to get the bound VM + Skip standalone VM when reconciling LoadBalancer ([#97635](https://github.com/kubernetes/kubernetes/pull/97635), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] ### Other (Cleanup or Flake) -- Added fine grained debugging to the intra-pod conformance test for helping easily resolve networking issues for nodes that might be unhealthy when running conformance or sonobuoy tests. ([#93837](https://github.com/kubernetes/kubernetes/pull/93837), [@jayunit100](https://github.com/jayunit100)) [SIG Network and Testing] -- AdmissionReview objects sent for the creation of Namespace API objects now populate the `namespace` attribute consistently (previously the `namespace` attribute was empty for Namespace creation via POST requests, and populated for Namespace creation via server-side-apply PATCH requests) ([#95012](https://github.com/kubernetes/kubernetes/pull/95012), [@nodo](https://github.com/nodo)) [SIG API Machinery and Testing] -- Client-go header logging (at verbosity levels >= 9) now masks `Authorization` header contents ([#95316](https://github.com/kubernetes/kubernetes/pull/95316), [@sfowl](https://github.com/sfowl)) [SIG API Machinery] -- Enhance log information of verifyRunAsNonRoot, add pod, container information ([#94911](https://github.com/kubernetes/kubernetes/pull/94911), [@wawa0210](https://github.com/wawa0210)) [SIG Node] -- Errors from staticcheck: - vendor/k8s.io/client-go/discovery/cached/memory/memcache_test.go:94:2: this value of g is never used (SA4006) ([#95098](https://github.com/kubernetes/kubernetes/pull/95098), [@phunziker](https://github.com/phunziker)) [SIG API Machinery] -- Kubeadm: update the default pause image version to 1.4.0 on Windows.
With this update the image supports Windows versions 1809 (2019LTS), 1903, 1909, 2004 ([#95419](https://github.com/kubernetes/kubernetes/pull/95419), [@jsturtevant](https://github.com/jsturtevant)) [SIG Cluster Lifecycle and Windows] -- Masks ceph RBD adminSecrets in logs when logLevel >= 4 ([#95245](https://github.com/kubernetes/kubernetes/pull/95245), [@sfowl](https://github.com/sfowl)) [SIG Storage] -- Upgrade snapshot controller to 3.0.0 ([#95412](https://github.com/kubernetes/kubernetes/pull/95412), [@saikat-royc](https://github.com/saikat-royc)) [SIG Cloud Provider] -- Remove offensive words from kubectl cluster-info command ([#95202](https://github.com/kubernetes/kubernetes/pull/95202), [@rikatz](https://github.com/rikatz)) [SIG Architecture, CLI and Testing] -- The following new metrics are available. - - network_plugin_operations_total - - network_plugin_operations_errors_total ([#93066](https://github.com/kubernetes/kubernetes/pull/93066), [@AnishShah](https://github.com/AnishShah)) [SIG Instrumentation, Network and Node] -- Vsphere: improve logging message on node cache refresh event ([#95236](https://github.com/kubernetes/kubernetes/pull/95236), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider] -- `kubectl api-resources` now prints the API version (as 'API group/version', same as output of `kubectl api-versions`). The column APIGROUP is now APIVERSION ([#95253](https://github.com/kubernetes/kubernetes/pull/95253), [@sallyom](https://github.com/sallyom)) [SIG CLI] +- Kubeadm: change the default image repository for CI images from 'gcr.io/kubernetes-ci-images' to 'gcr.io/k8s-staging-ci-images' ([#97087](https://github.com/kubernetes/kubernetes/pull/97087), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Migrate generic_scheduler.go and types.go to structured logging. ([#98134](https://github.com/kubernetes/kubernetes/pull/98134), [@tanjing2020](https://github.com/tanjing2020)) [SIG Scheduling] +- Migrate proxy/winuserspace/proxier.go logs to structured logging ([#97941](https://github.com/kubernetes/kubernetes/pull/97941), [@JornShen](https://github.com/JornShen)) [SIG Network] +- Migrate staging/src/k8s.io/apiserver/pkg/audit/policy/reader.go logs to structured logging. ([#98252](https://github.com/kubernetes/kubernetes/pull/98252), [@lala123912](https://github.com/lala123912)) [SIG API Machinery and Auth] +- Migrate staging/src/k8s.io/apiserver/pkg/endpoints logs to structured logging ([#98093](https://github.com/kubernetes/kubernetes/pull/98093), [@lala123912](https://github.com/lala123912)) [SIG API Machinery] +- Node ([#96552](https://github.com/kubernetes/kubernetes/pull/96552), [@pandaamanda](https://github.com/pandaamanda)) [SIG Apps, Cloud Provider, Node and Scheduling] +- The kubectl alpha debug command was scheduled to be removed in v1.21. ([#98111](https://github.com/kubernetes/kubernetes/pull/98111), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI] +- Update cri-tools to [v1.20.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.20.0) ([#97967](https://github.com/kubernetes/kubernetes/pull/97967), [@rajibmitra](https://github.com/rajibmitra)) [SIG Cloud Provider] +- Windows nodes on GCE will take longer to start due to dependencies installed at node creation time.
([#98284](https://github.com/kubernetes/kubernetes/pull/98284), [@pjh](https://github.com/pjh)) [SIG Cloud Provider] ## Dependencies ### Added -- github.com/jmespath/go-jmespath/internal/testify: [v1.5.1](https://github.com/jmespath/go-jmespath/internal/testify/tree/v1.5.1) +_Nothing has changed._ ### Changed -- github.com/aws/aws-sdk-go: [v1.28.2 → v1.35.5](https://github.com/aws/aws-sdk-go/compare/v1.28.2...v1.35.5) -- github.com/jmespath/go-jmespath: [c2b33e8 → v0.4.0](https://github.com/jmespath/go-jmespath/compare/c2b33e8...v0.4.0) -- k8s.io/kube-openapi: 6aeccd4 → 8b50664 -- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.9 → v0.0.12 -- sigs.k8s.io/structured-merge-diff/v4: v4.0.1 → b3cf1e8 +- github.com/google/cadvisor: [v0.38.6 → v0.38.7](https://github.com/google/cadvisor/compare/v0.38.6...v0.38.7) +- k8s.io/gengo: 83324d8 → b6c5ce2 ### Removed _Nothing has changed._ -# v1.20.0-alpha.1 +# v1.21.0-alpha.1 -## Downloads for v1.20.0-alpha.1 +## Downloads for v1.21.0-alpha.1 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes.tar.gz) | e7daed6502ea07816274f2371f96fe1a446d0d7917df4454b722d9eb3b5ff6163bfbbd5b92dfe7a0c1d07328b8c09c4ae966e482310d6b36de8813aaf87380b5 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-src.tar.gz) | e91213a0919647a1215d4691a63b12d89a3e74055463a8ebd71dc1a4cabf4006b3660881067af0189960c8dab74f4a7faf86f594df69021901213ee5b56550ea +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes.tar.gz) | b2bacd5c3fc9f829e6269b7d2006b0c6e464ff848bb0a2a8f2fe52ad2d7c4438f099bd8be847d8d49ac6e4087f4d74d5c3a967acd798e0b0cb4d7a2bdb122997 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-src.tar.gz) | 518ac5acbcf23902fb1b902b69dbf3e86deca5d8a9b5f57488a15f185176d5a109558f3e4df062366af874eca1bcd61751ee8098b0beb9bcdc025d9a1c9be693 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-darwin-amd64.tar.gz) | 1f3add5f826fa989820d715ca38e8864b66f30b59c1abeacbb4bfb96b4e9c694eac6b3f4c1c81e0ee3451082d44828cb7515315d91ad68116959a5efbdaef1e1 -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-386.tar.gz) | c62acdc8993b0a950d4b0ce0b45473bf96373d501ce61c88adf4007afb15c1d53da8d53b778a7eccac6c1624f7fdda322be9f3a8bc2d80aaad7b4237c39f5eaf -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-amd64.tar.gz) | 1203ababfe00f9bc5be5c059324c17160a96530c1379a152db33564bbe644ccdb94b30eea15a0655bd652efb17895a46c31bbba19d4f5f473c2a0ff62f6e551f -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-arm.tar.gz) | 31860088596e12d739c7aed94556c2d1e217971699b950c8417a3cea1bed4e78c9ff1717b9f3943354b75b4641d4b906cd910890dbf4278287c0d224837d9a7d -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-arm64.tar.gz) | 8d469f37fe20d6e15b5debc13cce4c22e8b7a4f6a4ac787006b96507a85ce761f63b28140d692c54b5f7deb08697f8d5ddb9bbfa8f5ac0d9241fc7de3a3fe3cd -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-ppc64le.tar.gz) | 0d62ee1729cd5884946b6c73701ad3a570fa4d642190ca0fe5c1db0fb0cba9da3ac86a948788d915b9432d28ab8cc499e28aadc64530b7d549ee752a6ed93ec1 
-[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-s390x.tar.gz) | 0fc0420e134ec0b8e0ab2654e1e102cebec47b48179703f1e1b79d51ee0d6da55a4e7304d8773d3cf830341ac2fe3cede1e6b0460fd88f7595534e0730422d5a -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-windows-386.tar.gz) | 3fb53b5260f4888c77c0e4ff602bbcf6bf38c364d2769850afe2b8d8e8b95f7024807c15e2b0d5603e787c46af8ac53492be9e88c530f578b8a389e3bd50c099 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-windows-amd64.tar.gz) | 2f44c93463d6b5244ce0c82f147e7f32ec2233d0e29c64c3c5759e23533aebd12671bf63e986c0861e9736f9b5259bb8d138574a7c8c8efc822e35cd637416c0 +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-darwin-amd64.tar.gz) | eaa7aea84a5ed954df5ec710cbeb6ec88b46465f43cb3d09aabe2f714b84a050a50bf5736089f09dbf1090f2e19b44823d656c917e3c8c877630756c3026f2b6 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-386.tar.gz) | 47f74b8d46ad1779c5b0b5f15aa15d5513a504eeb6f53db4201fbe9ff8956cb986b7c1b0e9d50a99f78e9e2a7f304f3fc1cc2fa239296d9a0dd408eb6069e975 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-amd64.tar.gz) | 1a148e282628b008c8abd03dd12ec177ced17584b5115d92cd33dd251e607097d42e9da8c7089bd947134b900f85eb75a4740b6a5dd580c105455b843559df39 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-arm.tar.gz) | d13d2feb73bd032dc01f7e2955b98d8215a39fe1107d037a73fa1f7d06c3b93ebaa53ed4952d845c64454ef3cca533edb97132d234d50b6fb3bcbd8a8ad990eb +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-arm64.tar.gz) | 8252105a17b09a78e9ad2c024e4e401a69764ac869708a071aaa06f81714c17b9e7c5b2eb8efde33f24d0b59f75c5da607d5e1e72bdf12adfbb8c829205cd1c1 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-ppc64le.tar.gz) | 297a9082df4988389dc4be30eb636dff49f36f5d87047bab44745884e610f46a17ae3a08401e2cab155b7c439f38057bfd8288418215f7dd3bf6a49dbe61ea0e +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-s390x.tar.gz) | 04c06490dd17cd5dccfd92bafa14acf64280ceaea370d9635f23aeb6984d1beae6d0d1d1506edc6f30f927deeb149b989d3e482b47fbe74008b371f629656e79 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-windows-386.tar.gz) | ec6e9e87a7d685f8751d7e58f24f417753cff5554a7229218cb3a08195d461b2e12409344950228e9fbbc92a8a06d35dd86242da6ff1e6652ec1fae0365a88c1 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-windows-amd64.tar.gz) | 51039e6221d3126b5d15e797002ae01d4f0b10789c5d2056532f27ef13f35c5a2e51be27764fda68e8303219963126559023aed9421313bec275c0827fbcaf8a ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-amd64.tar.gz) | ae82d14b1214e4100f0cc2c988308b3e1edd040a65267d0eddb9082409f79644e55387889e3c0904a12c710f91206e9383edf510990bee8c9ea2e297b6472551 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-arm.tar.gz) | 9a2a5828b7d1ddb16cc19d573e99a4af642f84129408e6203eeeb0558e7b8db77f3269593b5770b6a976fe9df4a64240ed27ad05a4bd43719e55fce1db0abf58 
-[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-arm64.tar.gz) | ed700dd226c999354ce05b73927388d36d08474c15333ae689427de15de27c84feb6b23c463afd9dd81993315f31eb8265938cfc7ecf6f750247aa42b9b33fa9 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-ppc64le.tar.gz) | abb7a9d726538be3ccf5057a0c63ff9732b616e213c6ebb81363f0c49f1e168ce8068b870061ad7cba7ba1d49252f94cf00a5f68cec0f38dc8fce4e24edc5ca6 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-s390x.tar.gz) | 3a51888af1bfdd2d5b0101d173ee589c1f39240e4428165f5f85c610344db219625faa42f00a49a83ce943fb079be873b1a114a62003fae2f328f9bf9d1227a4 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-amd64.tar.gz) | 4edf820930c88716263560275e3bd7fadb8dc3700b9f8e1d266562e356e0abeb1a913f536377dab91218e3940b447d6bf1da343b85da25c2256dc4dcde5798dd +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-arm.tar.gz) | b15213e53a8ab4ba512ce6ef9ad42dd197d419c61615cd23de344227fd846c90448d8f3d98e555b63ba5b565afa627cca6b7e3990ebbbba359c96f2391302df1 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-arm64.tar.gz) | 5be29cca9a9358fc68351ee63e99d57dc2ffce6e42fc3345753dbbf7542ff2d770c4852424158540435fa6e097ce3afa9b13affc40c8b3b69fe8406798f8068f +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-ppc64le.tar.gz) | 89fd99ab9ce85db0b94b86709932105efc883cc93959cf7ea9a39e79a4acea23064d7010eeb577450cccabe521c04b7ba47bbec212ed37edeed7cb04bad34518 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-s390x.tar.gz) | 2fbc30862c77d247aa8d96ab9d1a144599505287b0033a3a2d0988958e7bb2f2e8b67f52c1fec74b4ec47d74ba22cd0f6cb5c4228acbaa72b1678d5fece0254d ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-amd64.tar.gz) | d0f28e3c38ca59a7ff1bfecb48a1ce97116520355d9286afdca1200d346c10018f5bbdf890f130a388654635a2e83e908b263ed45f8a88defca52a7c1d0a7984 -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-arm.tar.gz) | ed9d3f13028beb3be39bce980c966f82c4b39dc73beaae38cc075fea5be30b0309e555cb2af8196014f2cc9f0df823354213c314b4d6545ff6e30dd2d00ec90e -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-arm64.tar.gz) | ad5b3268db365dcdded9a9a4bffc90c7df0f844000349accdf2b8fb5f1081e553de9b9e9fb25d5e8a4ef7252d51fa94ef94d36d2ab31d157854e164136f662c2 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-ppc64le.tar.gz) | c4de2524e513996def5eeba7b83f7b406f17eaf89d4d557833a93bd035348c81fa9375dcd5c27cfcc55d73995449fc8ee504be1b3bd7b9f108b0b2f153cb05ae -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-s390x.tar.gz) | 9157b44e3e7bd5478af9f72014e54d1afa5cd19b984b4cd8b348b312c385016bb77f29db47f44aea08b58abf47d8a396b92a2d0e03f2fe8acdd30f4f9466cbdb -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-windows-amd64.tar.gz) | 8b40a43c5e6447379ad2ee8aac06e8028555e1b370a995f6001018a62411abe5fbbca6060b3d1682c5cadc07a27d49edd3204e797af46368800d55f4ca8aa1de 
+[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-amd64.tar.gz) | 95658d321a0a371c0900b401d1469d96915310afbc4e4b9b11f031438bb188513b57d5a60b5316c3b0c18f541cda6f0ac42f59a76495f8abc743a067115da23a +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-arm.tar.gz) | f375acfb42aad6c65b833c270e7e3acfe9cd1d6b2441c33874e77faae263957f7acfe86f1b71f14298118595e4cc6952c7dea0c832f7f2e72428336f13034362 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-arm64.tar.gz) | 43b4baccd58d74e7f48d096ab92f2bbbcdf47e30e7a3d2b56c6cc9f90002cfd4fefaac894f69bd5f9f4dbdb09a4749a77eb76b1b97d91746bd96fe94457879ab +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-ppc64le.tar.gz) | e7962b522c6c7c14b9ee4c1d254d8bdd9846b2b33b0443fc9c4a41be6c40e5e6981798b720f0148f36263d5cc45d5a2bb1dd2f9ab2838e3d002e45b9bddeb7bf +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-s390x.tar.gz) | 49ebc97f01829e65f7de15be00b882513c44782eaadd1b1825a227e3bd3c73cc6aca8345af05b303d8c43aa2cb944a069755b2709effb8cc22eae621d25d4ba5 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-windows-amd64.tar.gz) | 6e0fd7724b09e6befbcb53b33574e97f2db089f2eee4bbf391abb7f043103a5e6e32e3014c0531b88f9a3ca88887bbc68625752c44326f98dd53adb3a6d1bed8 -## Changelog since v1.20.0-alpha.0 +## Changelog since v1.20.0 ## Urgent Upgrade Notes ### (No, really, you MUST read this before you upgrade) - - Azure blob disk feature(`kind`: `Shared`, `Dedicated`) has been deprecated, you should use `kind`: `Managed` in `kubernetes.io/azure-disk` storage class. ([#92905](https://github.com/kubernetes/kubernetes/pull/92905), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] - - CVE-2020-8559 (Medium): Privilege escalation from compromised node to cluster. See https://github.com/kubernetes/kubernetes/issues/92914 for more details. - The API Server will no longer proxy non-101 responses for upgrade requests. This could break proxied backends (such as an extension API server) that respond to upgrade requests with a non-101 response code. ([#92941](https://github.com/kubernetes/kubernetes/pull/92941), [@tallclair](https://github.com/tallclair)) [SIG API Machinery] - + - Kube-proxy's IPVS proxy mode no longer sets the net.ipv4.conf.all.route_localnet sysctl parameter. Nodes upgrading will have net.ipv4.conf.all.route_localnet set to 1 but new nodes will inherit the system default (usually 0). If you relied on any behavior requiring net.ipv4.conf.all.route_localnet, you must ensure it is enabled as kube-proxy will no longer set it automatically. This change helps to further mitigate CVE-2020-8558. ([#92938](https://github.com/kubernetes/kubernetes/pull/92938), [@lbernail](https://github.com/lbernail)) [SIG Network and Release] + ## Changes by Kind ### Deprecation -- Kube-apiserver: the componentstatus API is deprecated. This API provided status of etcd, kube-scheduler, and kube-controller-manager components, but only worked when those components were local to the API server, and when kube-scheduler and kube-controller-manager exposed unsecured health endpoints. Instead of this API, etcd health is included in the kube-apiserver health check and kube-scheduler/kube-controller-manager health checks can be made directly against those components' health endpoints.
## Changes by Kind ### Deprecation -- Kube-apiserver: the componentstatus API is deprecated. This API provided status of etcd, kube-scheduler, and kube-controller-manager components, but only worked when those components were local to the API server, and when kube-scheduler and kube-controller-manager exposed unsecured health endpoints. Instead of this API, etcd health is included in the kube-apiserver health check and kube-scheduler/kube-controller-manager health checks can be made directly against those components' health endpoints. ([#93570](https://github.com/kubernetes/kubernetes/pull/93570), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Cluster Lifecycle] -- Kubeadm: deprecate the "kubeadm alpha kubelet config enable-dynamic" command. To continue using the feature please refer to the guide for "Dynamic Kubelet Configuration" at k8s.io. ([#92881](https://github.com/kubernetes/kubernetes/pull/92881), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: remove the deprecated "kubeadm alpha kubelet config enable-dynamic" command. To continue using the feature please refer to the guide for "Dynamic Kubelet Configuration" at k8s.io. This change also removes the parent command "kubeadm alpha kubelet" as there are no more sub-commands under it for the time being. ([#94668](https://github.com/kubernetes/kubernetes/pull/94668), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: remove the deprecated --kubelet-config flag for the command "kubeadm upgrade node" ([#94869](https://github.com/kubernetes/kubernetes/pull/94869), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubelet's deprecated endpoint `metrics/resource/v1alpha1` has been removed; please adopt `metrics/resource`. ([#94272](https://github.com/kubernetes/kubernetes/pull/94272), [@RainbowMango](https://github.com/RainbowMango)) [SIG Instrumentation and Node] -- The v1alpha1 PodPreset API and admission plugin have been removed with no built-in replacement. Admission webhooks can be used to modify pods on creation. ([#94090](https://github.com/kubernetes/kubernetes/pull/94090), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Apps, CLI, Cloud Provider, Scalability and Testing] +- Deprecate the `topologyKeys` field in Service. This capability will be replaced with upcoming work around Topology Aware Subsetting and Service Internal Traffic Policy. ([#96736](https://github.com/kubernetes/kubernetes/pull/96736), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] +- Kubeadm: the deprecated command "alpha selfhosting pivot" has been removed. ([#97627](https://github.com/kubernetes/kubernetes/pull/97627), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] +- Kubeadm: graduate the command `kubeadm alpha kubeconfig user` to `kubeadm kubeconfig user`. The `kubeadm alpha kubeconfig user` command is now deprecated. ([#97583](https://github.com/kubernetes/kubernetes/pull/97583), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] +- Kubeadm: the "kubeadm alpha certs" command has been removed; please use "kubeadm certs" instead. ([#97706](https://github.com/kubernetes/kubernetes/pull/97706), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] +- Remove the deprecated metrics "scheduling_algorithm_preemption_evaluation_seconds" and "binding_duration_seconds"; use "scheduler_framework_extension_point_duration_seconds" instead. ([#96447](https://github.com/kubernetes/kubernetes/pull/96447), [@chendave](https://github.com/chendave)) [SIG Cluster Lifecycle, Instrumentation, Scheduling and Testing] +- The PodSecurityPolicy API is deprecated in 1.21, and will no longer be served starting in 1.25. ([#97171](https://github.com/kubernetes/kubernetes/pull/97171), [@deads2k](https://github.com/deads2k)) [SIG Auth and CLI]
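As a hedged companion to the PodSecurityPolicy deprecation entry above, one way to inventory PSP objects that would need a migration plan before the API stops being served (assuming kubectl access to the cluster; the grep is a rough filter, not an exhaustive audit):

```shell
# List any PodSecurityPolicy objects still defined in the cluster.
kubectl get podsecuritypolicies.policy

# Roughly spot ClusterRoles that reference PodSecurityPolicies.
kubectl get clusterroles -o json | grep -i podsecuritypolicies
```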
### API Change -- A new `nofuzz` go build tag now disables gofuzz support. Release binaries enable this. ([#92491](https://github.com/kubernetes/kubernetes/pull/92491), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery] -- A new alpha-level field, `SupportsFsGroup`, has been introduced for CSIDrivers to allow them to specify whether they support volume ownership and permission modifications. The `CSIVolumeSupportFSGroup` feature gate must be enabled to allow this field to be used. ([#92001](https://github.com/kubernetes/kubernetes/pull/92001), [@huffmanca](https://github.com/huffmanca)) [SIG API Machinery, CLI and Storage] -- Added pod version skew strategy for seccomp profile to synchronize the deprecated annotations with the new API Server fields. Please see the corresponding section [in the KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190717-seccomp-ga.md#version-skew-strategy) for more detailed explanations. ([#91408](https://github.com/kubernetes/kubernetes/pull/91408), [@saschagrunert](https://github.com/saschagrunert)) [SIG Apps, Auth, CLI and Node] -- Adds the ability to disable Accelerator/GPU metrics collected by Kubelet ([#91930](https://github.com/kubernetes/kubernetes/pull/91930), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node] -- Custom Endpoints are now mirrored to EndpointSlices by a new EndpointSliceMirroring controller. ([#91637](https://github.com/kubernetes/kubernetes/pull/91637), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, Auth, Cloud Provider, Instrumentation, Network and Testing] -- The external-facing podresources API is now available under k8s.io/kubelet/pkg/apis/ ([#92632](https://github.com/kubernetes/kubernetes/pull/92632), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node and Testing] -- Fix conversions for custom metrics. ([#94481](https://github.com/kubernetes/kubernetes/pull/94481), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation] -- Generic ephemeral volumes, a new alpha feature under the `GenericEphemeralVolume` feature gate, provide a more flexible alternative to `EmptyDir` volumes: as with `EmptyDir`, volumes are created and deleted for each pod automatically by Kubernetes. But because the normal provisioning process is used (`PersistentVolumeClaim`), storage can be provided by third-party storage vendors and all of the usual volume features work. Volumes don't need to be empty; for example, restoring from snapshot is supported.
([#92784](https://github.com/kubernetes/kubernetes/pull/92784), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, CLI, Instrumentation, Node, Scheduling, Storage and Testing] -- Kube-controller-manager: volume plugins can be restricted from contacting local and loopback addresses by setting `--volume-host-allow-local-loopback=false`, or from contacting specific CIDR ranges by setting `--volume-host-cidr-denylist` (for example, `--volume-host-cidr-denylist=127.0.0.1/28,feed::/16`) ([#91785](https://github.com/kubernetes/kubernetes/pull/91785), [@mattcary](https://github.com/mattcary)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing] -- Kubernetes is now built with golang 1.15.0-rc.1. - - The deprecated, legacy behavior of treating the CommonName field on X.509 serving certificates as a host name when no Subject Alternative Names are present is now disabled by default. It can be temporarily re-enabled by adding the value x509ignoreCN=0 to the GODEBUG environment variable. ([#93264](https://github.com/kubernetes/kubernetes/pull/93264), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Release, Scalability, Storage and Testing] -- Migrate scheduler, controller-manager and cloud-controller-manager to use LeaseLock ([#94603](https://github.com/kubernetes/kubernetes/pull/94603), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery, Apps, Cloud Provider and Scheduling] -- Modify DNS-1123 error messages to indicate that RFC 1123 is not followed exactly ([#94182](https://github.com/kubernetes/kubernetes/pull/94182), [@mattfenwick](https://github.com/mattfenwick)) [SIG API Machinery, Apps, Auth, Network and Node] -- The ServiceAccountIssuerDiscovery feature gate is now Beta and enabled by default. ([#91921](https://github.com/kubernetes/kubernetes/pull/91921), [@mtaufen](https://github.com/mtaufen)) [SIG Auth] -- The kube-controller-manager managed signers can now have distinct signing certificates and keys. See the help about `--cluster-signing-[signer-name]-{cert,key}-file`. `--cluster-signing-{cert,key}-file` is still the default. ([#90822](https://github.com/kubernetes/kubernetes/pull/90822), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Apps and Auth] -- When creating a networking.k8s.io/v1 Ingress API object, `spec.tls[*].secretName` values are required to pass validation rules for Secret API object names. ([#93929](https://github.com/kubernetes/kubernetes/pull/93929), [@liggitt](https://github.com/liggitt)) [SIG Network] -- WinOverlay feature graduated to beta ([#94807](https://github.com/kubernetes/kubernetes/pull/94807), [@ksubrmnn](https://github.com/ksubrmnn)) [SIG Windows] +- Change the APIVersion proto name of BoundObjectRef from aPIVersion to apiVersion. ([#97379](https://github.com/kubernetes/kubernetes/pull/97379), [@kebe7jun](https://github.com/kebe7jun)) [SIG Auth] +- Promote Immutable Secrets/ConfigMaps feature to Stable. + This allows setting the `Immutable` field in Secret or ConfigMap objects to mark their contents as immutable. ([#97615](https://github.com/kubernetes/kubernetes/pull/97615), [@wojtek-t](https://github.com/wojtek-t)) [SIG Apps, Architecture, Node and Testing]
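A minimal sketch of the now-stable immutable field from the entry above; the ConfigMap name and data are placeholders, and the same field applies to Secrets. Marking an object immutable also lets the kubelet stop watching it for changes.

```shell
# Create an immutable ConfigMap; edits to its data after creation
# are rejected by the API server (delete and recreate to change it).
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: demo-config
immutable: true
data:
  app.mode: "production"
EOF
```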
### Feature -- ACTION REQUIRED: In CoreDNS v1.7.0, [metrics names have been changed](https://github.com/coredns/coredns/blob/master/notes/coredns-1.7.0.md#metric-changes) which will be backward incompatible with existing reporting formulas that use the old metrics' names. Adjust your formulas to the new names before upgrading. - - Kubeadm now includes CoreDNS version v1.7.0. Some of the major changes include: - - Fixed a bug that could cause CoreDNS to stop updating service records. - - Fixed a bug in the forward plugin where only the first upstream server is always selected no matter which policy is set. - - Remove already deprecated options `resyncperiod` and `upstream` in the Kubernetes plugin. - - Includes Prometheus metrics name changes (to bring them in line with standard Prometheus metrics naming convention). They will be backward incompatible with existing reporting formulas that use the old metrics' names. - - The federation plugin (allows for v1 Kubernetes federation) has been removed. - More details are available in https://coredns.io/2020/06/15/coredns-1.7.0-release/ ([#92651](https://github.com/kubernetes/kubernetes/pull/92651), [@rajansandeep](https://github.com/rajansandeep)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation] -- Add metrics for azure service operations (route and loadbalancer). ([#94124](https://github.com/kubernetes/kubernetes/pull/94124), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider and Instrumentation] -- Add network rule support in Azure account creation ([#94239](https://github.com/kubernetes/kubernetes/pull/94239), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Add tags support for Azure File Driver ([#92825](https://github.com/kubernetes/kubernetes/pull/92825), [@ZeroMagic](https://github.com/ZeroMagic)) [SIG Cloud Provider and Storage] -- Added kube-apiserver metrics: apiserver_current_inflight_request_measures and, when API Priority and Fairness is enabled, windowed_request_stats. ([#91177](https://github.com/kubernetes/kubernetes/pull/91177), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery, Instrumentation and Testing] -- Audit events for API requests to deprecated API versions now include a `"k8s.io/deprecated": "true"` audit annotation. If a target removal release is identified, the audit event includes a `"k8s.io/removal-release": "."` audit annotation as well.
([#92842](https://github.com/kubernetes/kubernetes/pull/92842), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Instrumentation] -- Cloud node-controller use InstancesV2 ([#91319](https://github.com/kubernetes/kubernetes/pull/91319), [@gongguan](https://github.com/gongguan)) [SIG Apps, Cloud Provider, Scalability and Storage] -- Kubeadm: Add a preflight check that the control-plane node has at least 1700MB of RAM ([#93275](https://github.com/kubernetes/kubernetes/pull/93275), [@xlgao-zju](https://github.com/xlgao-zju)) [SIG Cluster Lifecycle] -- Kubeadm: add the "--cluster-name" flag to the "kubeadm alpha kubeconfig user" to allow configuring the cluster name in the generated kubeconfig file ([#93992](https://github.com/kubernetes/kubernetes/pull/93992), [@prabhu43](https://github.com/prabhu43)) [SIG Cluster Lifecycle] -- Kubeadm: add the "--kubeconfig" flag to the "kubeadm init phase upload-certs" command to allow users to pass a custom location for a kubeconfig file. ([#94765](https://github.com/kubernetes/kubernetes/pull/94765), [@zhanw15](https://github.com/zhanw15)) [SIG Cluster Lifecycle] -- Kubeadm: deprecate the "--csr-only" and "--csr-dir" flags of the "kubeadm init phase certs" subcommands. Please use "kubeadm alpha certs generate-csr" instead. This new command allows you to generate new private keys and certificate signing requests for all the control-plane components, so that the certificates can be signed by an external CA. ([#92183](https://github.com/kubernetes/kubernetes/pull/92183), [@wallrj](https://github.com/wallrj)) [SIG Cluster Lifecycle] -- Kubeadm: make etcd pod request 100m CPU, 100Mi memory and 100Mi ephemeral_storage by default ([#94479](https://github.com/kubernetes/kubernetes/pull/94479), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Kubemark now supports both real and hollow nodes in a single cluster. ([#93201](https://github.com/kubernetes/kubernetes/pull/93201), [@ellistarn](https://github.com/ellistarn)) [SIG Scalability] -- Kubernetes is now built using go1.15.2 - - build: Update to k/repo-infra@v0.1.1 (supports go1.15.2) - - build: Use go-runner:buster-v2.0.1 (built using go1.15.1) - - bazel: Replace --features with Starlark build settings flag - - hack/lib/util.sh: some bash cleanups - - - switched one spot to use kube::logging - - make kube::util::find-binary return an error when it doesn't find - anything so that hack scripts fail fast instead of with '' binary not - found errors. - - this required deleting some genfeddoc stuff. the binary no longer - exists in k/k repo since we removed federation/, and I don't see it - in https://github.com/kubernetes-sigs/kubefed/ either. I'm assuming - that it's gone for good now. - - - bazel: output go_binary rule directly from go_binary_conditional_pure - - From: @mikedanese: - Instead of aliasing. Aliases are annoying in a number of ways. This is - specifically bugging me now because they make the action graph harder to - analyze programmatically. By using aliases here, we would need to handle - potentially aliased go_binary targets and dereference to the effective - target. - - The comment references an issue with `pure = select(...)` which appears - to be resolved considering this now builds. 
- - - make kube::util::find-binary not dependent on bazel-out/ structure - - Implement an aspect that outputs go_build_mode metadata for go binaries, - and use that during binary selection. ([#94449](https://github.com/kubernetes/kubernetes/pull/94449), [@justaugustus](https://github.com/justaugustus)) [SIG Architecture, CLI, Cluster Lifecycle, Node, Release and Testing] -- Only update Azure data disks when attach/detach ([#94265](https://github.com/kubernetes/kubernetes/pull/94265), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Promote SupportNodePidsLimit to GA to provide node to pod pid isolation - Promote SupportPodPidsLimit to GA to provide ability to limit pids per pod ([#94140](https://github.com/kubernetes/kubernetes/pull/94140), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node and Testing] -- Rename pod_preemption_metrics to preemption_metrics. ([#93256](https://github.com/kubernetes/kubernetes/pull/93256), [@ahg-g](https://github.com/ahg-g)) [SIG Instrumentation and Scheduling] -- Server-side apply behavior has been regularized in the case where a field is removed from the applied configuration. Removed fields which have no other owners are deleted from the live object, or reset to their default value if they have one. Safe ownership transfers, such as the transfer of a `replicas` field from a user to an HPA without resetting to the default value are documented in [Transferring Ownership](https://kubernetes.io/docs/reference/using-api/api-concepts/#transferring-ownership) ([#92661](https://github.com/kubernetes/kubernetes/pull/92661), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Testing] -- Set CSIMigrationvSphere feature gates to beta. - Users should enable CSIMigration + CSIMigrationvSphere features and install the vSphere CSI Driver (https://github.com/kubernetes-sigs/vsphere-csi-driver) to move workload from the in-tree vSphere plugin "kubernetes.io/vsphere-volume" to vSphere CSI Driver. - - Requires: vSphere vCenter/ESXi Version: 7.0u1, HW Version: VM version 15 ([#92816](https://github.com/kubernetes/kubernetes/pull/92816), [@divyenpatel](https://github.com/divyenpatel)) [SIG Cloud Provider and Storage] -- Support [service.beta.kubernetes.io/azure-pip-ip-tags] annotations to allow customers to specify ip-tags to influence public-ip creation in Azure [Tag1=Value1, Tag2=Value2, etc.] ([#94114](https://github.com/kubernetes/kubernetes/pull/94114), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider] -- Support a smooth upgrade from client-side apply to server-side apply without conflicts, as well as support the corresponding downgrade. ([#90187](https://github.com/kubernetes/kubernetes/pull/90187), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG API Machinery and Testing] -- Trace output in apiserver logs is more organized and comprehensive. Traces are nested, and for all non-long running request endpoints, the entire filter chain is instrumented (e.g. authentication check is included). 
([#88936](https://github.com/kubernetes/kubernetes/pull/88936), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Scheduling] -- `kubectl alpha debug` now supports debugging nodes by creating a debugging container running in the node's host namespaces. ([#92310](https://github.com/kubernetes/kubernetes/pull/92310), [@verb](https://github.com/verb)) [SIG CLI] - -### Documentation - -- Kubelet: remove alpha warnings for CNI flags. ([#94508](https://github.com/kubernetes/kubernetes/pull/94508), [@andrewsykim](https://github.com/andrewsykim)) [SIG Network and Node] - -### Failing Test - -- Kube-proxy iptables min-sync-period defaults to 1 sec. Previously, it was 0. ([#92836](https://github.com/kubernetes/kubernetes/pull/92836), [@aojea](https://github.com/aojea)) [SIG Network] +- Add flag --lease-max-object-size and metric etcd_lease_object_counts for kube-apiserver to configure and observe the maximum number of objects attached to a single etcd lease. ([#97480](https://github.com/kubernetes/kubernetes/pull/97480), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery, Instrumentation and Scalability] +- Add flag --lease-reuse-duration-seconds for kube-apiserver to configure the etcd lease reuse duration. ([#97009](https://github.com/kubernetes/kubernetes/pull/97009), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery and Scalability] +- Adds the ability to pass --strict-transport-security-directives to the kube-apiserver to set the HSTS header appropriately. Be sure you understand the consequences to browsers before setting this field. ([#96502](https://github.com/kubernetes/kubernetes/pull/96502), [@249043822](https://github.com/249043822)) [SIG Auth] +- Kubeadm now includes CoreDNS v1.8.0. ([#96429](https://github.com/kubernetes/kubernetes/pull/96429), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Kubeadm: add support for certificate chain validation. When using kubeadm in external CA mode, this allows an intermediate CA to be used to sign the certificates. The intermediate CA certificate must be appended to each signed certificate for this to work correctly. ([#97266](https://github.com/kubernetes/kubernetes/pull/97266), [@robbiemcmichael](https://github.com/robbiemcmichael)) [SIG Cluster Lifecycle] +- Kubeadm: amend the node kernel validation to treat CGROUP_PIDS, FAIR_GROUP_SCHED as required and CFS_BANDWIDTH, CGROUP_HUGETLB as optional ([#96378](https://github.com/kubernetes/kubernetes/pull/96378), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle and Node] +- The Kubernetes pause image manifest list now contains an image for Windows Server 20H2. ([#97322](https://github.com/kubernetes/kubernetes/pull/97322), [@claudiubelu](https://github.com/claudiubelu)) [SIG Windows] +- The apimachinery util/net function used to detect the bind address, `ResolveBindAddress()`, now takes into consideration global IP addresses on loopback interfaces when the host has default routes and there are no global IPs on those interfaces, in order to support more complex network scenarios like BGP Unnumbered (RFC 5549). ([#95790](https://github.com/kubernetes/kubernetes/pull/95790), [@aojea](https://github.com/aojea)) [SIG Network]
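For the `--strict-transport-security-directives` flag noted above, a hedged example invocation; the directive values are illustrative only, not a recommendation, and all other required kube-apiserver flags are elided.

```shell
# Serve an HSTS header on kube-apiserver HTTPS responses
# (all other required kube-apiserver flags elided).
kube-apiserver --strict-transport-security-directives='max-age=31536000,includeSubDomains'
```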
### Bug or Regression -- A panic in the apiserver caused by the `informer-sync` health checker is now fixed. ([#93600](https://github.com/kubernetes/kubernetes/pull/93600), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG API Machinery] -- Add kubectl wait --ignore-not-found flag ([#90969](https://github.com/kubernetes/kubernetes/pull/90969), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- Fix the statefulset controller to wait for PVC deletion before creating pods. ([#93457](https://github.com/kubernetes/kubernetes/pull/93457), [@ymmt2005](https://github.com/ymmt2005)) [SIG Apps] -- Azure ARM client: don't segfault on empty response and http error ([#94078](https://github.com/kubernetes/kubernetes/pull/94078), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- Azure: fix a bug where kube-controller-manager would panic if a wrong Azure VMSS name was configured ([#94306](https://github.com/kubernetes/kubernetes/pull/94306), [@knight42](https://github.com/knight42)) [SIG Cloud Provider] -- Azure: add a per-VMSS VMs cache to prevent throttling on clusters having many attached VMSS ([#93107](https://github.com/kubernetes/kubernetes/pull/93107), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- Both apiserver_request_duration_seconds metrics and RequestReceivedTimestamp field of an audit event take into account the time a request spends in the apiserver request filters. ([#94903](https://github.com/kubernetes/kubernetes/pull/94903), [@tkashem](https://github.com/tkashem)) [SIG API Machinery, Auth and Instrumentation] -- Build/lib/release: Explicitly use '--platform' in building server images - - When we switched to go-runner for building the apiserver, controller-manager, and scheduler server components, we no longer reference the individual architectures in the image names, specifically in the 'FROM' directive of the server image Dockerfiles. - - As a result, server images for non-amd64 architectures copy in the go-runner amd64 binary instead of the go-runner that matches that architecture. - - This commit explicitly sets the '--platform=linux/${arch}' to ensure we're pulling the correct go-runner arch from the manifest list. - - Before: - `FROM ${base_image}` - - After: - `FROM --platform=linux/${arch} ${base_image}` ([#94552](https://github.com/kubernetes/kubernetes/pull/94552), [@justaugustus](https://github.com/justaugustus)) [SIG Release] -- CSIDriver object can be deployed during volume attachment. ([#93710](https://github.com/kubernetes/kubernetes/pull/93710), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Node, Storage and Testing] -- CVE-2020-8557 (Medium): Node-local denial of service via container /etc/hosts file. See https://github.com/kubernetes/kubernetes/issues/93032 for more details. ([#92916](https://github.com/kubernetes/kubernetes/pull/92916), [@joelsmith](https://github.com/joelsmith)) [SIG Node] -- Do not add nodes labeled with kubernetes.azure.com/managed=false to backend pool of load balancer.
([#93034](https://github.com/kubernetes/kubernetes/pull/93034), [@matthias50](https://github.com/matthias50)) [SIG Cloud Provider] -- Do not fail sorting empty elements. ([#94666](https://github.com/kubernetes/kubernetes/pull/94666), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- Do not retry volume expansion if CSI driver returns FailedPrecondition error ([#92986](https://github.com/kubernetes/kubernetes/pull/92986), [@gnufied](https://github.com/gnufied)) [SIG Node and Storage] -- Dockershim security: pod sandbox now always runs with `no-new-privileges` and `runtime/default` seccomp profile - dockershim seccomp: custom profiles can now have smaller seccomp profiles when set at pod level ([#90948](https://github.com/kubernetes/kubernetes/pull/90948), [@pjbgf](https://github.com/pjbgf)) [SIG Node] -- Dual-stack: make nodeipam compatible with existing single-stack clusters when the dual-stack feature gate becomes enabled by default ([#90439](https://github.com/kubernetes/kubernetes/pull/90439), [@SataQiu](https://github.com/SataQiu)) [SIG API Machinery] -- Endpoint controller requeues service after an endpoint deletion event occurs to confirm that deleted endpoints are undesired, mitigating the effects of an out-of-sync endpoint cache. ([#93030](https://github.com/kubernetes/kubernetes/pull/93030), [@swetharepakula](https://github.com/swetharepakula)) [SIG Apps and Network] -- EndpointSlice controllers now return immediately if they encounter an error creating, updating, or deleting resources. ([#93908](https://github.com/kubernetes/kubernetes/pull/93908), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- EndpointSliceMirroring controller now copies labels from Endpoints to EndpointSlices. ([#93442](https://github.com/kubernetes/kubernetes/pull/93442), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- EndpointSliceMirroring controller now mirrors Endpoints that do not have a Service associated with them. ([#94171](https://github.com/kubernetes/kubernetes/pull/94171), [@robscott](https://github.com/robscott)) [SIG Apps, Network and Testing] -- Ensure backoff step is set to 1 for Azure armclient.
([#94180](https://github.com/kubernetes/kubernetes/pull/94180), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Ensure getPrimaryInterfaceID does not panic when network interfaces for Azure VMSS are null ([#94355](https://github.com/kubernetes/kubernetes/pull/94355), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Eviction requests for pods that have a non-zero DeletionTimestamp will always succeed ([#91342](https://github.com/kubernetes/kubernetes/pull/91342), [@michaelgugino](https://github.com/michaelgugino)) [SIG Apps] -- Extended DSR loadbalancer feature in winkernel kube-proxy to HNS versions 9.3-9.max, 10.2+ ([#93080](https://github.com/kubernetes/kubernetes/pull/93080), [@elweb9858](https://github.com/elweb9858)) [SIG Network] -- Fix HandleCrash order ([#93108](https://github.com/kubernetes/kubernetes/pull/93108), [@lixiaobing1](https://github.com/lixiaobing1)) [SIG API Machinery] -- Fix a concurrent map writes error in kubelet ([#93773](https://github.com/kubernetes/kubernetes/pull/93773), [@knight42](https://github.com/knight42)) [SIG Node] -- Fix a regression where kubeadm bails out with a fatal error when an optional version command line argument is supplied to the "kubeadm upgrade plan" command ([#94421](https://github.com/kubernetes/kubernetes/pull/94421), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle] -- Fix azure file migration panic ([#94853](https://github.com/kubernetes/kubernetes/pull/94853), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix bug where loadbalancer deletion gets stuck because of missing resource group #75198 ([#93962](https://github.com/kubernetes/kubernetes/pull/93962), [@phiphi282](https://github.com/phiphi282)) [SIG Cloud Provider] -- Fix calling AttachDisk on a previously attached EBS volume ([#93567](https://github.com/kubernetes/kubernetes/pull/93567), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider, Storage and Testing] -- Fix detection of image filesystem, disk metrics for devicemapper, detection of OOM Kills on 5.0+ linux kernels. ([#92919](https://github.com/kubernetes/kubernetes/pull/92919), [@dashpole](https://github.com/dashpole)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] -- Fix etcd_object_counts metric reported by kube-apiserver ([#94773](https://github.com/kubernetes/kubernetes/pull/94773), [@tkashem](https://github.com/tkashem)) [SIG API Machinery] -- Fix incorrectly reported verbs for kube-apiserver metrics for CRD objects ([#93523](https://github.com/kubernetes/kubernetes/pull/93523), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation] -- Fix instance not found issues when an Azure Node is recreated in a short time ([#93316](https://github.com/kubernetes/kubernetes/pull/93316), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fix kube-apiserver /readyz to contain "informer-sync" check ensuring that internal informers are synced. ([#93670](https://github.com/kubernetes/kubernetes/pull/93670), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Fix kubectl SchemaError on CRDs with schema using x-kubernetes-preserve-unknown-fields on array types.
([#94888](https://github.com/kubernetes/kubernetes/pull/94888), [@sttts](https://github.com/sttts)) [SIG API Machinery] -- Fix memory leak in EndpointSliceTracker for EndpointSliceMirroring controller. ([#93441](https://github.com/kubernetes/kubernetes/pull/93441), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- Fix missing csi annotations on node during parallel csinode update. ([#94389](https://github.com/kubernetes/kubernetes/pull/94389), [@pacoxu](https://github.com/pacoxu)) [SIG Storage] -- Fix the `cloudprovider_azure_api_request_duration_seconds` metric buckets to correctly capture the latency metrics. Previously, the majority of the calls would fall in the "+Inf" bucket. ([#94873](https://github.com/kubernetes/kubernetes/pull/94873), [@marwanad](https://github.com/marwanad)) [SIG Cloud Provider and Instrumentation] -- Fix: azure disk resize error if source does not exist ([#93011](https://github.com/kubernetes/kubernetes/pull/93011), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: detach azure disk broken on Azure Stack ([#94885](https://github.com/kubernetes/kubernetes/pull/94885), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: determine the correct ip config based on ip family ([#93043](https://github.com/kubernetes/kubernetes/pull/93043), [@aramase](https://github.com/aramase)) [SIG Cloud Provider] -- Fix: initial delay in mounting azure disk & file ([#93052](https://github.com/kubernetes/kubernetes/pull/93052), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix: use sensitiveOptions on Windows mount ([#94126](https://github.com/kubernetes/kubernetes/pull/94126), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fixed Ceph RBD volume expansion when no ceph.conf exists ([#92027](https://github.com/kubernetes/kubernetes/pull/92027), [@juliantaylor](https://github.com/juliantaylor)) [SIG Storage] -- Fixed a bug where improper storage and comparison of endpoints led to excessive API traffic from the endpoints controller ([#94112](https://github.com/kubernetes/kubernetes/pull/94112), [@damemi](https://github.com/damemi)) [SIG Apps, Network and Testing] -- Fixed a bug whereby the allocation of reusable CPUs and devices was not being honored when the TopologyManager was enabled ([#93189](https://github.com/kubernetes/kubernetes/pull/93189), [@klueska](https://github.com/klueska)) [SIG Node] -- Fixed a panic in kubectl debug when a pod has multiple init containers or ephemeral containers ([#94580](https://github.com/kubernetes/kubernetes/pull/94580), [@kiyoshim55](https://github.com/kiyoshim55)) [SIG CLI] -- Fixed a regression that sometimes prevented `kubectl port-forward` from working when TCP and UDP services were configured on the same port ([#94728](https://github.com/kubernetes/kubernetes/pull/94728), [@amorenoz](https://github.com/amorenoz)) [SIG CLI] -- Fixed a bug where the reflector couldn't recover from "Too large resource version" errors with API servers 1.17.0-1.18.5 ([#94316](https://github.com/kubernetes/kubernetes/pull/94316), [@janeczku](https://github.com/janeczku)) [SIG API Machinery] -- Fixed a bug where kubectl top pod output was not sorted when --sort-by and --containers flags were used together
([#93692](https://github.com/kubernetes/kubernetes/pull/93692), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] -- Fixed kubelet creating extra sandbox for pods with RestartPolicyOnFailure after all containers succeeded ([#92614](https://github.com/kubernetes/kubernetes/pull/92614), [@tnqn](https://github.com/tnqn)) [SIG Node and Testing] -- Fixed memory leak in endpointSliceTracker ([#92838](https://github.com/kubernetes/kubernetes/pull/92838), [@tnqn](https://github.com/tnqn)) [SIG Apps and Network] -- Fixed node data loss in kube-scheduler for clusters with an imbalance in the number of nodes across zones ([#93355](https://github.com/kubernetes/kubernetes/pull/93355), [@maelk](https://github.com/maelk)) [SIG Scheduling] -- Fixed the EndpointSliceController to correctly create endpoints for IPv6-only pods. - - Fixed the EndpointController to allow IPv6 headless services, if the IPv6DualStack feature gate is enabled, by specifying `ipFamily: IPv6` on the service. (This already worked with the EndpointSliceController.) ([#91399](https://github.com/kubernetes/kubernetes/pull/91399), [@danwinship](https://github.com/danwinship)) [SIG Apps and Network] -- Fixes a bug that evicted pods after a taint with a limited tolerationSeconds toleration was removed from a node ([#93722](https://github.com/kubernetes/kubernetes/pull/93722), [@liggitt](https://github.com/liggitt)) [SIG Apps and Node] -- Fixes a bug where EndpointSlices would not be recreated after rapid Service recreation. ([#94730](https://github.com/kubernetes/kubernetes/pull/94730), [@robscott](https://github.com/robscott)) [SIG Apps, Network and Testing] -- Fixes a race condition in kubelet pod handling ([#94751](https://github.com/kubernetes/kubernetes/pull/94751), [@auxten](https://github.com/auxten)) [SIG Node] -- Fixes an issue proxying to ipv6 pods without specifying a port ([#94834](https://github.com/kubernetes/kubernetes/pull/94834), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Network] -- Fixes an issue that can result in namespaced custom resources being orphaned when their namespace is deleted, if the CRD defining the custom resource is removed concurrently with namespaces being deleted, then recreated. ([#93790](https://github.com/kubernetes/kubernetes/pull/93790), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Apps] -- Ignore root user check when Windows pod starts ([#92355](https://github.com/kubernetes/kubernetes/pull/92355), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- Increased maximum IOPS of AWS EBS io1 volumes to 64,000 (current AWS maximum).
([#90014](https://github.com/kubernetes/kubernetes/pull/90014), [@jacobmarble](https://github.com/jacobmarble)) [SIG Cloud Provider and Storage] -- K8s.io/apimachinery: runtime.DefaultUnstructuredConverter.FromUnstructured now handles converting integer fields to typed float values ([#93250](https://github.com/kubernetes/kubernetes/pull/93250), [@liggitt](https://github.com/liggitt)) [SIG API Machinery] -- Kube-aggregator certificates are dynamically loaded on change from disk ([#92791](https://github.com/kubernetes/kubernetes/pull/92791), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery] -- Kube-apiserver: fixed a bug returning inconsistent results from list requests which set a field or label selector and set a paging limit ([#94002](https://github.com/kubernetes/kubernetes/pull/94002), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery] -- Kube-apiserver: jsonpath expressions with consecutive recursive descent operators are no longer evaluated for custom resource printer columns ([#93408](https://github.com/kubernetes/kubernetes/pull/93408), [@joelsmith](https://github.com/joelsmith)) [SIG API Machinery] -- Kube-proxy now trims extra spaces found in loadBalancerSourceRanges to match Service validation. ([#94107](https://github.com/kubernetes/kubernetes/pull/94107), [@robscott](https://github.com/robscott)) [SIG Network] -- Kube-up now includes CoreDNS version v1.7.0. Some of the major changes include: - - Fixed a bug that could cause CoreDNS to stop updating service records. - - Fixed a bug in the forward plugin where only the first upstream server is always selected no matter which policy is set. - - Remove already deprecated options `resyncperiod` and `upstream` in the Kubernetes plugin. - - Includes Prometheus metrics name changes (to bring them in line with standard Prometheus metrics naming convention). They will be backward incompatible with existing reporting formulas that use the old metrics' names. - - The federation plugin (allows for v1 Kubernetes federation) has been removed. 
- More details are available in https://coredns.io/2020/06/15/coredns-1.7.0-release/ ([#92718](https://github.com/kubernetes/kubernetes/pull/92718), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cloud Provider] -- Kubeadm now makes sure the etcd manifest is regenerated upon upgrade even when no etcd version change takes place ([#94395](https://github.com/kubernetes/kubernetes/pull/94395), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle] -- Kubeadm: avoid a panic when determining if the running version of CoreDNS is supported during upgrades ([#94299](https://github.com/kubernetes/kubernetes/pull/94299), [@zouyee](https://github.com/zouyee)) [SIG Cluster Lifecycle] -- Kubeadm: ensure "kubeadm reset" does not unmount the root "/var/lib/kubelet" directory if it is mounted by the user ([#93702](https://github.com/kubernetes/kubernetes/pull/93702), [@thtanaka](https://github.com/thtanaka)) [SIG Cluster Lifecycle] -- Kubeadm: ensure the etcd data directory is created with 0700 permissions during control-plane init and join ([#94102](https://github.com/kubernetes/kubernetes/pull/94102), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: fix the bug that kubeadm tries to call 'docker info' even if the CRI socket was for another CRI ([#94555](https://github.com/kubernetes/kubernetes/pull/94555), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- Kubeadm: make the kubeconfig files for the kube-controller-manager and kube-scheduler use the LocalAPIEndpoint instead of the ControlPlaneEndpoint. This makes kubeadm clusters more resilient to version skew problems during immutable upgrades: https://kubernetes.io/docs/setup/release/version-skew-policy/#kube-controller-manager-kube-scheduler-and-cloud-controller-manager ([#94398](https://github.com/kubernetes/kubernetes/pull/94398), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: relax the validation of kubeconfig server URLs. Allow the user to define custom kubeconfig server URLs without erroring out during validation of existing kubeconfig files (e.g. when using external CA mode).
([#94816](https://github.com/kubernetes/kubernetes/pull/94816), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: remove duplicate DNS names and IP addresses from generated certificates ([#92753](https://github.com/kubernetes/kubernetes/pull/92753), [@QianChenglong](https://github.com/QianChenglong)) [SIG Cluster Lifecycle] -- Kubelet: assume that swap is disabled when `/proc/swaps` does not exist ([#93931](https://github.com/kubernetes/kubernetes/pull/93931), [@SataQiu](https://github.com/SataQiu)) [SIG Node] -- Kubelet: fix race condition in pluginWatcher ([#93622](https://github.com/kubernetes/kubernetes/pull/93622), [@knight42](https://github.com/knight42)) [SIG Node] -- Kuberuntime security: pod sandbox now always runs with `runtime/default` seccomp profile - kuberuntime seccomp: custom profiles can now have smaller seccomp profiles when set at pod level ([#90949](https://github.com/kubernetes/kubernetes/pull/90949), [@pjbgf](https://github.com/pjbgf)) [SIG Node] -- NONE ([#71269](https://github.com/kubernetes/kubernetes/pull/71269), [@DeliangFan](https://github.com/DeliangFan)) [SIG Node] -- New Azure instance types now have correct max data disk count information. ([#94340](https://github.com/kubernetes/kubernetes/pull/94340), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Cloud Provider and Storage] -- Pods with invalid Affinity/AntiAffinity LabelSelectors will now fail scheduling when these plugins are enabled ([#93660](https://github.com/kubernetes/kubernetes/pull/93660), [@damemi](https://github.com/damemi)) [SIG Scheduling] -- Require feature flag CustomCPUCFSQuotaPeriod if setting a non-default cpuCFSQuotaPeriod in kubelet config. ([#94687](https://github.com/kubernetes/kubernetes/pull/94687), [@karan](https://github.com/karan)) [SIG Node] -- Reverted devicemanager for Windows node added in 1.19rc1. ([#93263](https://github.com/kubernetes/kubernetes/pull/93263), [@liggitt](https://github.com/liggitt)) [SIG Node and Windows] -- Scheduler bugfix: Scheduler doesn't lose pod information when nodes are quickly recreated. This could happen when nodes are restarted or quickly recreated reusing a nodename. ([#93938](https://github.com/kubernetes/kubernetes/pull/93938), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scalability, Scheduling and Testing] -- The EndpointSlice controller now waits for EndpointSlice and Node caches to be synced before starting. ([#94086](https://github.com/kubernetes/kubernetes/pull/94086), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- The `/debug/api_priority_and_fairness/dump_requests` path at an apiserver will no longer return a phantom line for each exempt priority level. ([#93406](https://github.com/kubernetes/kubernetes/pull/93406), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery] -- The kubelet recognizes the --containerd-namespace flag to configure the namespace used by cadvisor. ([#87054](https://github.com/kubernetes/kubernetes/pull/87054), [@changyaowei](https://github.com/changyaowei)) [SIG Node] -- The terminationGracePeriodSeconds from pod spec is respected for the mirror pod.
([#92442](https://github.com/kubernetes/kubernetes/pull/92442), [@tedyu](https://github.com/tedyu)) [SIG Node and Testing] -- Update Calico to v3.15.2 ([#94241](https://github.com/kubernetes/kubernetes/pull/94241), [@lmm](https://github.com/lmm)) [SIG Cloud Provider] -- Update default etcd server version to 3.4.13 ([#94287](https://github.com/kubernetes/kubernetes/pull/94287), [@jingyih](https://github.com/jingyih)) [SIG API Machinery, Cloud Provider, Cluster Lifecycle and Testing] -- Updated Cluster Autoscaler to 1.19.0; ([#93577](https://github.com/kubernetes/kubernetes/pull/93577), [@vivekbagade](https://github.com/vivekbagade)) [SIG Autoscaling and Cloud Provider] -- Use NLB Subnet CIDRs instead of VPC CIDRs in Health Check SG Rules ([#93515](https://github.com/kubernetes/kubernetes/pull/93515), [@t0rr3sp3dr0](https://github.com/t0rr3sp3dr0)) [SIG Cloud Provider] +- ## Changelog + + ### General + - Fix priority expander falling back to a random choice even though there is a higher priority option to choose + - Clone `kubernetes/kubernetes` in `update-vendor.sh` shallowly, instead of fetching all revisions + - Speed up binpacking by reducing the number of PreFilter calls (call once per pod instead of #pods*#nodes times) + - Speed up finding unneeded nodes by 5x+ in very large clusters by reducing the number of PreFilter calls + - Expose `--max-nodes-total` as a metric + - Errors in `IncreaseSize` changed from type `apiError` to `cloudProviderError` + - Make `build-in-docker` and `test-in-docker` work on Linux systems with SELinux enabled + - Fix an error where existing nodes were not considered as destinations while finding place for pods in scale-down simulations + - Remove redundant log lines and reduce severity around parsing kubeEnv + - Don't treat nodes created by virtual kubelet as nodes from non-autoscaled node groups + - Remove redundant logging around calculating node utilization + - Add configurable `--network` and `--rm` flags for docker in `Makefile` + - Subtract DaemonSet pods' requests from node allocatable in the denominator while computing node utilization + - Include taints by condition when determining if a node is unready/still starting + - Fix `update-vendor.sh` to work on OSX and zsh + - Add best-effort eviction for DaemonSet pods while scaling down non-empty nodes + - Add build support for ARM64 + + ### AliCloud + - Add missing daemonsets and replicasets to ALI example cluster role + + ### Apache CloudStack + - Add support for Apache CloudStack + + ### AWS + - Regenerate list of EC2 instances + - Fix pricing endpoint in AWS China Region + + ### Azure + - Add optional jitter on initial VMSS VM cache refresh, keep the refreshes spread over time + - Serve from cache for the whole period of ongoing throttling + - Fix unwanted VMSS VMs cache invalidations + - Enforce setting the number of retries if cloud provider backoff is enabled + - Don't update capacity if VMSS provisioning state is updating + - Support allocatable resources overrides via VMSS tags + - Add missing stable labels in template nodes + - Proactively set instance status to deleting on node deletions + + ### Cluster API + - Migrate interaction with the API from using internal types to using Unstructured + - Improve tests to work better with constrained resources + - Add support for node autodiscovery + - Add support for `--cloud-config` + - Update group identifier to use for Cluster API annotations + + ### Exoscale + - Add 
support for Exoscale + + ### GCE + - Decrease the number of GCE Read Requests made while deleting nodes + - Base pricing of custom instances on their instance family type + - Add pricing information for missing machine types + - Add pricing information for different GPU types + - Ignore the new `topology.gke.io/zone` label when comparing groups + - Add missing stable labels to template nodes + + ### HuaweiCloud + - Add auto scaling group support + - Implement node group by AS + - Implement getting desired instance number of node group + - Implement increasing node group size + - Implement TemplateNodeInfo + - Implement caching instances + + ### IONOS + - Add support for IONOS + + ### Kubemark + - Skip non-kubemark nodes while computing node infos for node groups. + + ### Magnum + - Add Magnum support in the Cluster Autoscaler helm chart + + ### Packet + - Allow empty nodepools + - Add support for multiple nodepools + - Add pricing support + + ## Image + Image: `k8s.gcr.io/autoscaling/cluster-autoscaler:v1.20.0` ([#97011](https://github.com/kubernetes/kubernetes/pull/97011), [@towca](https://github.com/towca)) [SIG Cloud Provider] +- AcceleratorStats will be available in the Summary API of kubelet when cri_stats_provider is used. ([#96873](https://github.com/kubernetes/kubernetes/pull/96873), [@ruiwen-zhao](https://github.com/ruiwen-zhao)) [SIG Node] +- Limit the number of log lines returned when the tail option is set ([#93920](https://github.com/kubernetes/kubernetes/pull/93920), [@zhouya0](https://github.com/zhouya0)) [SIG Node] +- Avoid systemd-logind loading configuration warning ([#97950](https://github.com/kubernetes/kubernetes/pull/97950), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Cloud-controller-manager: routes controller should not depend on --allocate-node-cidrs ([#97029](https://github.com/kubernetes/kubernetes/pull/97029), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider and Testing] +- Copy annotations with empty value when deployment rolls back ([#94858](https://github.com/kubernetes/kubernetes/pull/94858), [@waynepeking348](https://github.com/waynepeking348)) [SIG Apps] +- Detach volumes from vSphere nodes not tracked by attach-detach controller ([#96689](https://github.com/kubernetes/kubernetes/pull/96689), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] +- Fix kubectl label error when local=true is set. ([#97440](https://github.com/kubernetes/kubernetes/pull/97440), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI] +- Fix Azure file share not being deleted when the namespace is deleted ([#97417](https://github.com/kubernetes/kubernetes/pull/97417), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] +- Fix CVE-2020-8555 for Gluster client connections. ([#97922](https://github.com/kubernetes/kubernetes/pull/97922), [@liggitt](https://github.com/liggitt)) [SIG Storage] +- Fix counting error in service/nodeport/loadbalancer quota check ([#97451](https://github.com/kubernetes/kubernetes/pull/97451), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery, Network and Testing] +- Fix kubectl-convert import known versions ([#97754](https://github.com/kubernetes/kubernetes/pull/97754), [@wzshiming](https://github.com/wzshiming)) [SIG CLI and Testing] +- Fix missing cadvisor machine metrics.
([#97006](https://github.com/kubernetes/kubernetes/pull/97006), [@lingsamuel](https://github.com/lingsamuel)) [SIG Node] +- Fix nil VMSS name when setting service to auto mode ([#97366](https://github.com/kubernetes/kubernetes/pull/97366), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Fix the panic when kubelet registers if a node object already exists with no Status.Capacity or Status.Allocatable ([#95269](https://github.com/kubernetes/kubernetes/pull/95269), [@SataQiu](https://github.com/SataQiu)) [SIG Node] +- Fix the regression with slow pod termination. Before this fix, pods could take up to an additional minute to terminate; this reverses the change that ensured CNI resources are cleaned up when the pod is removed from the API server. ([#97980](https://github.com/kubernetes/kubernetes/pull/97980), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Node] +- Fix to recover CSI volumes from certain dangling attachments ([#96617](https://github.com/kubernetes/kubernetes/pull/96617), [@yuga711](https://github.com/yuga711)) [SIG Apps and Storage] +- Fix: azure file latency issue for metadata-heavy workloads ([#97082](https://github.com/kubernetes/kubernetes/pull/97082), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] +- Fixed Cinder volume IDs on OpenStack Train ([#96673](https://github.com/kubernetes/kubernetes/pull/96673), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider] +- Fixed FibreChannel volume plugin corrupting filesystems on detach of multipath volumes. ([#97013](https://github.com/kubernetes/kubernetes/pull/97013), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- Fixed a bug in kubelet that would saturate CPU utilization after containerd was restarted. ([#97174](https://github.com/kubernetes/kubernetes/pull/97174), [@hanlins](https://github.com/hanlins)) [SIG Node] +- Fixed bug in CPUManager with race on container map access ([#97427](https://github.com/kubernetes/kubernetes/pull/97427), [@klueska](https://github.com/klueska)) [SIG Node] +- Fixed cleanup of block devices when /var/lib/kubelet is a symlink. ([#96889](https://github.com/kubernetes/kubernetes/pull/96889), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- GCE Internal LoadBalancer sync loop will now release the ILB IP address upon sync failure. An error in ILB forwarding rule creation will no longer leak IP addresses.
([#97740](https://github.com/kubernetes/kubernetes/pull/97740), [@prameshj](https://github.com/prameshj)) [SIG Cloud Provider and Network] +- Ignore pod updates that introduce no new images in the alwaysPullImages admission controller ([#96668](https://github.com/kubernetes/kubernetes/pull/96668), [@pacoxu](https://github.com/pacoxu)) [SIG Apps, Auth and Node] +- Kubeadm now installs version 3.4.13 of etcd when creating a cluster with v1.19 ([#97244](https://github.com/kubernetes/kubernetes/pull/97244), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubeadm: avoid detection of the container runtime for commands that do not need it ([#97625](https://github.com/kubernetes/kubernetes/pull/97625), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug in the host memory detection code on 32bit Linux platforms ([#97403](https://github.com/kubernetes/kubernetes/pull/97403), [@abelbarrera15](https://github.com/abelbarrera15)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug where "kubeadm upgrade" commands can fail if CoreDNS v1.8.0 is installed. ([#97919](https://github.com/kubernetes/kubernetes/pull/97919), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Performance regression [#97685](https://github.com/kubernetes/kubernetes/issues/97685) has been fixed. ([#97860](https://github.com/kubernetes/kubernetes/pull/97860), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery] +- Remove deprecated --cleanup-ipvs flag of kube-proxy, and make the --cleanup flag always flush IPVS ([#97336](https://github.com/kubernetes/kubernetes/pull/97336), [@maaoBit](https://github.com/maaoBit)) [SIG Network] +- The current version of the container image publicly exposed an IP serving a /metrics endpoint to the Internet. The new version of the container image serves the /metrics endpoint on a different port. ([#97621](https://github.com/kubernetes/kubernetes/pull/97621), [@vbannai](https://github.com/vbannai)) [SIG Cloud Provider] +- Use force unmount for NFS volumes if regular mount fails after 1 minute timeout ([#96844](https://github.com/kubernetes/kubernetes/pull/96844), [@gnufied](https://github.com/gnufied)) [SIG Storage] - Users will see an increase in time for deletion of pods, and also a guarantee that removal of a pod from the API server means deletion of all its resources from the container runtime. ([#92817](https://github.com/kubernetes/kubernetes/pull/92817), [@kmala](https://github.com/kmala)) [SIG Node] -- Very large patches may now be specified to `kubectl patch` with the `--patch-file` flag instead of including them directly on the command line. The `--patch` and `--patch-file` flags are mutually exclusive. ([#93548](https://github.com/kubernetes/kubernetes/pull/93548), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI] -- When creating a networking.k8s.io/v1 Ingress API object, `spec.rules[*].http` values are now validated consistently when the `host` field contains a wildcard. ([#93954](https://github.com/kubernetes/kubernetes/pull/93954), [@Miciah](https://github.com/Miciah)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Storage and Testing] +- Using exec auth plugins with kubectl no longer results in warnings about constructing many client instances from the same exec auth config.
([#97857](https://github.com/kubernetes/kubernetes/pull/97857), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Auth] +- Warning about using a deprecated volume plugin is logged only once. ([#96751](https://github.com/kubernetes/kubernetes/pull/96751), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] ### Other (Cleanup or Flake) -- --cache-dir sets cache directory for both http and discovery, defaults to $HOME/.kube/cache ([#92910](https://github.com/kubernetes/kubernetes/pull/92910), [@soltysh](https://github.com/soltysh)) [SIG API Machinery and CLI] -- Adds a bootstrapping ClusterRole, ClusterRoleBinding and group for /metrics, /livez/*, /readyz/*, & /healthz/- endpoints. ([#93311](https://github.com/kubernetes/kubernetes/pull/93311), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Auth, Cloud Provider and Instrumentation] -- Base-images: Update to debian-iptables:buster-v1.3.0 - - Uses iptables 1.8.5 - - base-images: Update to debian-base:buster-v1.2.0 - - cluster/images/etcd: Build etcd:3.4.13-1 image - - Uses debian-base:buster-v1.2.0 ([#94733](https://github.com/kubernetes/kubernetes/pull/94733), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing] -- Build: Update to debian-base@v2.1.2 and debian-iptables@v12.1.1 ([#93667](https://github.com/kubernetes/kubernetes/pull/93667), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing] -- Build: Update to debian-base@v2.1.3 and debian-iptables@v12.1.2 ([#93916](https://github.com/kubernetes/kubernetes/pull/93916), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing] -- Build: Update to go-runner:buster-v2.0.0 ([#94167](https://github.com/kubernetes/kubernetes/pull/94167), [@justaugustus](https://github.com/justaugustus)) [SIG Release] -- Fix kubelet to properly log when a container is started. Before, sometimes the log said that a container is dead and was restarted when it was started for the first time. This only happened when using pods with initContainers and regular containers. ([#91469](https://github.com/kubernetes/kubernetes/pull/91469), [@rata](https://github.com/rata)) [SIG Node] -- Fix: license issue in blob disk feature ([#92824](https://github.com/kubernetes/kubernetes/pull/92824), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fixes the flooding warning messages about setting volume ownership for configmap/secret volumes ([#92878](https://github.com/kubernetes/kubernetes/pull/92878), [@jvanz](https://github.com/jvanz)) [SIG Instrumentation, Node and Storage] -- Fixes the message about no auth for metrics in scheduler. ([#94035](https://github.com/kubernetes/kubernetes/pull/94035), [@zhouya0](https://github.com/zhouya0)) [SIG Scheduling] -- Kube-up: defaults to limiting critical pods to the kube-system namespace to match behavior prior to 1.17 ([#93121](https://github.com/kubernetes/kubernetes/pull/93121), [@liggitt](https://github.com/liggitt)) [SIG Cloud Provider and Scheduling] -- Kubeadm: Separate argument key/value in log msg ([#94016](https://github.com/kubernetes/kubernetes/pull/94016), [@mrueg](https://github.com/mrueg)) [SIG Cluster Lifecycle] -- Kubeadm: remove support for the "ci/k8s-master" version label. 
This label has been removed in the Kubernetes CI release process and would no longer work in kubeadm. You can use the "ci/latest" version label instead. See kubernetes/test-infra#18517 ([#93626](https://github.com/kubernetes/kubernetes/pull/93626), [@vikkyomkar](https://github.com/vikkyomkar)) [SIG Cluster Lifecycle] -- Kubeadm: remove the CoreDNS check for known image digests when applying the addon ([#94506](https://github.com/kubernetes/kubernetes/pull/94506), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubernetes is now built with go1.15.0 ([#93939](https://github.com/kubernetes/kubernetes/pull/93939), [@justaugustus](https://github.com/justaugustus)) [SIG Release and Testing] -- Kubernetes is now built with go1.15.0-rc.2 ([#93827](https://github.com/kubernetes/kubernetes/pull/93827), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node, Release and Testing] -- Lock ExternalPolicyForExternalIP to default, this feature gate will be removed in 1.22. ([#94581](https://github.com/kubernetes/kubernetes/pull/94581), [@knabben](https://github.com/knabben)) [SIG Network] -- Service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset is removed. All Standard load balancers will always enable tcp resets. ([#94297](https://github.com/kubernetes/kubernetes/pull/94297), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider] -- Stop propagating SelfLink (deprecated in 1.16) in kube-apiserver ([#94397](https://github.com/kubernetes/kubernetes/pull/94397), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Strip unnecessary security contexts on Windows ([#93475](https://github.com/kubernetes/kubernetes/pull/93475), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node, Testing and Windows] -- To ensure the code be strong, add unit test for GetAddressAndDialer ([#93180](https://github.com/kubernetes/kubernetes/pull/93180), [@FreeZhang61](https://github.com/FreeZhang61)) [SIG Node] -- Update CNI plugins to v0.8.7 ([#94367](https://github.com/kubernetes/kubernetes/pull/94367), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Network, Node, Release and Testing] -- Update Golang to v1.14.5 - - Update repo-infra to 0.0.7 (to support go1.14.5 and go1.13.13) - - Includes: - - bazelbuild/bazel-toolchains@3.3.2 - - bazelbuild/rules_go@v0.22.7 ([#93088](https://github.com/kubernetes/kubernetes/pull/93088), [@justaugustus](https://github.com/justaugustus)) [SIG Release and Testing] -- Update Golang to v1.14.6 - - Update repo-infra to 0.0.8 (to support go1.14.6 and go1.13.14) - - Includes: - - bazelbuild/bazel-toolchains@3.4.0 - - bazelbuild/rules_go@v0.22.8 ([#93198](https://github.com/kubernetes/kubernetes/pull/93198), [@justaugustus](https://github.com/justaugustus)) [SIG Release and Testing] -- Update cri-tools to [v1.19.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.19.0) ([#94307](https://github.com/kubernetes/kubernetes/pull/94307), [@xmudrii](https://github.com/xmudrii)) [SIG Cloud Provider] -- Update default etcd server version to 3.4.9 ([#92349](https://github.com/kubernetes/kubernetes/pull/92349), [@jingyih](https://github.com/jingyih)) [SIG API Machinery, Cloud Provider, 
Cluster Lifecycle and Testing] -- Update etcd client side to v3.4.13 ([#94259](https://github.com/kubernetes/kubernetes/pull/94259), [@jingyih](https://github.com/jingyih)) [SIG API Machinery and Cloud Provider] -- `kubectl get ingress` now prefers the `networking.k8s.io/v1` over `extensions/v1beta1` (deprecated since v1.14). To explicitly request the deprecated version, use `kubectl get ingress.v1beta1.extensions`. ([#94309](https://github.com/kubernetes/kubernetes/pull/94309), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and CLI] +- Bump github.com/Azure/go-autorest/autorest to v0.11.12 ([#97033](https://github.com/kubernetes/kubernetes/pull/97033), [@patrickshan](https://github.com/patrickshan)) [SIG API Machinery, CLI, Cloud Provider and Cluster Lifecycle] +- Delete deprecated mixed protocol annotation ([#97096](https://github.com/kubernetes/kubernetes/pull/97096), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Kube-proxy: Traffic from the cluster directed to ExternalIPs is always sent directly to the Service. ([#96296](https://github.com/kubernetes/kubernetes/pull/96296), [@aojea](https://github.com/aojea)) [SIG Network and Testing] +- Kubeadm: fix a whitespace issue in the output of the "kubeadm join" command shown as the output of "kubeadm init" and "kubeadm token create --print-join-command" ([#97413](https://github.com/kubernetes/kubernetes/pull/97413), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubeadm: improve the error messaging when the user provides an invalid discovery token CA certificate hash. ([#97290](https://github.com/kubernetes/kubernetes/pull/97290), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Migrate log messages in pkg/scheduler/{scheduler.go,factory.go} to structured logging ([#97509](https://github.com/kubernetes/kubernetes/pull/97509), [@aldudko](https://github.com/aldudko)) [SIG Scheduling] +- Migrate proxy/iptables/proxier.go logs to structured logging ([#97678](https://github.com/kubernetes/kubernetes/pull/97678), [@JornShen](https://github.com/JornShen)) [SIG Network] +- Migrate some scheduler log messages to structured logging ([#97349](https://github.com/kubernetes/kubernetes/pull/97349), [@aldudko](https://github.com/aldudko)) [SIG Scheduling] +- NONE ([#97167](https://github.com/kubernetes/kubernetes/pull/97167), [@geegeea](https://github.com/geegeea)) [SIG Node] +- NetworkPolicy validation framework optimizations for rapidly verifying that CNIs work correctly across several pods and namespaces ([#91592](https://github.com/kubernetes/kubernetes/pull/91592), [@jayunit100](https://github.com/jayunit100)) [SIG Network, Storage and Testing] +- Official support to build kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97618](https://github.com/kubernetes/kubernetes/pull/97618), [@jherrera123](https://github.com/jherrera123)) [SIG Release and Testing] +- Scheduler plugin validation now provides all errors detected instead of the first one. 
([#96745](https://github.com/kubernetes/kubernetes/pull/96745), [@lingsamuel](https://github.com/lingsamuel)) [SIG Node, Scheduling and Testing] +- Storage-related e2e test suite redesign & cleanup ([#96573](https://github.com/kubernetes/kubernetes/pull/96573), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Storage and Testing] +- The OIDC authenticator no longer waits 10 seconds before attempting to fetch the metadata required to verify tokens. ([#97693](https://github.com/kubernetes/kubernetes/pull/97693), [@enj](https://github.com/enj)) [SIG API Machinery and Auth] +- The `AttachVolumeLimit` feature gate, which has been GA since v1.17, is now removed. ([#96539](https://github.com/kubernetes/kubernetes/pull/96539), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Storage] +- The `CSINodeInfo` feature gate, which has been GA since v1.17, is unconditionally enabled and can no longer be specified via the `--feature-gates` argument. ([#96561](https://github.com/kubernetes/kubernetes/pull/96561), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Apps, Auth, Scheduling, Storage and Testing] +- The deprecated feature gates `RotateKubeletClientCertificate`, `AttachVolumeLimit`, `VolumePVCDataSource` and `EvenPodsSpread` are now unconditionally enabled and can no longer be specified in component invocations. ([#97306](https://github.com/kubernetes/kubernetes/pull/97306), [@gavinfish](https://github.com/gavinfish)) [SIG Node, Scheduling and Storage] +- The `ServiceNodeExclusion`, `NodeDisruptionExclusion` and `LegacyNodeRoleBehavior` (locked to false) features have been promoted to GA. + To prevent control plane nodes from being added to load balancers automatically, users who are upgrading need to add the "node.kubernetes.io/exclude-from-external-load-balancers" label to control plane nodes. 
([#97543](https://github.com/kubernetes/kubernetes/pull/97543), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery, Apps, Cloud Provider and Network] + +### Uncategorized + +- Adding Brazilian Portuguese translation for kubectl ([#61595](https://github.com/kubernetes/kubernetes/pull/61595), [@cpanato](https://github.com/cpanato)) [SIG CLI] ## Dependencies ### Added -- github.com/Azure/go-autorest: [v14.2.0+incompatible](https://github.com/Azure/go-autorest/tree/v14.2.0) -- github.com/fvbommel/sortorder: [v1.0.1](https://github.com/fvbommel/sortorder/tree/v1.0.1) -- github.com/yuin/goldmark: [v1.1.27](https://github.com/yuin/goldmark/tree/v1.1.27) -- sigs.k8s.io/structured-merge-diff/v4: v4.0.1 +_Nothing has changed._ ### Changed -- github.com/Azure/go-autorest/autorest/adal: [v0.8.2 → v0.9.0](https://github.com/Azure/go-autorest/autorest/adal/compare/v0.8.2...v0.9.0) -- github.com/Azure/go-autorest/autorest/date: [v0.2.0 → v0.3.0](https://github.com/Azure/go-autorest/autorest/date/compare/v0.2.0...v0.3.0) -- github.com/Azure/go-autorest/autorest/mocks: [v0.3.0 → v0.4.0](https://github.com/Azure/go-autorest/autorest/mocks/compare/v0.3.0...v0.4.0) -- github.com/Azure/go-autorest/autorest: [v0.9.6 → v0.11.1](https://github.com/Azure/go-autorest/autorest/compare/v0.9.6...v0.11.1) -- github.com/Azure/go-autorest/logger: [v0.1.0 → v0.2.0](https://github.com/Azure/go-autorest/logger/compare/v0.1.0...v0.2.0) -- github.com/Azure/go-autorest/tracing: [v0.5.0 → v0.6.0](https://github.com/Azure/go-autorest/tracing/compare/v0.5.0...v0.6.0) -- github.com/Microsoft/hcsshim: [v0.8.9 → 5eafd15](https://github.com/Microsoft/hcsshim/compare/v0.8.9...5eafd15) -- github.com/cilium/ebpf: [9f1617e → 1c8d4c9](https://github.com/cilium/ebpf/compare/9f1617e...1c8d4c9) -- github.com/containerd/cgroups: [bf292b2 → 0dbf7f0](https://github.com/containerd/cgroups/compare/bf292b2...0dbf7f0) -- github.com/coredns/corefile-migration: [v1.0.8 → v1.0.10](https://github.com/coredns/corefile-migration/compare/v1.0.8...v1.0.10) -- github.com/evanphx/json-patch: [e83c0a1 → v4.9.0+incompatible](https://github.com/evanphx/json-patch/compare/e83c0a1...v4.9.0) -- github.com/google/cadvisor: [8450c56 → v0.37.0](https://github.com/google/cadvisor/compare/8450c56...v0.37.0) -- github.com/json-iterator/go: [v1.1.9 → v1.1.10](https://github.com/json-iterator/go/compare/v1.1.9...v1.1.10) -- github.com/opencontainers/go-digest: [v1.0.0-rc1 → v1.0.0](https://github.com/opencontainers/go-digest/compare/v1.0.0-rc1...v1.0.0) -- github.com/opencontainers/runc: [1b94395 → 819fcc6](https://github.com/opencontainers/runc/compare/1b94395...819fcc6) -- github.com/prometheus/client_golang: [v1.6.0 → v1.7.1](https://github.com/prometheus/client_golang/compare/v1.6.0...v1.7.1) -- github.com/prometheus/common: [v0.9.1 → v0.10.0](https://github.com/prometheus/common/compare/v0.9.1...v0.10.0) -- github.com/prometheus/procfs: [v0.0.11 → v0.1.3](https://github.com/prometheus/procfs/compare/v0.0.11...v0.1.3) -- github.com/rubiojr/go-vhd: [0bfd3b3 → 02e2102](https://github.com/rubiojr/go-vhd/compare/0bfd3b3...02e2102) -- github.com/storageos/go-api: [343b3ef → 
v2.2.0+incompatible](https://github.com/storageos/go-api/compare/343b3ef...v2.2.0) -- github.com/urfave/cli: [v1.22.1 → v1.22.2](https://github.com/urfave/cli/compare/v1.22.1...v1.22.2) -- go.etcd.io/etcd: 54ba958 → dd1b699 -- golang.org/x/crypto: bac4c82 → 75b2880 -- golang.org/x/mod: v0.1.0 → v0.3.0 -- golang.org/x/net: d3edc99 → ab34263 -- golang.org/x/tools: c00d67e → c1934b7 -- k8s.io/kube-openapi: 656914f → 6aeccd4 -- k8s.io/system-validators: v1.1.2 → v1.2.0 -- k8s.io/utils: 6e3d28b → d5654de +- github.com/Azure/go-autorest/autorest: [v0.11.1 → v0.11.12](https://github.com/Azure/go-autorest/autorest/compare/v0.11.1...v0.11.12) +- github.com/coredns/corefile-migration: [v1.0.10 → v1.0.11](https://github.com/coredns/corefile-migration/compare/v1.0.10...v1.0.11) +- github.com/golang/mock: [v1.4.1 → v1.4.4](https://github.com/golang/mock/compare/v1.4.1...v1.4.4) +- github.com/google/cadvisor: [v0.38.5 → v0.38.6](https://github.com/google/cadvisor/compare/v0.38.5...v0.38.6) +- github.com/heketi/heketi: [c2e2a4a → v10.2.0+incompatible](https://github.com/heketi/heketi/compare/c2e2a4a...v10.2.0) +- github.com/miekg/dns: [v1.1.4 → v1.1.35](https://github.com/miekg/dns/compare/v1.1.4...v1.1.35) +- k8s.io/system-validators: v1.2.0 → v1.3.0 ### Removed -- github.com/godbus/dbus: [ade71ed](https://github.com/godbus/dbus/tree/ade71ed) -- github.com/xlab/handysort: [fb3537e](https://github.com/xlab/handysort/tree/fb3537e) -- sigs.k8s.io/structured-merge-diff/v3: v3.0.0 -- vbom.ml/util: db5cfe1 +- rsc.io/quote/v3: v3.1.0 +- rsc.io/sampler: v1.3.0 diff --git a/content/ko/docs/setup/release/version-skew-policy.md b/content/ko/docs/setup/release/version-skew-policy.md index feb675f8ba6bc..76ff7504fd032 100644 --- a/content/ko/docs/setup/release/version-skew-policy.md +++ b/content/ko/docs/setup/release/version-skew-policy.md @@ -1,11 +1,18 @@ --- + + + + + + + title: 쿠버네티스 버전 및 버전 차이(skew) 지원 정책 content_type: concept weight: 30 --- -이 문서는 다양한 쿠버네티스 구성 요소 간에 지원되는 최대 버전 차이를 설명한다. +이 문서는 다양한 쿠버네티스 구성 요소 간에 지원되는 최대 버전 차이를 설명한다. 특정 클러스터 배포 도구는 버전 차이에 대한 추가적인 제한을 설정할 수 있다. @@ -19,14 +26,14 @@ weight: 30 쿠버네티스 프로젝트는 최근 세 개의 마이너 릴리스 ({{< skew latestVersion >}}, {{< skew prevMinorVersion >}}, {{< skew oldestMinorVersion >}}) 에 대한 릴리스 분기를 유지한다. 쿠버네티스 1.19 이상은 약 1년간의 패치 지원을 받는다. 쿠버네티스 1.18 이상은 약 9개월의 패치 지원을 받는다. -보안 수정사항을 포함한 해당 수정사항은 심각도와 타당성에 따라 세 개의 릴리스 브랜치로 백포트(backport) 될 수 있다. +보안 수정사항을 포함한 해당 수정사항은 심각도와 타당성에 따라 세 개의 릴리스 브랜치로 백포트(backport) 될 수 있다. 패치 릴리스는 각 브랜치별로 [정기적인 주기](https://git.k8s.io/sig-release/releases/patch-releases.md#cadence)로 제공하며, 필요한 경우 추가 긴급 릴리스도 추가한다. [릴리스 관리자](https://git.k8s.io/sig-release/release-managers.md) 그룹이 이러한 결정 권한을 가진다. 자세한 내용은 쿠버네티스 [패치 릴리스](https://git.k8s.io/sig-release/releases/patch-releases.md) 페이지를 참조한다. -## 지원되는 버전 차이 +## 지원되는 버전 차이 ### kube-apiserver @@ -133,6 +140,11 @@ HA 클러스터의 `kube-apiserver` 인스턴스 간에 버전 차이가 있으 필요에 따라서 `kubelet` 인스턴스를 **{{< skew latestVersion >}}** 으로 업그레이드할 수 있다(또는 **{{< skew prevMinorVersion >}}** 아니면 **{{< skew oldestMinorVersion >}}** 으로 유지할 수 있음). +{{< note >}} +`kubelet` 마이너 버전 업그레이드를 수행하기 전에, 해당 노드의 파드를 [드레인(drain)](/docs/tasks/administer-cluster/safely-drain-node/)해야 한다. +인플레이스(In-place) 마이너 버전 `kubelet` 업그레이드는 지원되지 않는다. 
+{{< /note >}} + {{< warning >}} 클러스터 안의 `kubelet` 인스턴스를 `kube-apiserver`의 버전보다 2단계 낮은 버전으로 실행하는 것을 권장하지 않는다: diff --git a/content/ko/docs/tasks/access-application-cluster/access-cluster.md b/content/ko/docs/tasks/access-application-cluster/access-cluster.md index e78046f0eef7b..9e7c9b4fc77b6 100644 --- a/content/ko/docs/tasks/access-application-cluster/access-cluster.md +++ b/content/ko/docs/tasks/access-application-cluster/access-cluster.md @@ -26,9 +26,9 @@ kubectl이 인지하는 위치정보와 인증정보는 다음 커맨드로 확 kubectl config view ``` -많은 [예제들](/ko/docs/reference/kubectl/cheatsheet/)에서 -kubectl을 사용하는 것을 소개하고 있으며 완전한 문서는 -[kubectl 매뉴얼](/ko/docs/reference/kubectl/overview/)에서 찾아볼 수 있다. +[여기](/ko/docs/reference/kubectl/cheatsheet/)에서 +kubectl 사용 예시를 볼 수 있으며, 완전한 문서는 +[kubectl 매뉴얼](/ko/docs/reference/kubectl/overview/)에서 확인할 수 있다. ## REST API에 직접 접근 @@ -44,12 +44,12 @@ REST API에 직접 접근하려고 한다면 위치 파악과 인증을 하는 - 앞으로는 클라이언트 측의 지능형 load balancing과 failover가 될 것이다. - 직접적으로 http 클라이언트에 위치정보와 인증정보를 제공. - 대안적인 접근 방식. - - proxy 사용과 혼동되는 몇 가지 타입의 클라이언트 code들과 같이 동작한다. - - MITM로부터 보호를 위해 root 인증서를 당신의 브라우저로 import해야 한다. + - proxy 사용과 혼동되는 몇 가지 타입의 클라이언트 코드와 같이 동작한다. + - MITM로부터 보호를 위해 root 인증서를 당신의 브라우저로 임포트해야 한다. ### kubectl proxy 사용 -다음 커맨드는 kubectl을 reverse proxy처럼 동작하는 모드를 실행한다. 이는 +다음 커맨드는 kubectl을 리버스 프록시(reverse proxy)처럼 동작하는 모드를 실행한다. 이는 apiserver의 위치지정과 인증을 처리한다. 다음과 같이 실행한다. @@ -205,7 +205,7 @@ apiserver의 인증서 제공을 검증하는데 사용되어야 한다. - 파드의 sidecar 컨테이너 내에서 `kubectl proxy`를 실행하거나, 컨테이너 내부에서 백그라운드 프로세스로 실행한다. - 이는 쿠버네티스 API를 파드의 localhost 인터페이스로 proxy하여 + 이는 쿠버네티스 API를 파드의 localhost 인터페이스로 프록시하여 해당 파드의 컨테이너 내에 다른 프로세스가 API에 접속할 수 있게 해준다. - Go 클라이언트 라이브러리를 이용하여 `rest.InClusterConfig()`와 `kubernetes.NewForConfig()` 함수들을 사용하도록 클라이언트를 만든다. 이는 apiserver의 위치지정과 인증을 처리한다. [예제](https://git.k8s.io/client-go/examples/in-cluster-client-configuration/main.go) @@ -215,47 +215,47 @@ apiserver의 인증서 제공을 검증하는데 사용되어야 한다. ## 클러스터에서 실행되는 서비스로 접근 이전 장은 쿠버네티스 API server 접속에 대한 내용을 다루었다. 이번 장은 -쿠버네티스 클러스터 상에서 실행되는 다른 서비스로의 연결을 다룰 것이다. 쿠버네티스에서 -[노드들](/ko/docs/concepts/architecture/nodes/), -[파드들](/ko/docs/concepts/workloads/pods/), -[서비스들](/ko/docs/concepts/services-networking/service/)은 -모두 자신의 IP들을 가진다. 당신의 데스크탑 PC와 같은 클러스터 외부 장비에서는 -클러스터 상의 노드 IP들, 파드 IP들, 서비스 IP들로 라우팅되지 않아서 접근을 +쿠버네티스 클러스터 상에서 실행되는 다른 서비스로의 연결을 다룰 것이다. + +쿠버네티스에서, [노드](/ko/docs/concepts/architecture/nodes/), +[파드](/ko/docs/concepts/workloads/pods/) 및 [서비스](/ko/docs/concepts/services-networking/service/)는 모두 +고유한 IP를 가진다. 당신의 데스크탑 PC와 같은 클러스터 외부 장비에서는 +클러스터 상의 노드 IP, 파드 IP, 서비스 IP로 라우팅되지 않아서 접근을 할 수 없을 것이다. ### 통신을 위한 방식들 -클러스터 외부에서 노드들, 파드들, 서비스들에 접속하는 데는 몇 가지 선택지들이 있다. +클러스터 외부에서 노드, 파드 및 서비스에 접속하기 위한 몇 가지 옵션이 있다. - 공인 IP를 통해 서비스에 접근. - 클러스터 외부에서 접근할 수 있도록 `NodePort` 또는 `LoadBalancer` 타입의 서비스를 사용한다. [서비스](/ko/docs/concepts/services-networking/service/)와 [kubectl expose](/docs/reference/generated/kubectl/kubectl-commands/#expose) 문서를 참조한다. - - 당신의 클러스터 환경에 따라 회사 네트워크에만 서비스를 노출하거나 - 인터넷으로 노출할 수 있다. 이 경우 노출되는 서비스의 보안 여부를 고려해야 한다. + - 클러스터 환경에 따라, 서비스는 회사 네트워크에만 노출되기도 하며, + 인터넷에 노출되는 경우도 있다. 이 경우 노출되는 서비스의 보안 여부를 고려해야 한다. 해당 서비스는 자체적으로 인증을 수행하는가? - - 파드들은 서비스 뒤에 위치시킨다. 레플리카들의 집합에서 특정 파드 하나에 debugging 같은 목적으로 접근하려면 - 해당 파드에 고유의 레이블을 붙이고 셀렉터에 해당 레이블을 선택한 신규 서비스를 생성한다. + - 파드는 서비스 뒤에 위치시킨다. 레플리카들의 집합에서 특정 파드 하나에 debugging 같은 목적으로 접근하려면 + 해당 파드에 고유의 레이블을 붙이고 셀렉터에 해당 레이블을 선택하는 신규 서비스를 생성한다. - 대부분의 경우에는 애플리케이션 개발자가 노드 IP를 통해 직접 노드에 접근할 필요는 없다. - Proxy Verb를 사용하여 서비스, 노드, 파드에 접근. - 원격 서비스에 접근하기에 앞서 apiserver의 인증과 인가를 받아야 한다. 
- 서비스가 인터넷에 노출하기에 보안이 충분하지 않거나 노드 IP 상의 port에 + 서비스가 인터넷에 노출하기에 보안이 충분하지 않거나 노드 IP 상의 포트에 접근을 하려고 하거나 debugging을 하려면 이를 사용한다. - - 어떤 web 애플리케이션에서는 proxy가 문제를 일으킬 수 있다. + - 어떤 web 애플리케이션에서는 프록시가 문제를 일으킬 수 있다. - HTTP/HTTPS에서만 동작한다. - [여기](#수작업으로-apiserver-proxy-url들을-구축)에서 설명하고 있다. - 클러스터 내 노드 또는 파드에서 접근. - - 파드를 Running시킨 다음 [kubectl exec](/docs/reference/generated/kubectl/kubectl-commands/#exec)를 사용하여 해당 파드의 셸로 접속한다. - 해당 셸에서 다른 노드들, 파드들, 서비스들에 연결한다. + - 파드를 실행한 다음, [kubectl exec](/docs/reference/generated/kubectl/kubectl-commands/#exec)를 사용하여 해당 파드의 셸로 접속한다. + 해당 셸에서 다른 노드, 파드, 서비스에 연결한다. - 어떤 클러스터는 클러스터 내의 노드에 ssh 접속을 허용하기도 한다. 이런 클러스터에서는 클러스터 서비스에 접근도 가능하다. 이는 비표준 방식으로 특정 클러스터에서는 동작하지만 다른 클러스터에서는 동작하지 않을 수 있다. 브라우저와 다른 도구들이 설치되지 않았거나 설치되었을 수 있다. 클러스터 DNS가 동작하지 않을 수도 있다. -### 빌트인 서비스들의 발견 +### 빌트인 서비스 검색 -일반적으로 kube-system에 의해 클러스터 상에서 start되는 몇 가지 서비스들이 존재한다. -`kubectl cluster-info` 커맨드로 이 서비스들의 리스트를 볼 수 있다. +일반적으로 kube-system에 의해 클러스터에 실행되는 몇 가지 서비스가 있다. +`kubectl cluster-info` 커맨드로 이 서비스의 리스트를 볼 수 있다. ```shell kubectl cluster-info @@ -280,20 +280,20 @@ heapster is running at https://104.197.5.247/api/v1/namespaces/kube-system/servi #### 수작업으로 apiserver proxy URL을 구축 -위에서 언급한 것처럼 서비스의 proxy URL을 검색하는데 `kubectl cluster-info` 커맨드를 사용할 수 있다. 서비스 endpoint, 접미사, 매개변수를 포함하는 proxy URL을 생성하려면 단순하게 해당 서비스에 +위에서 언급한 것처럼 서비스의 proxy URL을 검색하는 데 `kubectl cluster-info` 커맨드를 사용할 수 있다. 서비스 endpoint, 접미사, 매개변수를 포함하는 proxy URL을 생성하려면 해당 서비스에 `http://`*`kubernetes_master_address`*`/api/v1/namespaces/`*`namespace_name`*`/services/`*`service_name[:port_name]`*`/proxy` 형식의 proxy URL을 덧붙인다. -당신이 port에 이름을 지정하지 않았다면 URL에 *port_name* 을 지정할 필요는 없다. +당신이 포트에 이름을 지정하지 않았다면 URL에 *port_name* 을 지정할 필요는 없다. 이름이 있는 포트와 이름이 없는 포트 모두에 대하여, *port_name* 이 들어갈 자리에 포트 번호를 기재할 수도 있다. -기본적으로 API server는 http를 사용하여 서비스를 proxy한다. https를 사용하려면 다음과 같이 서비스 네임의 접두사에 `https:`를 붙인다. +기본적으로 API server는 http를 사용하여 서비스를 프록시한다. https를 사용하려면 다음과 같이 서비스 네임의 접두사에 `https:`를 붙인다. `http://`*`kubernetes_master_address`*`/api/v1/namespaces/`*`namespace_name`*`/services/`*`https:service_name:[port_name]`*`/proxy` URL의 네임 부분에 지원되는 양식은 다음과 같다. -* `` - http를 사용하여 기본값 또는 이름이 없는 port로 proxy한다 -* `:` - http를 사용하여 지정된 port로 proxy한다 -* `https::` - https를 사용하여 기본값 또는 이름이 없는 port로 proxy한다(마지막 콜론:에 주의) -* `https::` - https를 사용하여 지정된 port로 proxy한다 +* `` - http를 사용하여 기본값 또는 이름이 없는 포트로 프록시한다. +* `:` - http를 사용하여 지정된 포트 이름 또는 포트 번호로 프록시한다. +* `https::` - https를 사용하여 기본값 또는 이름이 없는 포트로 프록시한다. (마지막 콜론:에 주의) +* `https::` - https를 사용하여 지정된 포트 이름 또는 포트 번호로 프록시한다. ##### 예제들 @@ -326,38 +326,38 @@ URL의 네임 부분에 지원되는 양식은 다음과 같다. ## 요청 redirect -redirect 기능은 deprecated되고 제거 되었다. 대신 (아래의) proxy를 사용하기를 바란다. +redirect 기능은 deprecated되고 제거 되었다. 대신 (아래의) 프록시를 사용하기를 바란다. -## 다양한 Proxy들 +## 다양한 프록시들 -쿠버네티스를 사용하면서 당신이 접할 수 있는 몇 가지 다른 proxy들이 존재한다. +쿠버네티스를 사용하면서 당신이 접할 수 있는 몇 가지 다른 프록시들이 존재한다. 1. [kubectl proxy](#rest-api에-직접-접근): - 사용자의 데스크탑이나 파드 내에서 실행한다 - - localhost 주소에서 쿠버네티스 apiserver로 proxy한다 - - proxy하는 클라이언트는 HTTP를 사용한다 - - apiserver의 proxy는 HTTPS를 사용한다 + - localhost 주소에서 쿠버네티스 apiserver로 프록시한다 + - 프록시하는 클라이언트는 HTTP를 사용한다 + - apiserver의 프록시는 HTTPS를 사용한다 - apiserver를 위치지정한다 - 인증 header들을 추가한다 -1. [apiserver proxy](#빌트인-서비스들의-발견): +1. 
[apiserver proxy](#빌트인-서비스-검색): - apiserver 내의 빌트인 bastion이다 - - 다른 방식으로는 연결할 수 없는 클러스터 외부의 사용자를 클러스터 IP들로 연결한다 + - 다른 방식으로는 연결할 수 없는 클러스터 외부의 사용자를 클러스터 IP로 연결한다 - apiserver process들 내에서 실행된다 - - proxy하는 클라이언트는 HTTPS를 사용한다(또는 apiserver가 http로 구성되었다면 http) - - 타겟으로의 proxy는 가용정보를 사용하는 proxy에 의해서 HTTP 또는 HTTPS를 사용할 수도 있다 + - 프록시하는 클라이언트는 HTTPS를 사용한다(또는 apiserver가 http로 구성되었다면 http) + - 타겟으로의 프록시는 가용정보를 사용하는 프록시에 의해서 HTTP 또는 HTTPS를 사용할 수도 있다 - 노드, 파드, 서비스에 접근하는 데 사용될 수 있다 - 서비스에 접근하는 데 사용되면 load balacing한다 1. [kube proxy](/ko/docs/concepts/services-networking/service/#ips-and-vips): - 각 노드 상에서 실행된다 - - UDP와 TCP를 proxy한다 + - UDP와 TCP를 프록시한다 - HTTP를 인지하지 않는다 - load balancing을 제공한다 - - 서비스에 접근하는 데만 사용된다 + - 서비스에 접근하는 데에만 사용된다 1. apiserver(s) 전면의 Proxy/Load-balancer: diff --git a/content/ko/docs/tasks/access-application-cluster/list-all-running-container-images.md b/content/ko/docs/tasks/access-application-cluster/list-all-running-container-images.md new file mode 100644 index 0000000000000..77f5f5d6359c6 --- /dev/null +++ b/content/ko/docs/tasks/access-application-cluster/list-all-running-container-images.md @@ -0,0 +1,109 @@ +--- +title: 클러스터 내 모든 컨테이너 이미지 목록 보기 +content_type: task +weight: 100 +--- + + + +이 문서는 kubectl을 이용하여 클러스터 내 모든 컨테이너 이미지 목록을 +조회하는 방법에 관해 설명한다. + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + +이 작업에서는 kubectl을 사용하여 클러스터 내 모든 파드의 정보를 +조회하고, 결과값의 서식을 변경하여 각 파드에 대한 컨테이너 이미지 목록으로 +재구성할 것이다. + +## 모든 네임스페이스의 모든 컨테이너 이미지 가져오기 + +- `kubectl get pods --all-namespaces` 를 사용하여 모든 네임스페이스의 모든 파드 정보를 가져온다. +- 컨테이너 이미지 이름만 출력하기 위해 `-o jsonpath={..image}` 를 사용한다. + 이 명령어는 결과값으로 받은 json을 반복적으로 파싱하여, + `image` 필드만을 출력한다. + - jsonpath를 사용하는 방법에 대해 더 많은 정보를 얻고 싶다면 + [Jsonpath 지원](/ko/docs/reference/kubectl/jsonpath/)을 확인한다. +- 다음의 표준 툴을 이용해서 결과값을 처리한다. `tr`, `sort`, `uniq` + - `tr` 을 사용하여 공백을 줄 바꾸기로 대체한다. + - `sort` 를 사용하여 결과값을 정렬한다. + - `uniq` 를 사용하여 이미지 개수를 합산한다. + +```shell +kubectl get pods --all-namespaces -o jsonpath="{..image}" |\ +tr -s '[[:space:]]' '\n' |\ +sort |\ +uniq -c +``` + +이 커맨드는 결과값으로 나온 모든 아이템 중에 `image` 라고 명명된 필드를 +모두 출력한다. + +이와 다른 방법으로 파드 이미지 필드 값의 절대 경로를 사용할 수 있다. +이것은 필드명이 반복될 때에도 +정확한 값을 출력하도록 보장한다. +예) 결과값 중에 많은 필드들이 `name`으로 명명되었을 경우, + +```shell +kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" +``` + +이 jsonpath는 다음과 같이 해석할 수 있다. + +- `.items[*]`: 각 결과값에 대하여 +- `.spec`: spec 값을 가져온다. +- `.containers[*]`: 각 컨테이너에 대하여 +- `.image`: image 값을 가져온다. + +{{< note >}} +명령어로 하나의 파드를 가져올 때, 예를 들어 `kubectl get pod nginx` 라면, +jsonpath에서 `.items[*]` 부분은 생략해야 하는데, 이는 명령어가 아이템 목록이 아닌 +단 한 개의 아이템(여기선 파드)으로 결과값을 주기 때문이다. +{{< /note >}} + +## 각 파드의 컨테이너 이미지 보기 + +`range` 연산을 사용하여 명령어의 결과값에서 각각의 요소들을 +반복하여 출력할 수 있다. + +```shell +kubectl get pods --all-namespaces -o=jsonpath='{range .items[*]}{"\n"}{.metadata.name}{":\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}' |\ +sort +``` + +## 파드 레이블로 필터링된 컨테이너 이미지 목록 보기 + +특정 레이블에 맞는 파드를 지정하기 위해서 -l 플래그를 사용한다. 아래의 +명령어 결과값은 `app=nginx` 레이블에 일치하는 파드만 출력한다. + +```shell +kubectl get pods --all-namespaces -o=jsonpath="{..image}" -l app=nginx +``` + +## 파드 네임스페이스로 필터링된 컨테이너 이미지 목록 보기 + +특정 네임스페이스의 파드를 지정하려면, 네임스페이스 플래그를 사용한다. +아래의 명령어 결과값은 `kube-system` 네임스페이스에 있는 파드만 출력한다. + +```shell +kubectl get pods --namespace kube-system -o jsonpath="{..image}" +``` + +## jsonpath 대신 Go 템플릿을 사용하여 컨테이너 이미지 목록 보기 + +jsonpath의 대안으로 Kubectl은 [Go 템플릿](https://golang.org/pkg/text/template/)을 지원한다. +다음과 같이 결과값의 서식을 지정할 수 있다. 
+ +```shell +kubectl get pods --all-namespaces -o go-template --template="{{range .items}}{{range .spec.containers}}{{.image}} {{end}}{{end}}" +``` + +## {{% heading "whatsnext" %}} + +### 참조 + +* [Jsonpath](/ko/docs/reference/kubectl/jsonpath/) 참조 +* [Go 템플릿](https://golang.org/pkg/text/template/) 참조 diff --git a/content/ko/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md b/content/ko/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md index a6739cc2ea313..cc5f872cc582d 100644 --- a/content/ko/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md +++ b/content/ko/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md @@ -8,7 +8,7 @@ min-kubernetes-server-version: v1.10 이 페이지는 `kubectl port-forward` 를 사용해서 쿠버네티스 클러스터 내에서 -실행중인 Redis 서버에 연결하는 방법을 보여준다. 이 유형의 연결은 데이터베이스 +실행중인 MongoDB 서버에 연결하는 방법을 보여준다. 이 유형의 연결은 데이터베이스 디버깅에 유용할 수 있다. @@ -19,25 +19,25 @@ min-kubernetes-server-version: v1.10 * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -* [redis-cli](http://redis.io/topics/rediscli)를 설치한다. +* [MongoDB Shell](https://www.mongodb.com/try/download/shell)을 설치한다. -## Redis 디플로이먼트와 서비스 생성하기 +## MongoDB 디플로이먼트와 서비스 생성하기 -1. Redis를 실행하기 위해 디플로이먼트를 생성한다. +1. MongoDB를 실행하기 위해 디플로이먼트를 생성한다. ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/redis-master-deployment.yaml + kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-deployment.yaml ``` 성공적인 명령어의 출력은 디플로이먼트가 생성됐다는 것을 확인해준다. ``` - deployment.apps/redis-master created + deployment.apps/mongo created ``` 파드 상태를 조회하여 파드가 준비되었는지 확인한다. @@ -49,8 +49,8 @@ min-kubernetes-server-version: v1.10 출력은 파드가 생성되었다는 것을 보여준다. ``` - NAME READY STATUS RESTARTS AGE - redis-master-765d459796-258hz 1/1 Running 0 50s + NAME READY STATUS RESTARTS AGE + mongo-75f59d57f4-4nd6q 1/1 Running 0 2m4s ``` 디플로이먼트 상태를 조회한다. @@ -62,64 +62,65 @@ min-kubernetes-server-version: v1.10 출력은 디플로이먼트가 생성되었다는 것을 보여준다. ``` - NAME READY UP-TO-DATE AVAILABLE AGE - redis-master 1/1 1 1 55s + NAME READY UP-TO-DATE AVAILABLE AGE + mongo 1/1 1 1 2m21s ``` + 디플로이먼트는 자동으로 레플리카셋을 관리한다. 아래의 명령어를 사용하여 레플리카셋 상태를 조회한다. ```shell - kubectl get rs + kubectl get replicaset ``` 출력은 레플리카셋이 생성되었다는 것을 보여준다. ``` - NAME DESIRED CURRENT READY AGE - redis-master-765d459796 1 1 1 1m + NAME DESIRED CURRENT READY AGE + mongo-75f59d57f4 1 1 1 3m12s ``` -2. Redis를 네트워크에 노출시키기 위해 서비스를 생성한다. +2. MongoDB를 네트워크에 노출시키기 위해 서비스를 생성한다. ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/redis-master-service.yaml + kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-service.yaml ``` 성공적인 커맨드의 출력은 서비스가 생성되었다는 것을 확인해준다. ``` - service/redis-master created + service/mongo created ``` 서비스가 생성되었는지 확인한다. ```shell - kubectl get svc | grep redis + kubectl get service mongo ``` 출력은 서비스가 생성되었다는 것을 보여준다. ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - redis-master ClusterIP 10.0.0.213 6379/TCP 27s + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + mongo ClusterIP 10.96.41.183 27017/TCP 11s ``` -3. Redis 서버가 파드 안에서 실행되고 있고, 6379번 포트에서 수신하고 있는지 확인한다. +3. MongoDB 서버가 파드 안에서 실행되고 있고, 27017번 포트에서 수신하고 있는지 확인한다. ```shell - # redis-master-765d459796-258hz 를 파드 이름으로 변경한다. - kubectl get pod redis-master-765d459796-258hz --template='{{(index (index .spec.containers 0).ports 0).containerPort}}{{"\n"}}' + # mongo-75f59d57f4-4nd6q 를 당신의 파드 이름으로 대체한다. 
+ kubectl get pod mongo-75f59d57f4-4nd6q --template='{{(index (index .spec.containers 0).ports 0).containerPort}}{{"\n"}}' ``` - 출력은 파드 내 Redis 포트 번호를 보여준다. + 출력은 파드 내 MongoDB 포트 번호를 보여준다. ``` - 6379 + 27017 ``` - (이 TCP 포트는 Redis가 인터넷에 할당된 것이다). + (이는 인터넷 상의 MongoDB에 할당된 TCP 포트이다.) ## 파드의 포트를 로컬 포트로 포워딩하기 @@ -127,39 +128,39 @@ min-kubernetes-server-version: v1.10 ```shell - # redis-master-765d459796-258hz 를 파드 이름으로 변경한다. - kubectl port-forward redis-master-765d459796-258hz 7000:6379 + # mongo-75f59d57f4-4nd6q 를 당신의 파드 이름으로 대체한다. + kubectl port-forward mongo-75f59d57f4-4nd6q 28015:27017 ``` 이것은 ```shell - kubectl port-forward pods/redis-master-765d459796-258hz 7000:6379 + kubectl port-forward pods/mongo-75f59d57f4-4nd6q 28015:27017 ``` 또는 ```shell - kubectl port-forward deployment/redis-master 7000:6379 + kubectl port-forward deployment/mongo 28015:27017 ``` 또는 ```shell - kubectl port-forward rs/redis-master 7000:6379 + kubectl port-forward replicaset/mongo-75f59d57f4 28015:27017 ``` 또는 다음과 같다. ```shell - kubectl port-forward service/redis-master 7000:redis + kubectl port-forward service/mongo 28015:27017 ``` 위의 명령어들은 모두 동일하게 동작한다. 이와 유사하게 출력된다. ``` - Forwarding from 127.0.0.1:7000 -> 6379 - Forwarding from [::1]:7000 -> 6379 + Forwarding from 127.0.0.1:28015 -> 27017 + Forwarding from [::1]:28015 -> 27017 ``` {{< note >}} @@ -168,22 +169,22 @@ min-kubernetes-server-version: v1.10 {{< /note >}} -2. Redis 커맨드라인 인터페이스를 실행한다. +2. MongoDB 커맨드라인 인터페이스를 실행한다. ```shell - redis-cli -p 7000 + mongosh --port 28015 ``` -3. Redis 커맨드라인 프롬프트에 `ping` 명령을 입력한다. +3. MongoDB 커맨드라인 프롬프트에 `ping` 명령을 입력한다. ```shell - ping + db.runCommand( { ping: 1 } ) ``` 성공적인 핑 요청을 반환한다. ``` - PONG + { ok: 1 } ``` ### 선택적으로 _kubectl_ 이 로컬 포트를 선택하게 하기 {#let-kubectl-choose-local-port} @@ -193,15 +194,15 @@ min-kubernetes-server-version: v1.10 부담을 줄일 수 있다. ```shell -kubectl port-forward deployment/redis-master :6379 +kubectl port-forward deployment/mongo :27017 ``` -`kubectl` 도구는 사용 중이 아닌 로컬 포트 번호를 찾는다. (낮은 포트 번호는 -다른 애플리케이션에서 사용될 것이므로, 낮은 포트 번호를 피해서) 출력은 다음과 같을 것이다. +`kubectl` 도구는 사용 중이 아닌 로컬 포트 번호를 찾는다 (낮은 포트 번호는 +다른 애플리케이션에서 사용될 것이므로, 낮은 포트 번호를 피해서). 출력은 다음과 같을 것이다. ``` -Forwarding from 127.0.0.1:62162 -> 6379 -Forwarding from [::1]:62162 -> 6379 +Forwarding from 127.0.0.1:63753 -> 27017 +Forwarding from [::1]:63753 -> 27017 ``` @@ -209,7 +210,7 @@ Forwarding from [::1]:62162 -> 6379 ## 토의 -로컬 7000 포트에 대한 연결은 Redis 서버가 실행중인 파드의 6379 포트로 포워딩된다. +로컬 28015 포트에 대한 연결은 MongoDB 서버가 실행중인 파드의 27017 포트로 포워딩된다. 이 연결로 로컬 워크스테이션에서 파드 안에서 실행 중인 데이터베이스를 디버깅하는데 사용할 수 있다. diff --git a/content/ko/docs/tasks/administer-cluster/access-cluster-api.md b/content/ko/docs/tasks/administer-cluster/access-cluster-api.md index 123c09d2d251f..2d44a51b0e24c 100644 --- a/content/ko/docs/tasks/administer-cluster/access-cluster-api.md +++ b/content/ko/docs/tasks/administer-cluster/access-cluster-api.md @@ -193,7 +193,7 @@ func main() { } ``` -애플리케이션이 클러스터에서 파드로 배치된 경우, [파드 내에서 API 접근](#accessing-the-api-from-within-a-pod)을 참고한다. +애플리케이션이 클러스터 내의 파드로 배치된 경우, [파드 내에서 API 접근](/ko/docs/tasks/access-application-cluster/access-cluster/#파드에서-api-접근)을 참고한다. #### Python 클라이언트 {#python-client} @@ -216,7 +216,7 @@ for i in ret.items: #### Java 클라이언트 {#java-client} -* [Java 클라이언트](https://github.com/kubernetes-client/java)를 설치하려면, 다음을 실행한다. +[Java 클라이언트](https://github.com/kubernetes-client/java)를 설치하려면, 다음을 실행한다. 
```shell # java 라이브러리를 클론한다 @@ -353,99 +353,6 @@ exampleWithKubeConfig = do >>= print ``` +## {{% heading "whatsnext" %}} -### 파드 내에서 API에 접근 {#accessing-the-api-from-within-a-pod} - -파드 내에서 API에 접근할 때, API 서버를 찾아 인증하는 것은 -위에서 설명한 외부 클라이언트 사례와 약간 다르다. - -파드에서 쿠버네티스 API를 사용하는 가장 쉬운 방법은 -공식 [클라이언트 라이브러리](/ko/docs/reference/using-api/client-libraries/) 중 하나를 사용하는 것이다. 이러한 -라이브러리는 API 서버를 자동으로 감지하고 인증할 수 있다. - -#### 공식 클라이언트 라이브러리 사용 - -파드 내에서, 쿠버네티스 API에 연결하는 권장 방법은 다음과 같다. - - - Go 클라이언트의 경우, 공식 [Go 클라이언트 라이브러리](https://github.com/kubernetes/client-go/)를 사용한다. - `rest.InClusterConfig()` 기능은 API 호스트 검색과 인증을 자동으로 처리한다. - [여기 예제](https://git.k8s.io/client-go/examples/in-cluster-client-configuration/main.go)를 참고한다. - - - Python 클라이언트의 경우, 공식 [Python 클라이언트 라이브러리](https://github.com/kubernetes-client/python/)를 사용한다. - `config.load_incluster_config()` 기능은 API 호스트 검색과 인증을 자동으로 처리한다. - [여기 예제](https://github.com/kubernetes-client/python/blob/master/examples/in_cluster_config.py)를 참고한다. - - - 사용할 수 있는 다른 라이브러리가 많이 있다. [클라이언트 라이브러리](/ko/docs/reference/using-api/client-libraries/) 페이지를 참고한다. - -각각의 경우, 파드의 서비스 어카운트 자격 증명은 API 서버와 -안전하게 통신하는 데 사용된다. - -#### REST API에 직접 접근 - -파드에서 실행되는 동안, 쿠버네티스 apiserver는 `default` 네임스페이스에서 `kubernetes`라는 -서비스를 통해 접근할 수 있다. 따라서, 파드는 `kubernetes.default.svc` -호스트 이름을 사용하여 API 서버를 쿼리할 수 있다. 공식 클라이언트 라이브러리는 -이를 자동으로 수행한다. - -API 서버를 인증하는 권장 방법은 [서비스 어카운트](/docs/tasks/configure-pod-container/configure-service-account/) -자격 증명을 사용하는 것이다. 기본적으로, 파드는 -서비스 어카운트와 연결되어 있으며, 해당 서비스 어카운트에 대한 자격 증명(토큰)은 -해당 파드에 있는 각 컨테이너의 파일시스템 트리의 -`/var/run/secrets/kubernetes.io/serviceaccount/token` 에 있다. - -사용 가능한 경우, 인증서 번들은 각 컨테이너의 -파일시스템 트리의 `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt` 에 배치되며, -API 서버의 제공 인증서를 확인하는 데 사용해야 한다. - -마지막으로, 네임스페이스가 지정된 API 작업에 사용되는 기본 네임스페이스는 각 컨테이너의 -`/var/run/secrets/kubernetes.io/serviceaccount/namespace` 에 있는 파일에 배치된다. - -#### kubectl 프록시 사용 - -공식 클라이언트 라이브러리 없이 API를 쿼리하려면, 파드에서 -새 사이드카 컨테이너의 [명령](/ko/docs/tasks/inject-data-application/define-command-argument-container/)으로 -`kubectl proxy` 를 실행할 수 있다. 이런 식으로, `kubectl proxy` 는 -API를 인증하고 이를 파드의 `localhost` 인터페이스에 노출시켜서, 파드의 -다른 컨테이너가 직접 사용할 수 있도록 한다. - -#### 프록시를 사용하지 않고 접근 - -인증 토큰을 API 서버에 직접 전달하여 kubectl 프록시 사용을 -피할 수 있다. 내부 인증서는 연결을 보호한다. - -```shell -# 내부 API 서버 호스트 이름을 가리킨다 -APISERVER=https://kubernetes.default.svc - -# ServiceAccount 토큰 경로 -SERVICEACCOUNT=/var/run/secrets/kubernetes.io/serviceaccount - -# 이 파드의 네임스페이스를 읽는다 -NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) - -# ServiceAccount 베어러 토큰을 읽는다 -TOKEN=$(cat ${SERVICEACCOUNT}/token) - -# 내부 인증 기관(CA)을 참조한다 -CACERT=${SERVICEACCOUNT}/ca.crt - -# TOKEN으로 API를 탐색한다 -curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -X GET ${APISERVER}/api -``` - -출력은 다음과 비슷하다. 
- -```json -{ - "kind": "APIVersions", - "versions": [ - "v1" - ], - "serverAddressByClientCIDRs": [ - { - "clientCIDR": "0.0.0.0/0", - "serverAddress": "10.0.1.149:443" - } - ] -} -``` +* [파드 내에서 쿠버네티스 API에 접근](/ko/docs/tasks/run-application/access-api-from-pod/) diff --git a/content/ko/docs/tasks/administer-cluster/access-cluster-services.md b/content/ko/docs/tasks/administer-cluster/access-cluster-services.md index f8f707eb33d08..8019e46c255ee 100644 --- a/content/ko/docs/tasks/administer-cluster/access-cluster-services.md +++ b/content/ko/docs/tasks/administer-cluster/access-cluster-services.md @@ -19,22 +19,22 @@ content_type: task 쿠버네티스에서, [노드](/ko/docs/concepts/architecture/nodes/), [파드](/ko/docs/concepts/workloads/pods/) 및 [서비스](/ko/docs/concepts/services-networking/service/)는 모두 -고유한 IP를 가진다. 대부분의 경우, 클러스터의 노드 IP, 파드 IP 및 일부 서비스 IP는 라우팅할 수 -없으므로, 데스크톱 시스템과 같은 클러스터 외부 시스템에서 -도달할 수 없다. +고유한 IP를 가진다. 당신의 데스크탑 PC와 같은 클러스터 외부 장비에서는 +클러스터 상의 노드 IP, 파드 IP, 서비스 IP로 라우팅되지 않아서 +접근할 수 없을 것이다. ### 연결하는 방법 -클러스터 외부에서 노드, 파드 및 서비스에 연결하기 위한 몇 가지 옵션이 있다. +클러스터 외부에서 노드, 파드 및 서비스에 접속하기 위한 몇 가지 옵션이 있다. - 퍼블릭 IP를 통해 서비스에 접근한다. - - `NodePort` 또는 `LoadBalancer` 타입의 서비스를 사용하여 해당 서비스를 클러스터 외부에서 - 접근할 수 있게 한다. [서비스](/ko/docs/concepts/services-networking/service/)와 + - 클러스터 외부에서 접근할 수 있도록 `NodePort` 또는 `LoadBalancer` 타입의 + 서비스를 사용한다. [서비스](/ko/docs/concepts/services-networking/service/)와 [kubectl expose](/docs/reference/generated/kubectl/kubectl-commands/#expose) 문서를 참고한다. - - 클러스터 환경에 따라, 서비스는 단지 회사 네트워크에 노출되기도 하며, - 인터넷에 노출되는 경우도 있다. 노출되는 서비스가 안전한지 생각한다. - 자체 인증을 수행하는가? - - 서비스 뒤에 파드를 배치한다. 디버깅과 같은 목적으로 레플리카 집합에서 특정 파드에 접근하려면, + - 클러스터 환경에 따라, 서비스는 회사 네트워크에만 노출되기도 하며, + 인터넷에 노출되는 경우도 있다. 이 경우 노출되는 서비스의 보안 여부를 고려해야 한다. + 해당 서비스는 자체적으로 인증을 수행하는가? + - 파드는 서비스 뒤에 위치시킨다. 디버깅과 같은 목적으로 레플리카 집합에서 특정 파드에 접근하려면, 파드에 고유한 레이블을 배치하고 이 레이블을 선택하는 새 서비스를 생성한다. - 대부분의 경우, 애플리케이션 개발자가 nodeIP를 통해 노드에 직접 접근할 필요는 없다. @@ -54,8 +54,8 @@ content_type: task ### 빌트인 서비스 검색 -일반적으로, kube-system에 의해 클러스터에서 시작되는 몇 가지 서비스가 있다. `kubectl cluster-info` 명령을 -사용하여 이들의 목록을 얻는다. +일반적으로 kube-system에 의해 클러스터에 실행되는 몇 가지 서비스가 있다. +`kubectl cluster-info` 커맨드로 이 서비스의 리스트를 볼 수 있다. ```shell kubectl cluster-info @@ -83,7 +83,7 @@ heapster is running at https://104.197.5.247/api/v1/namespaces/kube-system/servi #### apiserver 프록시 URL 수동 구성 -위에서 언급한 것처럼, `kubectl cluster-info` 명령을 사용하여 서비스의 프록시 URL을 검색한다. 서비스 엔드포인트, 접미사 및 매개 변수를 포함하는 프록시 URL을 작성하려면, 단순히 서비스의 프록시 URL에 추가하면 된다. +위에서 언급한 것처럼, `kubectl cluster-info` 명령을 사용하여 서비스의 프록시 URL을 검색한다. 서비스 엔드포인트, 접미사 및 매개 변수를 포함하는 프록시 URL을 작성하려면, 서비스의 프록시 URL에 추가하면 된다. `http://`*`kubernetes_master_address`*`/api/v1/namespaces/`*`namespace_name`*`/services/`*`[https:]service_name[:port_name]`*`/proxy` 포트에 대한 이름을 지정하지 않은 경우, URL에 *port_name* 을 지정할 필요가 없다. diff --git a/content/ko/docs/tasks/administer-cluster/certificates.md b/content/ko/docs/tasks/administer-cluster/certificates.md new file mode 100644 index 0000000000000..8c8f6a148b01e --- /dev/null +++ b/content/ko/docs/tasks/administer-cluster/certificates.md @@ -0,0 +1,250 @@ +--- +title: 인증서 +content_type: task +weight: 20 +--- + + + + +클라이언트 인증서로 인증을 사용하는 경우 `easyrsa`, `openssl` 또는 `cfssl` +을 통해 인증서를 수동으로 생성할 수 있다. + + + + + + +### easyrsa + +**easyrsa** 는 클러스터 인증서를 수동으로 생성할 수 있다. + +1. easyrsa3의 패치 버전을 다운로드하여 압축을 풀고, 초기화한다. + + curl -LO https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz + tar xzf easy-rsa.tar.gz + cd easy-rsa-master/easyrsa3 + ./easyrsa init-pki +1. 새로운 인증 기관(CA)을 생성한다. `--batch` 는 자동 모드를 설정한다. 
+ `--req-cn` 는 CA의 새 루트 인증서에 대한 일반 이름(Common Name (CN))을 지정한다. + + ./easyrsa --batch "--req-cn=${MASTER_IP}@`date +%s`" build-ca nopass +1. 서버 인증서와 키를 생성한다. + `--subject-alt-name` 인수는 API 서버에 접근이 가능한 IP와 DNS + 이름을 설정한다. `MASTER_CLUSTER_IP` 는 일반적으로 API 서버와 + 컨트롤러 관리자 컴포넌트에 대해 `--service-cluster-ip-range` 인수로 + 지정된 서비스 CIDR의 첫 번째 IP이다. `--days` 인수는 인증서가 만료되는 + 일 수를 설정하는데 사용된다. + 또한, 아래 샘플은 기본 DNS 이름으로 `cluster.local` 을 + 사용한다고 가정한다. + + ./easyrsa --subject-alt-name="IP:${MASTER_IP},"\ + "IP:${MASTER_CLUSTER_IP},"\ + "DNS:kubernetes,"\ + "DNS:kubernetes.default,"\ + "DNS:kubernetes.default.svc,"\ + "DNS:kubernetes.default.svc.cluster,"\ + "DNS:kubernetes.default.svc.cluster.local" \ + --days=10000 \ + build-server-full server nopass +1. `pki/ca.crt`, `pki/issued/server.crt` 그리고 `pki/private/server.key` 를 디렉터리에 복사한다. +1. API 서버 시작 파라미터에 다음 파라미터를 채우고 추가한다. + + --client-ca-file=/yourdirectory/ca.crt + --tls-cert-file=/yourdirectory/server.crt + --tls-private-key-file=/yourdirectory/server.key + +### openssl + +**openssl** 은 클러스터 인증서를 수동으로 생성할 수 있다. + +1. ca.key를 2048bit로 생성한다. + + openssl genrsa -out ca.key 2048 +1. ca.key에 따라 ca.crt를 생성한다(인증서 유효 기간을 사용하려면 -days를 사용한다). + + openssl req -x509 -new -nodes -key ca.key -subj "/CN=${MASTER_IP}" -days 10000 -out ca.crt +1. server.key를 2048bit로 생성한다. + + openssl genrsa -out server.key 2048 +1. 인증서 서명 요청(Certificate Signing Request (CSR))을 생성하기 위한 설정 파일을 생성한다. + 파일에 저장하기 전에 꺾쇠 괄호(예: ``)로 + 표시된 값을 실제 값으로 대체한다(예: `csr.conf`). + `MASTER_CLUSTER_IP` 의 값은 이전 하위 섹션에서 + 설명한 대로 API 서버의 서비스 클러스터 IP이다. + 또한, 아래 샘플에서는 `cluster.local` 을 기본 DNS 도메인 + 이름으로 사용하고 있다고 가정한다. + + [ req ] + default_bits = 2048 + prompt = no + default_md = sha256 + req_extensions = req_ext + distinguished_name = dn + + [ dn ] + C = <국가(country)> + ST = <도(state)> + L = <시(city)> + O = <조직(organization)> + OU = <조직 단위(organization unit)> + CN = + + [ req_ext ] + subjectAltName = @alt_names + + [ alt_names ] + DNS.1 = kubernetes + DNS.2 = kubernetes.default + DNS.3 = kubernetes.default.svc + DNS.4 = kubernetes.default.svc.cluster + DNS.5 = kubernetes.default.svc.cluster.local + IP.1 = + IP.2 = + + [ v3_ext ] + authorityKeyIdentifier=keyid,issuer:always + basicConstraints=CA:FALSE + keyUsage=keyEncipherment,dataEncipherment + extendedKeyUsage=serverAuth,clientAuth + subjectAltName=@alt_names +1. 설정 파일을 기반으로 인증서 서명 요청을 생성한다. + + openssl req -new -key server.key -out server.csr -config csr.conf +1. ca.key, ca.crt 그리고 server.csr을 사용해서 서버 인증서를 생성한다. + + openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key \ + -CAcreateserial -out server.crt -days 10000 \ + -extensions v3_ext -extfile csr.conf +1. 인증서를 본다. + + openssl x509 -noout -text -in ./server.crt + +마지막으로, API 서버 시작 파라미터에 동일한 파라미터를 추가한다. + +### cfssl + +**cfssl** 은 인증서 생성을 위한 또 다른 도구이다. + +1. 아래에 표시된 대로 커맨드 라인 도구를 다운로드하여 압축을 풀고 준비한다. + 사용 중인 하드웨어 아키텍처 및 cfssl 버전에 따라 샘플 + 명령을 조정해야 할 수도 있다. + + curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssl_1.5.0_linux_amd64 -o cfssl + chmod +x cfssl + curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssljson_1.5.0_linux_amd64 -o cfssljson + chmod +x cfssljson + curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssl-certinfo_1.5.0_linux_amd64 -o cfssl-certinfo + chmod +x cfssl-certinfo +1. 아티팩트(artifact)를 보유할 디렉터리를 생성하고 cfssl을 초기화한다. + + mkdir cert + cd cert + ../cfssl print-defaults config > config.json + ../cfssl print-defaults csr > csr.json +1. CA 파일을 생성하기 위한 JSON 설정 파일을 `ca-config.json` 예시와 같이 생성한다. 
+ + { + "signing": { + "default": { + "expiry": "8760h" + }, + "profiles": { + "kubernetes": { + "usages": [ + "signing", + "key encipherment", + "server auth", + "client auth" + ], + "expiry": "8760h" + } + } + } + } +1. CA 인증서 서명 요청(CSR)을 위한 JSON 설정 파일을 + `ca-csr.json` 예시와 같이 생성한다. 꺾쇠 괄호로 표시된 + 값을 사용하려는 실제 값으로 변경한다. + + { + "CN": "kubernetes", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names":[{ + "C": "<국가(country)>", + "ST": "<도(state)>", + "L": "<시(city)>", + "O": "<조직(organization)>", + "OU": "<조직 단위(organization unit)>" + }] + } +1. CA 키(`ca-key.pem`)와 인증서(`ca.pem`)을 생성한다. + + ../cfssl gencert -initca ca-csr.json | ../cfssljson -bare ca +1. API 서버의 키와 인증서를 생성하기 위한 JSON 구성파일을 + `server-csr.json` 예시와 같이 생성한다. 꺾쇠 괄호 안의 값을 + 사용하려는 실제 값으로 변경한다. `MASTER_CLUSTER_IP` 는 + 이전 하위 섹션에서 설명한 API 서버의 클러스터 IP이다. + 아래 샘플은 기본 DNS 도메인 이름으로 `cluster.local` 을 + 사용한다고 가정한다. + + { + "CN": "kubernetes", + "hosts": [ + "127.0.0.1", + "", + "", + "kubernetes", + "kubernetes.default", + "kubernetes.default.svc", + "kubernetes.default.svc.cluster", + "kubernetes.default.svc.cluster.local" + ], + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [{ + "C": "<국가(country)>", + "ST": "<도(state)>", + "L": "<시(city)>", + "O": "<조직(organization)>", + "OU": "<조직 단위(organization unit)>" + }] + } +1. API 서버 키와 인증서를 생성하면, 기본적으로 + `server-key.pem` 과 `server.pem` 파일에 각각 저장된다. + + ../cfssl gencert -ca=ca.pem -ca-key=ca-key.pem \ + --config=ca-config.json -profile=kubernetes \ + server-csr.json | ../cfssljson -bare server + + +## 자체 서명된 CA 인증서의 배포 + +클라이언트 노드는 자체 서명된 CA 인증서를 유효한 것으로 인식하지 않을 수 있다. +비-프로덕션 디플로이먼트 또는 회사 방화벽 뒤에서 실행되는 +디플로이먼트의 경우, 자체 서명된 CA 인증서를 모든 클라이언트에 +배포하고 유효한 인증서의 로컬 목록을 새로 고칠 수 있다. + +각 클라이언트에서, 다음 작업을 수행한다. + +```bash +sudo cp ca.crt /usr/local/share/ca-certificates/kubernetes.crt +sudo update-ca-certificates +``` + +``` +Updating certificates in /etc/ssl/certs... +1 added, 0 removed; done. +Running hooks in /etc/ca-certificates/update.d.... +done. +``` + +## 인증서 API + +`certificates.k8s.io` API를 사용해서 +[여기](/docs/tasks/tls/managing-tls-in-a-cluster)에 +설명된 대로 인증에 사용할 x509 인증서를 프로비전 할 수 있다. diff --git a/content/ko/docs/tasks/administer-cluster/change-default-storage-class.md b/content/ko/docs/tasks/administer-cluster/change-default-storage-class.md index 8fd7445fb7dc5..ff6379ee1f75d 100644 --- a/content/ko/docs/tasks/administer-cluster/change-default-storage-class.md +++ b/content/ko/docs/tasks/administer-cluster/change-default-storage-class.md @@ -32,7 +32,7 @@ content_type: task 수도 있다. 이런 경우에, 기본 스토리지 클래스를 변경하거나 완전히 비활성화 하여 스토리지의 동적 프로비저닝을 방지할 수 있다. -단순하게 기본 스토리지클래스를 삭제하는 경우, 사용자의 클러스터에서 구동중인 +기본 스토리지클래스를 삭제하는 경우, 사용자의 클러스터에서 구동 중인 애드온 매니저에 의해 자동으로 다시 생성될 수 있으므로 정상적으로 삭제가 되지 않을 수도 있다. 애드온 관리자 및 개별 애드온을 비활성화 하는 방법에 대한 자세한 내용은 설치 문서를 참조하자. @@ -70,7 +70,7 @@ content_type: task 1. 스토리지클래스를 기본값으로 표시한다. - 이전 과정과 유사하게, 어노테이션을 추가/설정 해야 한다. + 이전 과정과 유사하게, 어노테이션을 추가/설정해야 한다. `storageclass.kubernetes.io/is-default-class=true`. ```bash diff --git a/content/ko/docs/tasks/administer-cluster/coredns.md b/content/ko/docs/tasks/administer-cluster/coredns.md index 6f0caad9e40bb..414f681901fb3 100644 --- a/content/ko/docs/tasks/administer-cluster/coredns.md +++ b/content/ko/docs/tasks/administer-cluster/coredns.md @@ -33,8 +33,8 @@ Kube-dns의 배포나 교체에 관한 매뉴얼은 [CoreDNS GitHub 프로젝트 ### Kubeadm을 사용해 기존 클러스터 업그레이드하기 쿠버네티스 버전 1.10 이상에서, `kube-dns` 를 사용하는 클러스터를 업그레이드하기 위하여 -`kubeadm` 을 사용할 때 CoreDNS로 이동할 수도 있다. 
이 경우, `kubeadm` 은 -`kube-dns` 컨피그맵(ConfigMap)을 기반으로 패더레이션, 스텁 도메인(stub domain), 업스트림 네임 서버의 +`kubeadm` 을 사용할 때 CoreDNS로 전환할 수도 있다. 이 경우, `kubeadm` 은 +`kube-dns` 컨피그맵(ConfigMap)을 기반으로 스텁 도메인(stub domain), 업스트림 네임 서버의 설정을 유지하며 CoreDNS 설정("Corefile")을 생성한다. 만약 kube-dns에서 CoreDNS로 이동하는 경우, 업그레이드 과정에서 기능 게이트의 `CoreDNS` 값을 `true` 로 설정해야 한다. @@ -44,8 +44,6 @@ kubeadm upgrade apply v1.11.0 --feature-gates=CoreDNS=true ``` 쿠버네티스 1.13 이상에서 기능 게이트의 `CoreDNS` 항목은 제거되었으며, CoreDNS가 기본적으로 사용된다. -업그레이드된 클러스터에서 kube-dns를 사용하려는 경우, [여기](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase#cmd-phase-addon)에 -설명된 지침 가이드를 참고하자. 1.11 미만 버전일 경우 업그레이드 과정에서 만들어진 파일이 Corefile을 **덮어쓴다**. **만약 컨피그맵을 사용자 정의한 경우, 기존의 컨피그맵을 저장해야 한다.** 새 컨피그맵이 @@ -54,26 +52,7 @@ kubeadm upgrade apply v1.11.0 --feature-gates=CoreDNS=true 만약 쿠버네티스 1.11 이상 버전에서 CoreDNS를 사용하는 경우, 업그레이드 과정에서, 기존의 Corefile이 유지된다. - -### Kubeadm을 사용해 CoreDNS가 아닌 kube-dns 설치하기 - -{{< note >}} -쿠버네티스 1.11 버전에서, CoreDNS는 GA(General Availability) 되었으며, -기본적으로 설치된다. -{{< /note >}} - -{{< warning >}} -쿠버네티스 1.18 버전에서, kubeadm을 통한 kube-dns는 사용 중단되었으며, 향후 버전에서 제거될 예정이다. -{{< /warning >}} - -1.13 보다 이전 버전에서 kube-dns를 설치하는경우, 기능 게이트의 `CoreDNS` -값을 `false` 로 변경해야 한다. - -``` -kubeadm init --feature-gates=CoreDNS=false -``` - -1.13 이후 버전에서는, [여기](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase#cmd-phase-addon)에 설명된 지침 가이드를 참고하자. +쿠버네티스 버전 1.21에서, kubeadm 의 `kube-dns` 지원 기능이 삭제되었다. ## CoreDNS 업그레이드하기 diff --git a/content/ko/docs/tasks/administer-cluster/dns-custom-nameservers.md b/content/ko/docs/tasks/administer-cluster/dns-custom-nameservers.md index ad760098446ce..9521bb1ec65fd 100644 --- a/content/ko/docs/tasks/administer-cluster/dns-custom-nameservers.md +++ b/content/ko/docs/tasks/administer-cluster/dns-custom-nameservers.md @@ -31,7 +31,7 @@ DNS는 _애드온 관리자_ 인 [클러스터 애드온](http://releases.k8s.io CoreDNS 대신 `kube-dns` 를 계속 사용할 수도 있다. {{< note >}} -CoreDNS와 kube-dns 서비스 모두 `metadata.name` 필드에 `kube-dns` 로 이름이 지정된다. +CoreDNS 서비스는 `metadata.name` 필드에 `kube-dns` 로 이름이 지정된다. 이를 통해, 기존의 `kube-dns` 서비스 이름을 사용하여 클러스터 내부의 주소를 확인하는 워크로드에 대한 상호 운용성이 증가된다. `kube-dns` 로 서비스 이름을 사용하면, 해당 DNS 공급자가 어떤 공통 이름으로 실행되고 있는지에 대한 구현 세부 정보를 추상화한다. {{< /note >}} @@ -176,17 +176,14 @@ kube-dns는 스텁 도메인 및 네임서버(예: ns.foo.com)에 대한 FQDN을 CoreDNS는 kube-dns 이상의 기능을 지원한다. `StubDomains` 과 `upstreamNameservers` 를 지원하도록 생성된 kube-dns의 컨피그맵은 CoreDNS의 `forward` 플러그인으로 변환된다. -마찬가지로, kube-dns의 `Federations` 플러그인은 CoreDNS의 `federation` 플러그인으로 변환된다. ### 예시 -kube-dns에 대한 이 컨피그맵 예제는 federations, stubDomains 및 upstreamNameservers를 지정한다. +kube-dns에 대한 이 컨피그맵 예제는 stubDomains 및 upstreamNameservers를 지정한다. ```yaml apiVersion: v1 data: - federations: | - {"foo" : "foo.feddomain.com"} stubDomains: | {"abc.com" : ["1.2.3.4"], "my.cluster.local" : ["2.3.4.5"]} upstreamNameservers: | @@ -196,13 +193,6 @@ kind: ConfigMap CoreDNS에서는 동등한 설정으로 Corefile을 생성한다. 
-* federations 에 대응하는 설정: -``` -federation cluster.local { - foo foo.feddomain.com -} -``` - * stubDomains 에 대응하는 설정: ```yaml abc.com:53 { diff --git a/content/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md b/content/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md index ce453789cb120..16c84d451cecc 100644 --- a/content/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md +++ b/content/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md @@ -69,7 +69,7 @@ VXLAN/오버레이 네트워킹을 사용하는 경우 [KB4489899](https://suppo "Network": "10.244.0.0/16", "Backend": { "Type": "vxlan", - "VNI" : 4096, + "VNI": 4096, "Port": 4789 } } diff --git a/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md b/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md index 272da1e329961..3474aee3c2b74 100644 --- a/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md +++ b/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md @@ -168,36 +168,7 @@ controllerManager: ### 인증서 서명 요청(CSR) 생성 -`kubeadm certs renew --use-api` 로 쿠버네티스 인증서 API에 대한 인증서 서명 요청을 만들 수 있다. - -[cert-manager](https://github.com/jetstack/cert-manager)와 같은 외부 서명자를 설정하면, 인증서 서명 요청(CSR)이 자동으로 승인된다. -그렇지 않으면, [`kubectl certificate`](/ko/docs/setup/best-practices/certificates/) 명령을 사용하여 인증서를 수동으로 승인해야 한다. -다음의 kubeadm 명령은 승인할 인증서 이름을 출력한 다음, 승인이 발생하기를 차단하고 기다린다. - -```shell -sudo kubeadm certs renew apiserver --use-api & -``` -출력 결과는 다음과 비슷하다. -``` -[1] 2890 -[certs] certificate request "kubeadm-cert-kube-apiserver-ld526" created -``` - -### 인증서 서명 요청(CSR) 승인 - -외부 서명자를 설정하면, 인증서 서명 요청(CSR)이 자동으로 승인된다. - -그렇지 않으면, [`kubectl certificate`](/ko/docs/setup/best-practices/certificates/) 명령을 사용하여 인증서를 수동으로 승인해야 한다. 예를 들어 다음과 같다. - -```shell -kubectl certificate approve kubeadm-cert-kube-apiserver-ld526 -``` -출력 결과는 다음과 비슷하다. -```shell -certificatesigningrequest.certificates.k8s.io/kubeadm-cert-kube-apiserver-ld526 approved -``` - -`kubectl get csr` 명령으로 보류 중인 인증서 목록을 볼 수 있다. +쿠버네티스 API로 CSR을 작성하려면 [CertificateSigningRequest 생성](/docs/reference/access-authn-authz/certificate-signing-requests/#create-certificatesigningrequest)을 본다. ## 외부 CA로 인증서 갱신 diff --git a/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md b/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md index bf70b98e939bc..2227c49c9eae8 100644 --- a/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md +++ b/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md @@ -37,7 +37,7 @@ weight: 20 ### 추가 정보 -- kubelet 마이너 버전을 업그레이드하기 전에 [노드 드레이닝(draining)](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/)이 +- kubelet 마이너 버전을 업그레이드하기 전에 [노드 드레이닝(draining)](/docs/tasks/administer-cluster/safely-drain-node/)이 필요하다. 컨트롤 플레인 노드의 경우 CoreNDS 파드 또는 기타 중요한 워크로드를 실행할 수 있다. - 컨테이너 사양 해시 값이 변경되므로, 업그레이드 후 모든 컨테이너가 다시 시작된다. @@ -328,7 +328,7 @@ etcd 업그레이드가 실패하고 자동 롤백이 작동하지 않으면, - 컨트롤 플레인 이미지가 사용 가능한지 또는 머신으로 가져올 수 있는지 확인한다. - 컴포넌트 구성에 버전 업그레이드가 필요한 경우 대체 구성을 생성하거나 사용자가 제공한 것으로 덮어 쓰기한다. - 컨트롤 플레인 컴포넌트 또는 롤백 중 하나라도 나타나지 않으면 업그레이드한다. -- 새로운 `kube-dns` 와 `kube-proxy` 매니페스트를 적용하고 필요한 모든 RBAC 규칙이 생성되도록 한다. +- 새로운 `CoreDNS` 와 `kube-proxy` 매니페스트를 적용하고 필요한 모든 RBAC 규칙이 생성되도록 한다. - API 서버의 새 인증서와 키 파일을 작성하고 180일 후에 만료될 경우 이전 파일을 백업한다. `kubeadm upgrade node` 는 추가 컨트롤 플레인 노드에서 다음을 수행한다. 
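As an illustration of the CertificateSigningRequest flow that the kubeadm-certs hunk above now defers to, the sketch below creates and approves a CSR object through the `certificates.k8s.io/v1` API. This is a minimal sketch, not part of the referenced page: the object name `my-csr`, the input file `server.csr`, and the choice of the built-in `kubernetes.io/kube-apiserver-client` signer are placeholder assumptions for the example.

```shell
# Minimal sketch: submit an existing PEM-encoded CSR (server.csr is a
# placeholder file) as a CertificateSigningRequest object.
cat <<EOF | kubectl apply -f -
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: my-csr                                  # placeholder name
spec:
  request: $(base64 < server.csr | tr -d '\n')  # base64-encoded PEM CSR
  signerName: kubernetes.io/kube-apiserver-client
  usages:
  - client auth
EOF

# Approve the request manually (an external signer such as cert-manager may
# approve it automatically), then read the issued certificate back from the
# object's status once it has been signed.
kubectl certificate approve my-csr
kubectl get csr my-csr -o jsonpath='{.status.certificate}' | base64 --decode > server.crt
```

Until the request is approved and signed, `.status.certificate` stays empty, which is why the approval step comes before reading the certificate back.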
diff --git a/content/ko/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md b/content/ko/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md index bee3c940697be..25abb14314cc6 100644 --- a/content/ko/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md +++ b/content/ko/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md @@ -18,7 +18,7 @@ weight: 10 **사전요구사항**: [gcloud](https://cloud.google.com/sdk/docs/quickstarts). -1. 캘리코로 GKE 클러스터를 시작하려면, `--enable-network-policy` 플래그를 추가하면 된다. +1. 캘리코로 GKE 클러스터를 시작하려면, `--enable-network-policy` 플래그를 추가한다. **문법** ```shell diff --git a/content/ko/docs/tasks/configure-pod-container/pull-image-private-registry.md b/content/ko/docs/tasks/configure-pod-container/pull-image-private-registry.md index 34cb102f83fe0..5a8295aff2f70 100644 --- a/content/ko/docs/tasks/configure-pod-container/pull-image-private-registry.md +++ b/content/ko/docs/tasks/configure-pod-container/pull-image-private-registry.md @@ -9,18 +9,13 @@ weight: 100 이 페이지는 프라이빗 도커 레지스트리나 리포지터리로부터 이미지를 받아오기 위해 시크릿(Secret)을 사용하는 파드를 생성하는 방법을 보여준다. - - ## {{% heading "prerequisites" %}} - * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * 이 실습을 수행하기 위해, [도커 ID](https://docs.docker.com/docker-id/)와 비밀번호가 필요하다. - - ## 도커 로그인 @@ -106,7 +101,8 @@ kubectl create secret docker-registry regcred --docker-server=` 은 프라이빗 도커 저장소의 FQDN 주소이다. (도커허브(DockerHub)의 경우, https://index.docker.io/v1/) +* `` 은 프라이빗 도커 저장소의 FQDN 주소이다. + 도커허브(DockerHub)는 `https://index.docker.io/v2/` 를 사용한다. * `` 은 도커 사용자의 계정이다. * `` 은 도커 사용자의 비밀번호이다. * `` 은 도커 사용자의 이메일 주소이다. @@ -192,7 +188,8 @@ your.private.registry.example.com/janedoe/jdoe-private:v1 ``` 프라이빗 저장소에서 이미지를 받아오기 위하여, 쿠버네티스에서 자격 증명이 필요하다. -구성 파일의 `imagePullSecrets` 필드를 통해 쿠버네티스가 `regcred` 라는 시크릿으로부터 자격 증명을 가져올 수 있다. +구성 파일의 `imagePullSecrets` 필드를 통해 쿠버네티스가 +`regcred` 라는 시크릿으로부터 자격 증명을 가져올 수 있다. 시크릿을 사용해서 파드를 생성하고, 파드가 실행되는지 확인하자. @@ -201,16 +198,11 @@ kubectl apply -f my-private-reg-pod.yaml kubectl get pod private-reg ``` - - ## {{% heading "whatsnext" %}} - * [시크릿](/ko/docs/concepts/configuration/secret/)에 대해 더 배워 보기. * [프라이빗 레지스트리 사용](/ko/docs/concepts/containers/images/#프라이빗-레지스트리-사용)에 대해 더 배워 보기. * [서비스 어카운트에 풀 시크릿(pull secret) 추가하기](/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account)에 대해 더 배워 보기. * [kubectl create secret docker-registry](/docs/reference/generated/kubectl/kubectl-commands/#-em-secret-docker-registry-em-)에 대해 읽어보기. * [시크릿](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#secret-v1-core)에 대해 읽어보기. * [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core)의 `imagePullSecrets` 필드에 대해 읽어보기. - - diff --git a/content/ko/docs/tasks/configure-pod-container/static-pod.md b/content/ko/docs/tasks/configure-pod-container/static-pod.md index 41e1f6f7b0a4d..aea2fcffa1614 100644 --- a/content/ko/docs/tasks/configure-pod-container/static-pod.md +++ b/content/ko/docs/tasks/configure-pod-container/static-pod.md @@ -22,6 +22,7 @@ Kubelet 은 각각의 스태틱 파드에 대하여 쿠버네티스 API 서버 생성하려고 자동으로 시도한다. 즉, 노드에서 구동되는 파드는 API 서버에 의해서 볼 수 있지만, API 서버에서 제어될 수는 없다. +파드 이름에는 노드 호스트 이름 앞에 하이픈을 붙여 접미사로 추가된다. {{< note >}} 만약 클러스터로 구성된 쿠버네티스를 구동하고 있고, 스태틱 파드를 사용하여 @@ -30,21 +31,14 @@ API 서버에서 제어될 수는 없다. 을 사용하는 것이 바람직하다. 
{{< /note >}} - - ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} 이 페이지는 파드를 실행하기 위해 {{< glossary_tooltip term_id="docker" >}}를 사용하며, 노드에서 Fedora 운영 체제를 구동하고 있다고 가정한다. 다른 배포판이나 쿠버네티스 설치 지침과는 다소 상이할 수 있다. - - - - ## 스태틱 파드 생성하기 {#static-pod-creation} @@ -53,7 +47,9 @@ API 서버에서 제어될 수는 없다. ### 파일시스템이 호스팅 하는 스태틱 파드 매니페스트 {#configuration-files} -매니페스트는 특정 디렉터리에 있는 JSON 이나 YAML 형식의 표준 파드 정의이다. [kubelet 구성 파일](/docs/tasks/administer-cluster/kubelet-config-file)의 `staticPodPath: ` 필드를 사용하자. 이 디렉터리를 정기적으로 스캔하여, 디렉터리 안의 YAML/JSON 파일이 생성되거나 삭제되었을 때 스태틱 파드를 생성하거나 삭제한다. +매니페스트는 특정 디렉터리에 있는 JSON 이나 YAML 형식의 표준 파드 정의이다. +[kubelet 구성 파일](/docs/reference/config-api/kubelet-config.v1beta1/)의 `staticPodPath: ` 필드를 사용하자. +명시한 디렉터리를 정기적으로 스캔하여, 디렉터리 안의 YAML/JSON 파일이 생성되거나 삭제되었을 때 스태틱 파드를 생성하거나 삭제한다. Kubelet 이 특정 디렉터리를 스캔할 때 점(.)으로 시작하는 단어를 무시한다는 점을 유의하자. 예를 들어, 다음은 스태틱 파드로 간단한 웹 서버를 구동하는 방법을 보여준다. @@ -89,17 +85,18 @@ Kubelet 이 특정 디렉터리를 스캔할 때 점(.)으로 시작하는 단 3. 노드에서 kubelet 실행 시에 `--pod-manifest-path=/etc/kubelet.d/` 와 같이 인자를 제공하여 해당 디렉터리를 사용하도록 구성한다. Fedora 의 경우 이 줄을 포함하기 위하여 `/etc/kubernetes/kubelet` 파일을 다음과 같이 수정한다. - ``` - KUBELET_ARGS="--cluster-dns=10.254.0.10 --cluster-domain=kube.local --pod-manifest-path=/etc/kubelet.d/" - ``` - 혹은 [kubelet 구성 파일](/docs/tasks/administer-cluster/kubelet-config-file)에 `staticPodPath: ` 필드를 추가한다. + ``` + KUBELET_ARGS="--cluster-dns=10.254.0.10 --cluster-domain=kube.local --pod-manifest-path=/etc/kubelet.d/" + ``` + 혹은 [kubelet 구성 파일](/docs/reference/config-api/kubelet-config.v1beta1/)에 + `staticPodPath: ` 필드를 추가한다. 4. kubelet을 재시작한다. Fedora의 경우 아래와 같이 수행한다. - ```shell - # kubelet 이 동작하고 있는 노드에서 이 명령을 수행한다. - systemctl restart kubelet - ``` + ```shell + # kubelet 이 동작하고 있는 노드에서 이 명령을 수행한다. + systemctl restart kubelet + ``` ### 웹이 호스팅 하는 스태틱 파드 매니페스트 {#pods-created-via-http} diff --git a/content/ko/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md b/content/ko/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md index 2c7a95a136e0e..f8696993ffe15 100644 --- a/content/ko/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md +++ b/content/ko/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md @@ -57,7 +57,7 @@ kubectl describe pods ${POD_NAME} 절대 스케줄 될 수 없다. 사용자는 `kubectl get nodes -o ` 명령으로 노드의 - 용량을 점검할 수 있다. 다음은 필요한 정보만을 추출하는 몇 가지 + 용량을 점검할 수 있다. 다음은 필요한 정보를 추출하는 몇 가지 명령의 예이다. ```shell diff --git a/content/ko/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md b/content/ko/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md deleted file mode 100644 index 54a2ae61b0542..0000000000000 --- a/content/ko/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -content_type: concept -title: 엘라스틱서치(Elasticsearch) 및 키바나(Kibana)를 사용한 로깅 ---- - - - -Google 컴퓨트 엔진(Compute Engine, GCE) 플랫폼에서, 기본 로깅 지원은 -[스택드라이버(Stackdriver) 로깅](https://cloud.google.com/logging/)을 대상으로 한다. 이는 -[스택드라이버 로깅으로 로깅하기](/docs/tasks/debug-application-cluster/logging-stackdriver)에 자세히 설명되어 있다. - -이 문서에서는 GCE에서 운영할 때 스택드라이버 로깅의 대안으로, -[엘라스틱서치](https://www.elastic.co/products/elasticsearch)에 로그를 수집하고 -[키바나](https://www.elastic.co/products/kibana)를 사용하여 볼 수 있도록 -클러스터를 설정하는 방법에 대해 설명한다. - -{{< note >}} -Google 쿠버네티스 엔진(Kubernetes Engine)에서 호스팅되는 쿠버네티스 클러스터에는 엘라스틱서치 및 키바나를 자동으로 배포할 수 없다. 수동으로 배포해야 한다. 
-{{< /note >}} - - - - - -클러스터 로깅에 엘라스틱서치, 키바나를 사용하려면 kube-up.sh를 사용하여 -클러스터를 생성할 때 아래와 같이 다음의 환경 변수를 -설정해야 한다. - -```shell -KUBE_LOGGING_DESTINATION=elasticsearch -``` - -또한 `KUBE_ENABLE_NODE_LOGGING=true`(GCE 플랫폼의 기본값)인지 확인해야 한다. - -이제, 클러스터를 만들 때, 각 노드에서 실행되는 Fluentd 로그 수집 데몬이 -엘라스틱서치를 대상으로 한다는 메시지가 나타난다. - -```shell -cluster/kube-up.sh -``` -``` -... -Project: kubernetes-satnam -Zone: us-central1-b -... calling kube-up -Project: kubernetes-satnam -Zone: us-central1-b -+++ Staging server tars to Google Storage: gs://kubernetes-staging-e6d0e81793/devel -+++ kubernetes-server-linux-amd64.tar.gz uploaded (sha1 = 6987c098277871b6d69623141276924ab687f89d) -+++ kubernetes-salt.tar.gz uploaded (sha1 = bdfc83ed6b60fa9e3bff9004b542cfc643464cd0) -Looking for already existing resources -Starting master and configuring firewalls -Created [https://www.googleapis.com/compute/v1/projects/kubernetes-satnam/zones/us-central1-b/disks/kubernetes-master-pd]. -NAME ZONE SIZE_GB TYPE STATUS -kubernetes-master-pd us-central1-b 20 pd-ssd READY -Created [https://www.googleapis.com/compute/v1/projects/kubernetes-satnam/regions/us-central1/addresses/kubernetes-master-ip]. -+++ Logging using Fluentd to elasticsearch -``` - -노드별 Fluentd 파드, 엘라스틱서치 파드 및 키바나 파드는 -클러스터가 활성화된 직후 kube-system 네임스페이스에서 모두 실행되어야 -한다. - -```shell -kubectl get pods --namespace=kube-system -``` -``` -NAME READY STATUS RESTARTS AGE -elasticsearch-logging-v1-78nog 1/1 Running 0 2h -elasticsearch-logging-v1-nj2nb 1/1 Running 0 2h -fluentd-elasticsearch-kubernetes-node-5oq0 1/1 Running 0 2h -fluentd-elasticsearch-kubernetes-node-6896 1/1 Running 0 2h -fluentd-elasticsearch-kubernetes-node-l1ds 1/1 Running 0 2h -fluentd-elasticsearch-kubernetes-node-lz9j 1/1 Running 0 2h -kibana-logging-v1-bhpo8 1/1 Running 0 2h -kube-dns-v3-7r1l9 3/3 Running 0 2h -monitoring-heapster-v4-yl332 1/1 Running 1 2h -monitoring-influx-grafana-v1-o79xf 2/2 Running 0 2h -``` - -`fluentd-elasticsearch` 파드는 각 노드에서 로그를 수집하여 -`elasticsearch-logging` 파드로 전송한다. 이 로그는 `elasticsearch-logging` 이라는 -[서비스](/ko/docs/concepts/services-networking/service/)의 일부이다. 이 -엘라스틱서치 파드는 로그를 저장하고 REST API를 통해 노출한다. -`kibana-logging` 파드는 엘라스틱서치에 저장된 로그를 읽기 위한 웹 UI를 -제공하며, `kibana-logging` 이라는 서비스의 일부이다. - -엘라스틱서치 및 키바나 서비스는 모두 `kube-system` 네임스페이스에 -있으며 공개적으로 접근 가능한 IP 주소를 통해 직접 노출되지 않는다. 이를 위해, -[클러스터에서 실행 중인 서비스 접근](/ko/docs/tasks/access-application-cluster/access-cluster/#클러스터에서-실행되는-서비스로-액세스)에 -대한 지침을 참고한다. - -브라우저에서 `elasticsearch-logging` 서비스에 접근하려고 하면, -다음과 같은 상태 페이지가 표시된다. - -![엘라스틱서치 상태](/images/docs/es-browser.png) - -원할 경우, 이제 엘라스틱서치 쿼리를 브라우저에 직접 입력할 수 -있다. 수행 방법에 대한 자세한 내용은 [엘라스틱서치의 문서](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-uri-request.html)를 -참조한다. - -또는, 키바나를 사용하여 클러스터의 로그를 볼 수도 있다(다시 -[클러스터에서 실행되는 서비스에 접근하기 위한 지침](/ko/docs/tasks/access-application-cluster/access-cluster/#클러스터에서-실행되는-서비스로-액세스)을 참고). -키바나 URL을 처음 방문하면 수집된 로그 보기를 -구성하도록 요청하는 페이지가 표시된다. 시계열 값에 -대한 옵션을 선택하고 `@timestamp` 를 선택한다. 다음 페이지에서 -`Discover` 탭을 선택하면 수집된 로그를 볼 수 있다. -로그를 정기적으로 새로 고치려면 새로 고침 간격을 5초로 -설정할 수 있다. - -키바나 뷰어에서 수집된 로그의 일반적인 보기는 다음과 같다. - -![키바나 로그](/images/docs/kibana-logs.png) - - - -## {{% heading "whatsnext" %}} - - -키바나는 로그를 탐색하기 위한 모든 종류의 강력한 옵션을 제공한다! 이를 파헤치는 방법에 대한 -아이디어는 [키바나의 문서](https://www.elastic.co/guide/en/kibana/current/discover.html)를 확인한다. 
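Returning to the static Pod workflow described above, a minimal sketch may help; it assumes the kubelet on the node already watches `/etc/kubelet.d/` (via `staticPodPath` or `--pod-manifest-path`) and drops a simple nginx manifest there:

```shell
# Run this on the node itself.
sudo mkdir -p /etc/kubelet.d/
cat <<EOF | sudo tee /etc/kubelet.d/static-web.yaml
apiVersion: v1
kind: Pod
metadata:
  name: static-web
  labels:
    role: myrole
spec:
  containers:
    - name: web
      image: nginx
      ports:
        - name: web
          containerPort: 80
          protocol: TCP
EOF
```

The kubelet then creates a mirror Pod on the API server whose name carries the node hostname as a hyphenated suffix (for example `static-web-mynode`), which is visible via `kubectl get pods` but cannot be controlled through the API server.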
diff --git a/content/ko/docs/tasks/extend-kubectl/kubectl-plugins.md b/content/ko/docs/tasks/extend-kubectl/kubectl-plugins.md index f46df1e51102a..77081a82f3c30 100644 --- a/content/ko/docs/tasks/extend-kubectl/kubectl-plugins.md +++ b/content/ko/docs/tasks/extend-kubectl/kubectl-plugins.md @@ -22,7 +22,7 @@ content_type: task ## kubectl 플러그인 설치 -플러그인은 이름이 `kubectl-` 로 시작되는 독립형 실행 파일이다. 플러그인을 설치하려면, 간단히 실행 파일을 `PATH` 에 지정된 디렉터리로 옮기면 된다. +플러그인은 이름이 `kubectl-` 로 시작되는 독립형 실행 파일이다. 플러그인을 설치하려면, 실행 파일을 `PATH` 에 지정된 디렉터리로 옮기면 된다. [Krew](https://krew.dev/)를 사용하여 오픈소스에서 사용 가능한 kubectl 플러그인을 검색하고 설치할 수도 있다. Krew는 쿠버네티스 SIG CLI 커뮤니티에서 관리하는 @@ -57,9 +57,9 @@ Krew [플러그인 인덱스](https://krew.sigs.k8s.io/plugins/)를 통해 사 플러그인 설치 또는 사전 로딩이 필요하지 않다. 플러그인 실행 파일은 `kubectl` 바이너리에서 상속된 환경을 받는다. -플러그인은 이름을 기반으로 구현할 명령 경로를 결정한다. 예를 -들어, 새로운 명령인 `kubectl foo` 를 제공하려는 플러그인은 단순히 이름이 -`kubectl-foo` 이고, `PATH` 의 어딘가에 있다. +플러그인은 이름을 기반으로 구현할 명령 경로를 결정한다. +예를 들어, `kubectl-foo` 라는 플러그인은 `kubectl foo` 명령을 제공한다. +`PATH` 어딘가에 플러그인 실행 파일을 설치해야 한다. ### 플러그인 예제 @@ -85,30 +85,31 @@ echo "I am a plugin named kubectl-foo" ### 플러그인 사용 -위의 플러그인을 사용하려면, 간단히 실행 가능하게 만든다. +플러그인을 사용하려면, 실행 가능하게 만든다. -``` +```shell sudo chmod +x ./kubectl-foo ``` 그리고 `PATH` 의 어느 곳에나 옮겨 놓는다. -``` +```shell sudo mv ./kubectl-foo /usr/local/bin ``` 이제 플러그인을 `kubectl` 명령으로 호출할 수 있다. -``` +```shell kubectl foo ``` + ``` I am a plugin named kubectl-foo ``` 모든 인수와 플래그는 그대로 실행 파일로 전달된다. -``` +```shell kubectl foo version ``` ``` @@ -120,6 +121,7 @@ kubectl foo version ```bash export KUBECONFIG=~/.kube/config kubectl foo config + ``` ``` /home//.kube/config @@ -128,6 +130,7 @@ kubectl foo config ```shell KUBECONFIG=/etc/kube/config kubectl foo config ``` + ``` /etc/kube/config ``` @@ -373,11 +376,8 @@ kubectl 플러그인의 배포 패키지를 컴파일된 패키지를 사용 가능하게 하거나, Krew를 사용하면 설치가 더 쉬워진다. - - ## {{% heading "whatsnext" %}} - * Go로 작성된 플러그인의 [자세한 예제](https://github.com/kubernetes/sample-cli-plugin)에 대해서는 샘플 CLI 플러그인 리포지터리를 확인한다. diff --git a/content/ko/docs/tasks/inject-data-application/define-environment-variable-container.md b/content/ko/docs/tasks/inject-data-application/define-environment-variable-container.md index 22372813e92e9..5e23ba831ef64 100644 --- a/content/ko/docs/tasks/inject-data-application/define-environment-variable-container.md +++ b/content/ko/docs/tasks/inject-data-application/define-environment-variable-container.md @@ -70,8 +70,9 @@ weight: 20 {{< /note >}} {{< note >}} -환경 변수는 서로를 참조할 수 있으며 사이클이 가능하다. -사용하기 전에 순서에 주의한다. +환경 변수는 서로를 참조할 수 있는데, 이 때 순서에 주의해야 한다. +동일한 컨텍스트에서 정의된 다른 변수를 참조하는 변수는 목록의 뒤쪽에 나와야 한다. +또한, 순환 참조는 피해야 한다. {{< /note >}} ## 설정 안에서 환경 변수 사용하기 diff --git a/content/ko/docs/tasks/job/automated-tasks-with-cron-jobs.md b/content/ko/docs/tasks/job/automated-tasks-with-cron-jobs.md index e2b14354642c4..4addcdcfafb6b 100644 --- a/content/ko/docs/tasks/job/automated-tasks-with-cron-jobs.md +++ b/content/ko/docs/tasks/job/automated-tasks-with-cron-jobs.md @@ -1,12 +1,16 @@ --- title: 크론잡(CronJob)으로 자동화된 작업 실행 -min-kubernetes-server-version: v1.8 +min-kubernetes-server-version: v1.21 content_type: task weight: 10 --- +쿠버네티스 버전 1.21에서 {{< glossary_tooltip text="크론잡" term_id="cronjob" >}}이 GA (General Availability)로 승격되었다. +이전 버전의 쿠버네티스를 사용하고 있다면, 해당 쿠버네티스 버전의 문서를 참고하여 정확한 정보를 확인할 수 있다. +이전 버전의 쿠버네티스는 `batch/v1` 크론잡 API를 지원하지 않는다. + 시간 기반의 스케줄에 따라 {{< glossary_tooltip text="크론잡" term_id="cronjob" >}}을 이용해서 {{< glossary_tooltip text="잡(Job)" term_id="job" >}}을 실행할 수 있다. 이러한 자동화된 잡은 리눅스 또는 유닉스 시스템에서 [크론](https://ko.wikipedia.org/wiki/Cron) 작업처럼 실행된다. 
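As a concrete starting point for the tasks below, a minimal `batch/v1` CronJob (the API version that, as noted above, requires Kubernetes v1.21) might look like this sketch; the `hello` name and busybox image are illustrative:

```shell
kubectl apply -f - <<EOF
apiVersion: batch/v1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"        # run once per minute
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            command: ["/bin/sh", "-c", "date; echo Hello from the Kubernetes cluster"]
          restartPolicy: OnFailure
EOF
```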
@@ -168,13 +172,11 @@ kubectl delete cronjob hello 이러한 방식으로 기한을 맞추지 못한 잡은 실패한 작업으로 간주된다. 이 필드를 지정하지 않으면, 잡에 기한이 없다. -크론잡 컨트롤러는 크론 잡에 대해 얼마나 많은 스케줄이 누락되었는지를 계산한다. 누락된 스케줄이 100개를 초과 한다면, 크론 잡은 더이상 스케줄되지 않는다. `.spec.startingDeadlineSeconds` 이 설정되지 않았다면, 크론잡 컨트롤러는 `status.lastScheduleTime` 부터 지금까지 누락된 스케줄을 계산한다. - -예를 들어, 하나의 크론 잡이 1분마다 실행되도록 설정되어 있고, 크론잡의 `status.lastScheduleTime` 은 새벽 5:00시이지만, 지금은 오전 7:00시라고 가정하자. 즉 120개의 스케줄이 누락되었다는 것이고, 그래서 크론 잡은 더이상 스케줄되지 않는다. - -`.spec.startingDeadlineSeconds` 필드가 (null이 아닌) 값으로 설정되어 있다면, 크론잡 컨트롤러는 `.spec.startingDeadlineSeconds` 의 값으로부터 지금까지 얼마나 많은 잡이 누락되었는지를 계산한다. +`.spec.startingDeadlineSeconds` 필드가 (null이 아닌 값으로) 설정되어 있다면, +크론잡 컨트롤러는 잡 생성 완료 예상 시각과 현재 시각의 차이를 측정하고, +시각 차이가 설정한 값보다 커지면 잡 생성 동작을 스킵한다. -예를 들어, `200` 으로 설정되었다면, 지난 200초 동안 누락된 스케줄이 몇 번 발생했는지 계산한다. 이 경우, 지난 200초 동안 누락된 스케줄이 100개가 넘으면, 크론 잡이 더이상 스케줄되지 않는다. +예를 들어, `200` 으로 설정되었다면, 잡 생성 완료 예상 시각으로부터 200초까지는 잡이 생성될 수 있다. ### 동시성 정책 diff --git a/content/ko/docs/tasks/job/coarse-parallel-processing-work-queue.md b/content/ko/docs/tasks/job/coarse-parallel-processing-work-queue.md index aeaac4803d613..bd8bf3880852d 100644 --- a/content/ko/docs/tasks/job/coarse-parallel-processing-work-queue.md +++ b/content/ko/docs/tasks/job/coarse-parallel-processing-work-queue.md @@ -2,7 +2,7 @@ title: 작업 대기열을 사용한 거친 병렬 처리 min-kubernetes-server-version: v1.8 content_type: task -weight: 30 +weight: 20 --- @@ -19,7 +19,7 @@ weight: 30 1. **메시지 대기열 서비스를 시작한다.** 이 예에서는, RabbitMQ를 사용하지만, 다른 메시지 대기열을 이용해도 된다. 실제로 사용할 때는, 한 번 메시지 대기열 서비스를 구축하고서 이를 여러 잡을 위해 재사용하기도 한다. 1. **대기열을 만들고, 메시지로 채운다.** 각 메시지는 수행할 하나의 작업을 나타낸다. - 이 예제에서, 메시지는 긴 계산을 수행할 정수일 뿐이다. + 이 예제에서, 메시지는 긴 계산을 수행할 정수다. 1. **대기열에서 작업을 수행하는 잡을 시작한다.** 잡은 여러 파드를 시작한다. 각 파드는 메시지 대기열에서 하나의 작업을 가져와서, 처리한 다음, 대기열이 비워질 때까지 반복한다. @@ -35,7 +35,7 @@ weight: 30 ## 메시지 대기열 서비스 시작 -이 문서의 예시에서는 RabbitMQ를 사용하지만, 다른 AMQP 타입의 메시지 서비스에 적용하는데 문제가 없을 것이다. +이 예시에서는 RabbitMQ를 사용하지만, 다른 AMQP 유형의 메시지 서비스를 사용하도록 예시를 조정할 수 있다. 실제로 사용할 때는, 클러스터에 메시지 대기열 서비스를 한 번 구축하고서, 여러 많은 잡이나 오래 동작하는 서비스에 재사용할 수 있다. @@ -141,13 +141,12 @@ root@temp-loe07:/# ``` 마지막 커맨드에서, `amqp-consume` 도구는 대기열로부터 하나의 메시지를 -받고(`-c 1`), 그 메시지를 임의의 명령 표준입력으로 전달한다. 이 경우에는, `cat` 프로그램이 표준입력으로부터 -받은 값을 바로 출력하고 있고, echo가 캐리지 리턴을 더해주어 +받고(`-c 1`), 그 메시지를 임의의 명령 표준입력으로 전달한다. 이 경우에는, `cat` 프로그램이 표준입력으로부터 받은 값을 출력하고, echo가 캐리지 리턴을 더해주어 출력 결과가 보여진다. ## 작업으로 대기열 채우기 -이제 몇 가지 "작업"으로 대기열을 채운다. 이 예제에서의 작업은 간단히 문자열을 +이제 몇 가지 "작업"으로 대기열을 채운다. 이 예제에서의 작업은 문자열을 출력하는 것이다. 실제로 사용할 때는, 메시지의 내용이 다음과 같을 수 있다. diff --git a/content/ko/docs/tasks/job/fine-parallel-processing-work-queue.md b/content/ko/docs/tasks/job/fine-parallel-processing-work-queue.md index 8788477c171b5..b85f687df7a4b 100644 --- a/content/ko/docs/tasks/job/fine-parallel-processing-work-queue.md +++ b/content/ko/docs/tasks/job/fine-parallel-processing-work-queue.md @@ -2,7 +2,7 @@ title: 작업 대기열을 사용한 정밀 병렬 처리 content_type: task min-kubernetes-server-version: v1.8 -weight: 40 +weight: 30 --- @@ -21,7 +21,7 @@ weight: 40 않기 때문에 Redis 및 사용자 지정의 작업 대기열 클라이언트 라이브러리를 사용한다. 실제로는 Redis와 같은 저장소를 한 번 설정하고 여러 작업과 다른 것들의 작업 대기열로 재사용한다. 1. **대기열을 만들고, 메시지로 채운다.** 각 메시지는 수행할 하나의 작업을 나타낸다. 이 - 예에서, 메시지는 긴 계산을 수행할 정수일 뿐이다. + 예에서, 메시지는 긴 계산을 수행할 정수다. 1. **대기열에서 작업을 수행하는 잡을 시작한다.** 잡은 여러 파드를 시작한다. 각 파드는 메시지 대기열에서 하나의 작업을 가져와서, 처리한 다음, 대기열이 비워질 때까지 반복한다. 
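To make the Redis-backed fine-grained work queue above concrete, here is a sketch of filling the queue by hand; it assumes a Service named `redis` is already reachable in the current namespace, as in this task:

```shell
# Start a throwaway interactive pod that has the redis CLI available.
kubectl run -i --tty temp --image redis --command "/bin/sh"

# Then, inside that pod, push one message per work item and inspect the queue:
redis-cli -h redis rpush job2 "apple"
redis-cli -h redis rpush job2 "banana"
redis-cli -h redis lrange job2 0 -1
```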
diff --git a/content/ko/docs/tasks/job/parallel-processing-expansion.md b/content/ko/docs/tasks/job/parallel-processing-expansion.md index 341739ba62c19..fbf105024d766 100644 --- a/content/ko/docs/tasks/job/parallel-processing-expansion.md +++ b/content/ko/docs/tasks/job/parallel-processing-expansion.md @@ -2,7 +2,7 @@ title: 확장을 사용한 병렬 처리 content_type: task min-kubernetes-server-version: v1.8 -weight: 20 +weight: 50 --- @@ -12,7 +12,7 @@ weight: 20 있다. 이 예에는 _apple_, _banana_ 그리고 _cherry_ 세 항목만 있다. -샘플 잡들은 단순히 문자열을 출력한 다음 일시 정지하는 각 항목을 처리한다. +샘플 잡들은 문자열을 출력한 다음 일시 정지하는 각 항목을 처리한다. 이 패턴이 보다 실질적인 유스케이스에 어떻게 부합하는지 알아 보려면 [실제 워크로드에서 잡 사용하기](#실제-워크로드에서-잡-사용하기)를 참고한다. diff --git a/content/ko/docs/tasks/manage-daemon/update-daemon-set.md b/content/ko/docs/tasks/manage-daemon/update-daemon-set.md index 659836833a568..ec29259de75a3 100644 --- a/content/ko/docs/tasks/manage-daemon/update-daemon-set.md +++ b/content/ko/docs/tasks/manage-daemon/update-daemon-set.md @@ -111,8 +111,8 @@ kubectl edit ds/fluentd-elasticsearch -n kube-system ##### 컨테이너 이미지만 업데이트 -데몬셋 템플릿에서 컨테이너 이미지를 업데이트해야 하는 -경우(예: `.spec.template.spec.containers[*].image`), `kubectl set image` 를 사용한다. +데몬셋 템플릿(예: `.spec.template.spec.containers[*].image`)에 의해 정의된 컨테이너 이미지만 업데이트하려면, +`kubectl set image` 를 사용한다. ```shell kubectl set image ds/fluentd-elasticsearch fluentd-elasticsearch=quay.io/fluentd_elasticsearch/fluentd:v2.6.0 -n kube-system @@ -168,7 +168,7 @@ kubectl get pods -l name=fluentd-elasticsearch -o wide -n kube-system 데몬셋 롤아웃이 진행되지 않는다. 이 문제를 해결하려면, 데몬셋 템플릿을 다시 업데이트한다. 이전의 비정상 롤아웃으로 인해 -새로운 롤아웃이 차단되지 않는다. +새로운 롤아웃이 차단되지는 않는다. #### 클럭 차이(skew) diff --git a/content/ko/docs/tasks/manage-gpus/scheduling-gpus.md b/content/ko/docs/tasks/manage-gpus/scheduling-gpus.md index 78c5dc2cbdb49..087399df01670 100644 --- a/content/ko/docs/tasks/manage-gpus/scheduling-gpus.md +++ b/content/ko/docs/tasks/manage-gpus/scheduling-gpus.md @@ -13,7 +13,7 @@ description: 클러스터의 노드별로 리소스로 사용할 GPU를 구성 쿠버네티스는 AMD 및 NVIDIA GPU(그래픽 프로세싱 유닛)를 노드들에 걸쳐 관리하기 위한 **실험적인** 지원을 포함한다. -이 페이지는 다른 쿠버네티스 버전 간에 걸쳐 사용자가 GPU들을 소비할 수 있는 방법과 +이 페이지는 여러 쿠버네티스 버전에서 사용자가 GPU를 활용할 수 있는 방법과 현재의 제약 사항을 설명한다. @@ -37,7 +37,7 @@ description: 클러스터의 노드별로 리소스로 사용할 GPU를 구성 `nvidia.com/gpu` 를 스케줄 가능한 리소스로써 노출시킨다. 사용자는 이 GPU들을 `cpu` 나 `memory` 를 요청하는 방식과 동일하게 -`.com/gpu` 를 요청함으로써 컨테이너를 통해 소비할 수 있다. +`.com/gpu` 를 요청함으로써 컨테이너에서 활용할 수 있다. 그러나 GPU를 사용할 때는 리소스 요구 사항을 명시하는 방식에 약간의 제약이 있다. diff --git a/content/ko/docs/tasks/manage-kubernetes-objects/declarative-config.md b/content/ko/docs/tasks/manage-kubernetes-objects/declarative-config.md index 26fa2830ff9e2..f7dcbab677e4c 100644 --- a/content/ko/docs/tasks/manage-kubernetes-objects/declarative-config.md +++ b/content/ko/docs/tasks/manage-kubernetes-objects/declarative-config.md @@ -16,7 +16,7 @@ weight: 10 ## {{% heading "prerequisites" %}} -[`kubectl`](/ko/docs/tasks/tools/install-kubectl/)를 설치한다. +[`kubectl`](/ko/docs/tasks/tools/)를 설치한다. {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} diff --git a/content/ko/docs/tasks/manage-kubernetes-objects/imperative-command.md b/content/ko/docs/tasks/manage-kubernetes-objects/imperative-command.md index 28655ed57b4f1..8b4d5afe36771 100644 --- a/content/ko/docs/tasks/manage-kubernetes-objects/imperative-command.md +++ b/content/ko/docs/tasks/manage-kubernetes-objects/imperative-command.md @@ -12,7 +12,7 @@ weight: 30 ## {{% heading "prerequisites" %}} -[`kubectl`](/ko/docs/tasks/tools/install-kubectl/)을 설치한다. +[`kubectl`](/ko/docs/tasks/tools/)을 설치한다. 
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} diff --git a/content/ko/docs/tasks/manage-kubernetes-objects/imperative-config.md b/content/ko/docs/tasks/manage-kubernetes-objects/imperative-config.md index 9e4f8de77498c..678814de3d0a4 100644 --- a/content/ko/docs/tasks/manage-kubernetes-objects/imperative-config.md +++ b/content/ko/docs/tasks/manage-kubernetes-objects/imperative-config.md @@ -13,7 +13,7 @@ weight: 40 ## {{% heading "prerequisites" %}} -[`kubectl`](/ko/docs/tasks/tools/install-kubectl/)을 설치한다. +[`kubectl`](/ko/docs/tasks/tools/)을 설치한다. {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} diff --git a/content/ko/docs/tasks/manage-kubernetes-objects/kustomization.md b/content/ko/docs/tasks/manage-kubernetes-objects/kustomization.md index c9ed347c56066..e420acbed920a 100644 --- a/content/ko/docs/tasks/manage-kubernetes-objects/kustomization.md +++ b/content/ko/docs/tasks/manage-kubernetes-objects/kustomization.md @@ -7,7 +7,7 @@ weight: 20 [Kustomize](https://github.com/kubernetes-sigs/kustomize)는 -[kustomization 파일](https://kubernetes-sigs.github.io/kustomize/api-reference/glossary/#kustomization)을 +[kustomization 파일](https://kubectl.docs.kubernetes.io/references/kustomize/glossary/#kustomization)을 통해 쿠버네티스 오브젝트를 사용자가 원하는 대로 변경하는(customize) 독립형 도구이다. 1.14 이후로, kubectl도 @@ -29,7 +29,7 @@ kubectl apply -k ## {{% heading "prerequisites" %}} -[`kubectl`](/ko/docs/tasks/tools/install-kubectl/)을 설치한다. +[`kubectl`](/ko/docs/tasks/tools/)을 설치한다. {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} diff --git a/content/ko/docs/tasks/run-application/access-api-from-pod.md b/content/ko/docs/tasks/run-application/access-api-from-pod.md new file mode 100644 index 0000000000000..d12f3b2f00514 --- /dev/null +++ b/content/ko/docs/tasks/run-application/access-api-from-pod.md @@ -0,0 +1,111 @@ +--- +title: 파드 내에서 쿠버네티스 API에 접근 +content_type: task +weight: 120 +--- + + + +이 페이지는 파드 내에서 쿠버네티스 API에 접근하는 방법을 보여준다. + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} + + + +## 파드 내에서 API에 접근 {#accessing-the-api-from-within-a-pod} + +파드 내에서 API에 접근할 때, API 서버를 찾아 인증하는 것은 +위에서 설명한 외부 클라이언트 사례와 약간 다르다. + +파드에서 쿠버네티스 API를 사용하는 가장 쉬운 방법은 +공식 [클라이언트 라이브러리](/ko/docs/reference/using-api/client-libraries/) 중 하나를 사용하는 것이다. 이러한 +라이브러리는 API 서버를 자동으로 감지하고 인증할 수 있다. + +### 공식 클라이언트 라이브러리 사용 + +파드 내에서, 쿠버네티스 API에 연결하는 권장 방법은 다음과 같다. + + - Go 클라이언트의 경우, 공식 [Go 클라이언트 라이브러리](https://github.com/kubernetes/client-go/)를 사용한다. + `rest.InClusterConfig()` 기능은 API 호스트 검색과 인증을 자동으로 처리한다. + [여기 예제](https://git.k8s.io/client-go/examples/in-cluster-client-configuration/main.go)를 참고한다. + + - Python 클라이언트의 경우, 공식 [Python 클라이언트 라이브러리](https://github.com/kubernetes-client/python/)를 사용한다. + `config.load_incluster_config()` 기능은 API 호스트 검색과 인증을 자동으로 처리한다. + [여기 예제](https://github.com/kubernetes-client/python/blob/master/examples/in_cluster_config.py)를 참고한다. + + - 사용할 수 있는 다른 라이브러리가 많이 있다. [클라이언트 라이브러리](/ko/docs/reference/using-api/client-libraries/) 페이지를 참고한다. + +각각의 경우, 파드의 서비스 어카운트 자격 증명은 API 서버와 +안전하게 통신하는 데 사용된다. + +### REST API에 직접 접근 + +파드에서 실행되는 동안, 쿠버네티스 apiserver는 `default` 네임스페이스에서 `kubernetes`라는 +서비스를 통해 접근할 수 있다. 따라서, 파드는 `kubernetes.default.svc` +호스트 이름을 사용하여 API 서버를 쿼리할 수 있다. 공식 클라이언트 라이브러리는 +이를 자동으로 수행한다. + +API 서버를 인증하는 권장 방법은 [서비스 어카운트](/docs/tasks/configure-pod-container/configure-service-account/) +자격 증명을 사용하는 것이다. 
기본적으로, 파드는 +서비스 어카운트와 연결되어 있으며, 해당 서비스 어카운트에 대한 자격 증명(토큰)은 +해당 파드에 있는 각 컨테이너의 파일시스템 트리의 +`/var/run/secrets/kubernetes.io/serviceaccount/token` 에 있다. + +사용 가능한 경우, 인증서 번들은 각 컨테이너의 +파일시스템 트리의 `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt` 에 배치되며, +API 서버의 제공 인증서를 확인하는 데 사용해야 한다. + +마지막으로, 네임스페이스가 지정된 API 작업에 사용되는 기본 네임스페이스는 각 컨테이너의 +`/var/run/secrets/kubernetes.io/serviceaccount/namespace` 에 있는 파일에 배치된다. + +### kubectl 프록시 사용 + +공식 클라이언트 라이브러리 없이 API를 쿼리하려면, 파드에서 +새 사이드카 컨테이너의 [명령](/ko/docs/tasks/inject-data-application/define-command-argument-container/)으로 +`kubectl proxy` 를 실행할 수 있다. 이런 식으로, `kubectl proxy` 는 +API를 인증하고 이를 파드의 `localhost` 인터페이스에 노출시켜서, 파드의 +다른 컨테이너가 직접 사용할 수 있도록 한다. + +### 프록시를 사용하지 않고 접근 + +인증 토큰을 API 서버에 직접 전달하여 kubectl 프록시 사용을 +피할 수 있다. 내부 인증서는 연결을 보호한다. + +```shell +# 내부 API 서버 호스트 이름을 가리킨다 +APISERVER=https://kubernetes.default.svc + +# 서비스어카운트(ServiceAccount) 토큰 경로 +SERVICEACCOUNT=/var/run/secrets/kubernetes.io/serviceaccount + +# 이 파드의 네임스페이스를 읽는다 +NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) + +# 서비스어카운트 베어러 토큰을 읽는다 +TOKEN=$(cat ${SERVICEACCOUNT}/token) + +# 내부 인증 기관(CA)을 참조한다 +CACERT=${SERVICEACCOUNT}/ca.crt + +# TOKEN으로 API를 탐색한다 +curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -X GET ${APISERVER}/api +``` + +출력은 다음과 비슷하다. + +```json +{ + "kind": "APIVersions", + "versions": [ + "v1" + ], + "serverAddressByClientCIDRs": [ + { + "clientCIDR": "0.0.0.0/0", + "serverAddress": "10.0.1.149:443" + } + ] +} +``` diff --git a/content/ko/docs/tasks/run-application/delete-stateful-set.md b/content/ko/docs/tasks/run-application/delete-stateful-set.md index 8bb7ab89f2d4a..07b3396440dd9 100644 --- a/content/ko/docs/tasks/run-application/delete-stateful-set.md +++ b/content/ko/docs/tasks/run-application/delete-stateful-set.md @@ -37,8 +37,8 @@ kubectl delete statefulsets kubectl delete service ``` -kubectl을 통해 스테이트풀셋을 삭제하면 0으로 스케일이 낮아지고, 스테이트풀셋에 포함된 모든 파드가 삭제된다. -파드가 아닌 스테이트풀셋만 삭제하려면, `--cascade=false` 를 사용한다. +kubectl을 통해 스테이트풀셋을 삭제하면, 스테이트풀셋의 크기가 0으로 설정되고 이로 인해 스테이트풀셋에 포함된 모든 파드가 삭제된다. 파드가 아닌 스테이트풀셋만 삭제하려면, `--cascade=false` 옵션을 사용한다. +예시는 다음과 같다. ```shell kubectl delete -f --cascade=false @@ -60,7 +60,7 @@ PVC를 삭제할 때 데이터 손실될 수 있음에 주의하자. ### 스테이트풀셋의 완벽한 삭제 -연결된 파드를 포함해서 스테이트풀셋의 모든 것을 간단히 삭제하기 위해 다음과 같이 일련의 명령을 실행 한다. +연결된 파드를 포함해서 스테이트풀셋의 모든 것을 삭제하기 위해 다음과 같이 일련의 명령을 실행한다. ```shell grace=$(kubectl get pods --template '{{.spec.terminationGracePeriodSeconds}}') diff --git a/content/ko/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/content/ko/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index b4ca1826d6278..61f1dbc7583f2 100644 --- a/content/ko/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/content/ko/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -381,7 +381,7 @@ object: 외부 메트릭 사용시, 먼저 모니터링 시스템에 대한 이해가 있어야 한다. 이 설치는 사용자 정의 메트릭과 유사하다. 외부 메트릭을 사용하면 모니터링 시스템의 사용 가능한 메트릭에 기반하여 클러스터를 오토스케일링 할 수 있다. -위의 예제처럼 `name`과 `selector`를 갖는 `metric` 블록을 제공하고, +위의 예제처럼 `name`과 `selector`를 갖는 `metric` 블록을 명시하고, `Object` 대신에 `External` 메트릭 타입을 사용한다. 만일 여러 개의 시계열이 `metricSelector`와 일치하면, HorizontalPodAutoscaler가 값의 합을 사용한다. 
외부 메트릭들은 `Value`와 `AverageValue` 대상 타입을 모두 지원하고, diff --git a/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md b/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md index 42172a463e844..b4cc1b3be5f5e 100644 --- a/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md +++ b/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md @@ -23,9 +23,7 @@ Pod Autoscaler는 크기를 조정할 수 없는 오브젝트(예: 데몬셋(Dae Horizontal Pod Autoscaler는 쿠버네티스 API 리소스 및 컨트롤러로 구현된다. 리소스는 컨트롤러의 동작을 결정한다. -컨트롤러는 관찰된 평균 CPU 사용률이 사용자가 지정한 대상과 일치하도록 레플리케이션 -컨트롤러 또는 디플로이먼트에서 레플리카 개수를 주기적으로 조정한다. - +컨트롤러는 평균 CPU 사용률, 평균 메모리 사용률 또는 다른 커스텀 메트릭과 같은 관찰 대상 메트릭이 사용자가 지정한 목표값과 일치하도록 레플리케이션 컨트롤러 또는 디플로이먼트에서 레플리카 개수를 주기적으로 조정한다. @@ -190,7 +188,7 @@ Horizontal Pod Autoscaler는 모든 API 리소스와 마찬가지로 `kubectl` `kubectl get hpa`로 오토스케일러 목록을 조회할 수 있고, `kubectl describe hpa`로 세부 사항을 확인할 수 있다. 마지막으로 `kubectl delete hpa`를 사용하여 오토스케일러를 삭제할 수 있다. -또한 Horizontal Pod Autoscaler를 쉽게 생성 할 수 있는 `kubectl autoscale`이라는 특별한 명령이 있다. +또한 Horizontal Pod Autoscaler를 생성할 수 있는 `kubectl autoscale`이라는 특별한 명령이 있다. 예를 들어 `kubectl autoscale rs foo --min=2 --max=5 --cpu-percent=80`을 실행하면 레플리케이션 셋 *foo* 에 대한 오토스케일러가 생성되고, 목표 CPU 사용률은 `80 %`, 그리고 2와 5 사이의 레플리카 개수로 설정된다. @@ -220,9 +218,10 @@ v1.6 부터 클러스터 운영자는 `kube-controller-manager` 컴포넌트의 v1.12부터는 새로운 알고리즘 업데이트가 업스케일 지연에 대한 필요성을 제거하였다. -- `--horizontal-pod-autoscaler-downscale-delay` : 이 옵션 값은 - 오토스케일러가 현재의 작업이 완료된 후에 다른 다운스케일 작업을 - 수행하기까지 기다려야 하는 시간을 지정하는 지속 시간이다. +- `--horizontal-pod-autoscaler-downscale-delay` : 다운스케일이 + 안정화되기까지의 시간 간격을 지정한다. + Horizontal Pod Autoscaler는 이전의 권장하는 크기를 기억하고, + 이 시간 간격에서의 가장 큰 크기에서만 작동한다. 기본값은 5분(`5m0s`)이다. {{< note >}} @@ -354,7 +353,7 @@ API에 접속하려면 클러스터 관리자는 다음을 확인해야 한다. ## 구성가능한 스케일링 동작 지원 -[v1.18](https://github.com/kubernetes/enhancements/blob/master/keps/sig-autoscaling/20190307-configurable-scale-velocity-for-hpa.md) +[v1.18](https://github.com/kubernetes/enhancements/blob/master/keps/sig-autoscaling/853-configurable-hpa-scale-velocity/README.md) 부터 `v2beta2` API는 HPA `behavior` 필드를 통해 스케일링 동작을 구성할 수 있다. 동작은 `behavior` 필드 아래의 `scaleUp` 또는 `scaleDown` @@ -382,7 +381,12 @@ behavior: periodSeconds: 60 ``` -파드 수가 40개를 초과하면 두 번째 폴리시가 스케일링 다운에 사용된다. +`periodSeconds` 는 폴리시가 참(true)으로 유지되어야 하는 기간을 나타낸다. +첫 번째 정책은 _(파드들)_ 이 1분 내에 최대 4개의 레플리카를 스케일 다운할 수 있도록 허용한다. +두 번째 정책은 _비율_ 로 현재 레플리카의 최대 10%를 1분 내에 스케일 다운할 수 있도록 허용한다. + +기본적으로 가장 많은 변경을 허용하는 정책이 선택되기에 두 번째 정책은 +파드의 레플리카 수가 40개를 초과하는 경우에만 사용된다. 레플리카가 40개 이하인 경우 첫 번째 정책이 적용된다. 예를 들어 80개의 레플리카가 있고 대상을 10개의 레플리카로 축소해야 하는 경우 첫 번째 단계에서 8개의 레플리카가 스케일 다운 된다. 레플리카의 수가 72개일 때 다음 반복에서 파드의 10%는 7.2 이지만, 숫자는 8로 올림된다. 오토스케일러 컨트롤러의 @@ -390,10 +394,6 @@ behavior: 미만으로 떨어지면 첫 번째 폴리시 _(파드들)_ 가 적용되고 한번에 4개의 레플리카가 줄어든다. -`periodSeconds` 는 폴리시가 참(true)으로 유지되어야 하는 기간을 나타낸다. -첫 번째 정책은 1분 내에 최대 4개의 레플리카를 스케일 다운할 수 있도록 허용한다. -두 번째 정책은 현재 레플리카의 최대 10%를 1분 내에 스케일 다운할 수 있도록 허용한다. - 확장 방향에 대해 `selectPolicy` 필드를 확인하여 폴리시 선택을 변경할 수 있다. 레플리카의 수를 최소로 변경할 수 있는 폴리시를 선택하는 `최소(Min)`로 값을 설정한다. 값을 `Disabled` 로 설정하면 해당 방향으로 스케일링이 완전히 @@ -440,7 +440,7 @@ behavior: periodSeconds: 15 selectPolicy: Max ``` -안정화 윈도우의 스케일링 다운의 경우 _300_ 초(또는 제공된 +안정화 윈도우의 스케일링 다운의 경우 _300_ 초 (또는 제공된 경우`--horizontal-pod-autoscaler-downscale-stabilization` 플래그의 값)이다. 스케일링 다운에서는 현재 실행 중인 레플리카의 100%를 제거할 수 있는 단일 정책만 있으며, 이는 스케일링 대상을 최소 허용 레플리카로 축소할 수 있음을 의미한다. 
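Putting the scaling-policy fields together, a hedged sketch of a complete `autoscaling/v2beta2` HorizontalPodAutoscaler using the scale-down behavior discussed above (the Deployment name `web` and the replica bounds are illustrative):

```shell
kubectl apply -f - <<EOF
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: web
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: web
  minReplicas: 2
  maxReplicas: 100
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 80
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 300   # wait 5 minutes before acting on a lower recommendation
      policies:
      - type: Pods
        value: 4          # remove at most 4 pods per period
        periodSeconds: 60
      - type: Percent
        value: 10         # or at most 10% of current replicas per period
        periodSeconds: 60
      # by default the policy allowing the larger change wins (selectPolicy: Max)
EOF
```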
diff --git a/content/ko/docs/tasks/run-application/run-single-instance-stateful-application.md b/content/ko/docs/tasks/run-application/run-single-instance-stateful-application.md index f3debe87810b1..cf6c3188b7069 100644 --- a/content/ko/docs/tasks/run-application/run-single-instance-stateful-application.md +++ b/content/ko/docs/tasks/run-application/run-single-instance-stateful-application.md @@ -65,6 +65,8 @@ MySQL을 실행하고 퍼시스턴트볼륨클레임을 참조하는 디플로 kubectl describe deployment mysql + 출력은 다음과 유사하다. + Name: mysql Namespace: default CreationTimestamp: Tue, 01 Nov 2016 11:18:45 -0700 @@ -105,6 +107,8 @@ MySQL을 실행하고 퍼시스턴트볼륨클레임을 참조하는 디플로 kubectl get pods -l app=mysql + 출력은 다음과 유사하다. + NAME READY STATUS RESTARTS AGE mysql-63082529-2z3ki 1/1 Running 0 3m @@ -112,6 +116,8 @@ MySQL을 실행하고 퍼시스턴트볼륨클레임을 참조하는 디플로 kubectl describe pvc mysql-pv-claim + 출력은 다음과 유사하다. + Name: mysql-pv-claim Namespace: default StorageClass: diff --git a/content/ko/docs/tasks/tls/certificate-rotation.md b/content/ko/docs/tasks/tls/certificate-rotation.md index b23bf2a600f56..037f99d87aace 100644 --- a/content/ko/docs/tasks/tls/certificate-rotation.md +++ b/content/ko/docs/tasks/tls/certificate-rotation.md @@ -70,6 +70,7 @@ kubelet은 쿠버네티스 API로 서명된 인증서를 가져와서 서명된 인증서의 만료가 다가오면 kubelet은 쿠버네티스 API를 사용하여 새로운 인증서 서명 요청을 자동으로 발행한다. +이는 인증서 유효 기간이 30%-10% 남은 시점에 언제든지 실행될 수 있다. 또한, 컨트롤러 관리자는 인증서 요청을 자동으로 승인하고 서명된 인증서를 인증서 서명 요청에 첨부한다. kubelet은 쿠버네티스 API로 서명된 새로운 인증서를 가져와서 디스크에 쓴다. diff --git a/content/ko/docs/tasks/tls/managing-tls-in-a-cluster.md b/content/ko/docs/tasks/tls/managing-tls-in-a-cluster.md new file mode 100644 index 0000000000000..f814db99191af --- /dev/null +++ b/content/ko/docs/tasks/tls/managing-tls-in-a-cluster.md @@ -0,0 +1,226 @@ +--- +title: 클러스터에서 TLS 인증서 관리 +content_type: task +--- + + + +쿠버네티스는 사용자가 제어하는 ​​인증 기관 (CA)에서 서명한 TLS 인증서를 +프로비저닝 할 수 있는 `certificates.k8s.io` API를 제공한다. +이러한 CA 및 인증서는 워크로드 간의 신뢰 관계를 구성하는 용도로 사용할 수 있다. + +`certificates.k8s.io` API는 [ACME 초안](https://github.com/ietf-wg-acme/acme/)과 +유사한 프로토콜을 사용한다. + +{{< note >}} +`certificates.k8s.io` API를 사용하여 생성된 인증서는 전용 CA로 서명된다. +이러한 목적을 위해 클러스터 루트 CA를 사용하도록 클러스터를 +구성할 수 있지만, 절대 이에 의존해서는 안된다. +해당 인증서가 클러스터 루트 CA에 대해 유효성을 검사한다고 가정하면 안된다. +{{< /note >}} + + + + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + + + +## 클러스터에서 TLS 신뢰 + +파드로 실행되는 애플리케이션에서 사용자 정의 CA를 신뢰하려면 +일반적으로 몇 가지 추가 애플리케이션 구성이 필요하다. +TLS 클라이언트 또는 서버가 신뢰하는 CA 인증서 목록에 +CA 인증서 번들을 추가해야 한다. +예를 들어 인증서 체인을 파싱하고, 파싱된 인증서를 [`tls.Config`](https://godoc.org/crypto/tls#Config) 구조체의 +`RootCAs` 필드에 추가하여, golang TLS 구성으로 이를 수행할 수 있다. + +CA 인증서를 파드에서 사용할 수 있는 +[ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap)으로 +배포할 수 있다. + +## 인증서 요청 + +다음 섹션에서는 DNS를 통해 액세스되는 쿠버네티스 서비스의 +TLS 인증서를 생성하는 방법을 보여준다. + +{{< note >}} +이 튜토리얼에서는 CFSSL을 사용한다. [여기를 클릭](https://blog.cloudflare.com/introducing-cfssl/)하여 Cloudflare의 PKI 및 TLS 툴킷을 자세히 알아본다. +{{< /note >}} + +## CFSSL 다운로드 및 설치 + +이 예제에 사용된 cfssl 도구는 +[https://github.com/cloudflare/cfssl/releases](https://github.com/cloudflare/cfssl/releases)에서 다운로드 할 수 있다. + +## 인증서 서명 요청 (CSR) 생성 + +다음 명령을 실행하여 개인 키 및 인증서 서명 요청(또는 CSR)을 +생성한다. 
+ +```shell +cat < +Annotations: +CreationTimestamp: Tue, 21 Mar 2017 07:03:51 -0700 +Requesting User: yourname@example.com +Status: Pending +Subject: + Common Name: my-svc.my-namespace.svc.cluster.local + Serial Number: +Subject Alternative Names: + DNS Names: my-svc.my-namespace.svc.cluster.local + IP Addresses: 192.0.2.24 + 10.0.34.2 +Events: +``` + +## 인증서 서명 요청 승인 받기 + +인증서 서명 요청을 승인하는 것은 자동화된 승인 프로세스나 +클러스터 관리자에 의해 일회성으로 수행한다. 여기에 +관련된 내용에 대한 자세한 내용은 아래에서 설명한다. + +## 인증서 다운로드 및 사용 + +CSR이 서명되고 승인되면 다음이 표시된다. + +```shell +kubectl get csr +``` + +```none +NAME AGE REQUESTOR CONDITION +my-svc.my-namespace 10m yourname@example.com Approved,Issued +``` + +다음을 실행하여 발급된 인증서를 다운로드하고 `server.crt` 파일에 +저장할 수 있다. + +```shell +kubectl get csr my-svc.my-namespace -o jsonpath='{.status.certificate}' \ + | base64 --decode > server.crt +``` + +이제 `server.crt` 및 `server-key.pem`을 키페어(keypair)로 사용하여 +HTTPS 서버를 시작할 수 있다. + +## 인증서 서명 요청 승인 + +(적절한 권한이 있는) 쿠버네티스 관리자는 +`kubectl certificate approve` 과 `kubectl certificate deny` +명령을 사용하여 인증서 서명 요청을 수동으로 승인 (또는 거부) 할 수 있다. +그러나 이 API를 많이 사용한다면, +자동화된 인증서 컨트롤러 작성을 고려할 수 있다. + +위와 같이 kubectl을 사용하는 시스템이든 사람이든, 승인자의 역할은 +CSR이 다음 두 가지 요구 사항을 충족하는지 확인하는 것이다. + +1. CSR은 CSR에 서명하는 데 사용되는 개인 키를 제어하는 것이다. 이는 + 승인된 대상으로 가장하는 제 3자의 위협을 해결한다. 위의 예에서 + 이 단계는 파드(pod)가 CSR을 생성하는 데 + 사용되는 개인 키를 제어하는지 확인하는 것이다. +2. CSR은 요청된 상황에서 작동할 권한이 있다. 이것은 + 원하지 않는 대상이 클러스터에 합류(join)하는 위협을 + 해결한다. 위의 예에서, 이 단계는 + 파드가 요청된 서비스에 참여할 수 있는지 확인하는 것이다. + +이 두 가지 요구 사항이 충족되는 경우에만, 승인자가 CSR을 승인하고 +그렇지 않으면 CSR을 거부해야 한다. + +## 승인 허가에 대한 경고문 + +CSR을 승인하는 능력은 환경 내에서 누구를 신뢰하는지 결정한다. CSR 승인 +능력은 광범위하거나 가볍게 부여해서는 안된다. 이 권한을 +부여하기 전에 이전 섹션에서 언급한 +요청의 요구 사항과 특정 인증서 발급의 영향을 +완전히 이해해야 한다. + +## 클러스터 관리자를 위한 참고 사항 + +이 가이드에서는 서명자가 인증서 API를 제공하도록 설정되었다고 가정한다. 쿠버네티스 +컨트롤러 관리자는 서명자의 기본 구현을 제공한다. 이를 +활성화하려면 인증 기관(CA)의 키 쌍에 대한 경로와 함께 `--cluster-signing-cert-file` 와 +`--cluster-signing-key-file` 매개 변수를 +컨트롤러 관리자에 전달한다. diff --git a/content/ko/docs/tasks/tools/_index.md b/content/ko/docs/tasks/tools/_index.md index 74abf8d981c6c..990a9fd99b8b4 100755 --- a/content/ko/docs/tasks/tools/_index.md +++ b/content/ko/docs/tasks/tools/_index.md @@ -7,18 +7,19 @@ no_list: true ## kubectl -쿠버네티스 커맨드 라인 도구인 `kubectl` 사용하면 쿠버네티스 클러스터에 대해 명령을 -실행할 수 있다. `kubectl` 을 사용하여 애플리케이션을 배포하고, 클러스터 리소스를 검사 및 -관리하고, 로그를 볼 수 있다. + +쿠버네티스 커맨드 라인 도구인 [`kubectl`](/ko/docs/reference/kubectl/kubectl/)을 사용하면 +쿠버네티스 클러스터에 대해 명령을 실행할 수 있다. +`kubectl` 을 사용하여 애플리케이션을 배포하고, 클러스터 리소스를 검사 및 관리하고, +로그를 볼 수 있다. kubectl 전체 명령어를 포함한 추가 정보는 +[`kubectl` 레퍼런스 문서](/ko/docs/reference/kubectl/)에서 확인할 수 있다. -클러스터에 접근하기 위해 `kubectl` 을 다운로드 및 설치하고 설정하는 방법에 대한 정보는 -[`kubectl` 설치 및 설정](/ko/docs/tasks/tools/install-kubectl/)을 -참고한다. +`kubectl` 은 다양한 리눅스 플랫폼, macOS, 그리고 윈도우에 설치할 수 있다. +각각에 대한 설치 가이드는 다음과 같다. -kubectl 설치 및 설정 가이드 보기 - -[`kubectl` 레퍼런스 문서](/ko/docs/reference/kubectl/)를 -읽어볼 수도 있다. +- [리눅스에 `kubectl` 설치하기](/ko/docs/tasks/tools/install-kubectl-linux/) +- [macOS에 `kubectl` 설치하기](/ko/docs/tasks/tools/install-kubectl-macos/) +- [윈도우에 `kubectl` 설치하기](/ko/docs/tasks/tools/install-kubectl-windows/) ## kind @@ -52,7 +53,7 @@ kind를 시작하고 실행하기 위해 수행해야 하는 작업을 보여준 {{< glossary_tooltip term_id="kubeadm" text="kubeadm" >}} 도구를 사용하여 쿠버네티스 클러스터를 만들고 관리할 수 있다. 사용자 친화적인 방식으로 최소한의 실행 가능하고 안전한 클러스터를 설정하고 실행하는 데 필요한 작업을 수행한다. -[kubeadm 설치](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) 페이지는 kubeadm 설치하는 방법을 보여준다. +[kubeadm 설치](/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) 페이지는 kubeadm 설치하는 방법을 보여준다. 
설치가 끝나면, [클러스터 생성](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/)이 가능하다. -kubeadm 설치 가이드 보기 +kubeadm 설치 가이드 보기 diff --git a/content/ko/docs/tasks/tools/included/_index.md b/content/ko/docs/tasks/tools/included/_index.md new file mode 100644 index 0000000000000..4ba9445002d2c --- /dev/null +++ b/content/ko/docs/tasks/tools/included/_index.md @@ -0,0 +1,6 @@ +--- +title: "포함된 도구들" +description: "메인 kubectl-installs-*.md 페이지에 포함될 스니펫." +headless: true +toc_hide: true +--- \ No newline at end of file diff --git a/content/ko/docs/tasks/tools/included/install-kubectl-gcloud.md b/content/ko/docs/tasks/tools/included/install-kubectl-gcloud.md new file mode 100644 index 0000000000000..f3deae981c1b2 --- /dev/null +++ b/content/ko/docs/tasks/tools/included/install-kubectl-gcloud.md @@ -0,0 +1,21 @@ +--- +title: "gcloud kubectl install" +description: "gcloud를 이용하여 kubectl을 설치하는 방법을 각 OS별 탭에 포함하기 위한 스니펫." +headless: true +--- + +Google Cloud SDK를 사용하여 kubectl을 설치할 수 있다. + +1. [Google Cloud SDK](https://cloud.google.com/sdk/)를 설치한다. + +1. `kubectl` 설치 명령을 실행한다. + + ```shell + gcloud components install kubectl + ``` + +1. 설치한 버전이 최신 버전인지 확인한다. + + ```shell + kubectl version --client + ``` \ No newline at end of file diff --git a/content/ko/docs/tasks/tools/included/kubectl-whats-next.md b/content/ko/docs/tasks/tools/included/kubectl-whats-next.md new file mode 100644 index 0000000000000..70532cd2eb3ff --- /dev/null +++ b/content/ko/docs/tasks/tools/included/kubectl-whats-next.md @@ -0,0 +1,12 @@ +--- +title: "다음 단계는 무엇인가?" +description: "kubectl을 설치한 다음 해야 하는 것에 대해 설명한다." +headless: true +--- + +* [Minikube 설치](https://minikube.sigs.k8s.io/docs/start/) +* 클러스터 생성에 대한 자세한 내용은 [시작하기](/ko/docs/setup/)를 참고한다. +* [애플리케이션을 시작하고 노출하는 방법에 대해 배운다.](/ko/docs/tasks/access-application-cluster/service-access-application-cluster/) +* 직접 생성하지 않은 클러스터에 접근해야 하는 경우, + [클러스터 접근 공유 문서](/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)를 참고한다. +* [kubectl 레퍼런스 문서](/ko/docs/reference/kubectl/kubectl/) 읽기 diff --git a/content/ko/docs/tasks/tools/included/optional-kubectl-configs-bash-linux.md b/content/ko/docs/tasks/tools/included/optional-kubectl-configs-bash-linux.md new file mode 100644 index 0000000000000..b9597857bbe03 --- /dev/null +++ b/content/ko/docs/tasks/tools/included/optional-kubectl-configs-bash-linux.md @@ -0,0 +1,54 @@ +--- +title: "리눅스에서 bash 자동 완성 사용하기" +description: "리눅스에서 bash 자동 완성을 위한 몇 가지 선택적 구성에 대해 설명한다." +headless: true +--- + +### 소개 + +Bash의 kubectl 자동 완성 스크립트는 `kubectl completion bash` 명령으로 생성할 수 있다. 셸에서 자동 완성 스크립트를 소싱(sourcing)하면 kubectl 자동 완성 기능이 활성화된다. + +그러나, 자동 완성 스크립트는 [**bash-completion**](https://github.com/scop/bash-completion)에 의존하고 있으며, 이 소프트웨어를 먼저 설치해야 한다(`type _init_completion` 을 실행하여 bash-completion이 이미 설치되어 있는지 확인할 수 있음). + +### bash-completion 설치 + +bash-completion은 많은 패키지 관리자에 의해 제공된다([여기](https://github.com/scop/bash-completion#installation) 참고). `apt-get install bash-completion` 또는 `yum install bash-completion` 등으로 설치할 수 있다. + +위의 명령은 bash-completion의 기본 스크립트인 `/usr/share/bash-completion/bash_completion` 을 생성한다. 패키지 관리자에 따라, `~/.bashrc` 파일에서 이 파일을 수동으로 소스(source)해야 한다. + +확인하려면, 셸을 다시 로드하고 `type _init_completion` 을 실행한다. 명령이 성공하면, 이미 설정된 상태이고, 그렇지 않으면 `~/.bashrc` 파일에 다음을 추가한다. + +```bash +source /usr/share/bash-completion/bash_completion +``` + +셸을 다시 로드하고 `type _init_completion` 을 입력하여 bash-completion이 올바르게 설치되었는지 확인한다. + +### kubectl 자동 완성 활성화 + +이제 kubectl 자동 완성 스크립트가 모든 셸 세션에서 제공되도록 해야 한다. 
이를 수행할 수 있는 두 가지 방법이 있다.
+
+- `~/.bashrc` 파일에서 자동 완성 스크립트를 소싱한다.
+
+  ```bash
+  echo 'source <(kubectl completion bash)' >>~/.bashrc
+  ```
+
+- 자동 완성 스크립트를 `/etc/bash_completion.d` 디렉터리에 추가한다.
+
+  ```bash
+  kubectl completion bash >/etc/bash_completion.d/kubectl
+  ```
+
+kubectl에 대한 앨리어스(alias)가 있는 경우, 해당 앨리어스로 작업하도록 셸 자동 완성을 확장할 수 있다.
+
+```bash
+echo 'alias k=kubectl' >>~/.bashrc
+echo 'complete -F __start_kubectl k' >>~/.bashrc
+```
+
+{{< note >}}
+bash-completion은 `/etc/bash_completion.d` 에 있는 모든 자동 완성 스크립트를 소싱한다.
+{{< /note >}}
+
+두 방법 모두 동일하다. 셸을 다시 로드하면, kubectl 자동 완성 기능이 작동할 것이다.
diff --git a/content/ko/docs/tasks/tools/included/optional-kubectl-configs-bash-mac.md b/content/ko/docs/tasks/tools/included/optional-kubectl-configs-bash-mac.md
new file mode 100644
index 0000000000000..7acb5d3621d2b
--- /dev/null
+++ b/content/ko/docs/tasks/tools/included/optional-kubectl-configs-bash-mac.md
@@ -0,0 +1,89 @@
+---
+title: "macOS에서 bash 자동 완성 사용하기"
+description: "macOS에서 bash 자동 완성을 위한 몇 가지 선택적 구성에 대해 설명한다."
+headless: true
+---
+
+### 소개
+
+Bash의 kubectl 자동 완성 스크립트는 `kubectl completion bash` 로 생성할 수 있다. 이 스크립트를 셸에 소싱하면 kubectl 자동 완성이 가능하다.
+
+그러나 kubectl 자동 완성 스크립트는 미리 [**bash-completion**](https://github.com/scop/bash-completion)을 설치해야 동작한다.
+
+{{< warning>}}
+bash-completion에는 v1과 v2 두 가지 버전이 있다. v1은 Bash 3.2(macOS의 기본 설치 버전) 버전용이고, v2는 Bash 4.1 이상 버전용이다. kubectl 자동 완성 스크립트는 bash-completion v1과 Bash 3.2 버전에서는 **작동하지 않는다**. **bash-completion v2** 와 **Bash 4.1 이상 버전** 이 필요하다. 따라서, macOS에서 kubectl 자동 완성 기능을 올바르게 사용하려면, Bash 4.1 이상을 설치하고 사용해야 한다([*지침*](https://itnext.io/upgrading-bash-on-macos-7138bd1066ba)). 다음의 내용에서는 Bash 4.1 이상(즉, 모든 Bash 버전 4.1 이상)을 사용한다고 가정한다.
+{{< /warning >}}
+
+### Bash 업그레이드
+
+여기의 지침에서는 Bash 4.1 이상을 사용한다고 가정한다. 다음을 실행하여 Bash 버전을 확인할 수 있다.
+
+```bash
+echo $BASH_VERSION
+```
+
+너무 오래된 버전인 경우, Homebrew를 사용하여 설치/업그레이드할 수 있다.
+
+```bash
+brew install bash
+```
+
+셸을 다시 로드하고 원하는 버전을 사용 중인지 확인한다.
+
+```bash
+echo $BASH_VERSION $SHELL
+```
+
+Homebrew는 보통 `/usr/local/bin/bash` 에 설치한다.
+
+### bash-completion 설치
+
+{{< note >}}
+언급한 바와 같이, 이 지침에서는 Bash 4.1 이상을 사용한다고 가정한다. 이는 bash-completion v2를 설치한다는 것을 의미한다(Bash 3.2 및 bash-completion v1의 경우, kubectl 자동 완성이 작동하지 않음).
+{{< /note >}}
+
+bash-completion v2가 이미 설치되어 있는지 `type _init_completion` 으로 확인할 수 있다. 그렇지 않은 경우, Homebrew로 설치할 수 있다.
+
+```bash
+brew install bash-completion@2
+```
+
+이 명령의 출력에 명시된 바와 같이, `~/.bash_profile` 파일에 다음을 추가한다.
+
+```bash
+export BASH_COMPLETION_COMPAT_DIR="/usr/local/etc/bash_completion.d"
+[[ -r "/usr/local/etc/profile.d/bash_completion.sh" ]] && . "/usr/local/etc/profile.d/bash_completion.sh"
+```
+
+셸을 다시 로드하고 bash-completion v2가 올바르게 설치되었는지 `type _init_completion` 으로 확인한다.
+
+### kubectl 자동 완성 활성화
+
+이제 kubectl 자동 완성 스크립트가 모든 셸 세션에서 제공되도록 해야 한다. 이를 수행하는 방법에는 여러 가지가 있다.
+
+- 자동 완성 스크립트를 `~/.bash_profile` 파일에서 소싱한다.
+
+  ```bash
+  echo 'source <(kubectl completion bash)' >>~/.bash_profile
+  ```
+
+- 자동 완성 스크립트를 `/usr/local/etc/bash_completion.d` 디렉터리에 추가한다.
+
+  ```bash
+  kubectl completion bash >/usr/local/etc/bash_completion.d/kubectl
+  ```
+
+- kubectl에 대한 앨리어스가 있는 경우, 해당 앨리어스로 작업하기 위해 셸 자동 완성을 확장할 수 있다.
+
+  ```bash
+  echo 'alias k=kubectl' >>~/.bash_profile
+  echo 'complete -F __start_kubectl k' >>~/.bash_profile
+  ```
+
+- Homebrew로 kubectl을 설치한 경우([여기](/ko/docs/tasks/tools/install-kubectl-macos/#install-with-homebrew-on-macos)의 설명을 참고), kubectl 자동 완성 스크립트가 이미 `/usr/local/etc/bash_completion.d/kubectl` 에 있을 것이다. 이 경우, 아무 것도 할 필요가 없다.
+ + {{< note >}} + bash-completion v2의 Homebrew 설치는 `BASH_COMPLETION_COMPAT_DIR` 디렉터리의 모든 파일을 소싱하므로, 후자의 두 가지 방법이 적용된다. + {{< /note >}} + +어떤 경우든, 셸을 다시 로드하면, kubectl 자동 완성 기능이 작동할 것이다. \ No newline at end of file diff --git a/content/ko/docs/tasks/tools/included/optional-kubectl-configs-zsh.md b/content/ko/docs/tasks/tools/included/optional-kubectl-configs-zsh.md new file mode 100644 index 0000000000000..e81403300ba98 --- /dev/null +++ b/content/ko/docs/tasks/tools/included/optional-kubectl-configs-zsh.md @@ -0,0 +1,29 @@ +--- +title: "zsh 자동 완성" +description: "zsh 자동 완성을 위한 몇 가지 선택적 구성에 대해 설명한다." +headless: true +--- + +Zsh용 kubectl 자동 완성 스크립트는 `kubectl completion zsh` 명령으로 생성할 수 있다. 셸에서 자동 완성 스크립트를 소싱하면 kubectl 자동 완성 기능이 활성화된다. + +모든 셸 세션에서 사용하려면, `~/.zshrc` 파일에 다음을 추가한다. + +```zsh +source <(kubectl completion zsh) +``` + +kubectl에 대한 앨리어스가 있는 경우, 해당 앨리어스로 작업하도록 셸 자동 완성을 확장할 수 있다. + +```zsh +echo 'alias k=kubectl' >>~/.zshrc +echo 'complete -F __start_kubectl k' >>~/.zshrc +``` + +셸을 다시 로드하면, kubectl 자동 완성 기능이 작동할 것이다. + +`complete:13: command not found: compdef` 와 같은 오류가 발생하면, `~/.zshrc` 파일의 시작 부분에 다음을 추가한다. + +```zsh +autoload -Uz compinit +compinit +``` \ No newline at end of file diff --git a/content/ko/docs/tasks/tools/included/verify-kubectl.md b/content/ko/docs/tasks/tools/included/verify-kubectl.md new file mode 100644 index 0000000000000..b935582b7a447 --- /dev/null +++ b/content/ko/docs/tasks/tools/included/verify-kubectl.md @@ -0,0 +1,34 @@ +--- +title: "kubectl 설치 검증하기" +description: "kubectl을 검증하는 방법에 대해 설명한다." +headless: true +--- + +kubectl이 쿠버네티스 클러스터를 찾아 접근하려면, +[kube-up.sh](https://github.com/kubernetes/kubernetes/blob/master/cluster/kube-up.sh)를 +사용하여 클러스터를 생성하거나 Minikube 클러스터를 성공적으로 배포할 때 자동으로 생성되는 +[kubeconfig 파일](/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig/)이 +필요하다. +기본적으로, kubectl 구성은 `~/.kube/config` 에 있다. + +클러스터 상태를 가져와서 kubectl이 올바르게 구성되어 있는지 확인한다. + +```shell +kubectl cluster-info +``` + +URL 응답이 표시되면, kubectl이 클러스터에 접근하도록 올바르게 구성된 것이다. + +다음과 비슷한 메시지가 표시되면, kubectl이 올바르게 구성되지 않았거나 쿠버네티스 클러스터에 연결할 수 없다. + +``` +The connection to the server was refused - did you specify the right host or port? +``` + +예를 들어, 랩톱에서 로컬로 쿠버네티스 클러스터를 실행하려면, Minikube와 같은 도구를 먼저 설치한 다음 위에서 언급한 명령을 다시 실행해야 한다. + +kubectl cluster-info가 URL 응답을 반환하지만 클러스터에 접근할 수 없는 경우, 올바르게 구성되었는지 확인하려면 다음을 사용한다. + +```shell +kubectl cluster-info dump +``` \ No newline at end of file diff --git a/content/ko/docs/tasks/tools/install-kubectl-linux.md b/content/ko/docs/tasks/tools/install-kubectl-linux.md new file mode 100644 index 0000000000000..96ab0a4024043 --- /dev/null +++ b/content/ko/docs/tasks/tools/install-kubectl-linux.md @@ -0,0 +1,203 @@ +--- + + +title: 리눅스에 kubectl 설치 및 설정 +content_type: task +weight: 10 +card: + name: tasks + weight: 20 + title: 리눅스에 kubectl 설치하기 +--- + +## {{% heading "prerequisites" %}} + +클러스터의 마이너(minor) 버전 차이 내에 있는 kubectl 버전을 사용해야 한다. +예를 들어, v1.2 클라이언트는 v1.1, v1.2 및 v1.3의 마스터와 함께 작동해야 한다. +최신 버전의 kubectl을 사용하면 예기치 않은 문제를 피할 수 있다. + +## 리눅스에 kubectl 설치 + +다음과 같은 방법으로 리눅스에 kubectl을 설치할 수 있다. 
- [{{% heading "prerequisites" %}}](#시작하기-전에)
+- [리눅스에 kubectl 설치](#리눅스에-kubectl-설치)
+  - [리눅스에서 curl을 사용하여 kubectl 바이너리 설치](#install-kubectl-binary-with-curl-on-linux)
+  - [기본 패키지 관리 도구를 사용하여 설치](#install-using-native-package-management)
+  - [다른 패키지 관리 도구를 사용하여 설치](#install-using-other-package-management)
+  - [Google Cloud SDK를 사용하여 설치](#install-on-linux-as-part-of-the-google-cloud-sdk)
+- [kubectl 구성 확인](#kubectl-구성-확인)
+- [선택적 kubectl 구성](#선택적-kubectl-구성)
+  - [셸 자동 완성 활성화](#셸-자동-완성-활성화)
+- [{{% heading "whatsnext" %}}](#다음-내용)
+
+### 리눅스에서 curl을 사용하여 kubectl 바이너리 설치 {#install-kubectl-binary-with-curl-on-linux}
+
+1. 다음 명령으로 최신 릴리스를 다운로드한다.
+
+   ```bash
+   curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
+   ```
+
+   {{< note >}}
+특정 버전을 다운로드하려면, `$(curl -L -s https://dl.k8s.io/release/stable.txt)` 명령 부분을 특정 버전으로 바꾼다.
+
+예를 들어, 리눅스에서 버전 {{< param "fullversion" >}}을 다운로드하려면, 다음을 입력한다.
+
+   ```bash
+   curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/linux/amd64/kubectl
+   ```
+   {{< /note >}}
+
+1. 바이너리를 검증한다. (선택 사항)
+
+   kubectl 체크섬(checksum) 파일을 다운로드한다.
+
+   ```bash
+   curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
+   ```
+
+   kubectl 바이너리를 체크섬 파일을 통해 검증한다.
+
+   ```bash
+   echo "$(<kubectl.sha256)  kubectl" | sha256sum --check
+   ```
+
+   검증이 성공한다면, 출력은 다음과 같다.
+
+   ```
+   kubectl: OK
+   ```
+
+   검증이 실패한다면, `sha256sum` 이 0이 아닌 상태로 종료되며 다음과 유사한 결과를 출력한다.
+
+   ```
+   kubectl: FAILED
+   sha256sum: WARNING: 1 computed checksum did NOT match
+   ```
+
+   {{< note >}}
+   동일한 버전의 바이너리와 체크섬을 다운로드한다.
+   {{< /note >}}
+
+1. kubectl 설치
+
+   ```bash
+   sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
+   ```
+
+   {{< note >}}
+   대상 시스템에 root 접근 권한을 가지고 있지 않더라도, `~/.local/bin` 디렉터리에 kubectl을 설치할 수 있다.
+
+   ```bash
+   mkdir -p ~/.local/bin/kubectl
+   mv ./kubectl ~/.local/bin/kubectl
+   # 그리고 ~/.local/bin/kubectl을 $PATH에 추가
+   ```
+
+   {{< /note >}}
+
+1. 설치한 버전이 최신인지 확인한다.
+
+   ```bash
+   kubectl version --client
+   ```
+
+### 기본 패키지 관리 도구를 사용하여 설치 {#install-using-native-package-management}
+
+{{< tabs name="kubectl_install" >}}
+{{% tab name="데비안 기반의 배포판" %}}
+
+1. `apt` 패키지 색인을 업데이트하고 쿠버네티스 `apt` 리포지터리를 사용하는 데 필요한 패키지들을 설치한다.
+
+   ```shell
+   sudo apt-get update
+   sudo apt-get install -y apt-transport-https ca-certificates curl
+   ```
+
+2. 구글 클라우드 공개 사이닝 키를 다운로드한다.
+
+   ```shell
+   sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
+   ```
+
+3. 쿠버네티스 `apt` 리포지터리를 추가한다.
+
+   ```shell
+   echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
+   ```
+
+4. 새 리포지터리의 `apt` 패키지 색인을 업데이트하고 kubectl을 설치한다.
+
+   ```shell
+   sudo apt-get update
+   sudo apt-get install -y kubectl
+   ```
+
+{{% /tab %}}
+
+{{< tab name="레드햇 기반의 배포판" codelang="bash" >}}
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubectl
{{< /tab >}}
{{< /tabs >}}

### 다른 패키지 관리 도구를 사용하여 설치 {#install-using-other-package-management}

{{< tabs name="other_kubectl_install" >}}
{{% tab name="Snap" %}}
[snap](https://snapcraft.io/docs/core/install) 패키지 관리자를 지원하는 Ubuntu 또는 다른 리눅스 배포판을 사용하는 경우, kubectl을 [snap](https://snapcraft.io/) 애플리케이션으로 설치할 수 있다.
```shell
snap install kubectl --classic

kubectl version --client
```

{{% /tab %}}

{{% tab name="Homebrew" %}}
리눅스 상에서 [Homebrew](https://docs.brew.sh/Homebrew-on-Linux) 패키지 관리자를 사용한다면, [설치](https://docs.brew.sh/Homebrew-on-Linux#install)를 통해 kubectl을 사용할 수 있다.

```shell
brew install kubectl

kubectl version --client
```

{{% /tab %}}

{{< /tabs >}}

### Google Cloud SDK를 사용하여 설치 {#install-on-linux-as-part-of-the-google-cloud-sdk}

{{< include "included/install-kubectl-gcloud.md" >}}

## kubectl 구성 확인

{{< include "included/verify-kubectl.md" >}}

## 선택적 kubectl 구성

### 셸 자동 완성 활성화

kubectl은 Bash 및 Zsh에 대한 자동 완성 지원을 제공하므로 입력을 위한 타이핑을 많이 절약할 수 있다.

다음은 Bash 및 Zsh에 대한 자동 완성을 설정하는 절차이다.

{{< tabs name="kubectl_autocompletion" >}}
{{< tab name="Bash" include="included/optional-kubectl-configs-bash-linux.md" />}}
{{< tab name="Zsh" include="included/optional-kubectl-configs-zsh.md" />}}
{{< /tabs >}}

## {{% heading "whatsnext" %}}

{{< include "included/kubectl-whats-next.md" >}}
diff --git a/content/ko/docs/tasks/tools/install-kubectl-macos.md b/content/ko/docs/tasks/tools/install-kubectl-macos.md
new file mode 100644
index 0000000000000..1fd37151a67af
--- /dev/null
+++ b/content/ko/docs/tasks/tools/install-kubectl-macos.md
@@ -0,0 +1,160 @@
+---
+
+
+title: macOS에 kubectl 설치 및 설정
+content_type: task
+weight: 10
+card:
+  name: tasks
+  weight: 20
+  title: macOS에 kubectl 설치하기
+---
+
+## {{% heading "prerequisites" %}}
+
+클러스터의 마이너(minor) 버전 차이 내에 있는 kubectl 버전을 사용해야 한다.
+예를 들어, v1.2 클라이언트는 v1.1, v1.2 및 v1.3의 마스터와 함께 작동해야 한다.
+최신 버전의 kubectl을 사용하면 예기치 않은 문제를 피할 수 있다.
+
+## macOS에 kubectl 설치
+
+다음과 같은 방법으로 macOS에 kubectl을 설치할 수 있다.
+
+- [macOS에서 curl을 사용하여 kubectl 바이너리 설치](#install-kubectl-binary-with-curl-on-macos)
+- [macOS에서 Homebrew를 사용하여 설치](#install-with-homebrew-on-macos)
+- [macOS에서 Macports를 사용하여 설치](#install-with-macports-on-macos)
+- [macOS에서 Google Cloud SDK를 사용하여 설치](#install-on-macos-as-part-of-the-google-cloud-sdk)
+
+### macOS에서 curl을 사용하여 kubectl 바이너리 설치 {#install-kubectl-binary-with-curl-on-macos}
+
+1. 최신 릴리스를 다운로드한다.
+
+   ```bash
+   curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl"
+   ```
+
+   {{< note >}}
+   특정 버전을 다운로드하려면, `$(curl -L -s https://dl.k8s.io/release/stable.txt)` 명령 부분을 특정 버전으로 바꾼다.
+
+   예를 들어, macOS에서 버전 {{< param "fullversion" >}}을 다운로드하려면, 다음을 입력한다.
+
+   ```bash
+   curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/darwin/amd64/kubectl
+   ```
+
+   {{< /note >}}
+
+1. 바이너리를 검증한다. (선택 사항)
+
+   kubectl 체크섬 파일을 다운로드한다.
+
+   ```bash
+   curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl.sha256"
+   ```
+
+   kubectl 바이너리를 체크섬 파일을 통해 검증한다.
+
+   ```bash
+   echo "$(<kubectl.sha256)  kubectl" | shasum -a 256 --check
+   ```
+
+   검증이 성공한다면, 출력은 다음과 같다.
+
+   ```
+   kubectl: OK
+   ```
+
+   검증이 실패한다면, `shasum` 이 0이 아닌 상태로 종료되며 다음과 유사한 결과를 출력한다.
+
+   ```
+   kubectl: FAILED
+   shasum: WARNING: 1 computed checksum did NOT match
+   ```
+
+   {{< note >}}
+   동일한 버전의 바이너리와 체크섬을 다운로드한다.
+   {{< /note >}}
+
+1. kubectl 바이너리를 실행 가능하게 한다.
+
+   ```bash
+   chmod +x ./kubectl
+   ```
+
+1. kubectl 바이너리를 시스템 `PATH` 의 파일 위치로 옮긴다.
+
+   ```bash
+   sudo mv ./kubectl /usr/local/bin/kubectl && \
+   sudo chown root: /usr/local/bin/kubectl
+   ```
+
+1. 설치한 버전이 최신 버전인지 확인한다.
+
+   ```bash
+   kubectl version --client
+   ```
+
+### macOS에서 Homebrew를 사용하여 설치 {#install-with-homebrew-on-macos}
+
+macOS에서 [Homebrew](https://brew.sh/) 패키지 관리자를 사용하는 경우, Homebrew로 kubectl을 설치할 수 있다.
+
+1. 설치 명령을 실행한다.
+
+   ```bash
+   brew install kubectl
+   ```
+
+   또는
+
+   ```bash
+   brew install kubernetes-cli
+   ```
+
+1. 설치한 버전이 최신 버전인지 확인한다.
+ + ```bash + kubectl version --client + ``` + +### macOS에서 Macports를 사용하여 설치 {#install-with-macports-on-macos} + +macOS에서 [Macports](https://macports.org/) 패키지 관리자를 사용하는 경우, Macports로 kubectl을 설치할 수 있다. + +1. 설치 명령을 실행한다. + + ```bash + sudo port selfupdate + sudo port install kubectl + ``` + +1. 설치한 버전이 최신 버전인지 확인한다. + + ```bash + kubectl version --client + ``` + + +### Google Cloud SDK를 사용하여 설치 {#install-on-macos-as-part-of-the-google-cloud-sdk} + +{{< include "included/install-kubectl-gcloud.md" >}} + +## kubectl 구성 확인 + +{{< include "included/verify-kubectl.md" >}} + +## 선택적 kubectl 구성 + +### 셸 자동 완성 활성화 + +kubectl은 Bash 및 Zsh에 대한 자동 완성 지원을 제공하므로 입력을 위한 타이핑을 많이 절약할 수 있다. + +다음은 Bash 및 Zsh에 대한 자동 완성을 설정하는 절차이다. + +{{< tabs name="kubectl_autocompletion" >}} +{{< tab name="Bash" include="included/optional-kubectl-configs-bash-mac.md" />}} +{{< tab name="Zsh" include="included/optional-kubectl-configs-zsh.md" />}} +{{< /tabs >}} + +## {{% heading "whatsnext" %}} + +{{< include "included/kubectl-whats-next.md" >}} diff --git a/content/ko/docs/tasks/tools/install-kubectl-windows.md b/content/ko/docs/tasks/tools/install-kubectl-windows.md new file mode 100644 index 0000000000000..e1c67af9ce1a6 --- /dev/null +++ b/content/ko/docs/tasks/tools/install-kubectl-windows.md @@ -0,0 +1,179 @@ +--- + + +title: 윈도우에 kubectl 설치 및 설정 +content_type: task +weight: 10 +card: + name: tasks + weight: 20 + title: 윈도우에 kubectl 설치하기 +--- + +## {{% heading "prerequisites" %}} + +클러스터의 마이너(minor) 버전 차이 내에 있는 kubectl 버전을 사용해야 한다. +예를 들어, v1.2 클라이언트는 v1.1, v1.2 및 v1.3의 마스터와 함께 작동해야 한다. +최신 버전의 kubectl을 사용하면 예기치 않은 문제를 피할 수 있다. + +## 윈도우에 kubectl 설치 + +다음과 같은 방법으로 윈도우에 kubectl을 설치할 수 있다. + +- [윈도우에서 curl을 사용하여 kubectl 바이너리 설치](#install-kubectl-binary-with-curl-on-windows) +- [PSGallery에서 PowerShell로 설치](#install-with-powershell-from-psgallery) +- [Chocolatey 또는 Scoop을 사용하여 윈도우에 설치](#install-on-windows-using-chocolatey-or-scoop) +- [Google Cloud SDK를 사용하여 설치](#install-on-windows-as-part-of-the-google-cloud-sdk) + + +### 윈도우에서 curl을 사용하여 kubectl 바이너리 설치 {#install-kubectl-binary-with-curl-on-windows} + +1. [최신 릴리스 {{< param "fullversion" >}}](https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe)를 다운로드한다. + + 또는 `curl` 을 설치한 경우, 다음 명령을 사용한다. + + ```powershell + curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe + ``` + + {{< note >}} + 최신의 안정 버전(예: 스크립팅을 위한)을 찾으려면, [https://dl.k8s.io/release/stable.txt](https://dl.k8s.io/release/stable.txt)를 참고한다. + {{< /note >}} + +1. 바이너리를 검증한다. (선택 사항) + + kubectl 체크섬 파일을 다운로드한다. + + ```powershell + curl -LO https://dl.k8s.io/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe.sha256 + ``` + + kubectl 바이너리를 체크섬 파일을 통해 검증한다. + + - 수동으로 `CertUtil` 의 출력과 다운로드한 체크섬 파일을 비교하기 위해서 커맨드 프롬프트를 사용한다. + + ```cmd + CertUtil -hashfile kubectl.exe SHA256 + type kubectl.exe.sha256 + ``` + + - `-eq` 연산자를 통해 `True` 또는 `False` 결과를 얻는 자동 검증을 위해서 PowerShell을 사용한다. + + ```powershell + $($(CertUtil -hashfile .\kubectl.exe SHA256)[1] -replace " ", "") -eq $(type .\kubectl.exe.sha256) + ``` + +1. 바이너리를 `PATH` 가 설정된 디렉터리에 추가한다. + +1. `kubectl` 의 버전이 다운로드한 버전과 같은지 확인한다. + + ```cmd + kubectl version --client + ``` + +{{< note >}} +[윈도우용 도커 데스크톱](https://docs.docker.com/docker-for-windows/#kubernetes)은 자체 버전의 `kubectl` 을 `PATH` 에 추가한다. +도커 데스크톱을 이전에 설치한 경우, 도커 데스크톱 설치 프로그램에서 추가한 `PATH` 항목 앞에 `PATH` 항목을 배치하거나 도커 데스크톱의 `kubectl` 을 제거해야 할 수도 있다. 
+{{< /note >}} + +### PSGallery에서 PowerShell로 설치 {#install-with-powershell-from-psgallery} + +윈도우에서 [Powershell Gallery](https://www.powershellgallery.com/) 패키지 관리자를 사용하는 경우, Powershell로 kubectl을 설치하고 업데이트할 수 있다. + +1. 설치 명령을 실행한다(`DownloadLocation` 을 지정해야 한다). + + ```powershell + Install-Script -Name install-kubectl -Scope CurrentUser -Force + install-kubectl.ps1 [-DownloadLocation ] + ``` + + {{< note >}} + `DownloadLocation` 을 지정하지 않으면, `kubectl` 은 사용자의 `temp` 디렉터리에 설치된다. + {{< /note >}} + + 설치 프로그램은 `$HOME/.kube` 를 생성하고 구성 파일을 작성하도록 지시한다. + +1. 설치한 버전이 최신 버전인지 확인한다. + + ```powershell + kubectl version --client + ``` + +{{< note >}} +설치 업데이트는 1 단계에서 나열한 두 명령을 다시 실행하여 수행한다. +{{< /note >}} + +### Chocolatey 또는 Scoop을 사용하여 윈도우에 설치 {#install-on-windows-using-chocolatey-or-scoop} + +1. 윈도우에 kubectl을 설치하기 위해서 [Chocolatey](https://chocolatey.org) 패키지 관리자나 [Scoop](https://scoop.sh) 커맨드 라인 설치 프로그램을 사용할 수 있다. + + {{< tabs name="kubectl_win_install" >}} + {{% tab name="choco" %}} + ```powershell + choco install kubernetes-cli + ``` + {{% /tab %}} + {{% tab name="scoop" %}} + ```powershell + scoop install kubectl + ``` + {{% /tab %}} + {{< /tabs >}} + + +1. 설치한 버전이 최신 버전인지 확인한다. + + ```powershell + kubectl version --client + ``` + +1. 홈 디렉터리로 이동한다. + + ```powershell + # cmd.exe를 사용한다면, 다음을 실행한다. cd %USERPROFILE% + cd ~ + ``` + +1. `.kube` 디렉터리를 생성한다. + + ```powershell + mkdir .kube + ``` + +1. 금방 생성한 `.kube` 디렉터리로 이동한다. + + ```powershell + cd .kube + ``` + +1. 원격 쿠버네티스 클러스터를 사용하도록 kubectl을 구성한다. + + ```powershell + New-Item config -type file + ``` + +{{< note >}} +메모장과 같은 텍스트 편집기를 선택하여 구성 파일을 편집한다. +{{< /note >}} + +### Google Cloud SDK를 사용하여 설치 {#install-on-windows-as-part-of-the-google-cloud-sdk} + +{{< include "included/install-kubectl-gcloud.md" >}} + +## kubectl 구성 확인 + +{{< include "included/verify-kubectl.md" >}} + +## 선택적 kubectl 구성 + +### 셸 자동 완성 활성화 + +kubectl은 Bash 및 Zsh에 대한 자동 완성 지원을 제공하므로 입력을 위한 타이핑을 많이 절약할 수 있다. + +다음은 Zsh에 대한 자동 완성을 설정하는 절차이다. + +{{< include "included/optional-kubectl-configs-zsh.md" >}} + +## {{% heading "whatsnext" %}} + +{{< include "included/kubectl-whats-next.md" >}} \ No newline at end of file diff --git a/content/ko/docs/tasks/tools/install-kubectl.md b/content/ko/docs/tasks/tools/install-kubectl.md deleted file mode 100644 index 9d80451e1be1b..0000000000000 --- a/content/ko/docs/tasks/tools/install-kubectl.md +++ /dev/null @@ -1,529 +0,0 @@ ---- -title: kubectl 설치 및 설정 -content_type: task -weight: 10 -card: - name: tasks - weight: 20 - title: kubectl 설치 ---- - - -쿠버네티스 커맨드 라인 도구인 [kubectl](/ko/docs/reference/kubectl/kubectl/)을 사용하면, -쿠버네티스 클러스터에 대해 명령을 실행할 수 있다. -kubectl을 사용하여 애플리케이션을 배포하고, 클러스터 리소스를 검사 및 관리하며 -로그를 볼 수 있다. kubectl 작업의 전체 목록에 대해서는, -[kubectl 개요](/ko/docs/reference/kubectl/overview/)를 참고한다. - - -## {{% heading "prerequisites" %}} - -클러스터의 마이너(minor) 버전 차이 내에 있는 kubectl 버전을 사용해야 한다. -예를 들어, v1.2 클라이언트는 v1.1, v1.2 및 v1.3의 마스터와 함께 작동해야 한다. -최신 버전의 kubectl을 사용하면 예기치 않은 문제를 피할 수 있다. - - - -## 리눅스에 kubectl 설치 - -### 리눅스에서 curl을 사용하여 kubectl 바이너리 설치 - -1. 다음 명령으로 최신 릴리스를 다운로드한다. - - ``` - curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" - ``` - - 특정 버전을 다운로드하려면, `$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)` 명령 부분을 특정 버전으로 바꾼다. - - 예를 들어, 리눅스에서 버전 {{< param "fullversion" >}}을 다운로드하려면, 다음을 입력한다. 
- ``` - curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/linux/amd64/kubectl - ``` - -2. kubectl 바이너리를 실행 가능하게 만든다. - - ``` - chmod +x ./kubectl - ``` - -3. 바이너리를 PATH가 설정된 디렉터리로 옮긴다. - - ``` - sudo mv ./kubectl /usr/local/bin/kubectl - ``` -4. 설치한 버전이 최신 버전인지 확인한다. - - ``` - kubectl version --client - ``` - -### 기본 패키지 관리 도구를 사용하여 설치 - -{{< tabs name="kubectl_install" >}} -{{< tab name="Ubuntu, Debian 또는 HypriotOS" codelang="bash" >}} -sudo apt-get update && sudo apt-get install -y apt-transport-https gnupg2 curl -curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - -echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list -sudo apt-get update -sudo apt-get install -y kubectl -{{< /tab >}} - -{{< tab name="CentOS, RHEL 또는 Fedora" codelang="bash" >}}cat < /etc/yum.repos.d/kubernetes.repo -[kubernetes] -name=Kubernetes -baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 -enabled=1 -gpgcheck=1 -repo_gpgcheck=1 -gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg -EOF -yum install -y kubectl -{{< /tab >}} -{{< /tabs >}} - -### 다른 패키지 관리 도구를 사용하여 설치 - -{{< tabs name="other_kubectl_install" >}} -{{% tab name="Snap" %}} -[snap](https://snapcraft.io/docs/core/install) 패키지 관리자를 지원하는 Ubuntu 또는 다른 리눅스 배포판을 사용하는 경우, kubectl을 [snap](https://snapcraft.io/) 애플리케이션으로 설치할 수 있다. - -```shell -snap install kubectl --classic - -kubectl version --client -``` - -{{% /tab %}} - -{{% tab name="Homebrew" %}} -리눅스 상에서 [Homebrew](https://docs.brew.sh/Homebrew-on-Linux) 패키지 관리자를 사용한다면, [설치](https://docs.brew.sh/Homebrew-on-Linux#install)를 통해 kubectl을 사용할 수 있다. - -```shell -brew install kubectl - -kubectl version --client -``` - -{{% /tab %}} - -{{< /tabs >}} - - -## macOS에 kubectl 설치 - -### macOS에서 curl을 사용하여 kubectl 바이너리 설치 - -1. 최신 릴리스를 다운로드한다. - - ```bash - curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl" - ``` - - 특정 버전을 다운로드하려면, `$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)` 명령 부분을 특정 버전으로 바꾼다. - - 예를 들어, macOS에서 버전 {{< param "fullversion" >}}을 다운로드하려면, 다음을 입력한다. - ```bash - curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/darwin/amd64/kubectl - ``` - - kubectl 바이너리를 실행 가능하게 만든다. - - ```bash - chmod +x ./kubectl - ``` - -3. 바이너리를 PATH가 설정된 디렉터리로 옮긴다. - - ```bash - sudo mv ./kubectl /usr/local/bin/kubectl - ``` - -4. 설치한 버전이 최신 버전인지 확인한다. - - ```bash - kubectl version --client - ``` - -### macOS에서 Homebrew를 사용하여 설치 - -macOS에서 [Homebrew](https://brew.sh/) 패키지 관리자를 사용하는 경우, Homebrew로 kubectl을 설치할 수 있다. - -1. 설치 명령을 실행한다. - - ```bash - brew install kubectl - ``` - - 또는 - - ```bash - brew install kubernetes-cli - ``` - -2. 설치한 버전이 최신 버전인지 확인한다. - - ```bash - kubectl version --client - ``` - -### macOS에서 Macports를 사용하여 설치 - -macOS에서 [Macports](https://macports.org/) 패키지 관리자를 사용하는 경우, Macports로 kubectl을 설치할 수 있다. - -1. 설치 명령을 실행한다. - - ```bash - sudo port selfupdate - sudo port install kubectl - ``` - -2. 설치한 버전이 최신 버전인지 확인한다. - - ```bash - kubectl version --client - ``` - -## 윈도우에 kubectl 설치 - -### 윈도우에서 curl을 사용하여 kubectl 바이너리 설치 - -1. 
[이 링크](https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe)에서 최신 릴리스 {{< param "fullversion" >}}을 다운로드한다. - - 또는 `curl` 을 설치한 경우, 다음 명령을 사용한다. - - ```bash - curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe - ``` - - 최신의 안정 버전(예: 스크립팅을 위한)을 찾으려면, [https://storage.googleapis.com/kubernetes-release/release/stable.txt](https://storage.googleapis.com/kubernetes-release/release/stable.txt)를 참고한다. - -2. 바이너리를 PATH가 설정된 디렉터리에 추가한다. - -3. `kubectl` 의 버전이 다운로드한 버전과 같은지 확인한다. - - ```bash - kubectl version --client - ``` - -{{< note >}} -[윈도우용 도커 데스크톱](https://docs.docker.com/docker-for-windows/#kubernetes)은 자체 버전의 `kubectl` 을 PATH에 추가한다. -도커 데스크톱을 이전에 설치한 경우, 도커 데스크톱 설치 프로그램에서 추가한 PATH 항목 앞에 PATH 항목을 배치하거나 도커 데스크톱의 `kubectl` 을 제거해야 할 수도 있다. -{{< /note >}} - -### PSGallery에서 Powershell로 설치 - -윈도우에서 [Powershell Gallery](https://www.powershellgallery.com/) 패키지 관리자를 사용하는 경우, Powershell로 kubectl을 설치하고 업데이트할 수 있다. - -1. 설치 명령을 실행한다(`DownloadLocation` 을 지정해야 한다). - - ```powershell - Install-Script -Name install-kubectl -Scope CurrentUser -Force - install-kubectl.ps1 [-DownloadLocation ] - ``` - - {{< note >}} - `DownloadLocation` 을 지정하지 않으면, `kubectl` 은 사용자의 임시 디렉터리에 설치된다. - {{< /note >}} - - 설치 프로그램은 `$HOME/.kube` 를 생성하고 구성 파일을 작성하도록 지시한다. - -2. 설치한 버전이 최신 버전인지 확인한다. - - ```powershell - kubectl version --client - ``` - -{{< note >}} -설치 업데이트는 1 단계에서 나열한 두 명령을 다시 실행하여 수행한다. -{{< /note >}} - -### Chocolatey 또는 Scoop을 사용하여 윈도우에 설치 - -1. 윈도우에 kubectl을 설치하기 위해서 [Chocolatey](https://chocolatey.org) 패키지 관리자나 [Scoop](https://scoop.sh) 커맨드 라인 설치 프로그램을 사용할 수 있다. - - {{< tabs name="kubectl_win_install" >}} - {{% tab name="choco" %}} - ```powershell - choco install kubernetes-cli - ``` - {{% /tab %}} - {{% tab name="scoop" %}} - ```powershell - scoop install kubectl - ``` - {{% /tab %}} - {{< /tabs >}} - - -2. 설치한 버전이 최신 버전인지 확인한다. - - ```powershell - kubectl version --client - ``` - -3. 홈 디렉터리로 이동한다. - - ```powershell - # cmd.exe를 사용한다면, 다음을 실행한다. cd %USERPROFILE% - cd ~ - ``` - -4. `.kube` 디렉터리를 생성한다. - - ```powershell - mkdir .kube - ``` - -5. 금방 생성한 `.kube` 디렉터리로 이동한다. - - ```powershell - cd .kube - ``` - -6. 원격 쿠버네티스 클러스터를 사용하도록 kubectl을 구성한다. - - ```powershell - New-Item config -type file - ``` - -{{< note >}} -메모장과 같은 텍스트 편집기를 선택하여 구성 파일을 편집한다. -{{< /note >}} - -## Google Cloud SDK의 일부로 다운로드 - -kubectl을 Google Cloud SDK의 일부로 설치할 수 있다. - -1. [Google Cloud SDK](https://cloud.google.com/sdk/)를 설치한다. - -2. `kubectl` 설치 명령을 실행한다. - - ```shell - gcloud components install kubectl - ``` - -3. 설치한 버전이 최신 버전인지 확인한다. - - ```shell - kubectl version --client - ``` - -## kubectl 구성 확인 - -kubectl이 쿠버네티스 클러스터를 찾아 접근하려면, -[kube-up.sh](https://github.com/kubernetes/kubernetes/blob/master/cluster/kube-up.sh)를 -사용하여 클러스터를 생성하거나 Minikube 클러스터를 성공적으로 배포할 때 자동으로 생성되는 -[kubeconfig 파일](/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig/)이 -필요하다. -기본적으로, kubectl 구성은 `~/.kube/config` 에 있다. - -클러스터 상태를 가져와서 kubectl이 올바르게 구성되어 있는지 확인한다. - -```shell -kubectl cluster-info -``` - -URL 응답이 표시되면, kubectl이 클러스터에 접근하도록 올바르게 구성된 것이다. - -다음과 비슷한 메시지가 표시되면, kubectl이 올바르게 구성되지 않았거나 쿠버네티스 클러스터에 연결할 수 없다. - -``` -The connection to the server was refused - did you specify the right host or port? -``` - -예를 들어, 랩톱에서 로컬로 쿠버네티스 클러스터를 실행하려면, Minikube와 같은 도구를 먼저 설치한 다음 위에서 언급한 명령을 다시 실행해야 한다. - -kubectl cluster-info가 URL 응답을 반환하지만 클러스터에 접근할 수 없는 경우, 올바르게 구성되었는지 확인하려면 다음을 사용한다. 
- -```shell -kubectl cluster-info dump -``` - -## 선택적 kubectl 구성 - -### 셸 자동 완성 활성화 - -kubectl은 Bash 및 Zsh에 대한 자동 완성 지원을 제공하므로 입력을 위한 타이핑을 많이 절약할 수 있다. - -다음은 Bash(리눅스와 macOS의 다른 점 포함) 및 Zsh에 대한 자동 완성을 설정하는 절차이다. - -{{< tabs name="kubectl_autocompletion" >}} - -{{% tab name="리눅스에서의 Bash" %}} - -### 소개 - -Bash의 kubectl 완성 스크립트는 `kubectl completion bash` 명령으로 생성할 수 있다. 셸에서 완성 스크립트를 소싱(sourcing)하면 kubectl 자동 완성 기능이 활성화된다. - -그러나, 완성 스크립트는 [**bash-completion**](https://github.com/scop/bash-completion)에 의존하고 있으며, 이 소프트웨어를 먼저 설치해야 한다(`type _init_completion` 을 실행하여 bash-completion이 이미 설치되어 있는지 확인할 수 있음). - -### bash-completion 설치 - -bash-completion은 많은 패키지 관리자에 의해 제공된다([여기](https://github.com/scop/bash-completion#installation) 참고). `apt-get install bash-completion` 또는 `yum install bash-completion` 등으로 설치할 수 있다. - -위의 명령은 bash-completion의 기본 스크립트인 `/usr/share/bash-completion/bash_completion` 을 생성한다. 패키지 관리자에 따라, `~/.bashrc` 파일에서 이 파일을 수동으로 소스(source)해야 한다. - -확인하려면, 셸을 다시 로드하고 `type _init_completion` 을 실행한다. 명령이 성공하면, 이미 설정된 상태이고, 그렇지 않으면 `~/.bashrc` 파일에 다음을 추가한다. - -```bash -source /usr/share/bash-completion/bash_completion -``` - -셸을 다시 로드하고 `type _init_completion` 을 입력하여 bash-completion이 올바르게 설치되었는지 확인한다. - -### kubectl 자동 완성 활성화 - -이제 kubectl 완성 스크립트가 모든 셸 세션에서 제공되도록 해야 한다. 이를 수행할 수 있는 두 가지 방법이 있다. - -- `~/.bashrc` 파일에서 완성 스크립트를 소싱한다. - - ```bash - echo 'source <(kubectl completion bash)' >>~/.bashrc - ``` -- 완성 스크립트를 `/etc/bash_completion.d` 디렉터리에 추가한다. - - ```bash - kubectl completion bash >/etc/bash_completion.d/kubectl - ``` -kubectl에 대한 앨리어스(alias)가 있는 경우, 해당 앨리어스로 작업하도록 셸 완성을 확장할 수 있다. - -```bash -echo 'alias k=kubectl' >>~/.bashrc -echo 'complete -F __start_kubectl k' >>~/.bashrc -``` - -{{< note >}} -bash-completion은 `/etc/bash_completion.d` 에 있는 모든 완성 스크립트를 소싱한다. -{{< /note >}} - -두 방법 모두 동일하다. 셸을 다시 로드한 후, kubectl 자동 완성 기능이 작동해야 한다. - -{{% /tab %}} - - -{{% tab name="macOS에서의 Bash" %}} - - -### 소개 - -Bash의 kubectl 완성 스크립트는 `kubectl completion bash` 로 생성할 수 있다. 이 스크립트를 셸에 소싱하면 kubectl 완성이 가능하다. - -그러나 kubectl 완성 스크립트는 미리 [**bash-completion**](https://github.com/scop/bash-completion)을 설치해야 동작한다. - -{{< warning>}} -bash-completion에는 v1과 v2 두 가지 버전이 있다. v1은 Bash 3.2(macOS의 기본 설치 버전) 버전용이고, v2는 Bash 4.1 이상 버전용이다. kubectl 완성 스크립트는 bash-completion v1과 Bash 3.2 버전에서는 **작동하지 않는다**. **bash-completion v2** 와 **Bash 4.1 이상 버전** 이 필요하다. 따라서, macOS에서 kubectl 완성 기능을 올바르게 사용하려면, Bash 4.1 이상을 설치하고 사용해야한다([*지침*](https://itnext.io/upgrading-bash-on-macos-7138bd1066ba)). 다음의 내용에서는 Bash 4.1 이상(즉, 모든 Bash 버전 4.1 이상)을 사용한다고 가정한다. -{{< /warning >}} - -### Bash 업그레이드 - -여기의 지침에서는 Bash 4.1 이상을 사용한다고 가정한다. 다음을 실행하여 Bash 버전을 확인할 수 있다. - -```bash -echo $BASH_VERSION -``` - -너무 오래된 버전인 경우, Homebrew를 사용하여 설치/업그레이드할 수 있다. - -```bash -brew install bash -``` - -셸을 다시 로드하고 원하는 버전을 사용 중인지 확인한다. - -```bash -echo $BASH_VERSION $SHELL -``` - -Homebrew는 보통 `/usr/local/bin/bash` 에 설치한다. - -### bash-completion 설치 - -{{< note >}} -언급한 바와 같이, 이 지침에서는 Bash 4.1 이상을 사용한다고 가정한다. 이는 bash-completion v2를 설치한다는 것을 의미한다(Bash 3.2 및 bash-completion v1의 경우, kubectl 완성이 작동하지 않음). -{{< /note >}} - -bash-completion v2가 이미 설치되어 있는지 `type_init_completion` 으로 확인할 수 있다. 그렇지 않은 경우, Homebrew로 설치할 수 있다. - -```bash -brew install bash-completion@2 -``` - -이 명령의 출력에 명시된 바와 같이, `~/.bash_profile` 파일에 다음을 추가한다. - -```bash -export BASH_COMPLETION_COMPAT_DIR="/usr/local/etc/bash_completion.d" -[[ -r "/usr/local/etc/profile.d/bash_completion.sh" ]] && . 
"/usr/local/etc/profile.d/bash_completion.sh" -``` - -셸을 다시 로드하고 bash-completion v2가 올바르게 설치되었는지 `type _init_completion` 으로 확인한다. - -### kubectl 자동 완성 활성화 - -이제 kubectl 완성 스크립트가 모든 셸 세션에서 제공되도록 해야 한다. 이를 수행하는 방법에는 여러 가지가 있다. - -- 완성 스크립트를 `~/.bash_profile` 파일에서 소싱한다. - - ```bash - echo 'source <(kubectl completion bash)' >>~/.bash_profile - - ``` - -- 완성 스크립트를 `/usr/local/etc/bash_completion.d` 디렉터리에 추가한다. - - ```bash - kubectl completion bash >/usr/local/etc/bash_completion.d/kubectl - ``` - -- kubectl에 대한 앨리어스가 있는 경우, 해당 앨리어스로 작업하기 위해 셸 완성을 확장할 수 있다. - - ```bash - echo 'alias k=kubectl' >>~/.bash_profile - echo 'complete -F __start_kubectl k' >>~/.bash_profile - ``` - -- Homebrew로 kubectl을 설치한 경우([위](#macos에서-homebrew를-사용하여-설치)의 설명을 참고), kubectl 완성 스크립트는 이미 `/usr/local/etc/bash_completion.d/kubectl` 에 있어야 한다. 이 경우, 아무 것도 할 필요가 없다. - - {{< note >}} - bash-completion v2의 Homebrew 설치는 `BASH_COMPLETION_COMPAT_DIR` 디렉터리의 모든 파일을 소싱하므로, 후자의 두 가지 방법이 적용된다. - {{< /note >}} - -어쨌든, 셸을 다시 로드 한 후에, kubectl 완성이 작동해야 한다. -{{% /tab %}} - -{{% tab name="Zsh" %}} - -Zsh용 kubectl 완성 스크립트는 `kubectl completion zsh` 명령으로 생성할 수 있다. 셸에서 완성 스크립트를 소싱하면 kubectl 자동 완성 기능이 활성화된다. - -모든 셸 세션에서 사용하려면, `~/.zshrc` 파일에 다음을 추가한다. - -```zsh -source <(kubectl completion zsh) -``` - -kubectl에 대한 앨리어스가 있는 경우, 해당 앨리어스로 작업하도록 셸 완성을 확장할 수 있다. - -```zsh -echo 'alias k=kubectl' >>~/.zshrc -echo 'complete -F __start_kubectl k' >>~/.zshrc -``` - -셸을 다시 로드 한 후, kubectl 자동 완성 기능이 작동해야 한다. - -`complete:13: command not found: compdef` 와 같은 오류가 발생하면, `~/.zshrc` 파일의 시작 부분에 다음을 추가한다. - -```zsh -autoload -Uz compinit -compinit -``` -{{% /tab %}} -{{< /tabs >}} - -## {{% heading "whatsnext" %}} - -* [Minikube 설치](https://minikube.sigs.k8s.io/docs/start/) -* 클러스터 생성에 대한 자세한 내용은 [시작하기](/ko/docs/setup/)를 참고한다. -* [애플리케이션을 시작하고 노출하는 방법에 대해 배운다.](/ko/docs/tasks/access-application-cluster/service-access-application-cluster/) -* 직접 생성하지 않은 클러스터에 접근해야하는 경우, - [클러스터 접근 공유 문서](/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)를 참고한다. -* [kubectl 레퍼런스 문서](/ko/docs/reference/kubectl/kubectl/) 읽기 diff --git a/content/ko/docs/tutorials/_index.md b/content/ko/docs/tutorials/_index.md index a0af5ff5b517c..8d3fd54010fff 100644 --- a/content/ko/docs/tutorials/_index.md +++ b/content/ko/docs/tutorials/_index.md @@ -27,13 +27,15 @@ content_type: concept ## 구성 +* [예제: Java 마이크로서비스 구성하기](/ko/docs/tutorials/configuration/configure-java-microservice/) + * [컨피그 맵을 사용해서 Redis 설정하기](/ko/docs/tutorials/configuration/configure-redis-using-configmap/) ## 상태 유지를 하지 않는(stateless) 애플리케이션 * [외부 IP 주소를 노출하여 클러스터의 애플리케이션에 접속하기](/ko/docs/tutorials/stateless-application/expose-external-ip-address/) -* [예시: Redis를 사용한 PHP 방명록 애플리케이션 배포하기](/ko/docs/tutorials/stateless-application/guestbook/) +* [예시: MongoDB를 사용한 PHP 방명록 애플리케이션 배포하기](/ko/docs/tutorials/stateless-application/guestbook/) ## 상태 유지가 필요한(stateful) 애플리케이션 diff --git a/content/ko/docs/tutorials/clusters/apparmor.md b/content/ko/docs/tutorials/clusters/apparmor.md index 74008f9961c0f..43b07e293bcde 100644 --- a/content/ko/docs/tutorials/clusters/apparmor.md +++ b/content/ko/docs/tutorials/clusters/apparmor.md @@ -103,7 +103,7 @@ AppArmor를 이용하면 컨테이너가 수행할 수 있는 작업을 제한 AppArmor 지원이 포함된 Kubelet (>= v1.4)이면 어떤 전제 조건이 충족되지 않으면 AppArmor와 함께한 파드를 거부한다. 노드 상에 AppArmor 지원 여부는 -노드 준비 조건 메시지를 확인하여(이후 릴리즈에서는 삭제될 것 같지만) 검증할 수 있다. +노드 준비 조건 메시지를 확인하여(이후 릴리스에서는 삭제될 것 같지만) 검증할 수 있다. 
```shell kubectl get nodes -o=jsonpath=$'{range .items[*]}{@.metadata.name}: {.status.conditions[?(@.reason=="KubeletReady")].message}\n{end}' @@ -168,8 +168,7 @@ k8s-apparmor-example-deny-write (enforce) *이 예시는 AppArmor를 지원하는 클러스터를 이미 구성하였다고 가정한다.* -먼저 노드에서 사용하려는 프로파일을 적재해야 한다. 사용할 프로파일은 단순히 -파일 쓰기를 거부할 것이다. +먼저 노드에서 사용하려는 프로파일을 적재해야 한다. 사용할 프로파일은 파일 쓰기를 거부한다. ```shell #include @@ -323,7 +322,7 @@ Events: 23s 23s 1 {kubelet e2e-test-stclair-node-pool-t1f5} Warning AppArmor Cannot enforce AppArmor: profile "k8s-apparmor-example-allow-write" is not loaded ``` -파드 상태는 Failed이며 오류메시지는 `Pod Cannot enforce AppArmor: profile +파드 상태는 Pending이며, 오류 메시지는 `Pod Cannot enforce AppArmor: profile "k8s-apparmor-example-allow-write" is not loaded`이다. 이벤트도 동일한 메시지로 기록되었다. ## 관리 {#administration} @@ -397,7 +396,7 @@ AppArmor가 일반 사용자 버전이 되면 제거된다. AppArmor는 일반 사용자 버전(general available)으로 준비되면 현재 어노테이션으로 지정되는 옵션은 필드로 변경될 것이다. 모든 업그레이드와 다운그레이드 방법은 전환을 통해 지원하기에는 매우 미묘하니 전환이 필요할 때에 상세히 설명할 것이다. -최소 두 번의 릴리즈에 대해서는 필드와 어노테이션 모두를 지원할 것이고, +최소 두 번의 릴리스에 대해서는 필드와 어노테이션 모두를 지원할 것이고, 그 이후부터는 어노테이션은 명확히 거부된다. ## 프로파일 제작 {#authoring-profiles} diff --git a/content/ko/docs/tutorials/configuration/configure-redis-using-configmap.md b/content/ko/docs/tutorials/configuration/configure-redis-using-configmap.md index 34095ef424379..c1b21d1404e5b 100644 --- a/content/ko/docs/tutorials/configuration/configure-redis-using-configmap.md +++ b/content/ko/docs/tutorials/configuration/configure-redis-using-configmap.md @@ -1,22 +1,23 @@ --- -title: 컨피그 맵을 사용해서 Redis 설정하기 + + + +title: 컨피그맵을 사용해서 Redis 설정하기 content_type: tutorial --- -이 페이지에서는 컨피그 맵을 사용해서 Redis를 설정하는 방법에 대한 실세계 예제를 제공하고, [컨피그 맵을 사용해서 컨테이너 설정하기](/docs/tasks/configure-pod-container/configure-pod-configmap/) 태스크로 빌드를 한다. +이 페이지에서는 컨피그맵(ConfigMap)을 사용해서 Redis를 설정하는 방법에 대한 실세계 예제를 제공하고, [컨피그맵을 사용해서 컨테이너 설정하기](/docs/tasks/configure-pod-container/configure-pod-configmap/) 태스크로 빌드를 한다. ## {{% heading "objectives" %}} -* 다음을 포함하는 `kustomization.yaml` 파일을 생성한다. - * 컨피그 맵 생성자 - * 컨피그 맵을 사용하는 파드 리소스 -* `kubectl apply -k ./`를 실행하여 작업한 디렉터리를 적용한다. -* 구성이 잘 적용되었는지 확인한다. +* Redis 설정값으로 컨피그맵을 생성한다. +* 생성된 컨피그맵을 마운트하고 사용하는 Redis 파드를 생성한다. +* 설정이 잘 적용되었는지 확인한다. @@ -26,91 +27,227 @@ content_type: tutorial {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * 예시는 `kubectl` 1.14 이상 버전에서 동작한다. -* [컨피그 맵을 사용해서 컨테이너 설정하기](/docs/tasks/configure-pod-container/configure-pod-configmap/)를 이해한다. +* [컨피그맵을 사용해서 컨테이너 설정하기](/docs/tasks/configure-pod-container/configure-pod-configmap/)를 이해한다. -## 실세상 예제: 컨피그 맵을 사용해서 Redis 설정하기 - -아래의 단계를 통해서 컨피그 맵에 저장된 데이터를 사용해서 Redis 캐시를 설정할 수 있다. +## 실세상 예제: 컨피그맵을 사용해서 Redis 설정하기 -첫째, `redis-config` 파일에서 컨피그 맵을 포함한 `kustomization.yaml`를 생성한다. +아래 단계를 통해서, 컨피그맵에 저장된 데이터를 사용하는 Redis 캐시를 설정한다. -{{< codenew file="pods/config/redis-config" >}} +우선, 비어 있는 설정으로 컨피그맵을 생성한다. ```shell -curl -OL https://k8s.io/examples/pods/config/redis-config - -cat <./kustomization.yaml -configMapGenerator: -- name: example-redis-config - files: - - redis-config +cat <./example-redis-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-redis-config +data: + redis-config: "" EOF ``` -`kustomization.yaml`에 파드 리소스 구성을 추가한다. +위에서 생성한 컨피그맵을 Redis 파드 매니페스트와 함께 적용한다. + +```shell +kubectl apply -f example-redis-config.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/pods/config/redis-pod.yaml +``` + +Redis 파드 매니페스트의 내용을 검토하고 다음의 사항을 염두에 둔다. + +* `config` 라는 이름의 볼륨은 `spec.volumes[1]` 에 의해서 생성된다. 
+* `spec.volumes[1].items[0]` 내부의 `key` 와 `path` 는 `config` 볼륨에 `redis.conf` 라는 파일명으로 지정된 + `example-redis-config` 컨피그맵의 `redis-config` 키를 노출시킨다. +* 그리고 `config` 볼륨은 `spec.containers[0].volumeMounts[1]` 에 의해서 `/redis-master` 에 마운트된다. + +이 내용은 위의 `example-redis-config` 컨피그맵의 `data.redis-config` 내부 데이터를 파드 안에 있는 +`/redis-master/redis.conf` 파일의 내용으로 노출시키는 순효과(net effect)를 낸다. {{< codenew file="pods/config/redis-pod.yaml" >}} +생성된 오브젝트를 확인한다. + ```shell -curl -OL https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/pods/config/redis-pod.yaml +kubectl get pod/redis configmap/example-redis-config +``` -cat <>./kustomization.yaml -resources: -- redis-pod.yaml -EOF +다음의 결과를 볼 수 있다. + +```shell +NAME READY STATUS RESTARTS AGE +pod/redis 1/1 Running 0 8s + +NAME DATA AGE +configmap/example-redis-config 1 14s ``` -컨피그 맵과 파드 오브젝트를 생성하도록 kustomization 디렉터리를 적용한다. +`example-redis-config` 컨피그맵의 `redis-config` 키를 공란으로 둔 것을 기억하자. ```shell -kubectl apply -k . +kubectl describe configmap/example-redis-config ``` -생성된 오브젝트를 확인한다. +`redis-config` 키가 비어 있는 것을 확인할 수 있다. + ```shell -> kubectl get -k . -NAME DATA AGE -configmap/example-redis-config-dgh9dg555m 1 52s +Name: example-redis-config +Namespace: default +Labels: +Annotations: + +Data +==== +redis-config: +``` -NAME READY STATUS RESTARTS AGE -pod/redis 1/1 Running 0 52s +`kubectl exec` 를 사용하여 파드에 접속하고, 현재 설정 확인을 위해서 `redis-cli` 도구를 실행한다. + +```shell +kubectl exec -it redis -- redis-cli ``` -이 예제에서는 설정 볼륨이 `/redis-master`에 마운트되어 있다. -`redis-config` 키를 `redis.conf`라는 이름의 파일에 추가하기 위해 `path`를 사용한다. -따라서, Redis 설정을 위한 파일 경로는 `/redis-master/redis.conf`이다. -이곳이 이미지가 Redis 마스터를 위한 설정 파일을 찾는 곳이다. +`maxmemory` 를 확인한다. -설정이 올바르게 적용되었는지 확인하기 위해서, -`kubectl exec`를 사용해 파드 속에서 `redis-cli` 툴을 실행해 본다. +```shell +127.0.0.1:6379> CONFIG GET maxmemory +``` + +기본값인 0을 볼 수 있을 것이다. + +```shell +1) "maxmemory" +2) "0" +``` + +유사하게, `maxmemory-policy` 를 확인한다. + +```shell +127.0.0.1:6379> CONFIG GET maxmemory-policy +``` + +이것도 기본값인 `noeviction` 을 보여줄 것이다. + +```shell +1) "maxmemory-policy" +2) "noeviction" +``` + +이제 `example-redis-config` 컨피그맵에 몇 가지 설정값을 추가해 본다. + +{{< codenew file="pods/config/example-redis-config.yaml" >}} + +갱신된 컨피그맵을 적용한다. + +```shell +kubectl apply -f example-redis-config.yaml +``` + +컨피그맵이 갱신된 것을 확인한다. + +```shell +kubectl describe configmap/example-redis-config +``` + +방금 추가한 설정값을 확인할 수 있을 것이다. + +```shell +Name: example-redis-config +Namespace: default +Labels: +Annotations: + +Data +==== +redis-config: +---- +maxmemory 2mb +maxmemory-policy allkeys-lru +``` + +설정이 적용되었는지 확인하려면, `kubectl exec` 를 통한 `redis-cli` 로 Redis 파드를 다시 확인한다. ```shell kubectl exec -it redis -- redis-cli +``` + +`maxmemory` 를 확인한다. + +```shell 127.0.0.1:6379> CONFIG GET maxmemory +``` + +기본값인 0을 볼 수 있을 것이다. + +```shell 1) "maxmemory" -2) "2097152" +2) "0" +``` + +유사하게, `maxmemory-policy` 도 기본 설정인 `noeviction` 을 보여줄 것이다. + +```shell 127.0.0.1:6379> CONFIG GET maxmemory-policy +``` + +위의 명령은 다음을 반환한다. + +```shell 1) "maxmemory-policy" -2) "allkeys-lru" +2) "noeviction" ``` -생성된 파드를 삭제한다. +파드는 연관된 컨피그맵에서 갱신된 값을 인지하기 위해서 재시작이 필요하므로 +해당 설정값이 변경되지 않은 상태이다. 파드를 삭제하고 다시 생성한다. + ```shell kubectl delete pod redis +kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/pods/config/redis-pod.yaml +``` + +이제 마지막으로 설정값을 다시 확인해 본다. + +```shell +kubectl exec -it redis -- redis-cli ``` +`maxmemory` 를 확인한다. +```shell +127.0.0.1:6379> CONFIG GET maxmemory +``` -## {{% heading "whatsnext" %}} +이것은 이제 갱신된 값인 2097152를 반환한다. 
+ +```shell +1) "maxmemory" +2) "2097152" +``` +유사하게, `maxmemory-policy` 도 갱신되어 있다. -* [컨피그 맵](/docs/tasks/configure-pod-container/configure-pod-configmap/) 배우기. +```shell +127.0.0.1:6379> CONFIG GET maxmemory-policy +``` +이것은 원하는 값인 `allkeys-lru` 를 반환한다. +```shell +1) "maxmemory-policy" +2) "allkeys-lru" +``` + +생성된 자원을 삭제하여 작업을 정리한다. + +```shell +kubectl delete pod/redis configmap/example-redis-config +``` + +## {{% heading "whatsnext" %}} +* [컨피그맵](/docs/tasks/configure-pod-container/configure-pod-configmap/) 배우기. diff --git a/content/ko/docs/tutorials/hello-minikube.md b/content/ko/docs/tutorials/hello-minikube.md index 8f3f515f3193d..e5831046dfb46 100644 --- a/content/ko/docs/tutorials/hello-minikube.md +++ b/content/ko/docs/tutorials/hello-minikube.md @@ -48,7 +48,7 @@ Katacode는 무료로 브라우저에서 쿠버네티스 환경을 제공한다. {{< kat-button >}} {{< note >}} - minikube를 로컬에 설치했다면 `minikube start`를 실행한다. + minikube를 로컬에 설치했다면 `minikube start`를 실행한다. `minikube dashboard` 명령을 실행하기 전에, 새 터미널을 열고, 그 터미널에서 `minikube dashboard` 명령을 실행한 후, 원래의 터미널로 돌아온다. {{< /note >}} 2. 브라우저에서 쿠버네티스 대시보드를 열어보자. @@ -61,6 +61,22 @@ Katacode는 무료로 브라우저에서 쿠버네티스 환경을 제공한다. 4. Katacoda 환경에서는: 30000 을 입력하고 **Display Port** 를 클릭. +{{< note >}} +`minikube dashboard` 명령을 내리면 대시보드 애드온과 프록시가 활성화되고 해당 프록시로 접속하는 기본 웹 브라우저 창이 열린다. 대시보드에서 디플로이먼트나 서비스와 같은 쿠버네티스 자원을 생성할 수 있다. + +root 환경에서 명령어를 실행하고 있다면, [URL을 이용하여 대시보드 접속하기](#open-dashboard-with-url)를 참고한다. + +`Ctrl+C` 를 눌러 프록시를 종료할 수 있다. 대시보드는 종료되지 않고 실행 상태로 남아 있다. +{{< /note >}} + +## URL을 이용하여 대시보드 접속하기 {#open-dashboard-with-url} + +자동으로 웹 브라우저가 열리는 것을 원치 않는다면, 다음과 같은 명령어를 실행하여 대시보드 접속 URL을 출력할 수 있다: + +```shell +minikube dashboard --url +``` + ## 디플로이먼트 만들기 쿠버네티스 [*파드*](/ko/docs/concepts/workloads/pods/)는 관리와 @@ -138,7 +154,7 @@ Katacode는 무료로 브라우저에서 쿠버네티스 환경을 제공한다. `k8s.gcr.io/echoserver` 이미지 내의 애플리케이션 코드는 TCP 포트 8080에서만 수신한다. `kubectl expose`를 사용하여 다른 포트를 노출한 경우, 클라이언트는 다른 포트에 연결할 수 없다. -2. 방금 생성한 서비스 살펴보기 +2. 생성한 서비스 살펴보기 ```shell kubectl get services @@ -213,7 +229,7 @@ minikube 툴은 활성화하거나 비활성화할 수 있고 로컬 쿠버네 metrics-server was successfully enabled ``` -3. 방금 생성한 파드와 서비스를 확인한다. +3. 생성한 파드와 서비스를 확인한다. ```shell kubectl get pod,svc -n kube-system diff --git a/content/ko/docs/tutorials/kubernetes-basics/_index.html b/content/ko/docs/tutorials/kubernetes-basics/_index.html index 4be0e942317dd..f1ab5935819c5 100644 --- a/content/ko/docs/tutorials/kubernetes-basics/_index.html +++ b/content/ko/docs/tutorials/kubernetes-basics/_index.html @@ -41,7 +41,7 @@

    쿠버네티스 기초

    쿠버네티스가 어떤 도움이 될까?

    -

    오늘날의 웹서비스에 대해서, 사용자는 애플리케이션이 24/7 가용하기를 바라고, 개발자는 하루에도 몇 번이고 새로운 버전의 애플리케이션을 배포하기를 바란다. 컨테이너화를 통해 소프트웨어를 패키지하면 애플리케이션을 다운타임 없이 쉽고 빠르게 릴리스 및 업데이트할 수 있게 되어서 이런 목표를 달성하는데 도움이 된다. 쿠버네티스는 이렇게 컨테이너화된 애플리케이션을 원하는 곳 어디에든 또 언제든 구동시킬 수 있다는 확신을 갖는데 도움을 주며, 그 애플리케이션이 작동하는데 필요한 자원과 도구를 찾는 것을 도와준다. 쿠버네티스는 구글의 컨테이너 오케스트레이션 부문의 축적된 경험으로 설계되고 커뮤니티로부터 도출된 최고의 아이디어가 결합된 운영 수준의 오픈 소스 플랫폼이다.

    +

    오늘날의 웹서비스에 대해서, 사용자는 애플리케이션이 24/7 가용하기를 바라고, 개발자는 하루에도 몇 번이고 새로운 버전의 애플리케이션을 배포하기를 바란다. 컨테이너화를 통해 소프트웨어를 패키지하면 애플리케이션을 다운타임 없이 릴리스 및 업데이트할 수 있게 되어서 이런 목표를 달성하는데 도움이 된다. 쿠버네티스는 이렇게 컨테이너화된 애플리케이션을 원하는 곳 어디에든 또 언제든 구동시킬 수 있다는 확신을 갖는데 도움을 주며, 그 애플리케이션이 작동하는데 필요한 자원과 도구를 찾는 것을 도와준다. 쿠버네티스는 구글의 컨테이너 오케스트레이션 부문의 축적된 경험으로 설계되고 커뮤니티로부터 도출된 최고의 아이디어가 결합된 운영 수준의 오픈 소스 플랫폼이다.

    diff --git a/content/ko/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html b/content/ko/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html index f51d68e866305..da8cce3e17de7 100644 --- a/content/ko/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html +++ b/content/ko/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html @@ -33,7 +33,7 @@

    쿠버네티스 클러스터

    쿠버네티스 클러스터는 두 가지 형태의 자원으로 구성된다.

      -
    • 마스터는 클러스터를 조율한다.
    • +
    • 컨트롤 플레인은 클러스터를 조율한다.
    • 노드는 애플리케이션을 구동하는 작업자(worker)이다.

    @@ -71,20 +71,20 @@

    클러스터 다이어그램

    -

    마스터는 클러스터 관리를 담당한다. 마스터는 애플리케이션을 스케줄링하거나, 애플리케이션의 항상성을 유지하거나, 애플리케이션을 스케일링하고, 새로운 변경사항을 순서대로 반영(rolling out)하는 일과 같은 클러스터 내 모든 활동을 조율한다.

    -

    노드는 쿠버네티스 클러스터 내 워커 머신으로 동작하는 VM 또는 물리적인 컴퓨터다. 각 노드는 노드를 관리하고 쿠버네티스 마스터와 통신하는 Kubelet이라는 에이전트를 갖는다. 노드는 컨테이너 운영을 담당하는 containerd 또는 도커와 같은 툴도 갖는다. 운영 트래픽을 처리하는 쿠버네티스 클러스터는 최소 세 대의 노드를 가져야 한다.

    +

    컨트롤 플레인은 클러스터 관리를 담당한다. 컨트롤 플레인은 애플리케이션을 스케줄링하거나, 애플리케이션의 항상성을 유지하거나, 애플리케이션을 스케일링하고, 새로운 변경사항을 순서대로 반영(rolling out)하는 일과 같은 클러스터 내 모든 활동을 조율한다.

    +

    노드는 쿠버네티스 클러스터 내 워커 머신으로 동작하는 VM 또는 물리적인 컴퓨터다. 각 노드는 노드를 관리하고 쿠버네티스 컨트롤 플레인과 통신하는 Kubelet이라는 에이전트를 갖는다. 노드는 컨테이너 운영을 담당하는 containerd 또는 도커와 같은 툴도 갖는다. 운영 트래픽을 처리하는 쿠버네티스 클러스터는 최소 세 대의 노드를 가져야 한다.

    -

    마스터는 실행 중인 애플리케이션을 호스팅하기 위해 사용되는 노드와 클러스터를 관리한다.

    +

    컨트롤 플레인은 실행 중인 애플리케이션을 호스팅하기 위해 사용되는 노드와 클러스터를 관리한다.

    -

    애플리케이션을 쿠버네티스에 배포하기 위해서는, 마스터에 애플리케이션 컨테이너의 구동을 지시하면 된다. 그러면 마스터는 컨테이너를 클러스터의 어느 노드에 구동시킬지 스케줄한다. 노드는 마스터가 제공하는 쿠버네티스 API를 통해서 마스터와 통신한다. 최종 사용자도 쿠버네티스 API를 사용해서 클러스터와 직접 상호작용(interact)할 수 있다.

    +

    애플리케이션을 쿠버네티스에 배포하기 위해서는, 컨트롤 플레인에 애플리케이션 컨테이너의 구동을 지시하면 된다. 그러면 컨트롤 플레인은 컨테이너를 클러스터의 어느 노드에 구동시킬지 스케줄한다. 노드는 컨트롤 플레인이 제공하는 쿠버네티스 API를 통해서 컨트롤 플레인과 통신한다. 최종 사용자도 쿠버네티스 API를 사용해서 클러스터와 직접 상호작용(interact)할 수 있다.
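아래는 최종 사용자가 쿠버네티스 API와 직접 상호작용하는 방법을 보여주는 최소한의 스케치이다. kubectl이 설치되어 있고 클러스터에 접근할 수 있게 구성되어 있다고 가정한다.

```shell
# kubectl proxy는 로컬 포트를 API 서버로 프록시한다(기본 포트는 8001)
kubectl proxy --port=8001 &

# 프록시를 통해 API 서버의 버전 엔드포인트를 직접 호출해 볼 수 있다
curl http://localhost:8001/version
```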

    쿠버네티스 클러스터는 물리 및 가상 머신 모두에 설치될 수 있다. 쿠버네티스 개발을 시작하려면 Minikube를 사용할 수 있다. Minikube는 가벼운 쿠버네티스 구현체이며, 로컬 머신에 VM을 만들고 하나의 노드로 구성된 간단한 클러스터를 생성한다. Minikube는 리눅스, 맥, 그리고 윈도우 시스템에서 구동이 가능하다. Minikube CLI는 클러스터에 대해 시작, 중지, 상태 조회 및 삭제 등의 기본적인 부트스트래핑(bootstrapping) 기능을 제공한다. 하지만, 본 튜토리얼에서는 Minikube가 미리 설치된 채로 제공되는 온라인 터미널을 사용할 것이다.
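아래는 Minikube가 이미 설치되어 있다고 가정하고, 로컬에 단일 노드 클러스터를 시작해서 확인하는 최소한의 스케치이다.

```shell
# 로컬에 단일 노드 클러스터를 생성하고 시작한다
minikube start

# 클러스터 구성 요소의 상태를 확인한다
minikube status

# 노드가 Ready 상태인지 kubectl로 확인한다
kubectl get nodes
```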

    diff --git a/content/ko/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html b/content/ko/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html index 5b41fe207a4fd..4c250c127271f 100644 --- a/content/ko/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html +++ b/content/ko/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html @@ -31,7 +31,7 @@

    쿠버네티스 디플로이먼트

    일단 쿠버네티스 클러스터를 구동시키면, 그 위에 컨테이너화된 애플리케이션을 배포할 수 있다. 그러기 위해서, 쿠버네티스 디플로이먼트 설정을 만들어야 한다. 디플로이먼트는 쿠버네티스가 애플리케이션의 인스턴스를 어떻게 생성하고 업데이트해야 하는지를 지시한다. 디플로이먼트가 만들어지면, - 쿠버네티스 마스터가 해당 디플로이먼트에 포함된 애플리케이션 인스턴스가 클러스터의 개별 노드에서 실행되도록 스케줄한다. + 쿠버네티스 컨트롤 플레인이 해당 디플로이먼트에 포함된 애플리케이션 인스턴스가 클러스터의 개별 노드에서 실행되도록 스케줄한다.
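아래는 디플로이먼트를 생성하면 컨트롤 플레인이 파드를 노드에 스케줄하는 것을 확인해 보는 최소한의 스케치이다. `hello-node`라는 이름과 예시 이미지는 설명을 위한 가정이다.

```shell
# 예시 이미지 한 개를 실행하는 디플로이먼트를 생성한다
kubectl create deployment hello-node --image=k8s.gcr.io/echoserver:1.4

# 디플로이먼트 상태를 확인한다
kubectl get deployments

# -o wide 옵션으로 각 파드가 어느 노드에 스케줄되었는지 볼 수 있다
kubectl get pods -o wide
```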

    애플리케이션 인스턴스가 생성되면, 쿠버네티스 디플로이먼트 컨트롤러는 지속적으로 이들 인스턴스를 diff --git a/content/ko/docs/tutorials/kubernetes-basics/expose/expose-intro.html b/content/ko/docs/tutorials/kubernetes-basics/expose/expose-intro.html index ebc880dbddd35..aac6298a7add5 100644 --- a/content/ko/docs/tutorials/kubernetes-basics/expose/expose-intro.html +++ b/content/ko/docs/tutorials/kubernetes-basics/expose/expose-intro.html @@ -37,7 +37,7 @@

    쿠버네티스 서비스들에 대한 개요

• ClusterIP (기본값) - 클러스터 내에서 내부 IP에 대해 서비스를 노출해준다. 이 방식은 오직 클러스터 내에서만 서비스가 접근될 수 있도록 해준다.
• NodePort - NAT가 이용되는 클러스터 내에서 각각 선택된 노드들의 동일한 포트에 서비스를 노출시켜준다. <NodeIP>:<NodePort>를 이용하여 클러스터 외부로부터 서비스에 접근할 수 있도록 해준다. ClusterIP의 상위 집합이다.
  • LoadBalancer - (지원 가능한 경우) 기존 클라우드에서 외부용 로드밸런서를 생성하고 서비스에 고정된 공인 IP를 할당해준다. NodePort의 상위 집합이다.
  • -
  • ExternalName - 이름으로 CNAME 레코드를 반환함으로써 임의의 이름(스펙에서 externalName으로 명시)을 이용하여 서비스를 노출시켜준다. 프록시는 사용되지 않는다. 이 방식은 kube-dns 버전 1.7 이상에서 지원 가능하다.
  • +
  • ExternalName - CNAME 레코드 및 값을 반환함으로써 서비스를 externalName 필드의 내용(예를 들면, `foo.bar.example.com`)에 매핑한다. 어떠한 종류의 프록시도 설정되지 않는다. 이 방식은 kube-dns v1.7 이상 또는 CoreDNS 버전 0.0.8 이상을 필요로 한다.
  • 다른 서비스 타입들에 대한 추가 정보는 소스 IP 이용하기 튜토리얼에서 확인 가능하다. 또한 서비스들로 애플리케이션에 접속하기도 참고해 보자.

    부가적으로, spec에 selector를 정의하지 않고 말아넣은 서비스들의 몇 가지 유즈케이스들이 있음을 주의하자. selector 없이 생성된 서비스는 상응하는 엔드포인트 오브젝트들 또한 생성하지 않는다. 이로써 사용자들로 하여금 하나의 서비스를 특정한 엔드포인트에 매핑 시킬수 있도록 해준다. selector를 생략하게 되는 또 다른 가능성은 여러분이 type: ExternalName을 이용하겠다고 확고하게 의도하는 경우이다.

    @@ -64,12 +64,6 @@

    서비스와 레이블

    -
    -
    -

    -
    -
    -

서비스는 파드 셋에 걸쳐서 트래픽을 라우트한다. 여러분의 애플리케이션에 영향을 주지 않으면서 쿠버네티스에서 파드들이 죽게도 하고, 복제가 되게도 해주는 추상적 개념이다. 종속적인 파드들 사이에서의 디스커버리와 라우팅은 (하나의 애플리케이션에서 프론트엔드와 백엔드 컴포넌트와 같은) 쿠버네티스 서비스들에 의해 처리된다.
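아래는 디플로이먼트를 서비스로 노출하고, 서비스가 레이블로 파드를 선택하는 것을 확인하는 최소한의 스케치이다. 앞의 예시와 같은 `hello-node` 디플로이먼트가 이미 있다고 가정한다.

```shell
# 디플로이먼트를 NodePort 타입의 서비스로 노출한다
kubectl expose deployment hello-node --type=NodePort --port=8080

# 생성된 서비스와, 서비스가 트래픽을 라우트할 파드를 고르는 셀렉터를 확인한다
kubectl get services
kubectl describe service hello-node
```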

    diff --git a/content/ko/docs/tutorials/services/source-ip.md b/content/ko/docs/tutorials/services/source-ip.md index 9d47599589959..6874d6669b858 100644 --- a/content/ko/docs/tutorials/services/source-ip.md +++ b/content/ko/docs/tutorials/services/source-ip.md @@ -412,7 +412,7 @@ client_address=198.51.100.79 HTTP [Forwarded](https://tools.ietf.org/html/rfc7239#section-5.2) 또는 [X-FORWARDED-FOR](https://en.wikipedia.org/wiki/X-Forwarded-For) 헤더 또는 -[프록시 프로토콜](https://www.haproxy.org/download/1.5/doc/proxy-protocol.txt)과 +[프록시 프로토콜](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt)과 같은 로드밸런서와 백엔드 간에 합의된 프로토콜을 사용해야 한다. 두 번째 범주의 로드밸런서는 서비스의 `service.spec.healthCheckNodePort` 필드의 저장된 포트를 가르키는 HTTP 헬스 체크를 생성하여 @@ -426,7 +426,7 @@ HTTP 헬스 체크를 생성하여 서비스를 삭제한다. ```shell -kubectl delete svc -l run=source-ip-app +kubectl delete svc -l app=source-ip-app ``` 디플로이먼트, 레플리카셋 그리고 파드를 삭제한다. diff --git a/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md b/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md index 35eb5407246fd..8b0a258ae6246 100644 --- a/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md +++ b/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md @@ -434,7 +434,7 @@ web-4 0/1 ContainerCreating 0 0s web-4 1/1 Running 0 19s ``` -스테이트풀셋 컨트롤러는 레플리카개수를 스케일링한다. +스테이트풀셋 컨트롤러는 레플리카 개수를 스케일링한다. [스테이트풀셋 생성](#차례대로-파드-생성하기)으로 스테이트풀셋 컨트롤러는 각 파드을 순차적으로 각 순번에 따라 생성하고 후속 파드 시작 전에 이전 파드가 Running과 Ready 상태가 될 때까지 @@ -921,7 +921,7 @@ web-2 0/1 Terminating 0 3m `web` 스테이트풀셋이 다시 생성될 때 먼저 `web-0` 시작한다. `web-1`은 이미 Running과 Ready 상태이므로 `web-0`이 Running과 Ready 상태로 -전환될 때는 단순히 이 파드에 적용됬다. 스테이트풀셋에`replicas`를 2로 하고 +전환될 때는 이 파드에 적용됐다. 스테이트풀셋에 `replicas`를 2로 하고 `web-0`을 재생성했다면 `web-1`이 이미 Running과 Ready 상태이고, `web-2`은 종료되었을 것이다. @@ -932,6 +932,7 @@ web-2 0/1 Terminating 0 3m ```shell for i in 0 1; do kubectl exec -i -t "web-$i" -- curl http://localhost/; done ``` + ``` web-0 web-1 @@ -957,6 +958,7 @@ kubectl get pods -w -l app=nginx ```shell kubectl delete statefulset web ``` + ``` statefulset.apps "web" deleted ``` @@ -966,6 +968,7 @@ statefulset.apps "web" deleted ```shell kubectl get pods -w -l app=nginx ``` + ``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 11m @@ -997,6 +1000,7 @@ web-1 0/1 Terminating 0 29m ```shell kubectl delete service nginx ``` + ``` service "nginx" deleted ``` @@ -1006,6 +1010,7 @@ service "nginx" deleted ```shell kubectl apply -f web.yaml ``` + ``` service/nginx created statefulset.apps/web created @@ -1017,6 +1022,7 @@ statefulset.apps/web created ```shell for i in 0 1; do kubectl exec -i -t "web-$i" -- curl http://localhost/; done ``` + ``` web-0 web-1 @@ -1031,13 +1037,16 @@ web-1 ```shell kubectl delete service nginx ``` + ``` service "nginx" deleted ``` + 그리고 `web` 스테이트풀셋을 삭제한다. ```shell kubectl delete statefulset web ``` + ``` statefulset "web" deleted ``` @@ -1058,9 +1067,10 @@ statefulset "web" deleted ### Parallel 파드 관리 `Parallel` 파드 관리는 스테이트풀셋 컨트롤러가 모든 파드를 -병렬로 시작하고 종료하는 것으로 다른 파드를 시작/종료하기 전에 +병렬로 시작하고 종료하는 것으로, 다른 파드를 시작/종료하기 전에 파드가 Running과 Ready 상태로 전환되거나 완전히 종료되기까지 기다리지 않음을 뜻한다. +이 옵션은 스케일링 동작에만 영향을 미치며, 업데이트 동작에는 영향을 미치지 않는다. {{< codenew file="application/web/web-parallel.yaml" >}} @@ -1105,7 +1115,7 @@ web-1 1/1 Running 0 10s 스테이트풀셋 컨트롤러는 `web-0`와 `web-1`를 둘 다 동시에 시작했다. 두 번째 터미널을 열어 놓고 다른 터미널창에서 스테이트풀셋을 -스케일링 하자. +스케일링하자. 
```shell kubectl scale statefulset/web --replicas=4 diff --git a/content/ko/docs/tutorials/stateful-application/cassandra.md b/content/ko/docs/tutorials/stateful-application/cassandra.md index 8273f3bcd9fbb..7b1888a15e5df 100644 --- a/content/ko/docs/tutorials/stateful-application/cassandra.md +++ b/content/ko/docs/tutorials/stateful-application/cassandra.md @@ -114,7 +114,7 @@ cassandra ClusterIP None 9042/TCP 45s kubectl apply -f https://k8s.io/examples/application/cassandra/cassandra-statefulset.yaml ``` -클러스터에 맞게 `cassandra-statefulset.yaml` 를 수정해야 하는 경우 다음을 다운로드 한 다음 +클러스터에 맞게 `cassandra-statefulset.yaml` 를 수정해야 하는 경우 다음을 다운로드한 다음 수정된 버전을 저장한 폴더에서 해당 매니페스트를 적용한다. https://k8s.io/examples/application/cassandra/cassandra-statefulset.yaml ```shell @@ -270,7 +270,7 @@ kubectl apply -f cassandra-statefulset.yaml 기반하였고 OpenJDK 8을 포함한다. 이 이미지는 아파치 데비안 리포의 표준 카산드라 설치본을 포함한다. -환경변수를 이용하여 `cassandra.yaml`에 삽입된 값을 바꿀 수 있다. +환경 변수를 이용하여 `cassandra.yaml`에 삽입된 값을 바꿀 수 있다. | 환경 변수 | 기본값 | | ------------- |:-------------: | diff --git a/content/ko/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md b/content/ko/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md index 5c27b55183d4f..4c5f690d70307 100644 --- a/content/ko/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md +++ b/content/ko/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md @@ -91,7 +91,7 @@ EOF ## MySQL과 WordPress에 필요한 리소스 구성 추가하기 -다음 매니페스트는 MySQL 디플로이먼트 단일 인스턴스를 기술한다. MySQL 컨케이너는 퍼시스턴트볼륨을 /var/lib/mysql에 마운트한다. `MYSQL_ROOT_PASSWORD` 환경변수는 시크릿에서 가져와 데이터베이스 암호로 설정한다. +다음 매니페스트는 MySQL 디플로이먼트 단일 인스턴스를 기술한다. MySQL 컨케이너는 퍼시스턴트볼륨을 /var/lib/mysql에 마운트한다. `MYSQL_ROOT_PASSWORD` 환경 변수는 시크릿에서 가져와 데이터베이스 암호로 설정한다. {{< codenew file="application/wordpress/mysql-deployment.yaml" >}} diff --git a/content/ko/docs/tutorials/stateful-application/zookeeper.md b/content/ko/docs/tutorials/stateful-application/zookeeper.md index b1e6dbe5238b7..3fca0a6749a4e 100644 --- a/content/ko/docs/tutorials/stateful-application/zookeeper.md +++ b/content/ko/docs/tutorials/stateful-application/zookeeper.md @@ -15,17 +15,17 @@ weight: 40 이 튜토리얼을 시작하기 전에 다음 쿠버네티스 개념에 친숙해야 한다. -- [파드](/ko/docs/concepts/workloads/pods/) -- [클러스터 DNS](/ko/docs/concepts/services-networking/dns-pod-service/) -- [헤드리스 서비스](/ko/docs/concepts/services-networking/service/#헤드리스-headless-서비스) -- [퍼시스턴트볼륨](/ko/docs/concepts/storage/persistent-volumes/) -- [퍼시스턴트볼륨 프로비저닝](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/persistent-volume-provisioning/) -- [스테이트풀셋](/ko/docs/concepts/workloads/controllers/statefulset/) -- [PodDisruptionBudget](/ko/docs/concepts/workloads/pods/disruptions/#파드-disruption-budgets) -- [파드안티어피니티](/ko/docs/concepts/scheduling-eviction/assign-pod-node/#어피니티-affinity-와-안티-어피니티-anti-affinity) -- [kubectl CLI](/ko/docs/reference/kubectl/kubectl/) - -최소한 4개의 노드가 있는 클러스터가 필요하며, 각 노드는 적어도 2 개의 CPU와 4 GiB 메모리가 필요하다. 이 튜토리얼에서 클러스터 노드를 통제(cordon)하고 비우게(drain) 할 것이다. **이것은 클러스터를 종료하여 노드의 모든 파드를 퇴출(evict)하는 것으로, 모든 파드는 임시로 언스케줄된다는 의미이다.** 이 튜토리얼을 위해 전용 클러스터를 이용하거나, 다른 테넌트에 간섭을 하는 혼란이 발생하지 않도록 해야 합니다. 
+- [파드](/ko/docs/concepts/workloads/pods/) +- [클러스터 DNS](/ko/docs/concepts/services-networking/dns-pod-service/) +- [헤드리스 서비스](/ko/docs/concepts/services-networking/service/#헤드리스-headless-서비스) +- [퍼시스턴트볼륨](/ko/docs/concepts/storage/persistent-volumes/) +- [퍼시스턴트볼륨 프로비저닝](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/persistent-volume-provisioning/) +- [스테이트풀셋](/ko/docs/concepts/workloads/controllers/statefulset/) +- [PodDisruptionBudget](/ko/docs/concepts/workloads/pods/disruptions/#파드-disruption-budgets) +- [파드안티어피니티](/ko/docs/concepts/scheduling-eviction/assign-pod-node/#어피니티-affinity-와-안티-어피니티-anti-affinity) +- [kubectl CLI](/ko/docs/reference/kubectl/kubectl/) + +반드시 최소한 4개의 노드가 있는 클러스터가 필요하며, 각 노드는 적어도 2 개의 CPU와 4 GiB 메모리가 필요하다. 이 튜토리얼에서 클러스터 노드를 통제(cordon)하고 비우게(drain) 할 것이다. **이것은 클러스터를 종료하여 노드의 모든 파드를 축출(evict)하는 것으로, 모든 파드는 임시로 언스케줄된다는 의미이다.** 이 튜토리얼을 위해 전용 클러스터를 이용하거나, 다른 테넌트에 간섭을 하는 혼란이 발생하지 않도록 해야 합니다. 이 튜토리얼은 클러스터가 동적으로 퍼시스턴트볼륨을 프로비저닝하도록 구성한다고 가정한다. 그렇게 설정되어 있지 않다면 @@ -37,15 +37,15 @@ weight: 40 이 튜토리얼을 마치면 다음에 대해 알게 된다. -- 어떻게 스테이트풀셋을 이용하여 ZooKeeper 앙상블을 배포하는가. -- 어떻게 앙상블을 일관되게 설정하는가. -- 어떻게 ZooKeeper 서버 디플로이먼트를 앙상블 안에서 퍼뜨리는가. -- 어떻게 PodDisruptionBudget을 이용하여 계획된 점검 기간 동안 서비스 가용성을 보장하는가. +- 어떻게 스테이트풀셋을 이용하여 ZooKeeper 앙상블을 배포하는가. +- 어떻게 앙상블을 일관되게 설정하는가. +- 어떻게 ZooKeeper 서버 디플로이먼트를 앙상블 안에서 퍼뜨리는가. +- 어떻게 PodDisruptionBudget을 이용하여 계획된 점검 기간 동안 서비스 가용성을 보장하는가. -### ZooKeeper 기본 {#zookeeper-basics} +### ZooKeeper [아파치 ZooKeeper](https://zookeeper.apache.org/doc/current/)는 분산 애플리케이션을 위한 분산 오픈 소스 코디네이션 서비스이다. @@ -438,8 +438,8 @@ datadir-zk-2 Bound pvc-bee0817e-bcb1-11e6-994f-42010a800002 20Gi R ```shell volumeMounts: - - name: datadir - mountPath: /var/lib/zookeeper +- name: datadir + mountPath: /var/lib/zookeeper ``` `zk` 스테이트풀셋이 (재)스케줄링될 때 항상 동일한 `퍼시스턴트볼륨`을 @@ -462,6 +462,7 @@ ZooKeeper 앙상블에 서버는 리더 선출과 쿼럼을 구성하기 위한 ```shell kubectl get sts zk -o yaml ``` + ``` … command: @@ -551,11 +552,9 @@ kubectl logs zk-0 --tail 20 2016-12-06 19:34:46,230 [myid:1] - INFO [Thread-1142:NIOServerCnxn@1008] - Closed socket connection for client /127.0.0.1:52768 (no session established for client) ``` -쿠버네티스는 더 강력하지만 조금 복잡한 로그 통합을 -[스택드라이버](/docs/tasks/debug-application-cluster/logging-stackdriver/)와 -[Elasticsearch와 Kibana](/ko/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana/)를 지원한다. -클러스터 수준의 로그 적재(ship)와 통합을 위해서는 로그 순환과 적재를 위해 -[사이드카](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns) 컨테이너를 배포하는 것을 고려한다. +쿠버네티스는 많은 로그 솔루션과 통합된다. 클러스터와 애플리케이션에 +가장 적합한 로그 솔루션을 선택할 수 있다. 클러스터 수준의 +로그 적재(ship)와 통합을 위해서는 로그 순환과 적재를 위해 [사이드카 컨테이너](/ko/docs/concepts/cluster-administration/logging/#로깅-에이전트와-함께-사이드카-컨테이너-사용)를 배포하는 것을 고려한다. ### 권한 없는 사용자를 위해 구성하기 @@ -623,6 +622,7 @@ drwxr-sr-x 3 zookeeper zookeeper 4096 Dec 5 20:45 /var/lib/zookeeper/data ```shell kubectl patch sts zk --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/resources/requests/cpu", "value":"0.3"}]' ``` + ``` statefulset.apps/zk patched ``` @@ -632,6 +632,7 @@ statefulset.apps/zk patched ```shell kubectl rollout status sts/zk ``` + ``` waiting for statefulset rolling update to complete 0 pods at revision zk-5db4499664... Waiting for 1 pods to be ready... @@ -872,8 +873,8 @@ kubernetes-node-2g2d ## 생존 유지 -**이 섹션에서는 노드를 통제(cordon)하고 비운다(drain). 공유된 클러스터에서 이 튜토리얼을 진행한다면, -다른 테넌트에 부정적인 영향을 비치지 않음을 보증해야 한다.** +이 섹션에서는 노드를 통제(cordon)하고 비운다(drain). 공유된 클러스터에서 이 튜토리얼을 진행한다면, +다른 테넌트에 부정적인 영향을 비치지 않음을 보증해야 한다. 
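아래는 이 섹션에서 사용하는 노드 통제(cordon)와 비우기(drain) 절차를 보여주는 최소한의 스케치이다. `kubernetes-node-example`이라는 노드 이름은 설명을 위한 가정이다.

```shell
# 노드를 통제하여 새로운 파드가 스케줄되지 않도록 한다
kubectl cordon kubernetes-node-example

# 노드를 비워서 기존 파드를 다른 노드로 축출한다
kubectl drain kubernetes-node-example --ignore-daemonsets

# 유지보수가 끝나면 통제를 해제하여 다시 스케줄 가능하게 한다
kubectl uncordon kubernetes-node-example
```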
이전 섹션은 계획되지 않은 노드 실패에서 살아남도록 어떻게 파드를 확산할 것인가에 대해 알아보았다. @@ -1008,6 +1009,7 @@ zk-1 0/1 Pending 0 0s ```shell kubectl drain $(kubectl get pod zk-2 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-local-data ``` + ``` node "kubernetes-node-i4c4" cordoned @@ -1051,6 +1053,7 @@ numChildren = 0 ```shell kubectl uncordon kubernetes-node-pb41 ``` + ``` node "kubernetes-node-pb41" uncordoned ``` @@ -1060,6 +1063,7 @@ node "kubernetes-node-pb41" uncordoned ```shell kubectl get pods -w -l app=zk ``` + ``` NAME READY STATUS RESTARTS AGE zk-0 1/1 Running 2 1h @@ -1125,7 +1129,6 @@ drain으로 노드를 통제하고 유지보수를 위해 노드를 오프라인 - `kubectl uncordon`은 클러스터 내에 모든 노드를 통제 해제한다. -- 이 튜토리얼에서 사용한 퍼시스턴트 볼륨을 위한 - 퍼시스턴트 스토리지 미디어를 삭제하자. +- 반드시 이 튜토리얼에서 사용한 퍼시스턴트 볼륨을 위한 퍼시스턴트 스토리지 미디어를 삭제하자. 귀하의 환경과 스토리지 구성과 프로비저닝 방법에서 필요한 절차를 따라서 모든 스토리지가 재확보되도록 하자. diff --git a/content/ko/docs/tutorials/stateless-application/expose-external-ip-address.md b/content/ko/docs/tutorials/stateless-application/expose-external-ip-address.md index a8a2c0653a430..2b8ffff7ff1a2 100644 --- a/content/ko/docs/tutorials/stateless-application/expose-external-ip-address.md +++ b/content/ko/docs/tutorials/stateless-application/expose-external-ip-address.md @@ -11,7 +11,7 @@ weight: 10 ## {{% heading "prerequisites" %}} - * [kubectl](/ko/docs/tasks/tools/install-kubectl/)을 설치한다. + * [kubectl](/ko/docs/tasks/tools/)을 설치한다. * Google Kubernetes Engine 또는 Amazon Web Services와 같은 클라우드 공급자를 사용하여 쿠버네티스 클러스터를 생성한다. 이 튜토리얼은 [외부 로드 밸런서](/docs/tasks/access-application-cluster/create-external-load-balancer/)를 생성하는데, diff --git a/content/ko/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk.md b/content/ko/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk.md deleted file mode 100644 index faf5fd530354f..0000000000000 --- a/content/ko/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk.md +++ /dev/null @@ -1,457 +0,0 @@ ---- -title: "예제: PHP / Redis 방명록 예제에 로깅과 메트릭 추가" -content_type: tutorial -weight: 21 -card: - name: tutorials - weight: 31 - title: "예제: PHP / Redis 방명록 예제에 로깅과 메트릭 추가" ---- - - -이 튜토리얼은 [Redis를 이용한 PHP 방명록](/ko/docs/tutorials/stateless-application/guestbook) 튜토리얼을 기반으로 한다. Elastic의 경량 로그, 메트릭, 네트워크 데이터 오픈소스 배송기인 *Beats* 를 방명록과 동일한 쿠버네티스 클러스터에 배포한다. Beats는 데이터를 수집하고 구문분석하여 Elasticsearch에 색인화하므로, Kibana에서 동작 정보를 결과로 보며 분석할 수 있다. 이 예시는 다음과 같이 구성되어 있다. - -* [Redis를 이용한 PHP 방명록](/ko/docs/tutorials/stateless-application/guestbook)을 실행한 인스턴스 -* Elasticsearch와 Kibana -* Filebeat -* Metricbeat -* Packetbeat - -## {{% heading "objectives" %}} - -* Redis를 이용한 PHP 방명록 시작. -* kube-state-metrics 설치. -* 쿠버네티스 시크릿 생성. -* Beats 배포. -* 로그와 메트릭의 대시보드 보기. - -## {{% heading "prerequisites" %}} - - -{{< include "task-tutorial-prereqs.md" >}} -{{< version-check >}} - -추가로 다음이 필요하다. - -* 실행 중인 [Redis를 이용한 PHP 방명록](/ko/docs/tutorials/stateless-application/guestbook) 튜토리얼의 배포본. - -* 실행 중인 Elasticsearch와 Kibana 디플로이먼트. [Elastic Cloud의 Elasticsearch 서비스](https://cloud.elastic.co)를 사용하거나, - [파일을 내려받아](https://www.elastic.co/guide/en/elastic-stack-get-started/current/get-started-elastic-stack.html) - 워크스테이션이나 서버에서 운영하거나, [Elastic의 Helm 차트](https://github.com/elastic/helm-charts)를 이용한다. - - - - - -## Redis를 이용한 PHP 방명록 시작 - -이 튜토리얼은 [Redis를 이용한 PHP 방명록](/ko/docs/tutorials/stateless-application/guestbook)을 기반으로 한다. 방명록 애플리케이션을 실행 중이라면, 이를 모니터링할 수 있다. 실행되지 않은 경우라면 지침을 따라 방명록을 배포하고 **정리하기** 단계는 수행하지 말자. 방명록을 실행할 때 이 페이지로 돌아오자. 
- -## 클러스터 롤 바인딩 추가 - -[클러스터 단위 롤 바인딩](/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding)을 생성하여, 클러스터 수준(kube-system 안에)으로 kube-state-metrics와 Beats를 배포할 수 있게 한다. - -```shell -kubectl create clusterrolebinding cluster-admin-binding \ - --clusterrole=cluster-admin --user= -``` - -## kube-state-metrics 설치 - -[*kube-state-metrics*](https://github.com/kubernetes/kube-state-metrics)는 쿠버네티스 API 서버를 모니터링하며 오브젝트 상태에 대한 메트릭을 생성하는 간단한 서비스이다. 이런 메트릭을 Metricbeat이 보고한다. 방명록이 실행된 쿠버네티스 클러스터에서 kube-state-metrics을 추가한다. - -```shell -git clone https://github.com/kubernetes/kube-state-metrics.git kube-state-metrics -kubectl apply -f kube-state-metrics/examples/standard -``` - -### kube-state-metrics 실행 여부 확인 - -```shell -kubectl get pods --namespace=kube-system -l app.kubernetes.io/name=kube-state-metrics -``` - -출력 - -``` -NAME READY STATUS RESTARTS AGE -kube-state-metrics-89d656bf8-vdthm 1/1 Running 0 21s -``` - -## Elastic의 예제를 GitHub 리포지터리에 클론한다. - -```shell -git clone https://github.com/elastic/examples.git -``` - -나머지 커맨드는 `examples/beats-k8s-send-anywhere` 디렉터리의 파일을 참조할 것이라서, 그쪽으로 현재 디렉터리를 변경한다. - -```shell -cd examples/beats-k8s-send-anywhere -``` - -## 쿠버네티스 시크릿 만들기 - -쿠버네티스 {{< glossary_tooltip text="시크릿" term_id="secret" >}}은 암호나 토큰, 키 같이 소량의 민감한 데이터를 포함하는 오브젝트이다. 이러한 정보는 다른 방식으로도 파드 스펙이나 이미지에 넣을 수 있을 것이다. 시크릿 오브젝트에 넣으면 이것이 어떻게 사용되는지 다양하게 제어할 수 있고, 우발적인 노출 사고의 위험이 줄일 수 있다. - -{{< note >}} -여기에는 방식이 나뉘는데, 하나는 *자체 관리(Self managed)* 로 Elasticsearch와 Kibana(Elastic의 Helm 차트를 이용하여 사용자 서버를 구동하는)를 사용하는 경우이고 다른 경우는 Elastic Cloud의 Elasticsearch 서비스의 *관리 서비스(Managed service)* 를 사용하는 방식이다. 이 튜토리얼에서는 사용할 Elasticsearch와 Kibana 시스템의 종류에 따라 시크릿을 만들어야 한다. -{{< /note >}} - -{{< tabs name="tab_with_md" >}} -{{% tab name="자체 관리(Self Managed)" %}} - -### 자체 관리 - -Elastic Cloud의 Elasticsearch 서비스로 연결한다면 **관리 서비스** 탭으로 전환한다. - -### 자격증명(credentials) 설정 - -자체 관리 Elasticsearch와 Kibana(자체 관리는 사실상 Elastic Cloud의 관리 서비스 Elasticsearch와 다르다) 서비스에 접속할 때에 4개 파일을 수정하여 쿠버네티스 시크릿을 생성한다. 파일은 다음과 같다. - -1. `ELASTICSEARCH_HOSTS` -1. `ELASTICSEARCH_PASSWORD` -1. `ELASTICSEARCH_USERNAME` -1. `KIBANA_HOST` - -이 정보를 Elasticsearch 클러스터와 Kibana 호스트에 지정한다. 여기 예시(또는 [*이 구성*](https://stackoverflow.com/questions/59892896/how-to-connect-from-minikube-to-elasticsearch-installed-on-host-local-developme/59892897#59892897)을 본다)가 있다. - -#### `ELASTICSEARCH_HOSTS` - -1. Elastic의 Elasticsearch Helm 차트에서 노드 그룹(nodeGroup). - - ``` - ["http://elasticsearch-master.default.svc.cluster.local:9200"] - ``` - -1. Mac을 위한 Docker에서 Beats를 운영 중인 Mac에서 운영하는 단일 Elasticsearch 노드. - - ``` - ["http://host.docker.internal:9200"] - ``` - -1. VM이나 물리 장비에서 운영 중인 두 개의 ELASTICSEARCH 노드. - - ``` - ["http://host1.example.com:9200", "http://host2.example.com:9200"] - ``` - -`ELASTICSEARCH_HOSTS` 를 수정한다. - -```shell -vi ELASTICSEARCH_HOSTS -``` - -#### `ELASTICSEARCH_PASSWORD` -화이트 스페이스나 인용 부호나 `<` 또는 `>` 도 없는 암호이다. - -``` -<사용자시크릿암호> -``` - -`ELASTICSEARCH_PASSWORD` 를 수정한다. - -```shell -vi ELASTICSEARCH_PASSWORD -``` - -#### `ELASTICSEARCH_USERNAME` -화이트 스페이스나 인용 부호나 `<` 또는 `>` 도 없는 이름이다. - -``` - -``` - -`ELASTICSEARCH_USERNAME` 을 수정한다. - -```shell -vi ELASTICSEARCH_USERNAME -``` - -#### `KIBANA_HOST` - -1.Elastic의 Kibana Helm 차트의 인스턴스이다. 하위 도메인 `default`는 기본 네임스페이스를 참조한다. 다른 네임스페이스를 사용하여 Helm 차트를 배포한 경우 하위 도메인이 다릅니다. - - ``` - "kibana-kibana.default.svc.cluster.local:5601" - ``` - -1. Mac 용 Docker에서 실행하는 Beats가 있는 Mac에서 실행하는 Kibana 인스턴스 - - ``` - "host.docker.internal:5601" - ``` -1. 
가상머신이나 물리적 하드웨어에서 실행 중인 두 개의 Elasticsearch 노드 - - ``` - "host1.example.com:5601" - ``` - -`KIBANA_HOST` 를 편집한다. - -```shell -vi KIBANA_HOST -``` - -### 쿠버네티스 시크릿 만들기 - -이 커맨드는 방금 편집한 파일을 기반으로 쿠버네티스의 시스템 수준의 네임스페이스(`kube-system`)에 시크릿을 만든다. - -```shell -kubectl create secret generic dynamic-logging \ - --from-file=./ELASTICSEARCH_HOSTS \ - --from-file=./ELASTICSEARCH_PASSWORD \ - --from-file=./ELASTICSEARCH_USERNAME \ - --from-file=./KIBANA_HOST \ - --namespace=kube-system -``` - -{{% /tab %}} -{{% tab name="관리 서비스(Managed service)" %}} - -## 관리 서비스 - -이 탭은 Elastic Cloud에서 Elasticsearch 서비스 만에 대한 것으로, 이미 자체 관리 Elasticsearch와 Kibana 배포로 시크릿을 생성했다면, [Beats 배포](#deploy-the-beats)를 계속한다. - -### 자격증명(credentials) 설정 - -Elastic Cloud에서 관리되는 Elastic 서비스에 연결할 때, 쿠버네티스 시크릿을 생성하기 위해 편집할 두 파일이 있다. 파일은 다음과 같다. - -1. `ELASTIC_CLOUD_AUTH` -1. `ELASTIC_CLOUD_ID` - -디플로이먼트를 생성할 때에 Elasticsearch 콘솔에서 제공한 정보로 이를 설정한다. 여기 예시들이 있다. - -#### `ELASTIC_CLOUD_ID` - -``` -devk8s:ABC123def456ghi789jkl123mno456pqr789stu123vwx456yza789bcd012efg345hijj678klm901nop345zEwOTJjMTc5YWQ0YzQ5OThlN2U5MjAwYTg4NTIzZQ== -``` - -#### `ELASTIC_CLOUD_AUTH` - -사용자 이름, 콜론(`:`) 및 비밀번호인데, 공백 또는 따옴표는 없다. - -``` -elastic:VFxJJf9Tjwer90wnfTghsn8w -``` - -### 필요 파일 편집하기 - -```shell -vi ELASTIC_CLOUD_ID -vi ELASTIC_CLOUD_AUTH -``` - -### 쿠버네티스 시크릿 생성하기 - -이 커맨드는 방금 편집한 파일을 기반으로 쿠버네티스의 시스템 수준의 네임스페이스(`kube-system`)에 시크릿을 생성한다. - -```shell -kubectl create secret generic dynamic-logging \ - --from-file=./ELASTIC_CLOUD_ID \ - --from-file=./ELASTIC_CLOUD_AUTH \ - --namespace=kube-system -``` - -{{% /tab %}} -{{< /tabs >}} - -## Beats 배포하기 {#deploy-the-beats} - -각 Beat마다 메니페스트 파일을 제공한다. 이 메니페스트 파일은 앞서 생성한 시크릿을 사용하여, Elasticsearch 및 Kibana 서버에 연결하도록 Beats를 구성한다. - -### Filebeat에 대해 - -Filebeat는 쿠버네티스 노드와 해당 노두에서 실행되는 각 파드에서 실행되는 컨테이너의 로그를 수집한다. Filebeat는 {{< glossary_tooltip text="데몬 셋" term_id="daemonset" >}}으로 배포한다. Filebeat는 쿠버네티스 클러스터에서 실행 중인 애플리케이션을 자동 검색할 수 있다. 시작시에 Filebeat는 기존 컨테이너를 검색하고 이에 적절한 구성을 시작하고 새 시작/종료 이벤트를 감시한다. - -아래 내용은 Filebeat가 방명록 애플리케이션과 함께 배포된 Redis 컨테이너에서 Redis 로그를 찾아 구문분석할 수 있게 하는 자동 검색 구성이다. 이 구성은 `filebeat-kubernetes.yaml`파일에 있다. - -```yaml -- condition.contains: - kubernetes.labels.app: redis - config: - - module: redis - log: - input: - type: docker - containers.ids: - - ${data.kubernetes.container.id} - slowlog: - enabled: true - var.hosts: ["${data.host}:${data.port}"] -``` - -이것은 `redis` 컨테이너가 `app` 문자열을 포함하는 레이블로 감지될 때에 Filebeat 모듈 `redis`를 적용하도록 Filebeat를 구성한다. Redis 모듈은 Docker 입력 유형을 사용하여 컨테이너에서 `로그` 스트림을 수집할 수 있다(이 Redis 컨테이너의 STDOUT 스트림과 연관된 쿠버네티스 노드에서 파일 읽기). 또한 이 모듈은 컨테이너 메타 데이터에 제공되는 적절한 파드 호스트와 포트에 연결하여 Redis의 `slowlog` 항목을 수집할 수 있다. - -### Filebeat 배포 - -```shell -kubectl create -f filebeat-kubernetes.yaml -``` - -#### 확인 - -```shell -kubectl get pods -n kube-system -l k8s-app=filebeat-dynamic -``` - -### Metricbeat에 대해 - -Metricbeat 자동 검색은 Filebeat와 같은 방식으로 구성된다. 다음은 Redis 컨테이너에 대한 Metricbeat의 자동 검색 구성이다. 이 구성은 `metricbeat-kubernetes.yaml`에 있다. - -```yaml -- condition.equals: - kubernetes.labels.tier: backend - config: - - module: redis - metricsets: ["info", "keyspace"] - period: 10s - - # Redis hosts - hosts: ["${data.host}:${data.port}"] -``` - -이것은 컨테이너가 `tier` 레이블이 `backend` 문자열과 같은 레이블로 감지될 때에 Metricbeat 모듈 `redis`를 적용하도록 Metricbeat를 구성한다. `redis` 모듈은 컨테이너 메타데이터에 제공되는 적절한 파드 호스트와 포트에 연결하여 컨테이너에서 `info` 및 `keyspace` 메트릭을 수집할 수 있다. 
- -### Metricbeat 배포 - -```shell -kubectl create -f metricbeat-kubernetes.yaml -``` - -#### 확인 - -```shell -kubectl get pods -n kube-system -l k8s-app=metricbeat -``` - -### Packetbeat에 대해 - -Packetbeat 구성은 Filebeat와 Metricbeat와는 다르다. 컨테이너 레이블과 일치시킬 패턴을 지정하지 않고, 구성은 관련 프로토콜 및 포트 번호를 기반으로 한다. 아래는 포트 번호의 하위 집합이다. - -{{< note >}} -비표준 포트로 서비스를 실행했다면 해당 포트를 `filebeat.yaml`에 적절한 유형에 추가하고, Packetbeat 데몬 셋을 삭제하고 생성한다. -{{< /note >}} - -```yaml -packetbeat.interfaces.device: any - -packetbeat.protocols: -- type: dns - ports: [53] - include_authorities: true - include_additionals: true - -- type: http - ports: [80, 8000, 8080, 9200] - -- type: mysql - ports: [3306] - -- type: redis - ports: [6379] - -packetbeat.flows: - timeout: 30s - period: 10s -``` - -#### Packetbeat 배포하기 - -```shell -kubectl create -f packetbeat-kubernetes.yaml -``` - -#### 확인하기 - -```shell -kubectl get pods -n kube-system -l k8s-app=packetbeat-dynamic -``` - -## Kibana에서 보기 - -브라우저에서 Kibana를 열고, **대시보드** 애플리케이션을 열어보자. 검색창에 kubernetes를 입력하고 쿠버네티스를 위한 Metricbeat 대시보드를 클릭한다. 이 대시보드에는 노드 상태, 배포 등의 보고 내용이 있다. - -대시보드 페이지에 Packetbeat를 검색하고 Packetbeat의 개요 페이지를 살펴보자. - -마찬가지로 Apache와 Redis를 위한 대시보드를 확인한다. 각 로그와 메트릭에 대한 대시보드가 표시된다. 이 Apache Metricbeat 대시보드는 비어 있다. Apache Filebeat 대시보드를 보고, 맨 아래로 스크롤하여 Apache 오류 로그를 확인한다. Apache에서 보여줄 메트릭이 없는 이유를 알려줄 것이다. - -Metricbeat에서 Apache 메트릭을 가져올 수 있게 하려면, mod-status 구성 파일을 포함한 컨피그맵을 추가하고 방명록을 재배포하여 서버 상태를 활성화해야 한다. - - -## 디플로이먼트를 확장하고 모니터링중인 새 파드를 확인하기 - -기존 디플로이먼트를 확인한다. - -```shell -kubectl get deployments -``` - -출력 - -``` -NAME READY UP-TO-DATE AVAILABLE AGE -frontend 3/3 3 3 3h27m -redis-master 1/1 1 1 3h27m -redis-slave 2/2 2 2 3h27m -``` - -front의 디플로이먼트를 두 개의 파드로 축소한다. - -```shell -kubectl scale --replicas=2 deployment/frontend -``` - -출력 - -``` -deployment.extensions/frontend scaled -``` - -frontend의 파드를 최대 3개의 파드로 확장한다. - -```shell -kubectl scale --replicas=3 deployment/frontend -``` - -## Kibana에서 변화 확인하기 - -스크린 캡처를 확인하여, 표시된 필터를 추가하고 해당 열을 뷰에 추가한다. ScalingReplicaSet 항목이 표시되고, 여기에서 이벤트 목록의 맨 위에 풀링되는 이미지, 마운트된 볼륨, 파드 시작 등을 보여준다. -![Kibana 디스커버리](https://raw.githubusercontent.com/elastic/examples/master/beats-k8s-send-anywhere/scaling-up.png) - -## {{% heading "cleanup" %}} - -디플로이먼트와 서비스를 삭제하면 실행중인 파드도 삭제된다. 한 커맨드로 여러 개의 리소스를 삭제하기 위해 레이블을 이용한다. - -1. 다음 커맨드를 실행하여 모든 파드, 디플로이먼트, 서비스를 삭제한다. - - ```shell - kubectl delete deployment -l app=redis - kubectl delete service -l app=redis - kubectl delete deployment -l app=guestbook - kubectl delete service -l app=guestbook - kubectl delete -f filebeat-kubernetes.yaml - kubectl delete -f metricbeat-kubernetes.yaml - kubectl delete -f packetbeat-kubernetes.yaml - kubectl delete secret dynamic-logging -n kube-system - ``` - -1. 실행 중인 파드가 없음을 확인하기 위해 파드 목록을 조회한다. - - ```shell - kubectl get pods - ``` - - 출력은 다음과 같아야 한다. - - ``` - No resources found. - ``` - -## {{% heading "whatsnext" %}} - -* [리소스 모니터링 도구](/ko/docs/tasks/debug-application-cluster/resource-usage-monitoring/)를 공부한다. -* [로깅 아키텍처](/ko/docs/concepts/cluster-administration/logging/)를 더 읽어본다. -* [애플리케이션 검사 및 디버깅](/ko/docs/tasks/debug-application-cluster/)을 더 읽어본다. -* [애플리케이션 문제 해결](/ko/docs/tasks/debug-application-cluster/resource-usage-monitoring/)을 더 읽어본다. 
diff --git a/content/ko/docs/tutorials/stateless-application/guestbook.md b/content/ko/docs/tutorials/stateless-application/guestbook.md index cce67800a6d68..1a984319d854b 100644 --- a/content/ko/docs/tutorials/stateless-application/guestbook.md +++ b/content/ko/docs/tutorials/stateless-application/guestbook.md @@ -1,26 +1,25 @@ --- -title: "예시: Redis를 사용한 PHP 방명록 애플리케이션 배포하기" +title: "예시: MongoDB를 사용한 PHP 방명록 애플리케이션 배포하기" + + content_type: tutorial weight: 20 card: name: tutorials weight: 30 - title: "상태를 유지하지 않는 예제: Redis를 사용한 PHP 방명록" + title: "상태를 유지하지 않는 예제: MongoDB를 사용한 PHP 방명록" +min-kubernetes-server-version: v1.14 --- -이 튜토리얼에서는 쿠버네티스와 [Docker](https://www.docker.com/)를 사용하여 간단한 멀티 티어 웹 애플리케이션을 빌드하고 배포하는 방법을 보여준다. 이 예제는 다음과 같은 구성으로 이루어져 있다. +이 튜토리얼에서는 쿠버네티스와 [Docker](https://www.docker.com/)를 사용하여 간단한 _(운영 준비가 아닌)_ 멀티 티어 웹 애플리케이션을 빌드하고 배포하는 방법을 보여준다. 이 예제는 다음과 같은 구성으로 이루어져 있다. -* 방명록을 저장하는 단일 인스턴스 [Redis](https://redis.io/) 마스터 -* 읽기를 제공하는 여러 개의 [복제된 Redis](https://redis.io/topics/replication) 인스턴스 +* 방명록을 저장하는 단일 인스턴스 [MongoDB](https://www.mongodb.com/) * 여러 개의 웹 프론트엔드 인스턴스 - - ## {{% heading "objectives" %}} -* Redis 마스터를 시작 -* Redis 슬레이브를 시작 +* Mongo 데이터베이스를 시작 * 방명록 프론트엔드를 시작 * 프론트엔드 서비스를 노출하고 확인 * 정리 하기 @@ -37,24 +36,30 @@ card: -## Redis 마스터를 실행하기 +## Mongo 데이터베이스를 실행 -방명록 애플리케이션은 Redis를 사용하여 데이터를 저장한다. Redis 마스터 인스턴스에 데이터를 기록하고 여러 Redis 슬레이브 인스턴스에서 데이터를 읽는다. +방명록 애플리케이션은 MongoDB를 사용해서 데이터를 저장한다. -### Redis 마스터의 디플로이먼트를 생성하기 +### Mongo 디플로이먼트를 생성하기 -아래의 매니페스트 파일은 단일 복제본 Redis 마스터 파드를 실행하는 디플로이먼트 컨트롤러를 지정한다. +아래의 매니페스트 파일은 단일 복제본 Mongo 파드를 실행하는 디플로이먼트 컨트롤러를 지정한다. -{{< codenew file="application/guestbook/redis-master-deployment.yaml" >}} +{{< codenew file="application/guestbook/mongo-deployment.yaml" >}} 1. 매니페스트 파일을 다운로드한 디렉터리에서 터미널 창을 시작한다. -1. `redis-master-deployment.yaml` 파일을 통해 Redis 마스터의 디플로이먼트에 적용한다. +1. `mongo-deployment.yaml` 파일을 통해 MongoDB 디플로이먼트에 적용한다. + + ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/redis-master-deployment.yaml + kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-deployment.yaml ``` -1. 파드의 목록을 질의하여 Redis 마스터 파드가 실행 중인지 확인한다. + +1. 파드의 목록을 질의하여 MongoDB 파드가 실행 중인지 확인한다. ```shell kubectl get pods @@ -64,32 +69,34 @@ card: ```shell NAME READY STATUS RESTARTS AGE - redis-master-1068406935-3lswp 1/1 Running 0 28s + mongo-5cfd459dd4-lrcjb 1/1 Running 0 28s ``` -1. Redis 마스터 파드에서 로그를 보려면 다음 명령어를 실행한다. +2. MongoDB 파드에서 로그를 보려면 다음 명령어를 실행한다. ```shell - kubectl logs -f POD-NAME + kubectl logs -f deployment/mongo ``` -{{< note >}} -POD-NAME을 해당 파드 이름으로 수정해야 한다. -{{< /note >}} +### MongoDB 서비스 생성하기 -### Redis 마스터 서비스 생성하기 +방명록 애플리케이션에서 데이터를 쓰려면 MongoDB와 통신해야 한다. MongoDB 파드로 트래픽을 프록시하려면 [서비스](/ko/docs/concepts/services-networking/service/)를 적용해야 한다. 서비스는 파드에 접근하기 위한 정책을 정의한다. -방명록 애플리케이션에서 데이터를 쓰려면 Redis 마스터와 통신해야 한다. Redis 마스터 파드로 트래픽을 프록시하려면 [서비스](/ko/docs/concepts/services-networking/service/)를 적용해야 한다. 서비스는 파드에 접근하기 위한 정책을 정의한다. +{{< codenew file="application/guestbook/mongo-service.yaml" >}} -{{< codenew file="application/guestbook/redis-master-service.yaml" >}} +1. `mongo-service.yaml` 파일을 통해 MongoDB 서비스에 적용한다. -1. `redis-master-service.yaml` 파일을 통해 Redis 마스터 서비스에 적용한다. + ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/redis-master-service.yaml + kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-service.yaml ``` -1. 서비스의 목록을 질의하여 Redis 마스터 서비스가 실행 중인지 확인한다. + +1. 서비스의 목록을 질의하여 MongoDB 서비스가 실행 중인지 확인한다. 
```shell kubectl get service @@ -100,77 +107,17 @@ POD-NAME을 해당 파드 이름으로 수정해야 한다. ```shell NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.0.0.1 443/TCP 1m - redis-master ClusterIP 10.0.0.151 6379/TCP 8s + mongo ClusterIP 10.0.0.151 27017/TCP 8s ``` {{< note >}} -이 매니페스트 파일은 이전에 정의된 레이블과 일치하는 레이블 집합을 가진 `redis-master`라는 서비스를 생성하므로, 서비스는 네트워크 트래픽을 Redis 마스터 파드로 라우팅한다. +이 매니페스트 파일은 이전에 정의된 레이블과 일치하는 레이블 집합을 가진 `mongo`라는 서비스를 생성하므로, 서비스는 네트워크 트래픽을 MongoDB 파드로 라우팅한다. {{< /note >}} -## Redis 슬레이브 실행하기 - -Redis 마스터는 단일 파드이지만, 복제된 Redis 슬레이브를 추가하여 트래픽 요구 사항을 충족시킬 수 있다. - -### Redis 슬레이브의 디플로이먼트 생성하기 - -디플로이먼트는 매니페스트 파일에 설정된 구성에 따라 확장된다. 이 경우, 디플로이먼트 오브젝트는 두 개의 복제본을 지정한다. - -실행 중인 복제본이 없으면, 이 디플로이먼트는 컨테이너 클러스터에 있는 두 개의 복제본을 시작한다. 반대로 두 개 이상의 복제본이 실행 중이면, 두 개의 복제본이 실행될 때까지 축소된다. - -{{< codenew file="application/guestbook/redis-slave-deployment.yaml" >}} - -1. `redis-slave-deployment.yaml` 파일을 통해 Redis 슬레이브의 디플로이먼트에 적용한다. - - ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/redis-slave-deployment.yaml - ``` - -1. 파드의 목록을 질의하여 Redis 슬레이브 파드가 실행 중인지 확인한다. - - ```shell - kubectl get pods - ``` - - 결과는 아래와 같은 형태로 나타난다. - - ```shell - NAME READY STATUS RESTARTS AGE - redis-master-1068406935-3lswp 1/1 Running 0 1m - redis-slave-2005841000-fpvqc 0/1 ContainerCreating 0 6s - redis-slave-2005841000-phfv9 0/1 ContainerCreating 0 6s - ``` - -### Redis 슬레이브 서비스 생성하기 - -방명록 애플리케이션은 Redis 슬레이브와 통신하여 데이터를 읽는다. Redis 슬레이브를 확인할 수 있도록 하기 위해 서비스를 설정해야 한다. 서비스는 파드 집합에 투명한 로드 밸런싱을 제공한다. - -{{< codenew file="application/guestbook/redis-slave-service.yaml" >}} - -1. `redis-slave-service.yaml` 파일을 통해 Redis 슬레이브 서비스에 적용한다. - - ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/redis-slave-service.yaml - ``` - -1. 서비스의 목록을 질의하여 Redis 슬레이브 서비스가 실행 중인지 확인한다. - - ```shell - kubectl get services - ``` - - 결과는 아래와 같은 형태로 나타난다. - - ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - kubernetes ClusterIP 10.0.0.1 443/TCP 2m - redis-master ClusterIP 10.0.0.151 6379/TCP 1m - redis-slave ClusterIP 10.0.0.223 6379/TCP 6s - ``` - ## 방명록 프론트엔드를 설정하고 노출하기 -방명록 애플리케이션에는 PHP로 작성된 HTTP 요청을 처리하는 웹 프론트엔드가 있다. 쓰기 요청을 위한 `redis-master` 서비스와 읽기 요청을 위한 `redis-slave` 서비스에 연결하도록 설정된다. +방명록 애플리케이션에는 PHP로 작성된 HTTP 요청을 처리하는 웹 프론트엔드가 있다. 방명록 항목들을 저장하기 위해 `mongo` 서비스에 연결하도록 구성 한다. ### 방명록 프론트엔드의 디플로이먼트 생성하기 @@ -178,14 +125,20 @@ Redis 마스터는 단일 파드이지만, 복제된 Redis 슬레이브를 추 1. `frontend-deployment.yaml` 파일을 통해 프론트엔드의 디플로이먼트에 적용한다. + + ```shell kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-deployment.yaml ``` + 1. 파드의 목록을 질의하여 세 개의 프론트엔드 복제본이 실행되고 있는지 확인한다. ```shell - kubectl get pods -l app=guestbook -l tier=frontend + kubectl get pods -l app.kubernetes.io/name=guestbook -l app.kubernetes.io/component=frontend ``` 결과는 아래와 같은 형태로 나타난다. @@ -199,22 +152,28 @@ Redis 마스터는 단일 파드이지만, 복제된 Redis 슬레이브를 추 ### 프론트엔드 서비스 생성하기 -서비스의 기본 유형은 [ClusterIP](/ko/docs/concepts/services-networking/service/#publishing-services-service-types)이기 때문에 적용한 redis-slave 및 redis-master 서비스는 컨테이너 클러스터 내에서만 접근할 수 있다. `ClusterIP`는 서비스가 가리키는 파드 집합에 대한 단일 IP 주소를 제공한다. 이 IP 주소는 클러스터 내에서만 접근할 수 있다. +서비스의 기본 유형은 [ClusterIP](/ko/docs/concepts/services-networking/service/#publishing-services-service-types)이기 때문에 적용한 `mongo` 서비스는 컨테이너 클러스터 내에서만 접근할 수 있다. `ClusterIP`는 서비스가 가리키는 파드 집합에 대한 단일 IP 주소를 제공한다. 이 IP 주소는 클러스터 내에서만 접근할 수 있다. -게스트가 방명록에 접근할 수 있도록 하려면, 외부에서 볼 수 있도록 프론트엔드 서비스를 구성해야 한다. 그렇게 하면 클라이언트가 컨테이너 클러스터 외부에서 서비스를 요청할 수 있다. Minikube는 `NodePort`를 통해서만 서비스를 노출할 수 있다. 
+게스트가 방명록에 접근할 수 있도록 하려면, 외부에서 볼 수 있도록 프론트엔드 서비스를 구성해야 한다. 그렇게 하면 클라이언트가 쿠버네티스 클러스터 외부에서 서비스를 요청할 수 있다. 그러나 쿠버네티스 사용자는 `ClusterIP`를 사용하더라도 `kubectl port-forward`를 사용해서 서비스에 접근할 수 있다. {{< note >}} -Google Compute Engine 또는 Google Kubernetes Engine과 같은 일부 클라우드 공급자는 외부 로드 밸런서를 지원한다. 클라우드 공급자가 로드 밸런서를 지원하고 이를 사용하려면 `type : NodePort`를 삭제하거나 주석 처리하고 `type : LoadBalancer`의 주석을 제거해야 한다. +Google Compute Engine 또는 Google Kubernetes Engine과 같은 일부 클라우드 공급자는 외부 로드 밸런서를 지원한다. 클라우드 공급자가 로드 밸런서를 지원하고 이를 사용하려면 `type: LoadBalancer`의 주석을 제거해야 한다. {{< /note >}} {{< codenew file="application/guestbook/frontend-service.yaml" >}} 1. `frontend-service.yaml` 파일을 통해 프론트엔드 서비스에 적용시킨다. + + ```shell kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-service.yaml ``` + 1. 서비스의 목록을 질의하여 프론트엔드 서비스가 실행 중인지 확인한다. ```shell @@ -225,29 +184,27 @@ Google Compute Engine 또는 Google Kubernetes Engine과 같은 일부 클라우 ``` NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - frontend NodePort 10.0.0.112 80:31323/TCP 6s + frontend ClusterIP 10.0.0.112 80/TCP 6s kubernetes ClusterIP 10.0.0.1 443/TCP 4m - redis-master ClusterIP 10.0.0.151 6379/TCP 2m - redis-slave ClusterIP 10.0.0.223 6379/TCP 1m + mongo ClusterIP 10.0.0.151 27017/TCP 2m ``` -### `NodePort`를 통해 프론트엔드 서비스 확인하기 +### `kubectl port-forward`를 통해 프론트엔드 서비스 확인하기 -애플리케이션을 Minikube 또는 로컬 클러스터에 배포한 경우, 방명록을 보려면 IP 주소를 찾아야 한다. - -1. 프론트엔드 서비스의 IP 주소를 얻기 위해 아래 명령어를 실행한다. +1. 다음 명령어를 실행해서 로컬 머신의 `8080` 포트를 서비스의 `80` 포트로 전달한다. ```shell - minikube service frontend --url + kubectl port-forward svc/frontend 8080:80 ``` 결과는 아래와 같은 형태로 나타난다. ``` - http://192.168.99.100:31323 + Forwarding from 127.0.0.1:8080 -> 80 + Forwarding from [::1]:8080 -> 80 ``` -1. IP 주소를 복사하고, 방명록을 보기 위해 브라우저에서 페이지를 로드한다. +1. 방명록을 보기 위해 브라우저에서 [http://localhost:8080](http://localhost:8080) 페이지를 로드한다. ### `LoadBalancer`를 통해 프론트엔드 서비스 확인하기 @@ -262,15 +219,15 @@ Google Compute Engine 또는 Google Kubernetes Engine과 같은 일부 클라우 결과는 아래와 같은 형태로 나타난다. ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - frontend ClusterIP 10.51.242.136 109.197.92.229 80:32372/TCP 1m + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + frontend LoadBalancer 10.51.242.136 109.197.92.229 80:32372/TCP 1m ``` 1. IP 주소를 복사하고, 방명록을 보기 위해 브라우저에서 페이지를 로드한다. ## 웹 프론트엔드 확장하기 -서버가 디플로이먼트 컨트롤러를 사용하는 서비스로 정의되어 있기 때문에 확장 또는 축소가 쉽다. +서버가 디플로이먼트 컨트롤러를 사용하는 서비스로 정의되어 있기에 필요에 따라 확장 또는 축소할 수 있다. 1. 프론트엔드 파드의 수를 확장하기 위해 아래 명령어를 실행한다. @@ -293,9 +250,7 @@ Google Compute Engine 또는 Google Kubernetes Engine과 같은 일부 클라우 frontend-3823415956-k22zn 1/1 Running 0 54m frontend-3823415956-w9gbt 1/1 Running 0 54m frontend-3823415956-x2pld 1/1 Running 0 5s - redis-master-1068406935-3lswp 1/1 Running 0 56m - redis-slave-2005841000-fpvqc 1/1 Running 0 55m - redis-slave-2005841000-phfv9 1/1 Running 0 55m + mongo-5cfd459dd4-lrcjb 1/1 Running 0 56m ``` 1. 프론트엔드 파드의 수를 축소하기 위해 아래 명령어를 실행한다. @@ -316,9 +271,7 @@ Google Compute Engine 또는 Google Kubernetes Engine과 같은 일부 클라우 NAME READY STATUS RESTARTS AGE frontend-3823415956-k22zn 1/1 Running 0 1h frontend-3823415956-w9gbt 1/1 Running 0 1h - redis-master-1068406935-3lswp 1/1 Running 0 1h - redis-slave-2005841000-fpvqc 1/1 Running 0 1h - redis-slave-2005841000-phfv9 1/1 Running 0 1h + mongo-5cfd459dd4-lrcjb 1/1 Running 0 1h ``` @@ -330,20 +283,18 @@ Google Compute Engine 또는 Google Kubernetes Engine과 같은 일부 클라우 1. 모든 파드, 디플로이먼트, 서비스를 삭제하기 위해 아래 명령어를 실행한다.
```shell - kubectl delete deployment -l app=redis - kubectl delete service -l app=redis - kubectl delete deployment -l app=guestbook - kubectl delete service -l app=guestbook + kubectl delete deployment -l app.kubernetes.io/name=mongo + kubectl delete service -l app.kubernetes.io/name=mongo + kubectl delete deployment -l app.kubernetes.io/name=guestbook + kubectl delete service -l app.kubernetes.io/name=guestbook ``` 결과는 아래와 같은 형태로 나타난다. ``` - deployment.apps "redis-master" deleted - deployment.apps "redis-slave" deleted - service "redis-master" deleted - service "redis-slave" deleted - deployment.apps "frontend" deleted + deployment.apps "mongo" deleted + service "mongo" deleted + deployment.apps "frontend" deleted service "frontend" deleted ``` @@ -363,7 +314,6 @@ Google Compute Engine 또는 Google Kubernetes Engine과 같은 일부 클라우 ## {{% heading "whatsnext" %}} -* [ELK 로깅과 모니터링](/ko/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk/)을 방명록 애플리케이션에 추가하기 * [쿠버네티스 기초](/ko/docs/tutorials/kubernetes-basics/) 튜토리얼을 완료 * [MySQL과 Wordpress을 위한 퍼시스턴트 볼륨](/ko/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/#visit-your-new-wordpress-blog)을 사용하여 블로그 생성하는데 쿠버네티스 이용하기 * [애플리케이션 접속](/ko/docs/concepts/services-networking/connect-applications-service/)에 대해 더 알아보기 diff --git a/content/ko/examples/application/guestbook/frontend-deployment.yaml b/content/ko/examples/application/guestbook/frontend-deployment.yaml index 23d64be6442cc..613c654aa97b3 100644 --- a/content/ko/examples/application/guestbook/frontend-deployment.yaml +++ b/content/ko/examples/application/guestbook/frontend-deployment.yaml @@ -3,22 +3,24 @@ kind: Deployment metadata: name: frontend labels: - app: guestbook + app.kubernetes.io/name: guestbook + app.kubernetes.io/component: frontend spec: selector: matchLabels: - app: guestbook - tier: frontend + app.kubernetes.io/name: guestbook + app.kubernetes.io/component: frontend replicas: 3 template: metadata: labels: - app: guestbook - tier: frontend + app.kubernetes.io/name: guestbook + app.kubernetes.io/component: frontend spec: containers: - - name: php-redis - image: gcr.io/google-samples/gb-frontend:v4 + - name: guestbook + image: paulczar/gb-frontend:v5 + # image: gcr.io/google-samples/gb-frontend:v4 resources: requests: cpu: 100m @@ -26,13 +28,5 @@ spec: env: - name: GET_HOSTS_FROM value: dns - # Using `GET_HOSTS_FROM=dns` requires your cluster to - # provide a dns service. As of Kubernetes 1.3, DNS is a built-in - # service launched automatically. However, if the cluster you are using - # does not have a built-in DNS service, you can instead - # access an environment variable to find the master - # service's host. 
To do so, comment out the 'value: dns' line above, and - # uncomment the line below: - # value: env ports: - containerPort: 80 diff --git a/content/ko/examples/application/guestbook/frontend-service.yaml b/content/ko/examples/application/guestbook/frontend-service.yaml index 6f283f347b93f..34ad3771d755f 100644 --- a/content/ko/examples/application/guestbook/frontend-service.yaml +++ b/content/ko/examples/application/guestbook/frontend-service.yaml @@ -3,16 +3,14 @@ kind: Service metadata: name: frontend labels: - app: guestbook - tier: frontend + app.kubernetes.io/name: guestbook + app.kubernetes.io/component: frontend spec: - # comment or delete the following line if you want to use a LoadBalancer - type: NodePort # if your cluster supports it, uncomment the following to automatically create # an external load-balanced IP for the frontend service. # type: LoadBalancer ports: - port: 80 selector: - app: guestbook - tier: frontend + app.kubernetes.io/name: guestbook + app.kubernetes.io/component: frontend diff --git a/content/ko/examples/application/guestbook/mongo-deployment.yaml b/content/ko/examples/application/guestbook/mongo-deployment.yaml new file mode 100644 index 0000000000000..04908ce25b1dc --- /dev/null +++ b/content/ko/examples/application/guestbook/mongo-deployment.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mongo + labels: + app.kubernetes.io/name: mongo + app.kubernetes.io/component: backend +spec: + selector: + matchLabels: + app.kubernetes.io/name: mongo + app.kubernetes.io/component: backend + replicas: 1 + template: + metadata: + labels: + app.kubernetes.io/name: mongo + app.kubernetes.io/component: backend + spec: + containers: + - name: mongo + image: mongo:4.2 + args: + - --bind_ip + - 0.0.0.0 + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 27017 diff --git a/content/ko/examples/application/guestbook/mongo-service.yaml b/content/ko/examples/application/guestbook/mongo-service.yaml new file mode 100644 index 0000000000000..b9cef607bcf79 --- /dev/null +++ b/content/ko/examples/application/guestbook/mongo-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: mongo + labels: + app.kubernetes.io/name: mongo + app.kubernetes.io/component: backend +spec: + ports: + - port: 27017 + targetPort: 27017 + selector: + app.kubernetes.io/name: mongo + app.kubernetes.io/component: backend diff --git a/content/ko/examples/application/guestbook/redis-master-deployment.yaml b/content/ko/examples/application/guestbook/redis-master-deployment.yaml deleted file mode 100644 index 478216d1accfa..0000000000000 --- a/content/ko/examples/application/guestbook/redis-master-deployment.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: redis-master - labels: - app: redis -spec: - selector: - matchLabels: - app: redis - role: master - tier: backend - replicas: 1 - template: - metadata: - labels: - app: redis - role: master - tier: backend - spec: - containers: - - name: master - image: k8s.gcr.io/redis:e2e # or just image: redis - resources: - requests: - cpu: 100m - memory: 100Mi - ports: - - containerPort: 6379 diff --git a/content/ko/examples/application/guestbook/redis-master-service.yaml b/content/ko/examples/application/guestbook/redis-master-service.yaml deleted file mode 100644 index 65cef2191c493..0000000000000 --- a/content/ko/examples/application/guestbook/redis-master-service.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: 
- name: redis-master - labels: - app: redis - role: master - tier: backend -spec: - ports: - - name: redis - port: 6379 - targetPort: 6379 - selector: - app: redis - role: master - tier: backend diff --git a/content/ko/examples/application/guestbook/redis-slave-deployment.yaml b/content/ko/examples/application/guestbook/redis-slave-deployment.yaml deleted file mode 100644 index 1a7b04386a4a5..0000000000000 --- a/content/ko/examples/application/guestbook/redis-slave-deployment.yaml +++ /dev/null @@ -1,40 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: redis-slave - labels: - app: redis -spec: - selector: - matchLabels: - app: redis - role: slave - tier: backend - replicas: 2 - template: - metadata: - labels: - app: redis - role: slave - tier: backend - spec: - containers: - - name: slave - image: gcr.io/google_samples/gb-redisslave:v3 - resources: - requests: - cpu: 100m - memory: 100Mi - env: - - name: GET_HOSTS_FROM - value: dns - # Using `GET_HOSTS_FROM=dns` requires your cluster to - # provide a dns service. As of Kubernetes 1.3, DNS is a built-in - # service launched automatically. However, if the cluster you are using - # does not have a built-in DNS service, you can instead - # access an environment variable to find the master - # service's host. To do so, comment out the 'value: dns' line above, and - # uncomment the line below: - # value: env - ports: - - containerPort: 6379 diff --git a/content/ko/examples/application/guestbook/redis-slave-service.yaml b/content/ko/examples/application/guestbook/redis-slave-service.yaml deleted file mode 100644 index 238fd63fb6a29..0000000000000 --- a/content/ko/examples/application/guestbook/redis-slave-service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: redis-slave - labels: - app: redis - role: slave - tier: backend -spec: - ports: - - port: 6379 - selector: - app: redis - role: slave - tier: backend diff --git a/content/ko/examples/application/job/cronjob.yaml b/content/ko/examples/application/job/cronjob.yaml index 3ca130289e27f..da905a9048c57 100644 --- a/content/ko/examples/application/job/cronjob.yaml +++ b/content/ko/examples/application/job/cronjob.yaml @@ -1,4 +1,4 @@ -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: hello @@ -12,7 +12,7 @@ spec: - name: hello image: busybox imagePullPolicy: IfNotPresent - args: + command: - /bin/sh - -c - date; echo Hello from the Kubernetes cluster diff --git a/content/ko/examples/application/zookeeper/zookeeper.yaml b/content/ko/examples/application/zookeeper/zookeeper.yaml index a858a72613d5f..4d893b369bde4 100644 --- a/content/ko/examples/application/zookeeper/zookeeper.yaml +++ b/content/ko/examples/application/zookeeper/zookeeper.yaml @@ -27,7 +27,7 @@ spec: selector: app: zk --- -apiVersion: policy/v1beta1 +apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: zk-pdb diff --git a/content/ko/examples/pods/config/example-redis-config.yaml b/content/ko/examples/pods/config/example-redis-config.yaml new file mode 100644 index 0000000000000..5b093b12136b8 --- /dev/null +++ b/content/ko/examples/pods/config/example-redis-config.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-redis-config +data: + redis-config: | + maxmemory 2mb + maxmemory-policy allkeys-lru diff --git a/content/ko/examples/policy/priority-class-resourcequota.yaml b/content/ko/examples/policy/priority-class-resourcequota.yaml new file mode 100644 index 0000000000000..7350d00c8f397 --- /dev/null +++ 
b/content/ko/examples/policy/priority-class-resourcequota.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ResourceQuota +metadata: + name: pods-cluster-services +spec: + scopeSelector: + matchExpressions: + - operator : In + scopeName: PriorityClass + values: ["cluster-services"] \ No newline at end of file diff --git a/content/ko/examples/service/networking/namespaced-params.yaml b/content/ko/examples/service/networking/namespaced-params.yaml new file mode 100644 index 0000000000000..dd567247874f4 --- /dev/null +++ b/content/ko/examples/service/networking/namespaced-params.yaml @@ -0,0 +1,12 @@ +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + name: external-lb +spec: + controller: example.com/ingress-controller + parameters: + apiGroup: k8s.example.com + kind: IngressParameters + name: external-lb + namespace: external-configuration + scope: Namespace diff --git a/content/pl/docs/concepts/overview/kubernetes-api.md b/content/pl/docs/concepts/overview/kubernetes-api.md index cd376dab1dcd9..731b22cacc156 100644 --- a/content/pl/docs/concepts/overview/kubernetes-api.md +++ b/content/pl/docs/concepts/overview/kubernetes-api.md @@ -4,7 +4,7 @@ content_type: concept weight: 30 description: > API Kubernetesa służy do odpytywania i zmiany stanu obiektów Kubernetesa. - Sercem warstwy sterowania Kubernetesa jest serwer API i udostępniane przez niego HTTP API. Przez ten serwer odbywa się komunikacja pomiędzy użytkownikami, różnymi częściami składowymi klastra oraz komponentami zewnętrznymi. + Sercem warstwy sterowania Kubernetesa jest serwer API i udostępniane po HTTP API. Przez ten serwer odbywa się komunikacja pomiędzy użytkownikami, różnymi częściami składowymi klastra oraz komponentami zewnętrznymi. card: name: concepts weight: 30 @@ -14,13 +14,16 @@ card: Sercem {{< glossary_tooltip text="warstwy sterowania" term_id="control-plane" >}} Kubernetes jest {{< glossary_tooltip text="serwer API" term_id="kube-apiserver" >}}. Serwer udostępnia -API poprzez HTTP, umożliwiając wzajemną komunikację pomiędzy użytkownikami, częściami składowymi klastra i komponentami zewnętrznymi. +API poprzez HTTP, umożliwiając wzajemną komunikację pomiędzy użytkownikami, częściami składowymi klastra +i komponentami zewnętrznymi. -API Kubernetes pozwala na sprawdzanie i zmianę stanu obiektów (przykładowo: pody, _Namespaces_, _ConfigMaps_, _Events_). +API Kubernetesa pozwala na sprawdzanie i zmianę stanu obiektów +(przykładowo: pody, _Namespaces_, _ConfigMaps_, _Events_). Większość operacji może zostać wykonana poprzez interfejs linii komend (CLI) [kubectl](/docs/reference/kubectl/overview/) lub inne -programy, takie jak [kubeadm](/docs/reference/setup-tools/kubeadm/), które używają +programy, takie jak +[kubeadm](/docs/reference/setup-tools/kubeadm/), które używają API. Możesz też korzystać z API bezpośrednio przez wywołania typu REST. Jeśli piszesz aplikację używającą API Kubernetesa, @@ -66,54 +69,77 @@ Aby wybrać format odpowiedzi, użyj nagłówków żądania zgodnie z tabelą: -W Kubernetesie zaimplementowany jest alternatywny format serializacji na potrzeby API oparty o Protobuf, -który jest przede wszystkim przeznaczony na potrzeby wewnętrznej komunikacji w klastrze -i opisany w [design proposal](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/protobuf.md). -Pliki IDL dla każdego ze schematów można znaleźć w pakietach Go, które definiują obiekty API.
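To make the content-negotiation discussion above concrete, here is a minimal sketch of requesting the same resource in different serializations over HTTP; it assumes a working kubeconfig and uses `kubectl proxy` so no manual authentication is needed:

```shell
# Open an authenticated local proxy to the API server.
kubectl proxy --port=8001 &

# Request the default JSON serialization of the Pod list.
curl -H 'Accept: application/json' http://localhost:8001/api/v1/namespaces/default/pods

# Request the Protobuf serialization instead (binary output).
curl -H 'Accept: application/vnd.kubernetes.protobuf' http://localhost:8001/api/v1/namespaces/default/pods
```
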
+W Kubernetesie zaimplementowany jest alternatywny format serializacji na potrzeby API oparty o +Protobuf, który jest przede wszystkim przeznaczony na potrzeby wewnętrznej komunikacji w klastrze. +Więcej szczegółów znajduje się w dokumencie [Kubernetes Protobuf serialization](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/protobuf.md) +oraz w plikach *Interface Definition Language* (IDL) dla każdego ze schematów +zamieszczonych w pakietach Go, które definiują obiekty API. -## Zmiany API +## Przechowywanie stanu -Z naszego doświadczenia wynika, że każdy system, który odniósł sukces, musi się nieustająco rozwijać w miarę zmieniających się potrzeb. -Dlatego Kubernetes został tak zaprojektowany, aby API mogło się zmieniać i rozrastać. -Projekt Kubernetes dąży do tego, aby nie wprowadzać zmian niezgodnych z istniejącymi aplikacjami klienckimi -i utrzymywać zgodność przez wystarczająco długi czas, aby inne projekty zdążyły się dostosować do zmian. - -W ogólności, nowe zasoby i pola definiujące zasoby API są dodawane stosunkowo często. Usuwanie zasobów lub pól -jest regulowane przez [API deprecation policy](/docs/reference/using-api/deprecation-policy/). -Definicja zmiany zgodnej (kompatybilnej) oraz metody wprowadzania zmian w API opisano w szczegółach -w [API change document](https://git.k8s.io/community/contributors/devel/sig-architecture/api_changes.md). +Kubernetes przechowuje serializowany stan swoich obiektów w +{{< glossary_tooltip term_id="etcd" >}}. ## Grupy i wersje API Aby ułatwić usuwanie poszczególnych pól lub restrukturyzację reprezentacji zasobów, Kubernetes obsługuje -równocześnie wiele wersji API, każde poprzez osobną ścieżkę API, na przykład: `/api/v1` lub -`/apis/rbac.authorization.k8s.io/v1alpha1`. +równocześnie wiele wersji API, każde poprzez osobną ścieżkę API, +na przykład: `/api/v1` lub `/apis/rbac.authorization.k8s.io/v1alpha1`. -Rozdział wersji wprowadzony jest na poziomie całego API, a nie na poziomach poszczególnych zasobów lub pól, aby być pewnym, -że API odzwierciedla w sposób przejrzysty i spójny zasoby systemowe i ich zachowania i pozwala -na kontrolowany dostęp do tych API, które są w fazie wycofywania lub fazie eksperymentalnej. +Rozdział wersji wprowadzony jest na poziomie całego API, a nie na poziomach poszczególnych zasobów lub pól, +aby być pewnym, że API odzwierciedla w sposób przejrzysty i spójny zasoby systemowe +i ich zachowania oraz pozwala na kontrolowany dostęp do tych API, które są w fazie wycofywania +lub fazie eksperymentalnej. -Aby ułatwić rozbudowę API Kubernetes, wprowadziliśmy [*grupy API*](https://git.k8s.io/community/contributors/design-proposals/api-machinery/api-group.md), -które mogą być [włączane i wyłączane](/docs/reference/using-api/#enabling-or-disabling). +Aby ułatwić rozbudowę API Kubernetes, wprowadziliśmy +[*grupy API*](https://git.k8s.io/community/contributors/design-proposals/api-machinery/api-group.md), które mogą +być [włączane i wyłączane](/docs/reference/using-api/#enabling-or-disabling). Zasoby API są rozróżniane poprzez przynależność do grupy API, typ zasobu, przestrzeń nazw (_namespace_, -o ile ma zastosowanie) oraz nazwę. Serwer API może obsługiwać -te same dane poprzez różne wersje API i przeprowadzać konwersję między -różnymi wersjami API w sposób niewidoczny dla użytkownika. Wszystkie te różne wersje -reprezentują w rzeczywistości ten sam zasób. Załóżmy przykładowo, że istnieją dwie -wersje `v1` i `v1beta1` tego samego zasobu.
Obiekt utworzony przez -wersję `v1beta1` może być odczytany, zaktualizowany i skasowany zarówno przez wersję +o ile ma zastosowanie) oraz nazwę. Serwer API może przeprowadzać konwersję między +różnymi wersjami API w sposób niewidoczny dla użytkownika: wszystkie te różne wersje +reprezentują w rzeczywistości ten sam zasób. Serwer API może udostępniać te same dane +poprzez kilka różnych wersji API. + +Załóżmy przykładowo, że istnieją dwie wersje `v1` i `v1beta1` tego samego zasobu. +Obiekt utworzony przez wersję `v1beta1` może być odczytany, +zaktualizowany i skasowany zarówno przez wersję `v1beta1`, jak i `v1`. +## Trwałość API + +Z naszego doświadczenia wynika, że każdy system, który odniósł sukces, musi się nieustająco rozwijać w miarę zmieniających się potrzeb. +Dlatego Kubernetes został tak zaprojektowany, aby API mogło się zmieniać i rozrastać. +Projekt Kubernetes dąży do tego, aby nie wprowadzać zmian niezgodnych z istniejącymi aplikacjami klienckimi +i utrzymywać zgodność przez wystarczająco długi czas, aby inne projekty zdążyły się dostosować do zmian. + +W ogólności, nowe zasoby i pola definiujące zasoby API są dodawane stosunkowo często. +Usuwanie zasobów lub pól jest regulowane przez +[API deprecation policy](/docs/reference/using-api/deprecation-policy/). + +Po osiągnięciu przez API statusu ogólnej dostępności (_general availability_ - GA), +oznaczanej zazwyczaj jako wersja API `v1`, bardzo zależy nam na utrzymaniu jej zgodności w kolejnych wydaniach. +Kubernetes utrzymuje także zgodność dla wersji _beta_ API tam, gdzie jest to możliwe: +jeśli zdecydowałeś się używać API w wersji beta, możesz z niego korzystać także później, +kiedy dana funkcjonalność osiągnie status stabilnej. + +{{< note >}} +Mimo że Kubernetes stara się także zachować zgodność dla API w wersji _alpha_, zdarzają się przypadki, +kiedy nie jest to możliwe. Jeśli korzystasz z API w wersji alfa, przed aktualizacją klastra do nowej wersji +zalecamy sprawdzenie w informacjach o wydaniu, czy nie nastąpiła jakaś zmiana w tej części API. +{{< /note >}} + Zajrzyj do [API versions reference](/docs/reference/using-api/#api-versioning) -po szczegółowe informacje, jak definiuje się poziomy wersji API. +po szczegółowe definicje różnych poziomów wersji API. + + ## Rozbudowa API -API Kubernetesa można rozbudowywać (rozszerzać) na dwa sposoby: +API Kubernetesa można rozszerzać na dwa sposoby: -1. [Definicje zasobów własnych](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) - pozwalają deklaratywnie określać, jak serwer API powinien dostarczać wybrane zasoby API. +1. [Definicje zasobów własnych (_custom resources_)](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) + pozwalają deklaratywnie określać, jak serwer API powinien dostarczać wybrane przez Ciebie zasoby API. 1. Można także rozszerzać API Kubernetesa implementując [warstwę agregacji](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/). @@ -121,6 +147,9 @@ API Kubernetesa można rozbudowywać (rozszerzać) na dwa sposoby: - Naucz się, jak rozbudowywać API Kubernetesa poprzez dodawanie własnych [CustomResourceDefinition](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/). -- [Controlling API Access](/docs/reference/access-authn-authz/controlling-access/) opisuje +- [Controlling Access To The Kubernetes API](/docs/concepts/security/controlling-access/) opisuje sposoby, jakimi klaster zarządza dostępem do API.
-- Punkty dostępowe API _(endpoints)_, typy zasobów i przykłady zamieszczono w [API Reference](/docs/reference/kubernetes-api/). +- Punkty dostępowe API _(endpoints)_, typy zasobów i przykłady zamieszczono w + [API Reference](/docs/reference/kubernetes-api/). +- Aby dowiedzieć się, jaki rodzaj zmian można określić jako zgodne i jak zmieniać API, zajrzyj do + [API changes](https://git.k8s.io/community/contributors/devel/sig-architecture/api_changes.md#readme). diff --git a/content/pl/docs/concepts/overview/what-is-kubernetes.md b/content/pl/docs/concepts/overview/what-is-kubernetes.md index db8ea18b7081c..d28c84155316c 100644 --- a/content/pl/docs/concepts/overview/what-is-kubernetes.md +++ b/content/pl/docs/concepts/overview/what-is-kubernetes.md @@ -42,7 +42,7 @@ Kontenery działają w sposób zbliżony do maszyn wirtualnych, ale mają mniejs Kontenery zyskały popularność ze względu na swoje zalety, takie jak: * Szybkość i elastyczność w tworzeniu i instalacji aplikacji: obraz kontenera buduje się łatwiej niż obraz VM. -* Ułatwienie ciągłego rozwoju, integracji oraz wdrażania aplikacji (*Continuous development, integration, and deployment*): obrazy kontenerów mogą być budowane w sposób wiarygodny i częsty. Wycofanie zmian jest łatwe i szybkie (ponieważ obrazy są niezmienne). +* Ułatwienie ciągłego rozwoju, integracji oraz wdrażania aplikacji (*Continuous development, integration, and deployment*): obrazy kontenerów mogą być budowane w sposób wiarygodny i częsty. Wycofywanie zmian jest skuteczne i szybkie (ponieważ obrazy są niezmienne). * Rozdzielenie zadań *Dev* i *Ops*: obrazy kontenerów powstają w fazie *build/release*, oddzielając w ten sposób aplikacje od infrastruktury. * Obserwowalność obejmuje nie tylko informacje i metryki z poziomu systemu operacyjnego, ale także poprawność działania samej aplikacji i inne sygnały. * Spójność środowiska na etapach rozwoju oprogramowania, testowania i działania w trybie produkcyjnym: działa w ten sam sposób na laptopie i w chmurze. diff --git a/content/pl/docs/reference/_index.md b/content/pl/docs/reference/_index.md index bfc120218cf45..fd4ab2bff3bb3 100644 --- a/content/pl/docs/reference/_index.md +++ b/content/pl/docs/reference/_index.md @@ -8,13 +8,14 @@ content_type: concept -Tutaj znajdziesz dokumentację źródłową Kubernetes. +Tutaj znajdziesz dokumentację źródłową Kubernetesa. ## Dokumentacja API -* [Dokumentacja źródłowa API Kubernetesa {{< latest-version >}}](/docs/reference/generated/kubernetes-api/{{< latest-version >}}/) +* [Kubernetes API Reference](/docs/reference/kubernetes-api/) +* [One-page API Reference for Kubernetes {{< param "version" >}}](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) * [Using The Kubernetes API](/docs/reference/using-api/) - ogólne informacje na temat API Kubernetesa. ## Biblioteki klientów API @@ -32,7 +33,7 @@ biblioteki to: * [kubectl](/docs/reference/kubectl/overview/) - Główne narzędzie tekstowe (linii poleceń) do zarządzania klastrem Kubernetes. * [JSONPath](/docs/reference/kubectl/jsonpath/) - Podręcznik składni [wyrażeń JSONPath](https://goessner.net/articles/JsonPath/) dla kubectl. -* [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) - Narzędzie tekstowe do łatwego budowania klastra Kubernetes spełniającego niezbędne wymogi bezpieczeństwa. +* [kubeadm](/docs/reference/setup-tools/kubeadm/) - Narzędzie tekstowe do łatwego budowania klastra Kubernetes spełniającego niezbędne wymogi bezpieczeństwa. 
## Dokumentacja komponentów diff --git a/content/pl/docs/reference/glossary/kube-apiserver.md b/content/pl/docs/reference/glossary/kube-apiserver.md index 14b58a9ba6d9f..12620387d008b 100755 --- a/content/pl/docs/reference/glossary/kube-apiserver.md +++ b/content/pl/docs/reference/glossary/kube-apiserver.md @@ -4,7 +4,7 @@ id: kube-apiserver date: 2018-04-12 full_link: /docs/concepts/overview/components/#kube-apiserver short_description: > - Składnik warstwy sterowania udostępniający API Kubernetes. + Składnik warstwy sterowania udostępniający API Kubernetesa. aka: - kube-apiserver @@ -12,13 +12,12 @@ tags: - architecture - fundamental --- - Składnik *master* udostępniający API Kubernetes. Służy jako *front-end* dla warstwy sterowania Kubernetes. - Serwer API jest składnikiem -{{< glossary_tooltip text="warstwy sterowania" term_id="control-plane" >}} Kubernetes, który udostępnia API. +Serwer API jest składnikiem +{{< glossary_tooltip text="warstwy sterowania" term_id="control-plane" >}} Kubernetesa, który udostępnia API. Server API służy jako front-end warstwy sterowania Kubernetes. -Podstawowa implementacją serwera API Kubernetes jest [kube-apiserver](/docs/reference/generated/kube-apiserver/). +Podstawową implementacją serwera API Kubernetesa jest [kube-apiserver](/docs/reference/generated/kube-apiserver/). kube-apiserver został zaprojektowany w taki sposób, aby móc skalować się horyzontalnie — to oznacza, że zwiększa swoją wydajność poprzez dodawanie kolejnych instancji. Można uruchomić kilka instancji kube-apiserver i rozkładać między nimi ruch od klientów. diff --git a/content/pl/docs/reference/tools.md b/content/pl/docs/reference/tools.md index 5d60370ee323d..2ec66964ed70a 100644 --- a/content/pl/docs/reference/tools.md +++ b/content/pl/docs/reference/tools.md @@ -18,7 +18,7 @@ Kubernetes zawiera różne wbudowane narzędzia służące do pracy z systemem: ## Minikube -[`minikube`](https://minikube.sigs.k8s.io/docs/) to narzędzie do łatwego uruchamiania lokalnego klastra Kubernetes na twojej stacji roboczej na potrzeby rozwoju oprogramowania lub prowadzenia testów. +[`minikube`](https://minikube.sigs.k8s.io/docs/) to narzędzie do uruchamiania jednowęzłowego klastra Kubernetes na twojej stacji roboczej na potrzeby rozwoju oprogramowania lub prowadzenia testów. ## Pulpit *(Dashboard)* diff --git a/content/pl/docs/tutorials/_index.md b/content/pl/docs/tutorials/_index.md index e9f8ed32d83e0..c55fd9c3ff458 100644 --- a/content/pl/docs/tutorials/_index.md +++ b/content/pl/docs/tutorials/_index.md @@ -32,7 +32,7 @@ Przed zapoznaniem się z samouczkami warto stworzyć zakładkę do * [Exposing an External IP Address to Access an Application in a Cluster](/docs/tutorials/stateless-application/expose-external-ip-address/) -* [Example: Deploying PHP Guestbook application with Redis](/docs/tutorials/stateless-application/guestbook/) +* [Example: Deploying PHP Guestbook application with MongoDB](/docs/tutorials/stateless-application/guestbook/) ## Aplikacje stanowe *(Stateful Applications)* diff --git a/content/pl/docs/tutorials/kubernetes-basics/_index.html b/content/pl/docs/tutorials/kubernetes-basics/_index.html index 39d8bf63c9df5..e27a3ad6bfa7a 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/_index.html +++ b/content/pl/docs/tutorials/kubernetes-basics/_index.html @@ -41,7 +41,7 @@

    Podstawy Kubernetesa

    Co Kubernetes może dla Ciebie zrobić?

    -

    Użytkownicy oczekują od współczesnych serwisów internetowych dostępności non-stop, a deweloperzy chcą móc instalować nowe wersje swoich serwisów kilka razy dziennie. Używając kontenerów można przygotowywać oprogramowanie w taki sposób, aby mogło być instalowane i aktualizowane łatwo i nie powodując żadnych przestojów. Kubernetes pomaga uruchamiać te aplikacje w kontenerach tam, gdzie chcesz i kiedy chcesz i znajdować niezbędne zasoby i narzędzia wymagane do ich pracy. Kubernetes może działać w środowiskach produkcyjnych, jest otwartym oprogramowaniem zaprojektowanym z wykorzystaniem nagromadzonego przez Google doświadczenia w zarządzaniu kontenerami, w połączeniu z najcenniejszymi ideami społeczności.

    +

    Użytkownicy oczekują od współczesnych serwisów internetowych dostępności non-stop, a deweloperzy chcą móc instalować nowe wersje swoich serwisów kilka razy dziennie. Używając kontenerów, można przygotowywać oprogramowanie w taki sposób, aby mogło być instalowane i aktualizowane, nie powodując żadnych przestojów. Kubernetes pomaga uruchamiać te aplikacje w kontenerach tam, gdzie chcesz i kiedy chcesz, oraz znajdować niezbędne zasoby i narzędzia wymagane do ich pracy. Kubernetes może działać w środowiskach produkcyjnych, jest otwartym oprogramowaniem zaprojektowanym z wykorzystaniem nagromadzonego przez Google doświadczenia w zarządzaniu kontenerami, w połączeniu z najcenniejszymi ideami społeczności.

    diff --git a/content/pl/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html b/content/pl/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html index 5b90420c5a712..c879aa82b9cb0 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html +++ b/content/pl/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html @@ -91,9 +91,7 @@

    Instalacja pierwszej aplikacji w Kubernetes

    - Na potrzeby pierwszej instalacji użyjesz aplikacji na Node.js zapakowaną w kontener Docker-a. (Jeśli jeszcze nie próbowałeś stworzyć - aplikacji na Node.js i uruchomić za pomocą kontenerów, możesz spróbować teraz, kierując się instrukcjami samouczka - Hello Minikube). + Na potrzeby pierwszej instalacji użyjesz aplikacji hello-node zapakowanej w kontener Dockera, która korzysta z NGINX-a i powtarza wszystkie wysłane do niej zapytania. (Jeśli jeszcze nie próbowałeś stworzyć aplikacji hello-node i uruchomić jej za pomocą kontenerów, możesz spróbować teraz, kierując się instrukcjami samouczka Hello Minikube).

    Teraz, kiedy wiesz, czym są Deploymenty, przejdźmy do samouczka online, żeby zainstalować naszą pierwszą aplikację!

    diff --git a/content/pl/docs/tutorials/kubernetes-basics/expose/expose-intro.html b/content/pl/docs/tutorials/kubernetes-basics/expose/expose-intro.html index 4ad9a7be76fab..f9f9134e4a8ae 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/expose/expose-intro.html +++ b/content/pl/docs/tutorials/kubernetes-basics/expose/expose-intro.html @@ -64,12 +64,6 @@

    Serwisy i Etykiety (Labels)

    -
    -
    -

    -
    -
    -

    Serwis kieruje przychodzący ruch do grupy Podów. Serwisy są obiektami abstrakcyjnymi, dzięki którym pody mogą się psuć i być zastępowane przez Kubernetes nowymi bez ujemnego wpływu na działanie twoich aplikacji. Detekcją nowych podów i kierowaniem ruchu pomiędzy zależnymi podami (takimi, jak składowe front-end i back-end w aplikacji) zajmują się Serwisy Kubernetes.
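    To ground this in a concrete object (an illustrative sketch — the `app: my-frontend` label and Service name are hypothetical): the label selector is the only link between a Service and the Pods it routes to, which is what lets Kubernetes replace failing Pods with new ones without clients noticing.

```shell
# A Service that load-balances TCP port 80 across every Pod labeled app=my-frontend.
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: my-frontend
spec:
  selector:
    app: my-frontend
  ports:
  - port: 80
    targetPort: 80
EOF
```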

    diff --git a/content/pt/OWNERS b/content/pt-br/OWNERS similarity index 100% rename from content/pt/OWNERS rename to content/pt-br/OWNERS diff --git a/content/pt/_common-resources/index.md b/content/pt-br/_common-resources/index.md similarity index 100% rename from content/pt/_common-resources/index.md rename to content/pt-br/_common-resources/index.md diff --git a/content/pt/_index.html b/content/pt-br/_index.html similarity index 97% rename from content/pt/_index.html rename to content/pt-br/_index.html index 9721bcdd3748d..628047e85ce2f 100644 --- a/content/pt/_index.html +++ b/content/pt-br/_index.html @@ -47,7 +47,7 @@

    Os desafios da migração de mais de 150 micro serviços para o Kubernetes

- KubeCon em Shanghai em June 24-26, 2019 + KubeCon em Shanghai, de 24 a 26 de junho de 2019

    @@ -57,4 +57,4 @@

Os desafios da migração de mais de 150 micro serviços para o Kubernetes}} -{{< blocks/case-studies >}} \ No newline at end of file +{{< blocks/case-studies >}} diff --git a/content/pt/blog/_index.md b/content/pt-br/blog/_index.md similarity index 100% rename from content/pt/blog/_index.md rename to content/pt-br/blog/_index.md diff --git a/content/pt-br/blog/_posts/2020-09-02-scaling-kubernetes-networking-endpointslices.md b/content/pt-br/blog/_posts/2020-09-02-scaling-kubernetes-networking-endpointslices.md new file mode 100644 index 0000000000000..7440689d5f75d --- /dev/null +++ b/content/pt-br/blog/_posts/2020-09-02-scaling-kubernetes-networking-endpointslices.md @@ -0,0 +1,47 @@ +--- +layout: blog +title: 'Escalando a rede do Kubernetes com EndpointSlices' +date: 2020-09-02 +slug: scaling-kubernetes-networking-with-endpointslices +--- + +**Autor:** Rob Scott (Google) + +EndpointSlices é um novo tipo de API que provê uma alternativa escalável e extensível à API de Endpoints. EndpointSlices mantém o rastreio dos endereços IP, portas, informações de topologia e prontidão de Pods que compõem um serviço. + +No Kubernetes 1.19 essa funcionalidade está habilitada por padrão, com o kube-proxy lendo os [EndpointSlices](/docs/concepts/services-networking/endpoint-slices/) ao invés de Endpoints. Apesar de isso ser uma mudança praticamente transparente, resulta numa melhoria notável de escalabilidade em grandes clusters. Também permite a adição de novas funcionalidades em releases futuras do Kubernetes, como o [Roteamento baseado em topologia](/docs/concepts/services-networking/service-topology/). + +## Limitações de escalabilidade da API de Endpoints +Na API de Endpoints, existia apenas um recurso de Endpoint por serviço (Service). Isso significa que +era necessário armazenar os endereços IP e portas de cada Pod que compunha o serviço correspondente. Isso resultava em recursos imensos de API. Para piorar, o kube-proxy rodava em cada um dos nós e observava qualquer alteração nos recursos de Endpoint. Mesmo que fosse uma simples mudança em um Endpoint, todo o objeto precisava ser enviado para cada uma das instâncias do kube-proxy. + +Outra limitação da API de Endpoints era que ela limitava o número de objetos que podiam ser associados a um _Service_. O tamanho padrão de um objeto armazenado no etcd é 1.5MB. Em alguns casos, isso poderia limitar um Endpoint a 5.000 IPs de Pod. Isso não chega a ser um problema para a maioria dos usuários, mas torna-se um problema significativo para serviços que se aproximem desse tamanho. + +Para demonstrar o quão significativo se torna esse problema em grande escala, vamos usar um exemplo simples: Imagine um _Service_ que possua 5.000 Pods, fazendo com que o recurso de Endpoints correspondente tenha 1.5MB. Se apenas um Endpoint nessa lista sofrer uma alteração, todo o objeto de Endpoint precisará ser redistribuído para cada um dos nós do cluster. Em um cluster com 3.000 nós, essa atualização causará o envio de 4.5GB de dados (1.5MB de Endpoints * 3.000 nós) para todo o cluster. Isso é quase o suficiente para encher um DVD, e acontecerá para cada mudança de Endpoint. Agora imagine uma atualização gradual em um _Deployment_ que resulte na substituição dos 5.000 Pods - isso é mais que 22TB (ou 5.000 DVDs) de dados transferidos. + +## Dividindo os endpoints com a API de EndpointSlice +A API de EndpointSlice foi desenhada para resolver esse problema com um modelo similar de _sharding_.
Ao invés de rastrear todos os IPs dos Pods para um _Service_, com um único recurso de Endpoint, nós os dividimos em múltiplos EndpointSlices menores. + +Usemos por exemplo um serviço com 15 pods. Nós teríamos um único recurso de Endpoints referente a todos eles. Se o EndpointSlices for configurado para armazenar 5 _endpoints_ cada, nós teríamos 3 EndpointSlices diferentes: +![EndpointSlices](/images/blog/2020-09-02-scaling-kubernetes-networking-endpointslices/endpoint-slices.png) + +Por padrão, o EndpointSlices armazena um máximo de 100 _endpoints_ cada, podendo isso ser configurado com a flag `--max-endpoints-per-slice` no kube-controller-manager. + +## EndpointSlices provê uma melhoria de escalabilidade em 10x +Essa API melhora dramaticamente a escalabilidade da rede. Agora quando um Pod é adicionado ou removido, apenas 1 pequeno EndpointSlice necessita ser atualizado. Essa diferença começa a ser notada quando centenas ou milhares de Pods compõem um único _Service_. + +Mais significativo, agora que todos os IPs de Pods para um _Service_ não precisam ser armazenados em um único recurso, nós não precisamos nos preocupar com o limite de tamanho para objetos armazenados no etcd. EndpointSlices já foram utilizados para escalar um serviço além de 100.000 endpoints de rede. + +Tudo isso é possível com uma melhoria significativa de performance feita no kube-proxy. Quando o EndpointSlices é usado em grande escala, muito menos dados serão transferidos para as atualizações de endpoints e o kube-proxy torna-se mais rápido para atualizar regras do iptables ou do ipvs. Além disso, os _Services_ podem agora escalar pelo menos 10x além dos limites anteriores. + +## EndpointSlices permitem novas funcionalidades +Introduzido como uma funcionalidade alpha no Kubernetes v1.16, os EndpointSlices foram construídos para permitir algumas novas funcionalidades arrebatadoras em futuras versões do Kubernetes. Isso inclui serviços dual-stack, roteamento baseado em topologia e subconjuntos de _endpoints_. + +Serviços Dual-stack são uma nova funcionalidade que foi desenvolvida juntamente com o EndpointSlices. Eles irão utilizar simultaneamente endereços IPv4 e IPv6 para serviços, e dependem do campo addressType do EndpointSlices para conter esses novos tipos de endereço por família de IP. + +O roteamento baseado em topologia irá atualizar o kube-proxy para dar preferência no roteamento de requisições para a mesma região ou zona, utilizando-se de campos de topologia armazenados em cada endpoint dentro de um EndpointSlice. Como uma melhoria futura disso, estamos explorando o potencial de subconjuntos de endpoint. Isso irá permitir que o kube-proxy observe apenas um subconjunto de EndpointSlices. Por exemplo, isso pode ser combinado com o roteamento baseado em topologia e assim, o kube-proxy precisará observar apenas EndpointSlices contendo _endpoints_ na mesma zona. Isso irá permitir uma outra melhoria significativa de escalabilidade. + +## O que isso significa para a API de Endpoints? +Apesar da API de EndpointSlice prover uma alternativa nova e escalável à API de Endpoints, a API de Endpoints continuará a ser considerada uma funcionalidade estável. A mudança mais significativa para a API de Endpoints envolve começar a truncar Endpoints que podem causar problemas de escalabilidade. + +A API de Endpoints não será removida, mas muitas novas funcionalidades irão depender da nova API EndpointSlice.
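As an aside (the service name below is hypothetical), you can inspect the slices behind any Service directly, since each EndpointSlice carries a well-known label pointing back to its Service:

```shell
# List the EndpointSlices that back a given Service.
kubectl get endpointslices -l kubernetes.io/service-name=my-service
```
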
Para obter vantagem da funcionalidade e escalabilidade que os EndpointSlices proveem, aplicações que hoje consomem a API de Endpoints devem considerar suportar EndpointSlices no futuro. diff --git a/content/pt-br/blog/_posts/2020-12-02-dont-panic-kubernetes-and-docker.md b/content/pt-br/blog/_posts/2020-12-02-dont-panic-kubernetes-and-docker.md new file mode 100644 index 0000000000000..ada16762a00fb --- /dev/null +++ b/content/pt-br/blog/_posts/2020-12-02-dont-panic-kubernetes-and-docker.md @@ -0,0 +1,45 @@ +--- +layout: blog +title: "Não entre em pânico: Kubernetes e Docker" +date: 2020-12-02 +slug: dont-panic-kubernetes-and-docker +--- + +**Autores / Autoras**: Jorge Castro, Duffie Cooley, Kat Cosgrove, Justin Garrison, Noah Kantrowitz, Bob Killen, Rey Lejano, Dan “POP” Papandrea, Jeffrey Sica, Davanum “Dims” Srinivas + +**Tradução:** João Brito + +Kubernetes está [deixando de usar Docker](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.20.md#deprecation) como seu agente de execução após a versão v1.20. + +**Não entre em pânico. Não é tão dramático quanto parece.** + +TL;DR Docker como um agente de execução primário está sendo deixado de lado em favor de agentes de execução que utilizam a Interface de Agente de Execução de Containers (Container Runtime Interface "CRI") criada para o Kubernetes. As imagens criadas com o Docker continuarão a funcionar em seu cluster com os agentes atuais, como sempre funcionaram. + +Se você é um usuário final de Kubernetes, quase nada mudará para você. Isso não significa a morte do Docker, e isso não significa que você não pode, ou não deva, usar ferramentas Docker em desenvolvimento mais. Docker ainda é uma ferramenta útil para a construção de containers, e as imagens resultantes de executar `docker build` ainda rodarão em seu cluster Kubernetes. + +Se você está usando um Kubernetes gerenciado como GKE, EKS, ou AKS (que usa [containerd como padrão](https://github.com/Azure/AKS/releases/tag/2020-11-16)) você precisará ter certeza que seus nós estão usando um agente de execução de container suportado antes que o suporte ao Docker seja removido nas versões futuras do Kubernetes. Se você tem mudanças em seus nós, talvez você precise atualizá-los baseado em seu ambiente e necessidades do agente de execução. + +Se você está rodando seus próprios clusters, você também precisa fazer mudanças para evitar quebras em seu cluster. Na versão v1.20, você terá o aviso de alerta da perda de suporte ao Docker. Quando o suporte ao agente de execução do Docker for removido em uma versão futura (atualmente planejado para a versão 1.22 no final de 2021) do Kubernetes ele não será mais suportado e você precisará trocar para um dos outros agentes de execução de container compatível, como o containerd ou CRI-O. Mas tenha certeza que esse agente de execução escolhido tenha suporte às configurações do daemon do Docker usadas atualmente (Ex.: logs). + +## Então por que a confusão e toda essa turma surtando? + +Estamos falando aqui de dois ambientes diferentes, e isso está criando essa confusão. Dentro do seu cluster Kubernetes, existe uma coisa chamada de agente de execução de container que é responsável por baixar e executar as imagens de seu container. Docker é a escolha popular para esse agente de execução (outras escolhas comuns incluem containerd e CRI-O), mas Docker não foi projetado para ser embutido no Kubernetes, e isso causa problemas.
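One practical check worth knowing here (a sketch; the exact output format varies by cluster and version): you can see which container runtime each node is using today, which tells you whether this change affects your cluster at all.

```shell
# The CONTAINER-RUNTIME column shows, e.g., docker://19.3.x or containerd://1.4.x.
kubectl get nodes -o wide
```
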
Se liga, o que chamamos de "Docker" não é exatamente uma coisa - é uma stack tecnológica inteira, e uma parte disso é chamado de "containerd", que é o agente de execução de container de alto-nível por si só. Docker é legal e útil porque ele possui muitas melhorias de experiência do usuário e isso o torna realmente fácil para humanos interagirem com ele enquanto estão desenvolvendo, mas essas melhorias para o usuário não são necessárias para o Kubernetes, pois ele não é humano. + +Como resultado dessa camada de abstração amigável aos humanos, seu cluster Kubernetes precisa usar outra ferramenta chamada Dockershim para ter o que ele realmente precisa, que é o containerd. Isso não é muito bom, porque adiciona outra coisa a ser mantida e que pode quebrar. O que está atualmente acontecendo aqui é que o Dockershim está sendo removido do Kubelet assim que a versão v1.23 for lançada, que remove o suporte ao Docker como agente de execução de container como resultado. Você deve estar pensando, mas se o containerd está incluso na stack do Docker, por que o Kubernetes precisa do Dockershim? + +Docker não é compatível com CRI, a [Container Runtime Interface](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes/) (interface do agente de execução de container). Se fosse, nós não precisaríamos do shim, e isso não seria nenhum problema. Mas isso não é o fim do mundo, e você não precisa entrar em pânico - você só precisa mudar seu agente de execução de container do Docker para um outro suportado. + +Uma coisa a ser notada: Se você está contando com o socket do Docker (`/var/run/docker.sock`) como parte do seu fluxo de trabalho em seu cluster hoje, mover para um agente de execução diferente acaba com sua habilidade de usá-lo. Esse modelo é conhecido como Docker em Docker. Existem diversas opções por aí para esse caso específico como o [kaniko](https://github.com/GoogleContainerTools/kaniko), [img](https://github.com/genuinetools/img), e [buildah](https://github.com/containers/buildah). + +## O que essa mudança representa para os desenvolvedores? Ainda escrevemos Dockerfiles? Ainda vamos fazer build com Docker? + +Essa mudança aborda um ambiente diferente do que a maioria das pessoas usa para interagir com Docker. A instalação do Docker que você está usando em desenvolvimento não tem relação com o agente de execução de Docker dentro de seu cluster Kubernetes. É confuso, dá pra entender. +Como desenvolvedor, Docker ainda é útil para você em todas as formas que era antes dessa mudança ser anunciada. A imagem que o Docker cria não é uma imagem específica para Docker e sim uma imagem que segue o padrão OCI ([Open Container Initiative](https://opencontainers.org/)). + +Qualquer imagem compatível com OCI, independente da ferramenta usada para construí-la será vista da mesma forma pelo Kubernetes. Ambos [containerd](https://containerd.io/) e [CRI-O](https://cri-o.io/) sabem como baixar e executá-las. Esse é o porquê de termos um padrão para containers. + +Então, essa mudança está chegando. Isso irá causar problemas para alguns, mas nada catastrófico, no geral é uma boa coisa. Dependendo de como você interage com o Kubernetes, isso tornará as coisas mais fáceis. Se isso ainda é confuso para você, tudo bem, tem muita coisa rolando aqui; Kubernetes tem um monte de partes móveis, e ninguém é 100% especialista nisso. Nós encorajamos toda e qualquer tipo de questão independente do nível de experiência ou de complexidade!
Nosso objetivo é ter certeza que todos estão entendendo o máximo possível as mudanças que estão chegando. Esperamos que isso tenha respondido a maioria de suas questões e acalmado algumas ansiedades! ❤️ + +Procurando mais respostas? Dê uma olhada em nosso apanhado de [questões quanto ao desuso do Dockershim](/blog/2020/12/02/dockershim-faq/). diff --git a/content/pt/case-studies/_index.md b/content/pt-br/case-studies/_index.md similarity index 100% rename from content/pt/case-studies/_index.md rename to content/pt-br/case-studies/_index.md diff --git a/content/pt/case-studies/chinaunicom/chinaunicom_featured_logo.png b/content/pt-br/case-studies/chinaunicom/chinaunicom_featured_logo.png similarity index 100% rename from content/pt/case-studies/chinaunicom/chinaunicom_featured_logo.png rename to content/pt-br/case-studies/chinaunicom/chinaunicom_featured_logo.png diff --git a/content/pt/case-studies/chinaunicom/index.html b/content/pt-br/case-studies/chinaunicom/index.html similarity index 100% rename from content/pt/case-studies/chinaunicom/index.html rename to content/pt-br/case-studies/chinaunicom/index.html diff --git a/content/pt/community/_index.html b/content/pt-br/community/_index.html similarity index 100% rename from content/pt/community/_index.html rename to content/pt-br/community/_index.html diff --git a/content/pt/community/code-of-conduct.md b/content/pt-br/community/code-of-conduct.md similarity index 100% rename from content/pt/community/code-of-conduct.md rename to content/pt-br/community/code-of-conduct.md diff --git a/content/pt/community/static/README.md b/content/pt-br/community/static/README.md similarity index 100% rename from content/pt/community/static/README.md rename to content/pt-br/community/static/README.md diff --git a/content/pt/community/static/cncf-code-of-conduct.md b/content/pt-br/community/static/cncf-code-of-conduct.md similarity index 100% rename from content/pt/community/static/cncf-code-of-conduct.md rename to content/pt-br/community/static/cncf-code-of-conduct.md diff --git a/content/pt/docs/_index.md b/content/pt-br/docs/_index.md similarity index 100% rename from content/pt/docs/_index.md rename to content/pt-br/docs/_index.md diff --git a/content/pt/docs/_search.md b/content/pt-br/docs/_search.md similarity index 100% rename from content/pt/docs/_search.md rename to content/pt-br/docs/_search.md diff --git a/content/pt/docs/concepts/_index.md b/content/pt-br/docs/concepts/_index.md similarity index 100% rename from content/pt/docs/concepts/_index.md rename to content/pt-br/docs/concepts/_index.md diff --git a/content/pt/docs/concepts/architecture/_index.md b/content/pt-br/docs/concepts/architecture/_index.md similarity index 100% rename from content/pt/docs/concepts/architecture/_index.md rename to content/pt-br/docs/concepts/architecture/_index.md diff --git a/content/pt/docs/concepts/architecture/cloud-controller.md b/content/pt-br/docs/concepts/architecture/cloud-controller.md similarity index 100% rename from content/pt/docs/concepts/architecture/cloud-controller.md rename to content/pt-br/docs/concepts/architecture/cloud-controller.md diff --git a/content/pt/docs/concepts/architecture/master-node-communication.md b/content/pt-br/docs/concepts/architecture/control-plane-node-communication.md similarity index 66% rename from content/pt/docs/concepts/architecture/master-node-communication.md rename to content/pt-br/docs/concepts/architecture/control-plane-node-communication.md index 8cf2ad86c6aab..de69ad8ea8985 100644 --- 
a/content/pt/docs/concepts/architecture/master-node-communication.md +++ b/content/pt-br/docs/concepts/architecture/control-plane-node-communication.md @@ -1,15 +1,12 @@ --- -reviewers: -- dchen1107 -- liggitt -title: Comunicação entre Node e Master +title: Comunicação entre Nó e Control Plane content_type: concept weight: 20 --- -Este documento cataloga os caminhos de comunicação entre o Master (o +Este documento cataloga os caminhos de comunicação entre o control plane (o apiserver) e o cluster Kubernetes. A intenção é permitir que os usuários personalizem sua instalação para proteger a configuração de rede, de modo que o cluster possa ser executado em uma rede não confiável (ou em IPs totalmente públicos em um @@ -20,10 +17,10 @@ provedor de nuvem). -## Cluster para o Master +## Nó para o Control Plane -Todos os caminhos de comunicação do cluster para o Master terminam no -apiserver (nenhum dos outros componentes do Master são projetados para expor +Todos os caminhos de comunicação do cluster para o control plane terminam no +apiserver (nenhum dos outros componentes do control plane são projetados para expor Serviços remotos). Em uma implantação típica, o apiserver é configurado para escutar conexões remotas em uma porta HTTPS segura (443) com uma ou mais formas de [autenticação](/docs/reference/access-authn-authz/authentication/) de cliente habilitadas. Uma ou mais formas de [autorização](/docs/reference/access-authn-authz/authorization/) @@ -41,21 +38,21 @@ para provisionamento automatizado de certificados de cliente kubelet. Os pods que desejam se conectar ao apiserver podem fazê-lo com segurança, aproveitando uma conta de serviço, de forma que o Kubernetes injeta automaticamente o certificado raiz público e um token de portador válido no pod quando ele é instanciado. -O serviço `kubernetes` (em todos os namespaces) é configurado com um IP virtual +O serviço `kubernetes` (no namespace `default`) é configurado com um endereço IP virtual que é redirecionado (via kube-proxy) para o endpoint com HTTPS no apiserver. -Os componentes principais também se comunicam com o apiserver do cluster através da porta segura. +Os componentes do control plane também se comunicam com o apiserver do cluster através da porta segura. Como resultado, o modo de operação padrão para conexões do cluster -(nodes e pods em execução nos Nodes) para o Master é protegido por padrão -e pode passar por redes não confiáveis e/ou públicas. +(nodes e pods em execução nos Nodes) para o control plane é protegido por padrão +e pode passar por redes não confiáveis e/ou públicas. -## Master para o Cluster +## Control Plane para o nó -Existem dois caminhos de comunicação primários do mestre (apiserver) para o -cluster. O primeiro é do apiserver para o processo do kubelet que é executado em -cada Node no cluster. O segundo é do apiserver para qualquer Node, pod, +Existem dois caminhos de comunicação primários do control plane (apiserver) para os nós. +O primeiro é do apiserver para o processo do kubelet que é executado em +cada nó no cluster. O segundo é do apiserver para qualquer nó, pod, +ou serviço através da funcionalidade de proxy do apiserver. ### apiserver para o kubelet @@ -63,8 +60,8 @@ ou serviço através da funcionalidade de proxy do apiserver. As conexões do apiserver ao kubelet são usadas para: * Buscar logs para pods. -  * Anexar (através de kubectl) pods em execução. -  * Fornecer a funcionalidade de encaminhamento de porta do kubelet. + * Anexar (através de kubectl) pods em execução.
+ + Fornecer a funcionalidade de encaminhamento de porta do kubelet. Essas conexões terminam no endpoint HTTPS do kubelet. Por padrão, o apiserver não verifica o certificado de serviço do kubelet, @@ -94,12 +91,18 @@ Estas conexões **não são atualmente seguras** para serem usados por redes nã ### SSH Túnel -O Kubernetes suporta túneis SSH para proteger o Servidor Master -> caminhos de comunicação no cluster. Nesta configuração, o apiserver inicia um túnel SSH para cada nó +O Kubernetes suporta túneis SSH para proteger os caminhos de comunicação do control plane para os nós. Nesta configuração, o apiserver inicia um túnel SSH para cada nó no cluster (conectando ao servidor ssh escutando na porta 22) e passa todo o tráfego destinado a um kubelet, nó, pod ou serviço através do túnel. Este túnel garante que o tráfego não seja exposto fora da rede na qual os nós estão sendo executados. -Atualmente, os túneis SSH estão obsoletos, portanto, você não deve optar por usá-los, a menos que saiba o que está fazendo. Um substituto para este canal de comunicação está sendo projetado. +Atualmente, os túneis SSH estão obsoletos, portanto, você não deve optar por usá-los, a menos que saiba o que está fazendo. O serviço Konnectivity é um substituto para este canal de comunicação. +### Konnectivity service +{{< feature-state for_k8s_version="v1.18" state="beta" >}} + +Como uma substituição aos túneis SSH, o serviço Konnectivity fornece proxy de nível TCP para a comunicação do control plane para o cluster. O serviço Konnectivity consiste em duas partes: o servidor Konnectivity na rede control plane e os agentes Konnectivity na rede dos nós. Os agentes Konnectivity iniciam conexões com o servidor Konnectivity e mantêm as conexões de rede. Depois de habilitar o serviço Konnectivity, todo o tráfego do control plane para os nós passa por essas conexões. + +Veja a [tarefa do Konnectivity](/docs/tasks/extend-kubernetes/setup-konnectivity/) para configurar o serviço Konnectivity no seu cluster. diff --git a/content/pt/docs/concepts/architecture/controller.md b/content/pt-br/docs/concepts/architecture/controller.md similarity index 100% rename from content/pt/docs/concepts/architecture/controller.md rename to content/pt-br/docs/concepts/architecture/controller.md diff --git a/content/pt-br/docs/concepts/cluster-administration/_index.md b/content/pt-br/docs/concepts/cluster-administration/_index.md new file mode 100755 index 0000000000000..67051766ed496 --- /dev/null +++ b/content/pt-br/docs/concepts/cluster-administration/_index.md @@ -0,0 +1,69 @@ +--- +title: Administração de Cluster +weight: 100 +content_type: concept +description: > + Detalhes de baixo nível relevantes para criar ou administrar um cluster Kubernetes. +no_list: true +--- + + +A visão geral da administração do cluster é para qualquer pessoa que crie ou administre um cluster do Kubernetes. +Pressupõe-se alguma familiaridade com os [conceitos](/docs/concepts) principais do Kubernetes. + + +## Planejando um cluster + +Consulte os guias em [Configuração](/docs/setup) para exemplos de como planejar, instalar e configurar clusters Kubernetes. As soluções listadas neste artigo são chamadas de *distros*. + + {{< note >}} + Nem todas as distros são mantidas ativamente. Escolha distros que foram testadas com uma versão recente do Kubernetes. + {{< /note >}} + +Antes de escolher um guia, aqui estão algumas considerações: + +- Você quer experimentar o Kubernetes em seu computador ou deseja criar um cluster de vários nós com alta disponibilidade?
Escolha as distros mais adequadas às suas necessidades. +- Você vai usar um **cluster Kubernetes gerenciado**, como o [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/), ou **vai hospedar seu próprio cluster**? +- Seu cluster será **local**, ou **na nuvem (IaaS)**? O Kubernetes não oferece suporte direto a clusters híbridos. Em vez disso, você pode configurar vários clusters. +- **Se você estiver configurando o Kubernetes local**, leve em consideração qual [modelo de rede](/docs/concepts/cluster-administration/networking) se encaixa melhor. +- Você vai executar o Kubernetes em um hardware **bare metal** ou em **máquinas virtuais (VMs)**? +- Você **deseja apenas executar um cluster** ou espera **participar ativamente do desenvolvimento do código do projeto Kubernetes**? Se for a segunda opção, +escolha uma distro desenvolvida ativamente. Algumas distros usam apenas versões binárias, mas oferecem uma maior variedade de opções. +- Familiarize-se com os [componentes](/docs/concepts/overview/components/) necessários para executar um cluster. + + +## Gerenciando um cluster + +* Aprenda como [gerenciar nós](/docs/concepts/architecture/nodes/). +* Aprenda a configurar e [gerenciar a quota de recursos](/docs/concepts/policy/resource-quotas/) para clusters compartilhados. + +## Protegendo um cluster + +* [Gerar Certificados](/docs/tasks/administer-cluster/certificates/) descreve os passos para gerar certificados usando diferentes cadeias de ferramentas. + +* [Ambiente de Contêineres do Kubernetes](/docs/concepts/containers/container-environment/) descreve o ambiente para contêineres gerenciados pelo kubelet em um nó Kubernetes. + +* [Controle de Acesso à API do Kubernetes](/docs/concepts/security/controlling-access) descreve como o Kubernetes implementa o controle de acesso para sua própria API. + +* [Autenticação](/docs/reference/access-authn-authz/authentication/) explica a autenticação no Kubernetes, incluindo as várias opções de autenticação. + +* [Autorização](/docs/reference/access-authn-authz/authorization/) é separada da autenticação e controla como as chamadas HTTP são tratadas. + +* [Usando Controladores de Admissão](/docs/reference/access-authn-authz/admission-controllers/) explica plugins que interceptam requisições para o servidor da API Kubernetes após +a autenticação e autorização. + +* [Usando Sysctl em um Cluster Kubernetes](/docs/tasks/administer-cluster/sysctl-cluster/) descreve como um administrador pode usar a ferramenta de linha de comando `sysctl` para +definir os parâmetros do kernel. + +* [Auditoria](/docs/tasks/debug-application-cluster/audit/) descreve como interagir com *logs* de auditoria do Kubernetes. + +### Protegendo o kubelet + * [Comunicação Control Plane-Nó](/docs/concepts/architecture/control-plane-node-communication/) + * [TLS bootstrapping](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/) + * [Autenticação/autorização do kubelet](/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/) + +## Serviços Opcionais para o Cluster + +* [Integração com DNS](/docs/concepts/services-networking/dns-pod-service/) descreve como resolver um nome DNS diretamente para um serviço Kubernetes. + +* [Registro e Monitoramento da Atividade do Cluster](/docs/concepts/cluster-administration/logging/) explica como funciona o *logging* no Kubernetes e como implementá-lo.
diff --git a/content/pt/docs/concepts/cluster-administration/addons.md b/content/pt-br/docs/concepts/cluster-administration/addons.md similarity index 100% rename from content/pt/docs/concepts/cluster-administration/addons.md rename to content/pt-br/docs/concepts/cluster-administration/addons.md diff --git a/content/pt/docs/concepts/cluster-administration/certificates.md b/content/pt-br/docs/concepts/cluster-administration/certificates.md similarity index 100% rename from content/pt/docs/concepts/cluster-administration/certificates.md rename to content/pt-br/docs/concepts/cluster-administration/certificates.md diff --git a/content/pt/docs/concepts/cluster-administration/cluster-administration-overview.md b/content/pt-br/docs/concepts/cluster-administration/cluster-administration-overview.md similarity index 100% rename from content/pt/docs/concepts/cluster-administration/cluster-administration-overview.md rename to content/pt-br/docs/concepts/cluster-administration/cluster-administration-overview.md diff --git a/content/pt/docs/concepts/cluster-administration/kubelet-garbage-collection.md b/content/pt-br/docs/concepts/cluster-administration/kubelet-garbage-collection.md similarity index 100% rename from content/pt/docs/concepts/cluster-administration/kubelet-garbage-collection.md rename to content/pt-br/docs/concepts/cluster-administration/kubelet-garbage-collection.md diff --git a/content/pt/docs/concepts/cluster-administration/logging.md b/content/pt-br/docs/concepts/cluster-administration/logging.md similarity index 100% rename from content/pt/docs/concepts/cluster-administration/logging.md rename to content/pt-br/docs/concepts/cluster-administration/logging.md diff --git a/content/pt-br/docs/concepts/cluster-administration/networking.md b/content/pt-br/docs/concepts/cluster-administration/networking.md new file mode 100644 index 0000000000000..74abf0ac29035 --- /dev/null +++ b/content/pt-br/docs/concepts/cluster-administration/networking.md @@ -0,0 +1,250 @@ +--- +title: Conectividade do Cluster +content_type: concept +weight: 50 +--- + + +Conectividade é uma parte central do Kubernetes, mas pode ser desafiador +entender exatamente como é o seu funcionamento esperado. Existem quatro problemas +distintos de conectividade que devem ser tratados: + +1. Comunicações contêiner-para-contêiner altamente acopladas: Isso é resolvido + por {{< glossary_tooltip text="Pods" term_id="pod" >}} e comunicações através do `localhost` (veja o esboço ao final desta introdução). +2. Comunicações pod-para-pod: Esse é o foco primário desse documento. +3. Comunicações pod-para-serviço (_service_): Isso é tratado em [Services](/docs/concepts/services-networking/service/). +4. Comunicações externas-para-serviços: Isso é tratado em [Services](/docs/concepts/services-networking/service/). + + + +O Kubernetes é, em essência, o compartilhamento de máquinas entre aplicações. Tradicionalmente, +compartilhar máquinas requer a garantia de que duas aplicações não tentem utilizar +as mesmas portas. Coordenar a alocação de portas entre múltiplos desenvolvedores é +muito difícil de fazer em escala e expõe os usuários a problemas em nível de cluster +fora de seu controle. + +A alocação dinâmica de portas traz uma série de complicações para o sistema - toda +aplicação deve obter suas portas através de flags de configuração, os servidores de API +devem saber como inserir números dinâmicos de portas nos blocos de configuração, serviços +precisam saber como encontrar uns aos outros, etc. Em vez de lidar com isso, o Kubernetes +faz de uma maneira diferente.
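+Para ilustrar o primeiro caso, um esboço mínimo e meramente ilustrativo de um `Pod` com dois contêineres que compartilham o mesmo endereço IP e se comunicam via `localhost`; os nomes `localhost-demo`, `web` e `sidecar` são hipotéticos:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: localhost-demo   # nome hipotético, apenas para ilustração
+spec:
+  containers:
+  - name: web
+    image: nginx         # escuta na porta 80 do Pod
+  - name: sidecar
+    image: busybox
+    # alcança o outro contêiner do mesmo Pod via localhost, sem NAT nem portas do host
+    command: ["sh", "-c", "while true; do wget -qO- http://localhost:80 > /dev/null; sleep 5; done"]
+```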
+ +## O modelo de conectividade e rede do Kubernetes + +Todo `Pod` obtém seu próprio endereço IP. Isso significa que você não precisa +criar links explícitos entre os `Pods` e você quase nunca terá que lidar com o +mapeamento de portas de contêineres para portas do host. Isso cria um modelo simples e +retrocompatível, onde os `Pods` podem ser tratados muito mais como VMs ou hosts +físicos da perspectiva de alocação de portas, nomes, descobrimento de serviços +(_service discovery_), balanceamento de carga, configuração de aplicações e migrações. + +O Kubernetes impõe os seguintes requisitos fundamentais para qualquer implementação de +rede (exceto qualquer política de segmentação intencional): + * pods em um nó podem se comunicar com todos os pods em todos os nós sem usar _NAT_. + * agentes em um nó (por exemplo o kubelet ou um serviço local) podem se comunicar com + todos os Pods naquele nó. + +Nota: Para as plataformas que suportam `Pods` executando na rede do host (como o Linux): + + * pods alocados na rede do host de um nó podem se comunicar com todos os pods + em todos os nós sem _NAT_. + +Esse modelo não só é menos complexo, mas é principalmente compatível com o +desejo do Kubernetes de permitir a portabilidade com baixo esforço de aplicações +de VMs para contêineres. Se a sua aplicação executava anteriormente em uma VM, sua VM +possuía um IP e podia se comunicar com outras VMs no seu projeto. Esse é o mesmo +modelo básico. + +Os endereços IP no Kubernetes existem no escopo do `Pod` - contêineres em um `Pod` +compartilham o mesmo _network namespace_ - incluindo seu endereço IP e MAC. +Isso significa que contêineres que compõem um `Pod` podem se comunicar entre eles +através do endereço `localhost` e respectivas portas. Isso também significa que +contêineres em um mesmo `Pod` devem coordenar a alocação e uso de portas, o que não +difere do modelo de processos rodando dentro de uma mesma VM. Isso é chamado de +modelo "IP-por-pod". + +Como isso é implementado é um detalhe do agente de execução de contêiner em uso. + +É possível solicitar uma porta no nó que será encaminhada para seu `Pod` (chamado +de _portas do host_), mas isso é uma operação muito específica. Como esse encaminhamento +é implementado é um detalhe do agente de execução do contêiner. O próprio `Pod` +desconhece a existência ou não de portas do host. + +## Como implementar o modelo de conectividade do Kubernetes + +Existem várias formas de implementar esse modelo de conectividade. Esse +documento não é um estudo exaustivo desses vários métodos, mas pode servir como +uma introdução a várias tecnologias e como um ponto de partida. + +A conectividade no Kubernetes é fornecida através de plugins de +{{< glossary_tooltip text="CNIs" term_id="cni" >}}. + +As seguintes opções estão organizadas alfabeticamente e não implicam preferência por +qualquer solução. + +{{% thirdparty-content %}} + +### Antrea + +O projeto [Antrea](https://github.com/vmware-tanzu/antrea) é uma solução de +conectividade que pretende ser nativa do Kubernetes. Ela utiliza o Open vSwitch +na camada de conectividade de dados. O Open vSwitch é um switch virtual de alta +performance e programável que suporta Linux e Windows. O Open vSwitch permite +ao Antrea implementar políticas de rede do Kubernetes (_NetworkPolicies_) de +uma forma muito performática e eficiente. + +Graças à característica programável do Open vSwitch, o Antrea consegue implementar +uma série de funcionalidades de rede e segurança.
+ +### AWS VPC CNI para Kubernetes + +O [AWS VPC CNI](https://github.com/aws/amazon-vpc-cni-k8s) oferece conectividade +com o AWS Virtual Private Cloud (VPC) para clusters Kubernetes. Esse plugin oferece +alta performance e disponibilidade, com baixa latência. Adicionalmente, usuários podem +aplicar as melhores práticas de conectividade e segurança existentes no AWS VPC +para a construção de clusters Kubernetes. Isso inclui a possibilidade de usar +_VPC flow logs_, políticas de roteamento da VPC e grupos de segurança para isolamento +de tráfego. + +O uso desse plugin permite aos Pods do Kubernetes ter, dentro do pod, o mesmo endereço IP +que possuem na rede da VPC. O CNI (Container Network Interface) +aloca uma _Elastic Network Interface_ (ENI) para cada nó do Kubernetes e usa a +faixa de endereços IP secundários de cada ENI para os Pods no nó. O CNI inclui +controles para pré-alocação das ENIs e dos endereços IP para um início mais rápido dos +pods e permite clusters com até 2.000 nós. + +Adicionalmente, esse CNI pode ser utilizado junto com o [Calico](https://docs.aws.amazon.com/eks/latest/userguide/calico.html) +para a criação de políticas de rede (_NetworkPolicies_). O projeto AWS VPC CNI +tem código fonte aberto, com a [documentação no GitHub](https://github.com/aws/amazon-vpc-cni-k8s). + +### Azure CNI para o Kubernetes +[Azure CNI](https://docs.microsoft.com/en-us/azure/virtual-network/container-networking-overview) é um +plugin de [código fonte aberto](https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md) +que integra os Pods do Kubernetes com uma rede virtual da Azure (também conhecida como VNet), +provendo performance de rede similar à de máquinas virtuais no ambiente. Os Pods +podem se comunicar com outras VNets e com ambientes _on-premises_ com o uso de +funcionalidades da Azure, e também podem ser acessados por clientes com origem nessas redes. +Os Pods podem acessar serviços da Azure, como armazenamento e SQL, que são +protegidos por _Service Endpoints_ e _Private Link_. Você pode utilizar as políticas +de segurança e roteamento para filtrar o tráfego do Pod. O plugin associa IPs da VNet +para os Pods utilizando um pool de IPs secundários pré-configurado na interface de rede +do nó Kubernetes. + +O Azure CNI está disponível nativamente no [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/en-us/azure/aks/configure-azure-cni). + +### Calico + +[Calico](https://docs.projectcalico.org/) é uma solução de conectividade e +segurança para contêineres, máquinas virtuais e serviços nativos em hosts. O +Calico suporta múltiplas camadas de conectividade/dados, como por exemplo: +uma camada Linux eBPF nativa, uma camada de conectividade baseada em conceitos +padrão do Linux e uma camada baseada no HNS do Windows. O Calico provê uma +camada completa de conectividade e rede, mas também pode ser usado em conjunto com +[CNIs de provedores de nuvem](https://docs.projectcalico.org/networking/determine-best-networking#calico-compatible-cni-plugins-and-cloud-provider-integrations) +para permitir a criação de políticas de rede. + +### Cilium + +[Cilium](https://github.com/cilium/cilium) é um software de código fonte aberto +para prover conectividade e segurança entre contêineres de aplicação. O Cilium +pode lidar com tráfego na camada de aplicação (ex.
HTTP) e pode aplicar políticas +de rede nas camadas L3-L7 usando um modelo de segurança baseado em identidade e +desacoplado do endereçamento de redes, podendo inclusive ser utilizado com outros +plugins CNI. + +### Flannel + +[Flannel](https://github.com/coreos/flannel#flannel) é uma camada muito simples +de conectividade que satisfaz os requisitos do Kubernetes. Muitas pessoas +reportaram sucesso em utilizar o Flannel com o Kubernetes. + +### Google Compute Engine (GCE) + +Para os scripts de configuração do Google Compute Engine, [roteamento +avançado](https://cloud.google.com/vpc/docs/routes) é usado para associar +a cada VM uma sub-rede (o padrão é `/24` - 254 IPs). Qualquer tráfego direcionado +para aquela sub-rede será roteado diretamente para a VM pela rede do GCE. Isso é +adicional ao IP principal associado à VM, que é mascarado para o acesso à Internet. +Uma _bridge_ Linux (chamada `cbr0`) é configurada para existir naquela sub-rede, e é +configurada no Docker através da opção `--bridge`. + +O Docker é iniciado com: + + +```shell +DOCKER_OPTS="--bridge=cbr0 --iptables=false --ip-masq=false" +``` + +Essa _bridge_ é criada pelo Kubelet (controlada pela opção `--network-plugin=kubenet`) +de acordo com a informação `.spec.podCIDR` do Nó. + +O Docker irá agora alocar IPs do bloco `cbr-cidr`. Contêineres podem alcançar +outros contêineres e nós através da interface `cbr0`. Esses IPs são todos roteáveis +dentro da rede do projeto do GCE. + +O próprio GCE não sabe nada sobre esses IPs, então não irá mascará-los quando tentarem +se comunicar com a internet. Para permitir isso, uma regra de iptables é utilizada para +mascarar o tráfego para IPs fora da rede do projeto do GCE (no exemplo abaixo, 10.0.0.0/8): + +```shell +iptables -t nat -A POSTROUTING ! -d 10.0.0.0/8 -o eth0 -j MASQUERADE +``` + +Por fim, o encaminhamento de IP deve ser habilitado no kernel, de forma a processar +os pacotes vindos dos contêineres: + +```shell +sysctl net.ipv4.ip_forward=1 +``` + +O resultado disso tudo é que `Pods` agora podem alcançar outros `Pods` e podem também +se comunicar com a Internet. + +### Kube-router + +[Kube-router](https://github.com/cloudnativelabs/kube-router) é uma solução de conectividade +construída especificamente para o Kubernetes, que visa prover alta performance e +simplicidade operacional. Kube-router provê um +proxy de serviços baseado no [LVS/IPVS](https://www.linuxvirtualserver.org/software/ipvs.html), +uma solução de comunicação pod-para-pod baseada em encaminhamento de pacotes Linux e sem camadas +adicionais, e funcionalidade de políticas de rede baseada no iptables/ipset. + +### Redes L2 e bridges Linux + +Se você tem uma rede L2 "burra", como um switch em um ambiente "bare-metal", +você deve conseguir fazer algo similar ao ambiente GCE explicado acima. +Note que essas instruções foram testadas casualmente - parece funcionar, mas +não foi propriamente testado. Se você conseguir usar essa técnica e aperfeiçoar +o processo, por favor, nos avise! + +Siga a parte _"With Linux Bridge devices"_ desse +[tutorial super bacana](https://blog.oddbit.com/2014/08/11/four-ways-to-connect-a-docker/) do +Lars Kellogg-Stedman. + +### Multus (Plugin multi-redes) {#multus} + +[Multus](https://github.com/Intel-Corp/multus-cni) é um plugin multi-CNI para +suportar a funcionalidade de múltiplas redes do Kubernetes usando objetos baseados em {{< glossary_tooltip text="CRDs" term_id="CustomResourceDefinition" >}}. + +Multus suporta todos os [plugins de referência](https://github.com/containernetworking/plugins) (ex.
[Flannel](https://github.com/containernetworking/plugins/tree/master/plugins/meta/flannel), +[DHCP](https://github.com/containernetworking/plugins/tree/master/plugins/ipam/dhcp), +[Macvlan](https://github.com/containernetworking/plugins/tree/master/plugins/main/macvlan)) +que implementam a especificação de CNI e plugins de terceiros +(ex. [Calico](https://github.com/projectcalico/cni-plugin), [Weave](https://github.com/weaveworks/weave), +[Cilium](https://github.com/cilium/cilium), [Contiv](https://github.com/contiv/netplugin)). +Adicionalmente, Multus suporta cargas de trabalho no Kubernetes que necessitem de funcionalidades como +[SRIOV](https://github.com/hustcat/sriov-cni), [DPDK](https://github.com/Intel-Corp/sriov-cni), +[OVS-DPDK & VPP](https://github.com/intel/vhost-user-net-plugin). + +### OVN (Open Virtual Networking) + +OVN é uma solução de virtualização de redes de código aberto desenvolvida pela +comunidade Open vSwitch. Permite a criação de switches lógicos, roteadores lógicos, +listas de acesso, balanceadores de carga e mais, para construir diferentes topologias +de redes virtuais. Esse projeto possui um plugin específico para o Kubernetes e a +documentação em [ovn-kubernetes](https://github.com/openvswitch/ovn-kubernetes). + +## {{% heading "whatsnext" %}} + +O design inicial do modelo de conectividade do Kubernetes e alguns planos futuros +estão descritos com maiores detalhes no +[documento de design de redes](https://git.k8s.io/community/contributors/design-proposals/network/networking.md). diff --git a/content/pt/docs/concepts/configuration/_index.md b/content/pt-br/docs/concepts/configuration/_index.md similarity index 100% rename from content/pt/docs/concepts/configuration/_index.md rename to content/pt-br/docs/concepts/configuration/_index.md diff --git a/content/pt-br/docs/concepts/configuration/organize-cluster-access-kubeconfig.md b/content/pt-br/docs/concepts/configuration/organize-cluster-access-kubeconfig.md new file mode 100644 index 0000000000000..4b431b486f915 --- /dev/null +++ b/content/pt-br/docs/concepts/configuration/organize-cluster-access-kubeconfig.md @@ -0,0 +1,131 @@ +--- +title: Organizando o acesso ao cluster usando arquivos kubeconfig +content_type: concept +weight: 60 +--- + + + +Utilize arquivos kubeconfig para organizar informações sobre clusters, usuários, namespaces e mecanismos de autenticação. A ferramenta de linha de comando `kubectl` faz uso dos arquivos kubeconfig para encontrar as informações necessárias para escolher um cluster e se comunicar com o seu serviço de API. + + +{{< note >}} +Um arquivo que é utilizado para configurar o acesso aos clusters é chamado de *kubeconfig*. Esta é uma forma genérica de se referir a um arquivo de configuração desta natureza. Isso não significa que exista um arquivo com o nome `kubeconfig`. +{{< /note >}} + +Por padrão, o `kubectl` procura por um arquivo de nome `config` no diretório `$HOME/.kube`. + +Você pode especificar outros arquivos kubeconfig através da variável de ambiente `KUBECONFIG` ou adicionando a opção [`--kubeconfig`](/docs/reference/generated/kubectl/kubectl/). + +Para maiores detalhes sobre a criação e especificação de um kubeconfig, veja o passo a passo em [Configurar Acesso para Múltiplos Clusters](/docs/tasks/access-application-cluster/configure-access-multiple-clusters).
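+A título de ilustração, um esboço mínimo de como um arquivo kubeconfig se parece; o endereço do servidor, os arquivos de certificado e os nomes `desenvolvimento`, `desenvolvedor` e `dev-padrao` são hipotéticos:
+
+```yaml
+apiVersion: v1
+kind: Config
+clusters:
+- name: desenvolvimento
+  cluster:
+    server: https://192.0.2.10:6443      # endereço hipotético do apiserver
+    certificate-authority: ca.crt
+users:
+- name: desenvolvedor
+  user:
+    client-certificate: cliente.crt
+    client-key: cliente.key
+contexts:
+- name: dev-padrao
+  context:
+    cluster: desenvolvimento
+    namespace: default
+    user: desenvolvedor
+current-context: dev-padrao
+```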
+ + + +## Suportando múltiplos clusters, usuários e mecanismos de autenticação + +Imagine que você possua inúmeros clusters, e seus usuários e componentes se autenticam de várias formas. Por exemplo: + +- Um kubelet ativo pode se autenticar utilizando certificados +- Um usuário pode se autenticar através de tokens +- Administradores podem possuir conjuntos de certificados que provêm acesso aos usuários de forma individual. + +Através de arquivos kubeconfig, você pode organizar os seus clusters, usuários e namespaces. Você também pode definir contextos para uma fácil troca entre clusters e namespaces. + + +## Contexto + +Um elemento de *contexto* em um kubeconfig é utilizado para agrupar parâmetros de acesso sob um nome conveniente. Cada contexto possui três parâmetros: cluster, namespace e usuário. + +Por padrão, a ferramenta de linha de comando `kubectl` utiliza os parâmetros do _contexto atual_ para se comunicar com o cluster. + +Para escolher o contexto atual: + +```shell +kubectl config use-context +``` + +## A variável de ambiente KUBECONFIG + +A variável de ambiente `KUBECONFIG` possui uma lista dos arquivos kubeconfig. Para Linux e Mac, esta lista é delimitada por dois-pontos. No Windows, a lista é delimitada por ponto e vírgula. A variável de ambiente `KUBECONFIG` não é um requisito obrigatório; caso ela não exista, o `kubectl` utilizará o arquivo kubeconfig padrão localizado no caminho `$HOME/.kube/config`. + +Se a variável de ambiente `KUBECONFIG` existir, o `kubectl` utilizará uma configuração que é o resultado da combinação dos arquivos listados na variável de ambiente `KUBECONFIG`. + +## Combinando arquivos kubeconfig + +Para inspecionar a sua configuração atual, execute o seguinte comando: + +```shell +kubectl config view +``` + +Como descrito anteriormente, a saída poderá ser resultado de um único arquivo kubeconfig, ou poderá ser o resultado da junção de vários arquivos kubeconfig. + +Aqui estão as regras que o `kubectl` utiliza quando realiza a combinação de arquivos kubeconfig: + +1. Se o argumento `--kubeconfig` está definido, apenas o arquivo especificado será utilizado. Apenas uma instância desta flag é permitida. + + Caso contrário, se a variável de ambiente `KUBECONFIG` estiver definida, esta deverá ser utilizada como uma lista de arquivos a serem combinados, seguindo o fluxo a seguir: + + * Ignorar arquivos vazios. + * Produzir erros para arquivos cujo conteúdo não for possível desserializar. + * O primeiro arquivo que definir um valor ou mapear uma chave determinada será o escolhido. + * Nunca modificar um valor ou uma chave de mapeamento. + Exemplo: Preservar o contexto do primeiro arquivo que definir `current-context`. + Exemplo: Se dois arquivos especificarem um `red-user`, use apenas os valores do primeiro `red-user`. Mesmo se um segundo arquivo possuir entradas não conflitantes sobre a mesma entrada `red-user`, estas deverão ser descartadas. + + Para um exemplo de definição da variável de ambiente `KUBECONFIG`, veja [Definindo a variável de ambiente KUBECONFIG](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/#set-the-kubeconfig-environment-variable). + + Caso contrário, utilize o arquivo kubeconfig padrão encontrado no diretório `$HOME/.kube/config`, sem qualquer tipo de combinação. + +1. Determine o contexto a ser utilizado baseado no primeiro padrão encontrado, nesta ordem: + + 1. Usar o conteúdo da flag `--context` caso ela existir. + 1. Usar o `current-context` a partir da combinação dos arquivos kubeconfig.
+ + + Um contexto vazio é permitido neste momento. + + +1. Determinar o cluster e o usuário. Neste ponto, poderá ou não existir um contexto. + Determinar o cluster e o usuário com base no primeiro padrão encontrado, de acordo com a ordem a seguir. Este procedimento deverá ser executado duas vezes: uma para definir o usuário e a outra para definir o cluster. + + 1. Utilizar a flag caso ela existir: `--user` ou `--cluster`. + 1. Se o contexto não estiver vazio, utilizar o cluster ou usuário deste contexto. + + O usuário e o cluster poderão estar vazios neste ponto. + +1. Determinar as informações do cluster atual a serem utilizadas. Neste ponto, poderá ou não existir informações de um cluster. + + Construir cada peça de informação do cluster baseado nas opções a seguir; a primeira ocorrência encontrada será a opção vencedora: + + 1. Usar as flags de linha de comando caso existirem: `--server`, `--certificate-authority`, `--insecure-skip-tls-verify`. + 1. Se algum atributo do cluster existir a partir da combinação de kubeconfigs, estes deverão ser utilizados. + 1. Se não existir informação de localização do servidor, falhar. + +1. Determinar a informação atual de usuário a ser utilizada. Construir a informação de usuário utilizando as mesmas regras utilizadas para o caso de informações de cluster, exceto pela regra de que apenas uma técnica de autenticação é permitida por usuário: + + 1. Usar as flags, caso existirem: `--client-certificate`, `--client-key`, `--username`, `--password`, `--token`. + 1. Usar os campos `user` resultantes da combinação de arquivos kubeconfig. + 1. Se existirem duas técnicas conflitantes, falhar. + +1. Para qualquer informação que ainda estiver ausente, utilizar os valores padrão e potencialmente solicitar informações de autenticação a partir do prompt de comando. + + +## Referências de arquivos + +Arquivos e caminhos referenciados em um arquivo kubeconfig são relativos à localização do arquivo kubeconfig. + +Referências de arquivos na linha de comando são relativas ao diretório de trabalho vigente. + +No arquivo `$HOME/.kube/config`, caminhos relativos são armazenados de forma relativa, e caminhos absolutos são armazenados de forma absoluta. + +## {{% heading "whatsnext" %}} + + +* [Configurar Acesso para Múltiplos Clusters](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) +* [`kubectl config`](/docs/reference/generated/kubectl/kubectl-commands#config) + + + + diff --git a/content/pt-br/docs/concepts/containers/_index.md b/content/pt-br/docs/concepts/containers/_index.md new file mode 100644 index 0000000000000..6ce26043c59c1 --- /dev/null +++ b/content/pt-br/docs/concepts/containers/_index.md @@ -0,0 +1,34 @@ +--- +title: Contêineres +weight: 40 +description: Tecnologia para empacotar aplicações com suas dependências em tempo de execução +content_type: concept +no_list: true +--- + + + +Cada contêiner que você executa é repetível; a padronização de ter as +dependências incluídas significa que você obtém o mesmo comportamento onde quer que você o execute. + +Os contêineres separam os aplicativos da infraestrutura de _host_ subjacente. +Isso torna a implantação mais fácil em diferentes ambientes de nuvem ou sistema operacional. + + + + +## Imagem de contêiner +Uma [imagem de contêiner](/docs/concepts/containers/images/) é um pacote de software pronto para executar, contendo tudo que é preciso para executar uma aplicação: +o código e o agente de execução necessário, as bibliotecas da aplicação e do sistema, e os valores padrão para qualquer configuração essencial.
+ +Por _design_, um contêiner é imutável: você não pode mudar o código de um contêiner que já está executando. Se você tem uma aplicação conteinerizada e quer fazer mudanças, você precisa construir uma nova imagem que inclua a mudança e recriar o contêiner para iniciar a partir da imagem atualizada. + +## Agente de execução de contêiner + +{{< glossary_definition term_id="container-runtime" length="all" >}} + +## {{% heading "whatsnext" %}} + +* [Imagens de contêineres](/docs/concepts/containers/images/) +* [Pods](/docs/concepts/workloads/pods/) + diff --git a/content/pt-br/docs/concepts/containers/container-environment.md b/content/pt-br/docs/concepts/containers/container-environment.md new file mode 100644 index 0000000000000..af28e2dd3fd58 --- /dev/null +++ b/content/pt-br/docs/concepts/containers/container-environment.md @@ -0,0 +1,56 @@ +--- +title: Ambiente de Contêiner +content_type: concept +weight: 20 +--- + + + +Essa página descreve os recursos disponíveis para contêineres no ambiente de contêiner. + + + + + +## Ambiente de contêiner + +O ambiente de contêiner do Kubernetes fornece recursos importantes para contêineres: + +* Um sistema de arquivos, que é a combinação de uma [imagem](/docs/concepts/containers/images/) e um ou mais [volumes](/docs/concepts/storage/volumes/). +* Informação sobre o contêiner propriamente dito. +* Informação sobre outros objetos no cluster. + +### Informação de contêiner + +O _hostname_ de um contêiner é o nome do Pod em que o contêiner está executando. +Isso é disponibilizado através do comando `hostname` ou da função [`gethostname`](https://man7.org/linux/man-pages/man2/gethostname.2.html) da libc. + +O nome do Pod e o Namespace são expostos como variáveis de ambiente através de um mecanismo chamado [downward API](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/). + +Variáveis de ambiente definidas pelo usuário a partir da definição do Pod também ficam disponíveis para o contêiner, assim como qualquer variável de ambiente especificada estaticamente na imagem Docker. + +### Informação do cluster + +Uma lista de todos os serviços que estavam em execução quando um contêiner foi criado é disponibilizada para o contêiner como variáveis de ambiente. +Essas variáveis de ambiente são compatíveis com a funcionalidade _docker link_ do Docker. + +Para um serviço nomeado *foo* que mapeia para um contêiner nomeado *bar*, as seguintes variáveis são definidas: + +```shell +FOO_SERVICE_HOST=<o host no qual o serviço está executando> +FOO_SERVICE_PORT=<a porta na qual o serviço está executando> +``` + +Serviços possuem endereços IP dedicados e são disponibilizados para o contêiner via DNS, +se o [DNS addon](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/) estiver habilitado. + + + +## {{% heading "whatsnext" %}} + + +* Aprenda mais sobre [hooks de ciclo de vida do contêiner](/docs/concepts/containers/container-lifecycle-hooks/). +* Obtenha experiência prática + [anexando manipuladores a eventos de ciclo de vida do contêiner](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/).
+ + diff --git a/content/pt-br/docs/concepts/containers/container-lifecycle-hooks.md b/content/pt-br/docs/concepts/containers/container-lifecycle-hooks.md new file mode 100644 index 0000000000000..984f248256ad9 --- /dev/null +++ b/content/pt-br/docs/concepts/containers/container-lifecycle-hooks.md @@ -0,0 +1,114 @@ +--- +title: Hooks de Ciclo de Vida do Contêiner +content_type: concept +weight: 30 +--- + + + +Essa página descreve como os contêineres gerenciados pelo _kubelet_ podem usar a estrutura de _hooks_ de ciclo de vida do contêiner para executar código acionado por eventos durante seu ciclo de vida de gerenciamento. + + + + +## Visão Geral + +Análogo a muitas estruturas de linguagem de programação que têm _hooks_ de ciclo de vida de componentes, como Angular, +o Kubernetes fornece aos contêineres _hooks_ de ciclo de vida. +Os _hooks_ permitem que os contêineres estejam cientes dos eventos em seu ciclo de vida de gerenciamento +e executem código implementado em um manipulador quando o _hook_ de ciclo de vida correspondente é executado. + +## Hooks do contêiner + +Existem dois _hooks_ que são expostos para os contêineres: + +`PostStart` + +Este _hook_ é executado imediatamente após um contêiner ser criado. +Entretanto, não há garantia de que o _hook_ será executado antes do ENTRYPOINT do contêiner. +Nenhum parâmetro é passado para o manipulador. + +`PreStop` + +Esse _hook_ é chamado imediatamente antes de um contêiner ser encerrado devido a uma solicitação de API ou a um evento de gerenciamento, como falha em uma sondagem de liveness/startup, preempção, disputa por recursos, entre outros. +Uma chamada ao _hook_ `PreStop` falha se o contêiner já está em um estado finalizado ou concluído, e o _hook_ deve ser concluído antes que o sinal TERM seja enviado para parar o contêiner. A contagem regressiva do período de tolerância de término do Pod começa antes que o _hook_ `PreStop` seja executado, portanto, independentemente do resultado do manipulador, o contêiner será encerrado dentro do período de tolerância de encerramento do Pod. Nenhum parâmetro é passado para o manipulador. + +Uma descrição mais detalhada do comportamento de término pode ser encontrada em [Término de Pods](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination). + +### Implementações de manipulador de hook + +Os contêineres podem acessar um _hook_ implementando e registrando um manipulador para esse _hook_. +Existem dois tipos de manipuladores de _hooks_ que podem ser implementados para contêineres (um esboço de registro em YAML é mostrado mais abaixo): + +* Exec - Executa um comando específico, como `pre-stop.sh`, dentro dos cgroups e namespaces do contêiner. +* HTTP - Executa uma requisição HTTP em um endpoint específico do contêiner. + +### Execução do manipulador de hook + + +Quando um _hook_ de gerenciamento de ciclo de vida do contêiner é chamado, o sistema de gerenciamento do Kubernetes executa o manipulador de acordo com a ação do _hook_: `httpGet` e `tcpSocket` são executados pelo processo do kubelet e `exec` é executado pelo contêiner. + +As chamadas do manipulador do _hook_ são síncronas no contexto do Pod que contém o contêiner. +Isso significa que, para um _hook_ `PostStart`, o ENTRYPOINT do contêiner e o _hook_ disparam de forma assíncrona. +No entanto, se o _hook_ demorar muito para ser executado ou travar, o contêiner não consegue atingir o estado `running`. + + +Os _hooks_ `PreStop` não são executados de forma assíncrona a partir do sinal para parar o contêiner; o _hook_ precisa finalizar a sua execução antes que o sinal TERM possa ser enviado.
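+Um esboço mínimo, supondo a imagem `nginx` e comandos meramente ilustrativos, de como os dois _hooks_ são registrados na especificação de um Pod:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: lifecycle-demo            # nome hipotético
+spec:
+  containers:
+  - name: lifecycle-demo-container
+    image: nginx
+    lifecycle:
+      postStart:
+        exec:
+          # executa logo após a criação do contêiner (sem garantia de ordem com o ENTRYPOINT)
+          command: ["/bin/sh", "-c", "echo Hello > /usr/share/message"]
+      preStop:
+        exec:
+          # executa antes do envio do sinal TERM ao contêiner
+          command: ["/bin/sh", "-c", "nginx -s quit; while killall -0 nginx; do sleep 1; done"]
+```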
+Se um _hook_ `PreStop` travar durante a execução, a fase do Pod será `Terminating` e permanecerá assim até que o Pod seja morto após seu `terminationGracePeriodSeconds` expirar. Esse período de tolerância se aplica ao tempo total necessário +para o _hook_ `PreStop` executar e para o contêiner parar normalmente. +Se, por exemplo, o `terminationGracePeriodSeconds` é 60, o _hook_ leva 55 segundos para ser concluído e o contêiner leva 10 segundos para parar normalmente após receber o sinal, então o contêiner será morto antes que possa parar +normalmente, uma vez que o `terminationGracePeriodSeconds` é menor que o tempo total (55 + 10) necessário para que essas duas coisas aconteçam. + +Se um _hook_ `PostStart` ou `PreStop` falhar, ele mata o contêiner. + +Os usuários devem tornar seus _hooks_ o mais leves possível. +Há casos, no entanto, em que comandos de longa duração fazem sentido, como ao salvar o estado +antes de parar um contêiner. + +### Garantias de entrega de _hooks_ + +A entrega do _hook_ é destinada a acontecer *pelo menos uma vez*, +o que quer dizer que um _hook_ pode ser chamado várias vezes para qualquer evento, +como para `PostStart` ou `PreStop`. +Depende da implementação do _hook_ lidar com isso corretamente. + +Geralmente, apenas entregas únicas são feitas. +Se, por exemplo, um receptor de _hook_ HTTP estiver inativo e não puder receber tráfego, +não há tentativa de reenviar. +Em alguns casos raros, no entanto, pode ocorrer uma entrega dupla. +Por exemplo, se um kubelet reiniciar no meio do envio de um _hook_, o _hook_ pode ser +reenviado depois que o kubelet voltar a funcionar. + +### Depurando manipuladores de _hooks_ + +Os logs para um manipulador de _hook_ não são expostos em eventos de Pod. +Se um manipulador falhar por algum motivo, ele transmitirá um evento. +Para `PostStart` é o evento `FailedPostStartHook` e para `PreStop` é o evento +`FailedPreStopHook`. +Você pode ver esses eventos executando `kubectl describe pod <nome_do_pod>`.
+Aqui está um exemplo de saída de eventos da execução deste comando: + +``` +Events: + FirstSeen LastSeen Count From SubObjectPath Type Reason Message + --------- -------- ----- ---- ------------- -------- ------ ------- + 1m 1m 1 {default-scheduler } Normal Scheduled Successfully assigned test-1730497541-cq1d2 to gke-test-cluster-default-pool-a07e5d30-siqd + 1m 1m 1 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Normal Pulling pulling image "test:1.0" + 1m 1m 1 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Normal Created Created container with docker id 5c6a256a2567; Security:[seccomp=unconfined] + 1m 1m 1 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Normal Pulled Successfully pulled image "test:1.0" + 1m 1m 1 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Normal Started Started container with docker id 5c6a256a2567 + 38s 38s 1 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Normal Killing Killing container with docker id 5c6a256a2567: PostStart handler: Error executing in Docker Container: 1 + 37s 37s 1 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Normal Killing Killing container with docker id 8df9fdfd7054: PostStart handler: Error executing in Docker Container: 1 + 38s 37s 2 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} Warning FailedSync Error syncing pod, skipping: failed to "StartContainer" for "main" with RunContainerError: "PostStart handler: Error executing in Docker Container: 1" + 1m 22s 2 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Warning FailedPostStartHook +``` + + + +## {{% heading "whatsnext" %}} + + +* Saiba mais sobre o [Ambiente de contêiner](/docs/concepts/containers/container-environment/). +* Obtenha experiência prática + [anexando manipuladores a eventos de ciclo de vida do contêiner](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). + diff --git a/content/pt-br/docs/concepts/containers/images.md b/content/pt-br/docs/concepts/containers/images.md new file mode 100644 index 0000000000000..6f8b81dd7c455 --- /dev/null +++ b/content/pt-br/docs/concepts/containers/images.md @@ -0,0 +1,290 @@ +--- +reviewers: +- femrtnz +- jcjesus +- hugopfeffer +title: Imagens +content_type: concept +weight: 10 +--- + + + +Uma imagem de contêiner representa dados binários que encapsulam uma aplicação e todas as suas dependências de software. As imagens de contêiner são pacotes de software executáveis que podem ser executados de forma autônoma e que fazem suposições muito bem definidas sobre seu ambiente de execução. + +Normalmente, você cria uma imagem de contêiner da sua aplicação e a envia para um registro antes de fazer referência a ela em um {{< glossary_tooltip text="Pod" term_id="pod" >}}. + +Esta página fornece um resumo sobre o conceito de imagem de contêiner. + + + +## Nomes das imagens + +As imagens de contêiner geralmente recebem um nome como `pause`, `exemplo/meuconteiner`, ou `kube-apiserver`. +As imagens também podem incluir um hostname de algum registro; por exemplo: `exemplo.registro.ficticio/nomeimagem`, +e um possível número de porta; por exemplo: `exemplo.registro.ficticio:10443/nomeimagem`. + +Se você não especificar um hostname de registro, o Kubernetes presumirá que você se refere ao registro público do Docker.
+ +Após a parte do nome da imagem, você pode adicionar uma _tag_ (da mesma forma que você faria com comandos como `docker` e `podman`). +As tags permitem identificar diferentes versões da mesma série de imagens. + +Tags de imagem consistem em letras maiúsculas e minúsculas, dígitos, sublinhados (`_`), +pontos (`.`) e traços (`-`). +Existem regras adicionais sobre onde você pode colocar os caracteres +separadores (`_`, `-` e `.`) dentro de uma tag de imagem. +Se você não especificar uma tag, o Kubernetes presumirá que você se refere à tag `latest` (mais recente). + +{{< caution >}} +Você deve evitar usar a tag `latest` quando estiver realizando o deploy de contêineres em produção, +pois é mais difícil rastrear qual versão da imagem está sendo executada, além de tornar mais difícil o processo de reversão para uma versão funcional. + +Em vez disso, especifique uma tag significativa, como `v1.42.0`. +{{< /caution >}} + +## Atualizando imagens + +A política padrão de pull é `IfNotPresent`, a qual faz com que o +{{< glossary_tooltip text="kubelet" term_id="kubelet" >}} ignore +o processo de *pull* da imagem, caso a imagem já exista. Se você prefere sempre forçar o processo de *pull*, +você pode seguir uma das opções abaixo: + +- defina a `imagePullPolicy` do contêiner para `Always`. +- omita `imagePullPolicy` e use `:latest` como a tag para a imagem a ser usada. +- omita o `imagePullPolicy` e a tag da imagem a ser usada. +- habilite o controlador de admissão [AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages). + +Quando `imagePullPolicy` é definido sem um valor específico, ele também é definido como `Always`. + +## Multiarquitetura de imagens com índice de imagens + +Além de fornecer o binário das imagens, um registro de contêiner também pode servir um [índice de imagem do contêiner](https://github.com/opencontainers/image-spec/blob/master/image-index.md). Um índice de imagem pode apontar para múltiplos [manifestos da imagem](https://github.com/opencontainers/image-spec/blob/master/manifest.md) para versões específicas de arquitetura de um contêiner. A ideia é que você possa ter um nome para uma imagem (por exemplo: `pause`, `exemplo/meuconteiner`, `kube-apiserver`) e permitir que diferentes sistemas busquem o binário da imagem correta para a arquitetura de máquina que estão usando. + +O próprio Kubernetes normalmente nomeia as imagens de contêiner com o sufixo `-$(ARCH)`. Para retrocompatibilidade, gere as imagens mais antigas com sufixos. A ideia é gerar a imagem `pause` que tem o manifesto para todas as arquiteturas e `pause-amd64` que é retrocompatível com as configurações anteriores ou arquivos YAML que podem ter codificado as imagens com sufixos. + +## Usando um registro privado + +Os registros privados podem exigir chaves para acessar as imagens deles. +As credenciais podem ser fornecidas de várias maneiras: + - Configurando nós para autenticação em um registro privado + - todos os pods podem ler qualquer registro privado configurado + - requer configuração de nó pelo administrador do cluster + - Imagens pré-obtidas + - todos os pods podem usar qualquer imagem armazenada em cache em um nó + - requer acesso root a todos os nós para configurar + - Especificando ImagePullSecrets em um Pod + - apenas pods que fornecem chaves próprias podem acessar o registro privado + - Extensões locais ou específicas do fornecedor + - se estiver usando uma configuração de nó personalizada, você (ou seu provedor de nuvem) pode implementar seu próprio mecanismo para autenticar o nó ao registro do contêiner.
+ +Essas opções são explicadas com mais detalhes abaixo. + +### Configurando nós para autenticação em um registro privado + +Se você executar o Docker em seus nós, poderá configurar o agente de execução de contêiner Docker +para autenticação em um registro de contêiner privado. + +Essa abordagem é adequada se você puder controlar a configuração do nó. + +{{< note >}} +O Kubernetes padrão é compatível apenas com as seções `auths` e `HttpHeaders` na configuração do Docker. +Auxiliares de credencial do Docker (`credHelpers` ou `credsStore`) não são suportados. +{{< /note >}} + +O Docker armazena chaves de registros privados no arquivo `$HOME/.dockercfg` ou `$HOME/.docker/config.json`. Se você colocar o mesmo arquivo em um dos caminhos da lista de busca abaixo, o kubelet o usa como provedor de credenciais ao obter imagens. + +* `{--root-dir:-/var/lib/kubelet}/config.json` +* `{cwd of kubelet}/config.json` +* `${HOME}/.docker/config.json` +* `/.docker/config.json` +* `{--root-dir:-/var/lib/kubelet}/.dockercfg` +* `{cwd of kubelet}/.dockercfg` +* `${HOME}/.dockercfg` +* `/.dockercfg` + +{{< note >}} +Você talvez tenha que definir `HOME=/root` explicitamente no ambiente do processo kubelet. +{{< /note >}} + +Aqui estão as etapas recomendadas para configurar seus nós para usar um registro privado. Neste +exemplo, execute-as em seu desktop/laptop: + + 1. Execute `docker login [servidor]` para cada conjunto de credenciais que deseja usar. Isso atualiza o `$HOME/.docker/config.json` em seu PC. + 1. Visualize `$HOME/.docker/config.json` em um editor para garantir que ele contém apenas as credenciais que você deseja usar. + 1. Obtenha uma lista de seus nós; por exemplo: + - se você quiser os nomes: `nodes=$( kubectl get nodes -o jsonpath='{range.items[*].metadata}{.name} {end}' )` + - se você deseja obter os endereços IP: `nodes=$( kubectl get nodes -o jsonpath='{range .items[*].status.addresses[?(@.type=="ExternalIP")]}{.address} {end}' )` + 1. Copie seu `.docker/config.json` local para um dos caminhos da lista de busca acima. + - por exemplo, para testar isso: `for n in $nodes; do scp ~/.docker/config.json root@"$n":/var/lib/kubelet/config.json; done` + +{{< note >}} +Para clusters de produção, use uma ferramenta de gerenciamento de configuração para que você possa aplicar esta +configuração em todos os nós que você precisar. +{{< /note >}} + +Verifique se está funcionando criando um pod que usa uma imagem privada; por exemplo: + +```shell +kubectl apply -f - <<EOF +apiVersion: v1 +kind: Pod +metadata: + name: private-image-test-1 +spec: + containers: + - name: uses-private-image + image: $PRIVATE_IMAGE_NAME + imagePullPolicy: Always + command: [ "echo", "SUCCESS" ] +EOF +``` + +Se tudo estiver funcionando, após alguns instantes você poderá consultar os logs do pod (`kubectl logs private-image-test-1`) e ver a saída `SUCCESS`. + +### Imagens pré-obtidas + +{{< note >}} +Essa abordagem é adequada se você puder controlar a configuração do nó. Isto +não funcionará de forma confiável se o seu provedor de nuvem for responsável pelo gerenciamento de nós e os substituir +automaticamente. +{{< /note >}} + +Por padrão, o kubelet tenta realizar um "pull" para cada imagem do registro especificado. +No entanto, se a propriedade `imagePullPolicy` do contêiner for definida como `IfNotPresent` ou `Never`, +então uma imagem local é usada (preferencial ou exclusivamente, respectivamente). + +Se você quiser usar imagens pré-obtidas como um substituto para a autenticação do registro, +você deve garantir que todos os nós no cluster tenham as mesmas imagens pré-obtidas. + +Isso pode ser usado para pré-carregar certas imagens com o intuito de aumentar a velocidade ou como uma alternativa para autenticação em um registro privado. + +Todos os pods terão permissão de leitura a quaisquer imagens pré-obtidas.
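+Um esboço de como o pré-carregamento poderia ser automatizado, supondo acesso SSH de root aos nós, o Docker como agente de execução e um nome de imagem hipotético (`registro.exemplo.com/minha-app:v1`); a variável `$nodes` é a lista de nós obtida nas etapas anteriores:
+
+```shell
+for n in $nodes; do
+  # realiza o pull da imagem em cada nó, deixando-a disponível localmente
+  ssh root@"$n" docker pull registro.exemplo.com/minha-app:v1
+done
+```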
+ +### Especificando imagePullSecrets em um pod + +{{< note >}} +Esta é a abordagem recomendada para executar contêineres com base em imagens +de registros privados. +{{< /note >}} + +O Kubernetes oferece suporte à especificação de chaves de registro de imagem de contêiner em um pod. + +#### Criando um segredo com Docker config + +Execute o seguinte comando, substituindo as palavras em maiúsculas pelos valores apropriados: + +```shell +kubectl create secret docker-registry <nome> --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL +``` + +Se você já tem um arquivo de credenciais do Docker, em vez de usar o +comando acima, você pode importar o arquivo de credenciais como um +{{< glossary_tooltip text="Secret" term_id="secret" >}} do Kubernetes. +[Criar um segredo com base nas credenciais Docker existentes](/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials) explica como configurar isso. + +Isso é particularmente útil se você estiver usando vários registros privados de contêineres, pois `kubectl create secret docker-registry` cria um segredo que +só funciona com um único registro privado. + +{{< note >}} +Os pods só podem fazer referência a *pull secrets* de imagem em seu próprio namespace, +portanto, esse processo precisa ser feito uma vez por namespace. +{{< /note >}} + +#### Referenciando um imagePullSecrets em um pod + +Agora, você pode criar pods que fazem referência a esse segredo adicionando uma seção `imagePullSecrets` +na definição de Pod. + +Por exemplo: + +```shell +cat <<EOF > pod.yaml +apiVersion: v1 +kind: Pod +metadata: + name: foo + namespace: awesomeapps +spec: + containers: + - name: foo + image: janedoe/awesomeapp:v1 + imagePullSecrets: + - name: myregistrykey +EOF +cat <<EOF >> ./kustomization.yaml +resources: +- pod.yaml +EOF +``` + +Isso precisa ser feito para cada pod que está usando um registro privado. + +No entanto, a configuração deste campo pode ser automatizada definindo o imagePullSecrets +em um recurso de [ServiceAccount](/docs/tasks/configure-pod-container/configure-service-account/). + +Verifique [Adicionar ImagePullSecrets a uma conta de serviço](/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account) para obter instruções detalhadas. + +Você pode usar isso em conjunto com um `.docker/config.json` por nó. As credenciais +serão mescladas. + +## Casos de uso + +Existem várias soluções para configurar registros privados. Aqui estão alguns +casos de uso comuns e soluções sugeridas. + +1. Cluster executando apenas imagens não proprietárias (por exemplo, código aberto). Não há necessidade de ocultar imagens. + - Use imagens públicas no Docker hub. + - Nenhuma configuração necessária. + - Alguns provedores de nuvem armazenam em cache ou espelham automaticamente imagens públicas, o que melhora a disponibilidade e reduz o tempo para extrair imagens. +1. Cluster executando algumas imagens proprietárias que devem ser ocultadas para quem está fora da empresa, mas + visíveis para todos os usuários do cluster. + - Use um [registro Docker](https://docs.docker.com/registry/) privado hospedado. + - Pode ser hospedado no [Docker Hub](https://hub.docker.com/signup) ou em outro lugar. + - Configure manualmente .docker/config.json em cada nó conforme descrito acima. + - Ou execute um registro privado interno atrás de seu firewall com permissão de leitura. + - Nenhuma configuração do Kubernetes é necessária.
- Use um serviço de registro de imagem de contêiner que controla o acesso à imagem + - Funcionará melhor com o escalonamento automático do cluster do que com a configuração manual de nós. + - Ou, em um cluster onde alterar a configuração do nó é inconveniente, use `imagePullSecrets`. +1. Cluster com imagens proprietárias, algumas das quais requerem controle de acesso mais rígido. + - Certifique-se de que o [controlador de admissão AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) está ativo. Caso contrário, todos os pods têm potencialmente acesso a todas as imagens. + - Mova dados confidenciais para um recurso "secreto", em vez de empacotá-los em uma imagem. +1. Um cluster multilocatário em que cada locatário precisa de seu próprio registro privado. + - Certifique-se de que o [controlador de admissão AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) está ativo. Caso contrário, todos os Pods de todos os locatários terão potencialmente acesso a todas as imagens. + - Execute um registro privado com autorização necessária. + - Gere credenciais de registro para cada locatário, coloque-as em um segredo e provisione esse segredo em cada namespace de locatário. + - O locatário adiciona esse segredo ao campo `imagePullSecrets` de cada namespace. + + +Se precisar de acesso a vários registros, você pode criar um segredo para cada registro. +O kubelet mesclará qualquer `imagePullSecrets` em um único `.docker/config.json` virtual. + +## {{% heading "whatsnext" %}} + +* Leia a [OCI Image Manifest Specification](https://github.com/opencontainers/image-spec/blob/master/manifest.md) diff --git a/content/pt-br/docs/concepts/containers/runtime-class.md b/content/pt-br/docs/concepts/containers/runtime-class.md new file mode 100644 index 0000000000000..70e42a78f0cff --- /dev/null +++ b/content/pt-br/docs/concepts/containers/runtime-class.md @@ -0,0 +1,179 @@ +--- +title: Classes de execução +content_type: concept +weight: 20 +--- + + + +{{< feature-state for_k8s_version="v1.20" state="stable" >}} + +Essa página descreve o recurso _RuntimeClass_ e a seleção do mecanismo do agente de execução. + +RuntimeClass é uma funcionalidade para selecionar as configurações do agente de execução do contêiner. +A configuração do agente de execução de contêineres é usada para executar os contêineres de um Pod. + + + + +## Motivação + +Você pode configurar um _RuntimeClass_ diferente entre os diferentes Pods para prover +um equilíbrio entre performance e segurança. Por exemplo, se parte de sua carga de +trabalho necessita de um alto nível de garantia de segurança da informação, você pode +optar por executar esses Pods em um agente de execução que usa virtualização de hardware. +Você então terá o benefício do isolamento extra de um agente de execução alternativo, ao +custo de uma latência adicional. + +Você pode ainda usar um _RuntimeClass_ para executar diferentes Pods com o mesmo agente +de execução de contêineres, mas com diferentes configurações. + +## Configuração + +1. Configure a implementação do CRI nos nós (depende do agente de execução) +2. Crie o recurso RuntimeClass correspondente. + +### 1. Configure a implementação do CRI nos nós + +As configurações disponíveis através do RuntimeClass são dependentes da implementação do +_Container Runtime Interface_ ({{< glossary_tooltip term_id="cri" >}}). Veja a documentação correspondente [abaixo](#configuração-do-cri) da +sua implementação CRI para saber como configurá-la.
+ +{{< note >}} +RuntimeClass assume uma configuração homogênea de nós entre todo o cluster por padrão +(o que significa que todos os nós estão configurados da mesma forma no que se refere aos agentes de +execução). Para suportar configurações heterogêneas, veja [Associação](#associação) abaixo. +{{< /note >}} + +As configurações possuem um nome `handler` correspondente, referenciado pelo RuntimeClass. +Esse nome deve ser um valor DNS 1123 válido (letras, números e o caractere `-`). + +### 2. Crie o recurso RuntimeClass correspondente + +As etapas de configuração no passo 1 devem todas estar associadas a um nome para o campo `handler` +que identifica a configuração. Para cada um, crie o objeto RuntimeClass correspondente. + +O recurso RuntimeClass atualmente possui apenas dois campos significativos: o nome do RuntimeClass +(`metadata.name`) e o agente (`handler`). A definição do objeto se parece com o seguinte: + +```yaml +apiVersion: node.k8s.io/v1 # RuntimeClass é definido no grupo de API node.k8s.io +kind: RuntimeClass +metadata: + name: myclass # O nome pelo qual o RuntimeClass será referenciado + # RuntimeClass é um recurso global, e não possui namespace. +handler: myconfiguration # Nome da configuração CRI correspondente +``` + +O nome de um objeto RuntimeClass deve ser um +[nome de subdomínio DNS](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) válido. + +{{< note >}} +É recomendado que operações de escrita no objeto RuntimeClass (criar/atualizar/patch/apagar) +sejam restritas a administradores do cluster. Isso geralmente é o padrão. Veja [Visão Geral +de autorizações](/docs/reference/access-authn-authz/authorization/) para maiores detalhes. +{{< /note >}} + +## Uso + +Uma vez que as classes de execução estão configuradas no cluster, usá-las é relativamente +simples. Especifique um `runtimeClassName` na especificação do Pod. Por exemplo: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: mypod +spec: + runtimeClassName: myclass + # ... +``` + +Isso irá instruir o kubelet a usar o RuntimeClass nomeado acima (myclass) para esse Pod. Se +o nome do RuntimeClass não existir, ou o CRI não puder executar a solicitação, o Pod entrará na [fase +final](/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) `Failed`. Procure por um +[evento](/docs/tasks/debug-application-cluster/debug-application-introspection/) correspondente +com a mensagem de erro. + +Se nenhum `runtimeClassName` for especificado, o RuntimeHandler padrão será utilizado, o que é equivalente +ao comportamento quando a funcionalidade de RuntimeClass está desativada. + +### Configuração do CRI + +Para maiores detalhes de configuração dos agentes de execução CRI, veja [instalação do CRI](/docs/setup/production-environment/container-runtimes/). + +#### dockershim + +O CRI dockershim embutido no Kubernetes não suporta outros agentes de execução. + +#### {{< glossary_tooltip term_id="containerd" >}} + +Agentes de execução são configurados através da configuração do containerd em +`/etc/containerd/config.toml`. Agentes válidos são configurados sob a seção de `runtimes`: + +``` +[plugins.cri.containerd.runtimes.${HANDLER_NAME}] +``` + +Veja a documentação de configuração do containerd para maiores detalhes: +https://github.com/containerd/cri/blob/master/docs/config.md + +#### {{< glossary_tooltip term_id="cri-o" >}} + +Agentes de execução são configurados através da configuração do CRI-O em `/etc/crio/crio.conf`.
+Agentes válidos são configurados na seção [crio.runtime
+table](https://github.com/cri-o/cri-o/blob/master/docs/crio.conf.5.md#crioruntime-table):
+
+```
+[crio.runtime.runtimes.${HANDLER_NAME}]
+  runtime_path = "${PATH_TO_BINARY}"
+```
+
+Veja a [documentação de configuração](https://raw.githubusercontent.com/cri-o/cri-o/9f11d1d/docs/crio.conf.5.md) do CRI-O para maiores detalhes.
+
+## Associação
+
+{{< feature-state for_k8s_version="v1.16" state="beta" >}}
+
+Ao especificar o campo `scheduling` para um RuntimeClass, você pode definir restrições para
+garantir que os Pods executando com esse RuntimeClass sejam agendados em nós que
+os suportem. Se o `scheduling` não estiver configurado, assume-se que esse RuntimeClass
+é suportado por todos os nós.
+
+Para garantir que os Pods sejam executados em um nó que suporte um RuntimeClass específico,
+aquele conjunto de nós deve possuir uma marca/label comum que é selecionada pelo campo
+`runtimeclass.scheduling.nodeSelector`. O nodeSelector do RuntimeClass é combinado com o
+nodeSelector do Pod em tempo de admissão, obtendo a intersecção do conjunto de nós selecionado
+por cada. Se existir um conflito, o Pod será rejeitado.
+
+Se os nós suportados possuírem marcação de restrição (_taint_) para impedir que Pods com uma
+classe de execução diferente executem no nó, você pode adicionar o campo `tolerations`
+ao objeto RuntimeClass. Assim como com o `nodeSelector`, o `tolerations` é combinado com
+o campo `tolerations` do Pod em tempo de admissão, efetivamente obtendo a intersecção do
+conjunto de nós aplicáveis para cada.
+
+Para saber mais sobre a configuração de seleção de nós e tolerâncias, veja [Associando Pods a
+Nós](/docs/concepts/scheduling-eviction/assign-pod-node/).
+
+### Sobrecarga de Pods
+
+{{< feature-state for_k8s_version="v1.18" state="beta" >}}
+
+Você pode especificar os recursos extras que estão associados à execução de um Pod. Declarar esses
+recursos extras permite ao cluster (incluindo o agendador/scheduler de pods) contabilizar
+esses recursos ao tomar decisões sobre Pods e recursos. Para usar a contabilização
+desses recursos extras, você deve estar com o [feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
+PodOverhead habilitado (ele já está habilitado por padrão).
+
+Os recursos extras utilizados são especificados no objeto RuntimeClass através do campo `overhead`.
+Ao usar esses campos, você especifica o uso extra de recursos necessários para executar
+Pods utilizando-se desse RuntimeClass e assim contabilizar esses recursos para o Kubernetes.
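+
+Como ilustração, segue um esboço mínimo que combina os campos `scheduling` e `overhead`
+descritos acima em um único objeto RuntimeClass. O nome do handler, a label e o taint
+abaixo são meramente hipotéticos e dependem da sua configuração de nós e de CRI:
+
+```yaml
+apiVersion: node.k8s.io/v1
+kind: RuntimeClass
+metadata:
+  name: myclass-vm            # nome hipotético
+handler: myconfiguration      # nome hipotético de configuração CRI
+scheduling:
+  # Pods com esse RuntimeClass só serão agendados em nós com essa label (hipotética).
+  nodeSelector:
+    runtime: virtualizado
+  # Tolerância combinada com as tolerâncias do Pod em tempo de admissão.
+  tolerations:
+  - key: dedicated
+    operator: Equal
+    value: vm-runtime
+    effect: NoSchedule
+overhead:
+  # Recursos extras contabilizados para cada Pod executado com esse RuntimeClass.
+  podFixed:
+    memory: "120Mi"
+    cpu: "250m"
+```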
+
+
+## {{% heading "whatsnext" %}}
+
+
+- [RuntimeClass Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md)
+- [RuntimeClass Scheduling Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md#runtimeclass-scheduling)
+- Leia mais sobre [Sobrecarga de Pods](/docs/concepts/scheduling-eviction/pod-overhead/)
+- [PodOverhead Feature Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md)
diff --git a/content/pt/docs/concepts/extend-kubernetes/_index.md b/content/pt-br/docs/concepts/extend-kubernetes/_index.md
similarity index 100%
rename from content/pt/docs/concepts/extend-kubernetes/_index.md
rename to content/pt-br/docs/concepts/extend-kubernetes/_index.md
diff --git a/content/pt/docs/concepts/extend-kubernetes/api-extension/_index.md b/content/pt-br/docs/concepts/extend-kubernetes/api-extension/_index.md
similarity index 100%
rename from content/pt/docs/concepts/extend-kubernetes/api-extension/_index.md
rename to content/pt-br/docs/concepts/extend-kubernetes/api-extension/_index.md
diff --git a/content/pt/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md b/content/pt-br/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md
similarity index 100%
rename from content/pt/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md
rename to content/pt-br/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md
diff --git a/content/pt-br/docs/concepts/extend-kubernetes/compute-storage-net/_index.md b/content/pt-br/docs/concepts/extend-kubernetes/compute-storage-net/_index.md
new file mode 100644
index 0000000000000..6b6090a666fee
--- /dev/null
+++ b/content/pt-br/docs/concepts/extend-kubernetes/compute-storage-net/_index.md
@@ -0,0 +1,4 @@
+---
+title: Extensões de Computação, armazenamento e redes
+weight: 30
+---
diff --git a/content/pt-br/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md b/content/pt-br/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md
new file mode 100644
index 0000000000000..3d61ab72cc0d6
--- /dev/null
+++ b/content/pt-br/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md
@@ -0,0 +1,209 @@
+---
+title: Plugins de rede
+content_type: concept
+weight: 10
+---
+
+
+
+Plugins de rede no Kubernetes podem ser dos seguintes tipos:
+
+* Plugins CNI: Aderentes à especificação [Container Network Interface](https://github.com/containernetworking/cni) (CNI), desenhados para interoperabilidade.
+  * Kubernetes usa a versão [v0.4.0](https://github.com/containernetworking/cni/blob/spec-v0.4.0/SPEC.md) da especificação CNI.
+* Plugin kubenet: Implementa o `cbr0` básico usando os plugins CNI `bridge` e `host-local`
+
+
+
+## Instalação
+
+O kubelet possui um único plugin de rede padrão e uma rede padrão comum para todo o cluster.
+Ele verifica esse plugin quando inicia, lembra-se do que encontrou, e executa o plugin selecionado
+em momentos oportunos dentro do ciclo de vida de um Pod (isso é verdadeiro apenas com o Docker,
+uma vez que o CRI gerencia seus próprios plugins de CNI). Existem dois parâmetros de linha de comando
+no Kubelet para se ter em mente ao usar plugins:
+
+* `cni-bin-dir`: O Kubelet verifica esse diretório em busca de plugins na inicialização
+* `network-plugin`: O plugin de rede que deve ser utilizado do diretório configurado em
+`cni-bin-dir`.
Deve ser igual ao nome configurado por um plugin no diretório de plugins.
+Para plugins de CNI, isso equivale ao valor `cni`.
+
+## Requisitos de plugins de rede
+
+Além de prover a [interface `NetworkPlugin`](https://github.com/kubernetes/kubernetes/tree/{{< param "fullversion" >}}/pkg/kubelet/dockershim/network/plugins.go)
+para configuração da rede do pod, o plugin pode necessitar de suporte específico ao
+kube-proxy.
+O proxy iptables obviamente depende do iptables, e o plugin deve garantir que o
+tráfego do contêiner esteja disponível para o iptables. Por exemplo, se o plugin
+conecta os contêineres à _Linux bridge_, o plugin deve configurar a diretiva de
+_sysctl_ `net/bridge/bridge-nf-call-iptables` com o valor `1` para garantir que o
+proxy iptables opere normalmente. Se o plugin não faz uso da _Linux Bridge_ (mas outro
+mecanismo, como Open vSwitch) ele deve garantir que o tráfego do contêiner é roteado
+apropriadamente para o proxy.
+
+Por padrão, se nenhum plugin de rede é configurado no kubelet, o plugin `noop` é utilizado,
+o qual configura `net/bridge/bridge-nf-call-iptables=1` para garantir que configurações simples
+(como Docker com _bridge Linux_) operem corretamente com o proxy iptables.
+
+### CNI
+
+O plugin de CNI é selecionado utilizando-se da opção `--network-plugin=cni` na inicialização do kubelet.
+O Kubelet lê um arquivo do diretório especificado em `--cni-conf-dir` (padrão `/etc/cni/net.d`)
+e usa a configuração de CNI desse arquivo para configurar a rede de cada Pod. O arquivo de
+configuração do CNI deve usar a [especificação de CNI](https://github.com/containernetworking/cni/blob/master/SPEC.md#network-configuration),
+e qualquer plugin referenciado nesse arquivo deve estar presente no diretório
+`--cni-bin-dir` (padrão `/opt/cni/bin`).
+
+Se existirem múltiplos arquivos de configuração no diretório, o kubelet usa o arquivo de
+configuração que vier primeiro pelo nome, em ordem alfabética.
+
+Adicionalmente ao plugin de CNI especificado no arquivo de configuração, o Kubernetes requer
+o plugin CNI padrão [`lo`](https://github.com/containernetworking/plugins/blob/master/plugins/main/loopback/loopback.go) ao menos na versão 0.2.0.
+
+#### Suporte a hostPort
+
+O plugin de rede CNI suporta `hostPort`. Você pode utilizar o plugin oficial
+[portmap](https://github.com/containernetworking/plugins/tree/master/plugins/meta/portmap)
+ou usar seu próprio plugin com a funcionalidade de _portMapping_.
+
+Caso você deseje habilitar o suporte a `hostPort`, você deve especificar
+`portMappings capability` no seu `cni-conf-dir`.
+Por exemplo:
+
+```json
+{
+  "name": "k8s-pod-network",
+  "cniVersion": "0.3.0",
+  "plugins": [
+    {
+      "type": "calico",
+      "log_level": "info",
+      "datastore_type": "kubernetes",
+      "nodename": "127.0.0.1",
+      "ipam": {
+        "type": "host-local",
+        "subnet": "usePodCidr"
+      },
+      "policy": {
+        "type": "k8s"
+      },
+      "kubernetes": {
+        "kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
+      }
+    },
+    {
+      "type": "portmap",
+      "capabilities": {"portMappings": true}
+    }
+  ]
+}
+```
+
+#### Suporte a controle de banda
+
+**Funcionalidade experimental**
+
+O plugin de rede CNI também suporta o controle de banda de entrada e saída.
+Você pode utilizar o plugin oficial [bandwidth](https://github.com/containernetworking/plugins/tree/master/plugins/meta/bandwidth)
+ou usar seu próprio plugin de controle de banda.
+
+Se você habilitar o suporte ao controle de banda, você deve adicionar o plugin `bandwidth`
+no seu arquivo de configuração de CNI (padrão `/etc/cni/net.d`) e garantir que o programa
+exista no diretório de binários do CNI (padrão `/opt/cni/bin`).
+
+```json
+{
+  "name": "k8s-pod-network",
+  "cniVersion": "0.3.0",
+  "plugins": [
+    {
+      "type": "calico",
+      "log_level": "info",
+      "datastore_type": "kubernetes",
+      "nodename": "127.0.0.1",
+      "ipam": {
+        "type": "host-local",
+        "subnet": "usePodCidr"
+      },
+      "policy": {
+        "type": "k8s"
+      },
+      "kubernetes": {
+        "kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
+      }
+    },
+    {
+      "type": "bandwidth",
+      "capabilities": {"bandwidth": true}
+    }
+  ]
+}
```
+
+Agora você pode adicionar as anotações `kubernetes.io/ingress-bandwidth` e
+`kubernetes.io/egress-bandwidth` em seu pod.
+Por exemplo:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  annotations:
+    kubernetes.io/ingress-bandwidth: 1M
+    kubernetes.io/egress-bandwidth: 1M
+...
+```
+
+### kubenet
+
+Kubenet é um plugin de rede muito simples, existente apenas no Linux. Ele não
+implementa funcionalidades mais avançadas, como rede entre nós ou políticas de rede.
+Ele é geralmente utilizado junto a um provedor de nuvem que configura as regras de
+roteamento para comunicação entre os nós, ou em ambientes com apenas um nó.
+
+O Kubenet cria uma _interface bridge_ no Linux chamada `cbr0` e cria um par _veth_
+para cada um dos pods com o host como a outra ponta desse par, conectado à `cbr0`.
+Na interface do lado do Pod, um endereço IP é alocado de uma faixa associada ao nó,
+por meio de configuração do nó ou do controller-manager. Na interface `cbr0`
+é associado o MTU equivalente ao menor MTU de uma interface de rede do host.
+
+Esse plugin possui alguns requisitos:
+
+* Os plugins CNI padrão `bridge`, `lo` e `host-local` são obrigatórios, ao menos na
+versão 0.2.0. O Kubenet buscará inicialmente esses plugins no diretório `/opt/cni/bin`.
+Especifique a opção `cni-bin-dir` no kubelet para fornecer um diretório adicional
+de busca. A primeira correspondência encontrada será utilizada.
+* O kubelet deve ser executado com a opção `--network-plugin=kubenet` para habilitar esse plugin.
+* O Kubelet deve ainda ser executado com a opção `--non-masquerade-cidr=` para
+garantir que o tráfego de IPs para fora dessa faixa seja mascarado.
+* O nó deve possuir uma sub-rede associada, através da opção `--pod-cidr` configurada
+na inicialização do kubelet, ou as opções `--allocate-node-cidrs=true --cluster-cidr=`
+utilizadas na inicialização do _controller-manager_.
+
+### Customizando o MTU (com kubenet)
+
+O MTU deve sempre ser configurado corretamente para obter-se a melhor performance de
+rede. Os plugins de rede geralmente tentam detectar uma configuração correta de MTU,
+porém algumas vezes a lógica não irá resultar em uma configuração adequada. Por exemplo,
+se a _Docker bridge_ ou alguma outra interface possuir um MTU pequeno, o kubenet irá
+selecionar aquele MTU. Ou caso você esteja utilizando encapsulamento IPSEC, o MTU deve
+ser reduzido, e esse cálculo não faz parte do escopo da maioria dos plugins de rede.
+
+Sempre que necessário, você pode configurar explicitamente o MTU com a opção `network-plugin-mtu`
+no kubelet. Por exemplo, na AWS, o MTU da `eth0` geralmente é 9001, então você deve
+especificar `--network-plugin-mtu=9001`. Se você estiver usando IPSEC, você deve reduzir
+o MTU para permitir o encapsulamento excedente; por exemplo: `--network-plugin-mtu=8773`.
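+
+A título de ilustração, um esboço de como as opções do kubelet descritas acima poderiam
+ser combinadas ao usar o kubenet. Os valores de CIDR abaixo são meramente hipotéticos:
+
+```bash
+# Esboço hipotético: kubelet usando kubenet com MTU customizado (por exemplo, na AWS).
+# Ajuste os CIDRs e o MTU conforme a sua rede.
+kubelet --network-plugin=kubenet \
+  --non-masquerade-cidr=10.0.0.0/8 \
+  --network-plugin-mtu=9001
+```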
+
+Essa opção faz parte do plugin de rede. Atualmente **apenas o kubenet suporta a configuração
+`network-plugin-mtu`**.
+
+## Resumo de uso
+
+* `--network-plugin=cni` especifica que devemos usar o plugin de rede `cni` com os
+binários do plugin localizados em `--cni-bin-dir` (padrão `/opt/cni/bin`) e as
+configurações do plugin localizadas em `--cni-conf-dir` (padrão `/etc/cni/net.d`).
+* `--network-plugin=kubenet` especifica que iremos usar o plugin de rede `kubenet`
+com os plugins CNI `bridge`, `lo` e `host-local` localizados em `/opt/cni/bin` ou `cni-bin-dir`.
+* `--network-plugin-mtu=9001` especifica o MTU a ser utilizado, atualmente apenas em uso
+pelo plugin de rede `kubenet`.
+
+## {{% heading "whatsnext" %}}
diff --git a/content/pt/docs/concepts/extend-kubernetes/operator.md b/content/pt-br/docs/concepts/extend-kubernetes/operator.md
similarity index 100%
rename from content/pt/docs/concepts/extend-kubernetes/operator.md
rename to content/pt-br/docs/concepts/extend-kubernetes/operator.md
diff --git a/content/pt-br/docs/concepts/overview/_index.md b/content/pt-br/docs/concepts/overview/_index.md
new file mode 100644
index 0000000000000..f254849b77406
--- /dev/null
+++ b/content/pt-br/docs/concepts/overview/_index.md
@@ -0,0 +1,7 @@
+---
+title: "Visão Geral"
+weight: 20
+description: Obtenha uma visão em alto nível do Kubernetes e dos componentes a partir dos quais ele é construído.
+sitemap:
+  priority: 0.9
+---
diff --git a/content/pt-br/docs/concepts/overview/components.md b/content/pt-br/docs/concepts/overview/components.md
new file mode 100644
index 0000000000000..b03946c4ae2b3
--- /dev/null
+++ b/content/pt-br/docs/concepts/overview/components.md
@@ -0,0 +1,117 @@
+---
+reviewers:
+title: Componentes do Kubernetes
+content_type: concept
+description: >
+  Um cluster Kubernetes consiste em componentes que representam a camada de gerenciamento, e um conjunto de máquinas chamadas nós.
+weight: 20
+card:
+  name: concepts
+  weight: 20
+---
+
+
+Ao implantar o Kubernetes, você obtém um cluster.
+{{< glossary_definition term_id="cluster" length="all" prepend="Um cluster Kubernetes consiste em">}}
+
+Este documento descreve os vários componentes que você precisa ter para implantar um cluster Kubernetes completo e funcional.
+
+Esse é o diagrama de um cluster Kubernetes com todos os componentes interligados.
+
+![Componentes do Kubernetes](/images/docs/components-of-kubernetes.svg)
+
+
+
+## Componentes da camada de gerenciamento
+
+Os componentes da camada de gerenciamento tomam decisões globais sobre o cluster (por exemplo, agendamento de _pods_), bem como detectam e respondem aos eventos do cluster (por exemplo, iniciando um novo _{{< glossary_tooltip text="pod" term_id="pod" >}}_ quando o campo `replicas` de um _Deployment_ não está atendido).
+
+Os componentes da camada de gerenciamento podem ser executados em qualquer máquina do cluster. Contudo, para simplificar, os _scripts_ de configuração normalmente iniciam todos os componentes da camada de gerenciamento na mesma máquina, e não executam contêineres de usuário nessa máquina. Veja [Construindo clusters de alta disponibilidade](/docs/admin/high-availability/) para um exemplo de configuração de múltiplas VMs para camada de gerenciamento (_multi-main-VM_).
+
+### kube-apiserver
+
+{{< glossary_definition term_id="kube-apiserver" length="all" >}}
+
+### etcd
+
+{{< glossary_definition term_id="etcd" length="all" >}}
+
+### kube-scheduler
+
+{{< glossary_definition term_id="kube-scheduler" length="all" >}}
+
+### kube-controller-manager
+
+{{< glossary_definition term_id="kube-controller-manager" length="all" >}}
+
+Alguns tipos desses controladores são:
+
+  * Controlador de nó: responsável por perceber e responder quando os nós caem.
+  * Controlador de _Job_: observa os objetos _Job_ que representam tarefas únicas e, em seguida, cria _pods_ para executar essas tarefas até a conclusão.
+  * Controlador de _endpoints_: preenche o objeto _Endpoints_ (ou seja, junta os Serviços e os _pods_).
+  * Controladores de conta de serviço e de _token_: criam contas padrão e _tokens_ de acesso de API para novos _namespaces_.
+
+### cloud-controller-manager
+
+{{< glossary_definition term_id="cloud-controller-manager" length="short" >}}
+
+O cloud-controller-manager executa apenas controladores que são específicos para seu provedor de nuvem.
+Se você estiver executando o Kubernetes em suas próprias instalações ou em um ambiente de aprendizagem dentro de seu
+próprio PC, o cluster não possui um gerenciador de controlador de nuvem.
+
+Tal como acontece com o kube-controller-manager, o cloud-controller-manager combina vários ciclos de controle logicamente independentes em um binário único que você executa como um processo único. Você pode escalar horizontalmente (executar mais de uma cópia) para melhorar o desempenho ou para auxiliar na tolerância a falhas.
+
+Os seguintes controladores podem ter dependências de provedor de nuvem:
+
+  * Controlador de nó: para verificar junto ao provedor de nuvem se um nó foi excluído da nuvem após parar de responder.
+  * Controlador de rota: para configurar rotas na infraestrutura de nuvem subjacente.
+  * Controlador de serviço: para criar, atualizar e excluir balanceadores de carga do provedor de nuvem.
+
+## Componentes de nó
+
+Os componentes de nó são executados em todos os nós, mantendo os _pods_ em execução e fornecendo o ambiente de execução do Kubernetes.
+
+### kubelet
+
+{{< glossary_definition term_id="kubelet" length="all" >}}
+
+### kube-proxy
+
+{{< glossary_definition term_id="kube-proxy" length="all" >}}
+
+### Agente de execução de contêiner
+
+{{< glossary_definition term_id="container-runtime" length="all" >}}
+
+## Complementos (addons)
+
+Complementos (_addons_) usam recursos do Kubernetes ({{< glossary_tooltip term_id="daemonset" >}}, {{< glossary_tooltip term_id="deployment" >}}, etc) para implementar funcionalidades do cluster. Como fornecem funcionalidades em nível do cluster, recursos de _addons_ que necessitem ser criados dentro de um _namespace_ pertencem ao _namespace_ `kube-system`.
+
+Alguns _addons_ selecionados são descritos abaixo; para uma lista estendida dos _addons_ disponíveis, por favor consulte [Addons](/docs/concepts/cluster-administration/addons/).
+
+### DNS
+
+Embora os outros complementos não sejam estritamente necessários, todos os clusters do Kubernetes devem ter um [DNS do cluster](/docs/concepts/services-networking/dns-pod-service/), já que muitos exemplos dependem disso.
+
+O DNS do cluster é um servidor DNS, além de outros servidores DNS em seu ambiente, que fornece registros DNS para serviços do Kubernetes.
+
+Os contêineres iniciados pelo Kubernetes incluem automaticamente esse servidor DNS em suas pesquisas DNS.
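+
+Como ilustração, um esboço (com nomes meramente hipotéticos) de um Pod que demonstra
+essa resolução automática de nomes para um Service chamado `my-service` no namespace `default`:
+
+```yaml
+# Esboço hipotético: o DNS do cluster cria automaticamente o registro
+# my-service.default.svc.cluster.local para um Service "my-service".
+apiVersion: v1
+kind: Pod
+metadata:
+  name: teste-dns
+spec:
+  restartPolicy: Never
+  containers:
+  - name: busybox
+    image: busybox:1.28
+    # O kubelet injeta o servidor DNS do cluster no resolv.conf do contêiner,
+    # então a consulta abaixo resolve sem configuração adicional.
+    command: ["nslookup", "my-service.default.svc.cluster.local"]
+```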
+
+### Web UI (Dashboard)
+
+[Dashboard](/docs/tasks/access-application-cluster/web-ui-dashboard/) é uma interface de usuário Web, de uso geral, para clusters do Kubernetes. Ele permite que os usuários gerenciem e solucionem problemas de aplicações em execução no cluster, bem como o próprio cluster.
+
+### Monitoramento de recursos do contêiner
+
+[Monitoramento de recursos do contêiner](/docs/tasks/debug-application-cluster/resource-usage-monitoring/) registra métricas de série temporal genéricas sobre os contêineres em um banco de dados central e fornece uma interface de usuário para navegar por esses dados.
+
+### Logging a nível do cluster
+
+Um mecanismo de [_logging_ a nível do cluster](/docs/concepts/cluster-administration/logging/) é responsável por guardar os _logs_ dos contêineres em um armazenamento central de _logs_ com uma interface para navegação/pesquisa.
+
+## {{% heading "whatsnext" %}}
+
+* Aprenda sobre [Nós](/docs/concepts/architecture/nodes/).
+* Aprenda sobre [Controladores](/docs/concepts/architecture/controller/).
+* Aprenda sobre [kube-scheduler](/docs/concepts/scheduling-eviction/kube-scheduler/).
+* Leia a [documentação](https://etcd.io/docs/) oficial do **etcd**.
diff --git a/content/pt-br/docs/concepts/overview/what-is-kubernetes.md b/content/pt-br/docs/concepts/overview/what-is-kubernetes.md
new file mode 100644
index 0000000000000..29473a7f75ed6
--- /dev/null
+++ b/content/pt-br/docs/concepts/overview/what-is-kubernetes.md
@@ -0,0 +1,94 @@
+---
+reviewers:
+title: O que é Kubernetes?
+description: >
+  Kubernetes é uma plataforma de código aberto, portável e extensível para o gerenciamento de cargas de trabalho e serviços distribuídos em contêineres, que facilita tanto a configuração declarativa quanto a automação. Ele possui um ecossistema grande, e de rápido crescimento. Serviços, suporte, e ferramentas para Kubernetes estão amplamente disponíveis.
+content_type: concept
+weight: 10
+card:
+  name: concepts
+  weight: 10
+sitemap:
+  priority: 0.9
+---
+
+
+Essa página é uma visão geral do Kubernetes.
+
+
+
+Kubernetes é uma plataforma de código aberto, portável e extensível para o gerenciamento de cargas de trabalho e serviços distribuídos em contêineres, que facilita tanto a configuração declarativa quanto a automação. Ele possui um ecossistema grande, e de rápido crescimento. Serviços, suporte, e ferramentas para Kubernetes estão amplamente disponíveis.
+
+O Google tornou Kubernetes um projeto de código aberto em 2014. O Kubernetes combina [mais de 15 anos de experiência do Google](/blog/2015/04/borg-predecessor-to-kubernetes/) executando cargas de trabalho produtivas em escala, com as melhores ideias e práticas da comunidade.
+
+O nome **Kubernetes** tem origem no grego, significando _timoneiro_ ou _piloto_. **K8s** é a abreviação derivada pela troca das oito letras "ubernete" por "8", se tornando _K"8"s_.
+
+## Voltando no tempo
+
+Vamos voltar no tempo para entender por que o Kubernetes é tão útil.
+
+![Evolução das implantações](/images/docs/Container_Evolution.svg)
+
+**Era da implantação tradicional:** No início, as organizações executavam aplicações em servidores físicos. Não havia como definir limites de recursos para aplicações em um mesmo servidor físico, e isso causava problemas de alocação de recursos. Por exemplo, se várias aplicações fossem executadas em um mesmo servidor físico, poderia haver situações em que uma aplicação ocupasse a maior parte dos recursos e, como resultado, o desempenho das outras aplicações seria inferior.
Uma solução para isso seria executar cada aplicação em um servidor físico diferente. Mas isso não escalava, pois os recursos eram subutilizados, e se tornava custoso para as organizações manter muitos servidores físicos.
+
+**Era da implantação virtualizada:** Como solução, a virtualização foi introduzida. Esse modelo permite que você execute várias máquinas virtuais (VMs) em uma única CPU de um servidor físico. A virtualização permite que as aplicações sejam isoladas entre as VMs, e ainda fornece um nível de segurança, pois as informações de uma aplicação não podem ser acessadas livremente por outras aplicações.
+
+A virtualização permite melhor utilização de recursos em um servidor físico, e permite melhor escalabilidade porque uma aplicação pode ser adicionada ou atualizada facilmente, reduz os custos de hardware e muito mais. Com a virtualização, você pode apresentar um conjunto de recursos físicos como um cluster de máquinas virtuais descartáveis.
+
+Cada VM é uma máquina completa que executa todos os componentes, incluindo seu próprio sistema operacional, além do hardware virtualizado.
+
+**Era da implantação em contêineres:** Contêineres são semelhantes às VMs, mas têm propriedades de isolamento flexibilizadas para compartilhar o sistema operacional (SO) entre as aplicações. Portanto, os contêineres são considerados leves. Semelhante a uma VM, um contêiner tem seu próprio sistema de arquivos, compartilhamento de CPU, memória, espaço de processo e muito mais. Como eles estão separados da infraestrutura subjacente, eles são portáveis entre nuvens e distribuições de sistema operacional.
+
+Contêineres se tornaram populares porque eles fornecem benefícios extras, tais como:
+
+* Criação e implantação ágeis de aplicações: aumento da facilidade e eficiência na criação de imagem de contêiner comparado ao uso de imagem de VM.
+* Desenvolvimento, integração e implantação contínuos: fornece capacidade de criação e de implantação de imagens de contêiner de forma confiável e frequente, com a funcionalidade de efetuar reversões rápidas e eficientes (devido à imutabilidade da imagem).
+* Separação de interesses entre Desenvolvimento e Operações: crie imagens de contêineres de aplicações no momento de construção/liberação em vez de no momento de implantação, desacoplando as aplicações da infraestrutura.
+* A capacidade de observação (Observabilidade) não apenas apresenta informações e métricas no nível do sistema operacional, mas também a integridade da aplicação e outros sinais.
+* Consistência ambiental entre desenvolvimento, teste e produção: funciona da mesma forma em um laptop e na nuvem.
+* Portabilidade de distribuição de nuvem e sistema operacional: executa no Ubuntu, RHEL, CoreOS, localmente, nas principais nuvens públicas e em qualquer outro lugar.
+* Gerenciamento centrado em aplicações: eleva o nível de abstração da execução em um sistema operacional em hardware virtualizado à execução de uma aplicação em um sistema operacional usando recursos lógicos.
+* Microsserviços fracamente acoplados, distribuídos, elásticos e livres: as aplicações são divididas em partes menores e independentes e podem ser implantadas e gerenciadas dinamicamente - não uma pilha monolítica em execução em uma grande máquina de propósito único.
+* Isolamento de recursos: desempenho previsível de aplicações.
+* Utilização de recursos: alta eficiência e densidade.
+
+## Por que você precisa do Kubernetes e o que ele pode fazer {#why-you-need-kubernetes-and-what-can-it-do}
+
+Os contêineres são uma boa maneira de agrupar e executar suas aplicações. Em um ambiente de produção, você precisa gerenciar os contêineres que executam as aplicações e garantir que não haja tempo de inatividade. Por exemplo, se um contêiner cair, outro contêiner precisa ser iniciado. Não seria mais fácil se esse comportamento fosse controlado por um sistema?
+
+É assim que o Kubernetes vem ao resgate! O Kubernetes oferece uma estrutura para executar sistemas distribuídos de forma resiliente. Ele cuida do escalonamento e da recuperação a falhas de sua aplicação, fornece padrões de implantação e muito mais. Por exemplo, o Kubernetes pode gerenciar facilmente uma implantação no método canário para seu sistema.
+
+O Kubernetes oferece a você:
+
+* **Descoberta de serviço e balanceamento de carga**
+O Kubernetes pode expor um contêiner usando o nome DNS ou seu próprio endereço IP. Se o tráfego para um contêiner for alto, o Kubernetes pode balancear a carga e distribuir o tráfego de rede para que a implantação seja estável.
+* **Orquestração de armazenamento**
+O Kubernetes permite que você monte automaticamente um sistema de armazenamento de sua escolha, como armazenamentos locais, provedores de nuvem pública e muito mais.
+* **Lançamentos e reversões automatizadas**
+Você pode descrever o estado desejado para seus contêineres implantados usando o Kubernetes, e ele pode alterar o estado real para o estado desejado em um ritmo controlado. Por exemplo, você pode automatizar o Kubernetes para criar novos contêineres para sua implantação, remover os contêineres existentes e adotar todos os seus recursos para o novo contêiner.
+* **Empacotamento binário automático**
+Você fornece ao Kubernetes um cluster de nós que pode ser usado para executar tarefas nos contêineres. Você informa ao Kubernetes de quanta CPU e memória (RAM) cada contêiner precisa. O Kubernetes pode encaixar contêineres em seus nós para fazer o melhor uso de seus recursos.
+* **Autocorreção**
+O Kubernetes reinicia os contêineres que falham, substitui os contêineres, elimina os contêineres que não respondem à verificação de integridade definida pelo usuário e não os anuncia aos clientes até que estejam prontos para servir.
+* **Gerenciamento de configuração e de segredos**
+O Kubernetes permite armazenar e gerenciar informações confidenciais, como senhas, tokens OAuth e chaves SSH. Você pode implantar e atualizar segredos e configuração de aplicações sem reconstruir suas imagens de contêiner e sem expor segredos em sua pilha de configuração.
+
+## O que o Kubernetes não é
+
+O Kubernetes não é um sistema PaaS (plataforma como serviço) tradicional e completo. Como o Kubernetes opera no nível do contêiner, e não no nível do hardware, ele fornece alguns recursos geralmente aplicáveis comuns às ofertas de PaaS, como implantação, escalonamento, balanceamento de carga, e permite que os usuários integrem suas soluções de _logging_, monitoramento e alerta. No entanto, o Kubernetes não é monolítico, e essas soluções padrão são opcionais e conectáveis. O Kubernetes fornece os blocos de construção para a construção de plataformas de desenvolvimento, mas preserva a escolha e flexibilidade do usuário onde é importante.
+
+Kubernetes:
+
+* Não limita os tipos de aplicações suportadas.
O Kubernetes visa oferecer suporte a uma variedade extremamente diversa de cargas de trabalho, incluindo cargas de trabalho sem estado, com estado e de processamento de dados. Se uma aplicação puder ser executada em um contêiner, ela deve ser executada perfeitamente no Kubernetes.
+* Não implanta código-fonte e não constrói sua aplicação. Os fluxos de trabalho de integração contínua, entrega e implantação (CI/CD) são determinados pelas culturas e preferências da organização, bem como pelos requisitos técnicos.
+* Não fornece serviços em nível de aplicação, tais como middleware (por exemplo, barramentos de mensagem), estruturas de processamento de dados (por exemplo, Spark), bancos de dados (por exemplo, MySQL), caches, nem sistemas de armazenamento em cluster (por exemplo, Ceph), como serviços integrados. Esses componentes podem ser executados no Kubernetes e/ou podem ser acessados por aplicações executadas no Kubernetes por meio de mecanismos portáteis, como o [Open Service Broker](https://openservicebrokerapi.org/).
+* Não dita soluções de _logging_, monitoramento ou alerta. Ele fornece algumas integrações como prova de conceito e mecanismos para coletar e exportar métricas.
+* Não fornece nem exige uma linguagem/sistema de configuração (por exemplo, Jsonnet). Ele fornece uma API declarativa que pode ser direcionada por formas arbitrárias de especificações declarativas.
+* Não fornece nem adota sistemas abrangentes de configuração de máquinas, manutenção, gerenciamento ou autocorreção.
+* Adicionalmente, o Kubernetes não é um mero sistema de orquestração. Na verdade, ele elimina a necessidade de orquestração. A definição técnica de orquestração é a execução de um fluxo de trabalho definido: primeiro faça A, depois B e depois C. Em contraste, o Kubernetes compreende um conjunto de processos de controle independentes e combináveis que conduzem continuamente o estado atual em direção ao estado desejado fornecido. Não importa como você vai de A para C. O controle centralizado também não é necessário. Isso resulta em um sistema que é mais fácil de usar e mais poderoso, robusto, resiliente e extensível.
+
+
+## {{% heading "whatsnext" %}}
+
+* Dê uma olhada em [Componentes do Kubernetes](/docs/concepts/overview/components/).
+* Pronto para [Iniciar](/docs/setup/)?
diff --git a/content/pt/docs/concepts/overview/working-with-objects/_index.md b/content/pt-br/docs/concepts/overview/working-with-objects/_index.md
similarity index 100%
rename from content/pt/docs/concepts/overview/working-with-objects/_index.md
rename to content/pt-br/docs/concepts/overview/working-with-objects/_index.md
diff --git a/content/pt/docs/concepts/overview/working-with-objects/names.md b/content/pt-br/docs/concepts/overview/working-with-objects/names.md
similarity index 100%
rename from content/pt/docs/concepts/overview/working-with-objects/names.md
rename to content/pt-br/docs/concepts/overview/working-with-objects/names.md
diff --git a/content/pt-br/docs/concepts/scheduling-eviction/_index.md b/content/pt-br/docs/concepts/scheduling-eviction/_index.md
new file mode 100644
index 0000000000000..e9e036f0c3f6c
--- /dev/null
+++ b/content/pt-br/docs/concepts/scheduling-eviction/_index.md
@@ -0,0 +1,8 @@
+---
+title: "Escalonamento"
+weight: 90
+description: >
+  No Kubernetes, agendamento refere-se à garantia de que os pods correspondam aos nós para que o kubelet possa executá-los.
+  Remoção é o processo de encerramento proativo de um ou mais pods em nós com falta de recursos.
+---
+
diff --git a/content/pt/docs/concepts/scheduling/kube-scheduler.md b/content/pt-br/docs/concepts/scheduling-eviction/kube-scheduler.md
similarity index 93%
rename from content/pt/docs/concepts/scheduling/kube-scheduler.md
rename to content/pt-br/docs/concepts/scheduling-eviction/kube-scheduler.md
index 575a8e78394e1..8c8b0ec39aea8 100644
--- a/content/pt/docs/concepts/scheduling/kube-scheduler.md
+++ b/content/pt-br/docs/concepts/scheduling-eviction/kube-scheduler.md
@@ -91,4 +91,7 @@ do escalonador:
 * Aprenda como [configurar vários escalonadores](/docs/tasks/administer-cluster/configure-multiple-schedulers/)
 * Aprenda sobre [políticas de gerenciamento de topologia](/docs/tasks/administer-cluster/topology-manager/)
 * Aprenda sobre [Pod Overhead](/docs/concepts/configuration/pod-overhead/)
-
+* Saiba mais sobre o agendamento de pods que usam volumes em:
+  * [Suporte de topologia de volume](/docs/concepts/storage/storage-classes/#volume-binding-mode)
+  * [Rastreamento de capacidade de armazenamento](/docs/concepts/storage/storage-capacity/)
+  * [Limites de volumes específicos do nó](/docs/concepts/storage/storage-limits/)
\ No newline at end of file
diff --git a/content/pt/docs/concepts/configuration/pod-overhead.md b/content/pt-br/docs/concepts/scheduling-eviction/pod-overhead.md
similarity index 54%
rename from content/pt/docs/concepts/configuration/pod-overhead.md
rename to content/pt-br/docs/concepts/scheduling-eviction/pod-overhead.md
index 78ba1d6ffde8e..c3788b22fa500 100644
--- a/content/pt/docs/concepts/configuration/pod-overhead.md
+++ b/content/pt-br/docs/concepts/scheduling-eviction/pod-overhead.md
@@ -1,9 +1,5 @@
 ---
-reviewers:
-- dchen1107
-- egernst
-- tallclair
-title: Pod Overhead
+title: Sobrecarga de Pod
 content_type: concept
 weight: 50
 ---
@@ -12,10 +8,10 @@ weight: 50
 
 {{< feature-state for_k8s_version="v1.18" state="beta" >}}
 
-Quando executa um Pod num nó, o próprio Pod usa uma quantidade de recursos do sistema. Estes
-recursos são adicionais aos recursos necessários para executar o(s) _container(s)_ dentro do Pod.
+Quando você executa um Pod num nó, o próprio Pod usa uma quantidade de recursos do sistema. Estes
+recursos são adicionais aos recursos necessários para executar o(s) contêiner(s) dentro do Pod.
 Sobrecarga de Pod, do inglês _Pod Overhead_, é uma funcionalidade que serve para contabilizar os recursos consumidos pela
-infraestrutura do Pod para além das solicitações e limites do _container_.
+infraestrutura do Pod para além das solicitações e limites do contêiner.
 
 
 
@@ -23,27 +19,27 @@ infraestrutura do Pod para além das solicitações e limites do _container_.
 
 
-No Kubernetes, a sobrecarga de _Pods_ é definido no tempo de
+No Kubernetes, a sobrecarga de Pods é definida no tempo de
 [admissão](/docs/reference/access-authn-authz/extensible-admission-controllers/#what-are-admission-webhooks)
 de acordo com a sobrecarga associada à
-[RuntimeClass](/docs/concepts/containers/runtime-class/) do _Pod_.
+[RuntimeClass](/docs/concepts/containers/runtime-class/) do Pod.
 
 Quando é ativada a Sobrecarga de Pod, a sobrecarga é considerada adicionalmente à soma das
-solicitações de recursos do _container_ ao agendar um Pod. Semelhantemente, o _kubelet_
+solicitações de recursos do contêiner ao agendar um Pod. Semelhantemente, o _kubelet_
 incluirá a sobrecarga do Pod ao dimensionar o cgroup do Pod e ao
-executar a classificação de despejo do Pod.
+executar a classificação de prioridade de migração do Pod em caso de _drain_ do Node.
-## Possibilitando a Sobrecarga do Pod {#set-up}
+## Habilitando a Sobrecarga de Pod {#set-up}
 
-Terá de garantir que o [portão de funcionalidade](/docs/reference/command-line-tools-reference/feature-gates/)
-`PodOverhead` está ativo (está ativo por defeito a partir da versão 1.18)
-por todo o cluster, e uma `RuntimeClass` é utilizada que defina o campo `overhead`.
+Terá de garantir que o [Feature Gate](/docs/reference/command-line-tools-reference/feature-gates/)
+`PodOverhead` esteja ativo (está ativo por padrão a partir da versão 1.18)
+em todo o cluster, e que seja utilizada uma `RuntimeClass` que defina o campo `overhead`.
 
 ## Exemplo de uso
 
 Para usar a funcionalidade PodOverhead, é necessário uma RuntimeClass que define o campo `overhead`.
-Por exemplo, poderia usar a definição da RuntimeClass abaixo com um _container runtime_ virtualizado
-que usa cerca de 120MiB por Pod para a máquina virtual e o sistema operativo convidado:
+Por exemplo, você poderia usar a definição da RuntimeClass abaixo com um agente de execução de contêiner virtualizado
+que use cerca de 120MiB por Pod para a máquina virtual e o sistema operacional convidado:
 
 ```yaml
 ---
@@ -88,9 +84,9 @@ spec:
     memory: 100Mi
 ```
 
-Na altura de admissão o [controlador de admissão](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/) RuntimeClass
+No tempo de admissão, o [controlador de admissão](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/) RuntimeClass
 atualiza o _PodSpec_ da carga de trabalho de forma a incluir o `overhead` como descrito na RuntimeClass. Se o _PodSpec_ já tiver este campo definido
-o _Pod_ será rejeitado. No exemplo dado, como apenas o nome do RuntimeClass é especificado, o controlador de admissão muda o _Pod_ de forma a
+o Pod será rejeitado. No exemplo dado, como apenas o nome do RuntimeClass é especificado, o controlador de admissão muda o Pod de forma a
 incluir um `overhead`.
 
 Depois do controlador de admissão RuntimeClass, pode verificar o _PodSpec_ atualizado:
@@ -99,44 +95,43 @@ Depois do controlador de admissão RuntimeClass, pode verificar o _PodSpec_ atua
 kubectl get pod test-pod -o jsonpath='{.spec.overhead}'
 ```
 
-O output é:
+A saída é:
 ```
 map[cpu:250m memory:120Mi]
 ```
 
-Se for definido um _ResourceQuota_, a soma dos pedidos dos _containers_ assim como o campo `overhead` são contados.
+Se for definido um _ResourceQuota_, a soma das requisições dos contêineres assim como o campo `overhead` são contados.
 
-Quando o kube-scheduler está a decidir que nó deve executar um novo _Pod_, o agendador considera o `overhead` do _Pod_,
-assim como a soma de pedidos aos _containers_ para esse _Pod_. Para este exemplo, o agendador adiciona os
-pedidos e a sobrecarga, depois procura um nó com 2.25 CPU e 320 MiB de memória disponível.
+Quando o kube-scheduler está decidindo que nó deve executar um novo Pod, o agendador considera o `overhead` do Pod,
+assim como a soma das requisições dos contêineres para esse Pod. Para este exemplo, o agendador adiciona as requisições e a sobrecarga, depois procura um nó com 2.25 CPU e 320 MiB de memória disponível.
 
-Assim que um _Pod_ é agendado a um nó, o kubelet nesse nó cria um novo {{< glossary_tooltip text="cgroup" term_id="cgroup" >}}
-para o _Pod_. É dentro deste _pod_ que o _container runtime_ subjacente vai criar _containers_.
+Assim que um Pod é agendado a um nó, o kubelet nesse nó cria um novo {{< glossary_tooltip text="cgroup" term_id="cgroup" >}}
+para o Pod.
É dentro deste Pod que o agente de execução de contêineres subjacente vai criar contêineres.
 
-Se o recurso tiver um limite definido para cada _container_ (_QoS_ garantida ou _Burstrable QoS_ com limites definidos),
-o kubelet definirá um limite superior para o cgroup do _pod_ associado a esse recurso (cpu.cfs_quota_us para CPU
-e memory.limit_in_bytes de memória). Este limite superior é baseado na soma dos limites do _container_ mais o `overhead`
+Se o recurso tiver um limite definido para cada contêiner (_QoS_ garantida ou _Burstable QoS_ com limites definidos),
+o kubelet definirá um limite superior para o cgroup do Pod associado a esse recurso (cpu.cfs_quota_us para CPU
+e memory.limit_in_bytes de memória). Este limite superior é baseado na soma dos limites do contêiner mais o `overhead`
 definido no _PodSpec_.
 
-Para o CPU, se o _Pod_ for QoS garantida ou _Burstrable QoS_, o kubelet vai definir `cpu.shares` baseado na soma dos
-pedidos ao _container_ mais o `overhead` definido no _PodSpec_.
+Para CPU, se o Pod for QoS garantida ou _Burstable QoS_, o kubelet vai definir `cpu.shares` baseado na soma das
+requisições ao contêiner mais o `overhead` definido no _PodSpec_.
 
-Olhando para o nosso exemplo, verifique os pedidos ao _container_ para a carga de trabalho:
+Olhando para o nosso exemplo, verifique as requisições ao contêiner para a carga de trabalho:
 ```bash
 kubectl get pod test-pod -o jsonpath='{.spec.containers[*].resources.limits}'
 ```
 
-O total de pedidos ao _container_ são 2000m CPU e 200MiB de memória:
+O total de requisições ao contêiner é de 2000m CPU e 200MiB de memória:
 ```
 map[cpu: 500m memory:100Mi] map[cpu:1500m memory:100Mi]
 ```
 
-Verifique isto contra o que é observado pelo nó:
+Compare isto com o que é observado pelo nó:
 ```bash
 kubectl describe node | grep test-pod -B2
 ```
 
-O output mostra que 2250m CPU e 320MiB de memória são solicitados, que inclui _PodOverhead_:
+A saída mostra que 2250m CPU e 320MiB de memória são solicitados, que inclui _PodOverhead_:
 ```
 Namespace    Name       CPU Requests  CPU Limits   Memory Requests  Memory Limits  AGE
 ---------    ----       ------------  ----------   ---------------  -------------  ---
@@ -145,12 +140,12 @@ O output mostra que 2250m CPU e 320MiB de memória são solicitados, que inclui
 
 ## Verificar os limites cgroup do Pod
 
-Verifique os cgroups de memória do Pod no nó onde a carga de trabalho está em execução. No seguinte exemplo, [`crictl`] (https://github.com/kubernetes-sigs/cri-tools/blob/master/docs/crictl.md)
-é usado no nó, que fornece uma CLI para _container runtimes_ compatíveis com CRI. Isto é um
-exemplo avançado para mostrar o comportamento do _PodOverhead_, e não é esperado que os utilizadores precisem de verificar
+Verifique os cgroups de memória do Pod no nó onde a carga de trabalho está em execução. No seguinte exemplo, [`crictl`](https://github.com/kubernetes-sigs/cri-tools/blob/master/docs/crictl.md)
+é usado no nó, que fornece uma CLI para agentes de execução compatíveis com CRI. Isto é um
+exemplo avançado para mostrar o comportamento do _PodOverhead_, e não é esperado que os usuários precisem verificar
 cgroups diretamente no nó.
-Primeiro, no nó em particular, determine o identificador do _Pod_:
+Primeiro, no nó em particular, determine o identificador do Pod:
 
 ```bash
 # Execute no nó onde o Pod está agendado
@@ -163,15 +158,15 @@ A partir disto, pode determinar o caminho do cgroup para o _Pod_:
 sudo crictl inspectp -o=json $POD_ID | grep cgroupsPath
 ```
 
-O caminho do cgroup resultante inclui o _container_ `pause` do _Pod_. O cgroup no nível do _Pod_ está um diretório acima.
+O caminho do cgroup resultante inclui o contêiner `pause` do Pod. O cgroup no nível do Pod está um diretório acima.
 ```
 "cgroupsPath": "/kubepods/podd7f4b509-cf94-4951-9417-d1087c92a5b2/7ccf55aee35dd16aca4189c952d83487297f3cd760f1bbf09620e206e7d0c27a"
 ```
 
-Neste caso especifico, o caminho do cgroup do pod é `kubepods/podd7f4b509-cf94-4951-9417-d1087c92a5b2`. Verifique a configuração cgroup de nível do _Pod_ para a memória:
+Neste caso específico, o caminho do cgroup do Pod é `kubepods/podd7f4b509-cf94-4951-9417-d1087c92a5b2`. Verifique a configuração cgroup de nível do Pod para a memória:
 ```bash
 # Execute no nó onde o Pod está agendado
-# Mude também o nome do cgroup de forma a combinar com o cgroup alocado ao pod.
+# Mude também o nome do cgroup para combinar com o cgroup alocado ao Pod.
 cat /sys/fs/cgroup/memory/kubepods/podd7f4b509-cf94-4951-9417-d1087c92a5b2/memory.limit_in_bytes
 ```
 
@@ -182,10 +177,10 @@ Isto é 320 MiB, como esperado:
 
 ### Observabilidade
 
-Uma métrica `kube_pod_overhead` está disponível em [kube-state-metrics] (https://github.com/kubernetes/kube-state-metrics)
-para ajudar a identificar quando o _PodOverhead_ está a ser utilizado e para ajudar a observar a estabilidade das cargas de trabalho
+Uma métrica `kube_pod_overhead` está disponível em [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics)
+para ajudar a identificar quando o _PodOverhead_ está sendo utilizado e para ajudar a observar a estabilidade das cargas de trabalho
 em execução com uma sobrecarga (_Overhead_) definida. Esta funcionalidade não está disponível na versão 1.9 do kube-state-metrics,
-mas é esperado num próximo _release_. Os utilizadores necessitarão entretanto de construir kube-state-metrics a partir da fonte.
+mas é esperada em uma próxima versão. Os usuários necessitarão, entretanto, construir o kube-state-metrics a partir do código-fonte.
 
diff --git a/content/pt-br/docs/concepts/security/_index.md b/content/pt-br/docs/concepts/security/_index.md
new file mode 100644
index 0000000000000..63fca06b9a9be
--- /dev/null
+++ b/content/pt-br/docs/concepts/security/_index.md
@@ -0,0 +1,5 @@
+---
+title: "Segurança"
+weight: 81
+---
+
diff --git a/content/pt-br/docs/concepts/security/overview.md b/content/pt-br/docs/concepts/security/overview.md
new file mode 100644
index 0000000000000..1f75e051aefc2
--- /dev/null
+++ b/content/pt-br/docs/concepts/security/overview.md
@@ -0,0 +1,153 @@
+---
+title: Visão Geral da Segurança Cloud Native
+content_type: concept
+weight: 10
+---
+
+
+
+Esta visão geral define um modelo para pensar sobre a segurança em Kubernetes no contexto da Segurança em Cloud Native.
+
+{{< warning >}}
+Este modelo de segurança de contêineres fornece sugestões, não políticas de segurança da informação comprovadas.
+{{< /warning >}}
+
+
+
+## Os 4C da Segurança Cloud Native
+
+Você pode pensar na segurança em camadas. Os 4C da segurança Cloud Native são a Cloud,
+Clusters, Contêineres e Código.
+
+{{< note >}}
+Esta abordagem em camadas aumenta a [defesa em profundidade](https://en.wikipedia.org/wiki/Defense_in_depth_(computing))
+para segurança, que é amplamente considerada como uma boa prática de segurança para software de sistemas.
+{{< /note >}}
+
+{{< figure src="/images/docs/4c.png" title="Os 4C da Segurança Cloud Native" >}}
+
+Cada camada do modelo de segurança Cloud Native é construída sobre a próxima camada mais externa.
+A camada de Código se beneficia de uma base forte de camadas seguras (Cloud, Cluster, Contêiner).
+Você não pode proteger contra padrões ruins de segurança nas camadas de base através de
+segurança no nível do Código.
+
+## Cloud
+
+De muitas maneiras, a Cloud (ou servidores co-localizados, ou o datacenter corporativo) é a
+[base de computação confiável](https://en.wikipedia.org/wiki/Trusted_computing_base)
+de um cluster Kubernetes. Se a camada de Cloud for vulnerável (ou
+configurada de alguma maneira vulnerável), então não há garantia de que os componentes construídos
+em cima desta base estejam seguros. Cada provedor de Cloud faz recomendações de segurança
+para executar as cargas de trabalho com segurança em seus ambientes.
+
+### Segurança no provedor da Cloud
+
+Se você estiver executando um cluster Kubernetes em seu próprio hardware ou em um provedor de nuvem diferente,
+consulte sua documentação para melhores práticas de segurança.
+Aqui estão os links para as documentações de segurança dos provedores de nuvem mais populares:
+
+{{< table caption="Cloud provider security" >}}
+
+Provedor IaaS        | Link |
+-------------------- | ------------ |
+Alibaba Cloud | https://www.alibabacloud.com/trust-center |
+Amazon Web Services | https://aws.amazon.com/security/ |
+Google Cloud Platform | https://cloud.google.com/security/ |
+IBM Cloud | https://www.ibm.com/cloud/security |
+Microsoft Azure | https://docs.microsoft.com/en-us/azure/security/azure-security |
+VMWare VSphere | https://www.vmware.com/security/hardening-guides.html |
+
+{{< /table >}}
+
+### Segurança de Infraestrutura {#infrastructure-security}
+
+Sugestões para proteger sua infraestrutura em um cluster Kubernetes:
+
+{{< table caption="Infrastructure security" >}}
+
+Área de Interesse para Infraestrutura Kubernetes | Recomendação |
+--------------------------------------------- | -------------- |
+Acesso de rede ao servidor API (Control plane) | O acesso público ao control plane do Kubernetes pela Internet não deve ser permitido e deve ser controlado por listas de controle de acesso à rede restritas ao conjunto de endereços IP necessários para administrar o cluster.|
+Acesso de rede aos Nós (nodes) | Os nós devem ser configurados para _só_ aceitar conexões (por meio de listas de controle de acesso à rede) do control plane nas portas especificadas e aceitar conexões para serviços no Kubernetes do tipo NodePort e LoadBalancer. Se possível, esses nós não devem ser expostos inteiramente na Internet pública.
+Acesso do Kubernetes à API do provedor de Cloud | Cada provedor de nuvem precisa conceder um conjunto diferente de permissões para o control plane e nós do Kubernetes. É melhor fornecer ao cluster permissão de acesso ao provedor de nuvem que segue o [princípio do menor privilégio](https://en.wikipedia.org/wiki/Principle_of_least_privilege) para os recursos que ele precisa administrar. A [documentação do Kops](https://github.com/kubernetes/kops/blob/master/docs/iam_roles.md#iam-roles) fornece informações sobre as políticas e roles do IAM.
+Acesso ao etcd | O acesso ao etcd (o armazenamento de dados do Kubernetes) deve ser limitado apenas ao control plane. Dependendo de sua configuração, você deve tentar usar etcd sobre TLS. Mais informações podem ser encontradas na [documentação do etcd](https://github.com/etcd-io/etcd/tree/master/Documentation).
+Encriptação etcd | Sempre que possível, é uma boa prática encriptar todas as unidades de armazenamento, mas como o etcd mantém o estado de todo o cluster (incluindo os Secrets), seu disco deve ser criptografado.
+
+{{< /table >}}
+
+## Cluster
+
+Existem duas áreas de preocupação para proteger o Kubernetes:
+
+* Protegendo os componentes do cluster que são configuráveis.
+* Protegendo as aplicações que são executadas no cluster.
+
+### Componentes do Cluster {#cluster-components}
+
+Se você deseja proteger seu cluster de acesso acidental ou malicioso e adotar
+boas práticas de segurança da informação, leia e siga os conselhos sobre como
+[proteger seu cluster](/docs/tasks/administer-cluster/securing-a-cluster/).
+
+### Componentes no cluster (sua aplicação) {#cluster-applications}
+
+Dependendo da superfície de ataque de sua aplicação, você pode querer se concentrar em
+tópicos específicos de segurança. Por exemplo: se você estiver executando um serviço (Serviço A) que é crítico
+numa cadeia de outros recursos e outra carga de trabalho separada (Serviço B) que é
+vulnerável a um ataque de exaustão de recursos, o risco de comprometer o Serviço A
+é alto se você não limitar os recursos do Serviço B. A tabela a seguir lista
+áreas de atenção na segurança e recomendações para proteger cargas de trabalho em execução no Kubernetes:
+
+Área de interesse para a segurança do Workload | Recomendação |
+------------------------------ | --------------------- |
+Autorização RBAC (acesso à API Kubernetes) | https://kubernetes.io/docs/reference/access-authn-authz/rbac/
+Autenticação | https://kubernetes.io/docs/concepts/security/controlling-access/
+Gerenciamento de segredos na aplicação (e encriptando-os no etcd em repouso) | https://kubernetes.io/docs/concepts/configuration/secret/ https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/
+Políticas de segurança do Pod | https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+Qualidade de serviço (e gerenciamento de recursos de cluster) | https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/
+Políticas de Rede | https://kubernetes.io/docs/concepts/services-networking/network-policies/
+TLS para Kubernetes Ingress | https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
+
+## Contêiner
+
+A segurança do contêiner está fora do escopo deste guia. Aqui estão recomendações gerais e
+links para explorar este tópico:
+
+Área de Interesse para Contêineres | Recomendação |
+------------------------------ | -------------- |
+Scanners de Vulnerabilidade de Contêiner e Segurança de Dependência de SO | Como parte da etapa de construção de imagem, você deve usar algum scanner em seus contêineres em busca de vulnerabilidades.
+Assinatura de Imagem e Enforcement | Assinatura de imagens de contêineres para manter um sistema de confiança para o conteúdo de seus contêineres.
+Proibir Usuários Privilegiados | Ao construir contêineres, consulte a documentação para criar usuários dentro dos contêineres que tenham o menor nível de privilégio no sistema operacional necessário para cumprir o objetivo do contêiner.
+Use um Agente de Execução de Contêiner com Isolamento mais Forte | Selecione [classes de agente de execução de contêiner](/docs/concepts/containers/runtime-class/) com o provedor de isolamento mais forte.
+
+## Código
+
+O código da aplicação é uma das principais superfícies de ataque sobre a qual você tem maior controle.
+Embora a proteção do código da aplicação esteja fora do tópico de segurança do Kubernetes, aqui
+estão recomendações para proteger o código da aplicação:
+
+### Segurança de código
+
+{{< table caption="Code security" >}}
+
+Área de Atenção para o Código | Recomendação |
+-------------------------| -------------- |
+Acesso só através de TLS | Se seu código precisar se comunicar por TCP, execute um handshake TLS com o cliente antecipadamente. Com exceção de alguns casos, encripte tudo em trânsito. Indo um passo adiante, é uma boa ideia encriptar o tráfego de rede entre os serviços. Isso pode ser feito por meio de um processo conhecido como mutual ou [mTLS](https://en.wikipedia.org/wiki/Mutual_authentication), que realiza uma verificação bilateral da comunicação mediante os certificados nos serviços. |
+Limitando intervalos de porta de comunicação | Essa recomendação pode ser um pouco autoexplicativa, mas, sempre que possível, você só deve expor as portas em seu serviço que são absolutamente essenciais para a comunicação ou coleta de métricas. |
+Segurança na Dependência de Terceiros | É uma boa prática verificar regularmente as bibliotecas de terceiros de sua aplicação em busca de vulnerabilidades de segurança. Cada linguagem de programação possui uma ferramenta para realizar essa verificação automaticamente. |
+Análise de Código Estático | A maioria das linguagens fornece uma maneira de analisar trechos do código em busca de quaisquer práticas de codificação potencialmente inseguras. Sempre que possível, você deve automatizar verificações usando ferramentas que podem verificar as bases de código em busca de erros de segurança comuns. Algumas das ferramentas podem ser encontradas em [OWASP Source Code Analysis Tools](https://owasp.org/www-community/Source_Code_Analysis_Tools). |
+Ataques de sondagem dinâmica | Existem algumas ferramentas automatizadas que você pode executar contra seu serviço para tentar alguns dos ataques mais conhecidos. Isso inclui injeção de SQL, CSRF e XSS. Uma das ferramentas de análise dinâmica mais populares é o [OWASP Zed Attack proxy](https://owasp.org/www-project-zap/). |
+
+{{< /table >}}
+
+## {{% heading "whatsnext" %}}
+
+Saiba mais sobre os tópicos de segurança do Kubernetes:
+
+* [Padrões de segurança do Pod](/docs/concepts/security/pod-security-standards/)
+* [Políticas de rede para Pods](/docs/concepts/services-networking/network-policies/)
+* [Controle de acesso à API Kubernetes](/docs/concepts/security/controlling-access)
+* [Protegendo seu cluster](/docs/tasks/administer-cluster/securing-a-cluster/)
+* [Criptografia de dados em trânsito](/docs/tasks/tls/managing-tls-in-a-cluster/) para o control plane
+* [Criptografia de dados em repouso](/docs/tasks/administer-cluster/encrypt-data/)
+* [Secrets no Kubernetes](/docs/concepts/configuration/secret/)
+* [Runtime class](/docs/concepts/containers/runtime-class)
\ No newline at end of file
diff --git a/content/pt/docs/concepts/workloads/controllers/_index.md b/content/pt-br/docs/concepts/workloads/controllers/_index.md
similarity index 100%
rename from content/pt/docs/concepts/workloads/controllers/_index.md
rename to content/pt-br/docs/concepts/workloads/controllers/_index.md
diff --git a/content/pt/docs/concepts/workloads/controllers/cron-jobs.md b/content/pt-br/docs/concepts/workloads/controllers/cron-jobs.md
similarity index 100%
rename from content/pt/docs/concepts/workloads/controllers/cron-jobs.md
rename to content/pt-br/docs/concepts/workloads/controllers/cron-jobs.md
diff --git a/content/pt/docs/contribute/_index.md b/content/pt-br/docs/contribute/_index.md
similarity index 100%
rename from content/pt/docs/contribute/_index.md
rename to content/pt-br/docs/contribute/_index.md
diff --git a/content/pt/docs/home/_index.md b/content/pt-br/docs/home/_index.md
similarity index 100%
rename from content/pt/docs/home/_index.md
rename to content/pt-br/docs/home/_index.md
diff --git a/content/pt/docs/home/supported-doc-versions.md b/content/pt-br/docs/home/supported-doc-versions.md
similarity index 100%
rename from content/pt/docs/home/supported-doc-versions.md
rename to content/pt-br/docs/home/supported-doc-versions.md
diff --git a/content/pt/docs/reference/_index.md b/content/pt-br/docs/reference/_index.md
similarity index 100%
rename from content/pt/docs/reference/_index.md
rename to content/pt-br/docs/reference/_index.md
diff --git a/content/pt-br/docs/reference/access-authn-authz/authentication.md b/content/pt-br/docs/reference/access-authn-authz/authentication.md
new file mode 100644
index 0000000000000..7882b4974d36d
--- /dev/null
+++ b/content/pt-br/docs/reference/access-authn-authz/authentication.md
@@ -0,0 +1,888 @@
+---
+title: Autenticação
+content_type: concept
+weight: 10
+---
+
+
+Esta página fornece uma visão geral sobre autenticação.
+
+
+## Usuários no Kubernetes
+
+Todos os clusters Kubernetes possuem duas categorias de usuários: contas de serviço gerenciadas pelo Kubernetes e usuários normais.
+
+Assume-se que um serviço independente do cluster gerencia usuários normais das seguintes formas:
+
+- Um administrador distribuindo chaves privadas
+- Uma base de usuários como Keystone {{< glossary_definition term_id="keystone" length="all" >}} ou Google Accounts
+- Um arquivo com uma lista de nomes de usuários e senhas
+
+Neste quesito, _Kubernetes não possui objetos que possam representar as contas de um usuário normal._ Usuários normais não podem ser adicionados ao _cluster_ através de uma chamada para a API.
+
+Apesar de um usuário normal não poder ser adicionado através de uma chamada para a API, qualquer usuário que apresente um certificado válido e assinado pela autoridade de certificados (CA) do _cluster_ é considerado autenticado. Nesta configuração, o Kubernetes determina o nome do usuário baseado no campo de nome comum do sujeito (_subject_) do certificado (por exemplo: "/CN=bob"). A partir daí, o subsistema de controle de acesso baseado em função (RBAC) determina se o usuário é autorizado a realizar uma operação específica sobre o recurso. Para mais detalhes, veja a referência sobre o tópico de usuários normais dentro de [requisição de certificado](/docs/reference/access-authn-authz/certificate-signing-requests/#normal-user).
+
+Em contraste a usuários normais, contas de serviço são consideradas usuários gerenciados pela API do Kubernetes. Elas estão vinculadas a _namespaces_ específicos e são criadas automaticamente pelo servidor de API ou manualmente através de chamadas da API. Contas de serviço estão ligadas a um conjunto de credenciais armazenadas como `Secrets`, os quais são montados dentro dos _pods_, permitindo assim que processos internos ao _cluster_ comuniquem-se com a API do Kubernetes.
+
+Requisições para a API estão ligadas a um usuário normal ou a uma conta de serviço, ou serão tratadas como [requisições anônimas](#anonymous-requests). Isto significa que cada processo dentro ou fora do _cluster_, desde um usuário humano utilizando o `kubectl` em uma estação de trabalho, passando por `kubelets` rodando nos nós, até membros da camada de gerenciamento, deve autenticar-se ao realizar suas requisições para o servidor de API, ou será tratado como usuário anônimo.
+
+## Estratégias de autenticação
+
+Kubernetes usa certificados de cliente, _bearer tokens_, um proxy de autenticação ou autenticação básica HTTP para autenticar requisições para o servidor de API através de plugins. À medida que requisições HTTP são feitas ao servidor de API, os plugins tentam associar os seguintes atributos à requisição:
+
+* Username {{< glossary_definition term_id="username" length="all" >}}: um valor (String) que identifica o usuário final. Valores comuns podem ser `kube-admin` ou `jane@example.com`.
+* UID {{< glossary_definition term_id="uid" length="all" >}}: um valor (String) que identifica o usuário final e tenta ser mais consistente e único do que o username.
+* Groups: um conjunto de valores em que cada item indica a associação de um usuário a uma coleção lógica de usuários. Valores comuns podem ser `system:masters` ou `devops-team`.
+* Campos extras: um mapa que pode conter uma lista de atributos com informações adicionais que autorizadores podem achar úteis.
+
+Todos os valores são transparentes para o sistema de autenticação e somente trazem significado quando interpretados por um [autorizador](/docs/reference/access-authn-authz/authorization/).
+
+É possível habilitar múltiplos métodos de autenticação.
Deve-se normalmente usar pelo menos dois métodos:
+
+- _Tokens_ para contas de serviço;
+- Pelo menos um outro método de autenticação para usuários.
+
+Quando múltiplos módulos de autenticação estão habilitados, o primeiro módulo que autenticar a requisição com sucesso encerra o fluxo de avaliação da mesma.
+
+O servidor de API não garante a ordem em que os autenticadores são processados.
+
+O grupo `system:authenticated` é incluído na lista de grupos de todos os usuários autenticados.
+
+Integrações com outros protocolos de autenticação, como LDAP {{< glossary_definition term_id="ldap" length="all" >}}, SAML {{< glossary_definition term_id="saml" length="all" >}}, Kerberos {{< glossary_definition term_id="kerberos" length="all" >}}, esquemas alternativos de X.509 {{< glossary_definition term_id="alternate-x509-schemes" length="all" >}}, etc., podem ser implementadas utilizando-se um [proxy](#autenticando-com-proxy) ou um [webhook](#token-de-autenticação-via-webhook) de autenticação.
+
+### Certificados de cliente X509
+
+Autenticação via certificados de cliente pode ser habilitada ao passar a opção `--client-ca-file=ARQUIVO` para o servidor de API. O arquivo referenciado deve conter uma ou mais autoridades de certificação usadas para validar o certificado de cliente apresentado ao servidor de API. Se um certificado de cliente for apresentado e verificado, o _common name_ {{< glossary_definition term_id="tls-common-name" length="all" >}} do sujeito é usado como o nome de usuário para a requisição. A partir da versão 1.4, certificados de cliente podem também indicar o pertencimento de um usuário a um grupo utilizando o campo de organização do certificado. Para incluir múltiplos grupos para o usuário, deve-se incluir múltiplos campos de organização no certificado.
+
+Por exemplo, utilizando o utilitário de linha de comando `openssl` para gerar uma requisição de assinatura de certificado:
+
+```bash
+openssl req -new -key jbeda.pem -out jbeda-csr.pem -subj "/CN=jbeda/O=app1/O=app2"
+```
+
+Isto criaria um arquivo do tipo CSR (requisição de assinatura de certificado) para o usuário "jbeda" pertencendo a dois grupos: "app1" e "app2".
+
+Veja como gerar um certificado de cliente em [Gerenciando Certificados](/docs/concepts/cluster-administration/certificates/).
+
+### Arquivo estático de Token
+
+O servidor de API lê _bearer tokens_ de um arquivo quando a opção `--token-auth-file=ARQUIVO` é especificada via linha de comando. Atualmente, tokens têm duração indefinida, e a lista de tokens não pode ser modificada sem reiniciar o servidor de API.
+
+O arquivo de token é do tipo CSV contendo no mínimo 3 colunas: token, nome de usuário e identificador de usuário (uid), seguidas pelos nomes de grupos (opcional).
+
+{{< note >}}
+Se uma entrada possuir mais de um grupo, a coluna deve ser cercada por aspas duplas, por exemplo:
+
+```conf
+token,usuario,uid,"grupo1,grupo2,grupo3"
+```
+{{< /note >}}
+
+#### Adicionando um _bearer token_ em uma requisição
+
+Ao utilizar _bearer token_ para autenticação de um cliente HTTP, o servidor de API espera um cabeçalho `Authorization` com o valor `Bearer TOKEN`. O token deve ser uma sequência de caracteres que possa ser colocada como valor em um cabeçalho HTTP utilizando, no máximo, os recursos de codificação e de citação de HTTP.
Por exemplo, se o valor de um token é `31ada4fd-adec-460c-809a-9e56ceb75269`, então ele apareceria dentro de um cabeçalho HTTP como:
+
+```http
+Authorization: Bearer 31ada4fd-adec-460c-809a-9e56ceb75269
+```
+### Tokens de inicialização
+
+{{< feature-state for_k8s_version="v1.18" state="stable" >}}
+
+Para permitir a inicialização simplificada de novos _clusters_, o Kubernetes inclui um token dinamicamente gerenciado denominado *Bootstrap Token*. Estes _tokens_ são armazenados como Secrets dentro do namespace `kube-system`, onde eles podem ser dinamicamente criados e gerenciados. O componente Gerenciador de Controle (Controller Manager) possui um controlador "TokenCleaner" que apaga os _tokens_ de inicialização expirados.
+
+Os _tokens_ seguem o formato `[a-z0-9]{6}.[a-z0-9]{16}`. O primeiro componente é um identificador do _token_ e o segundo é o segredo. Você pode especificar o _token_ em um cabeçalho HTTP como:
+
+```http
+Authorization: Bearer 781292.db7bc3a58fc5f07e
+```
+
+Deve-se habilitar os _tokens_ de inicialização com a opção `--enable-bootstrap-token-auth` no servidor de API. Deve-se habilitar o controlador `TokenCleaner` através da opção `--controllers` no Gerenciador de Controle. Isso é feito, por exemplo, como: `--controllers=*,tokencleaner`. O `kubeadm`, por exemplo, irá realizar isso caso seja utilizado para a inicialização do cluster.
+
+Os _tokens_ são autenticados como o usuário `system:bootstrap:<Token ID>` e incluídos no grupo `system:bootstrappers`. O nome e o grupo são intencionalmente limitados para desencorajar usuários de usarem estes _tokens_ após a inicialização. Os nomes de usuários e grupos podem ser utilizados (e são utilizados pelo `kubeadm`) para elaborar as políticas de autorização que suportam a inicialização de um cluster.
+
+Por favor, veja [Bootstrap Tokens](/docs/reference/access-authn-authz/bootstrap-tokens/) para a documentação detalhada sobre o autenticador e os controladores de _tokens_ de inicialização, bem como sobre como gerenciar estes _tokens_ com o `kubeadm`.
+
+### Tokens de Contas de serviço
+
+Uma conta de serviço é um autenticador habilitado automaticamente que usa _bearer tokens_ assinados para verificar as requisições. O plugin aceita dois parâmetros opcionais:
+
+* `--service-account-key-file` Um arquivo contendo uma chave codificada no formato PEM para assinar _bearer tokens_. Se não especificado, a chave privada de TLS do servidor de API será utilizada.
+* `--service-account-lookup` Se habilitado, _tokens_ deletados do servidor de API serão revogados.
+
+Contas de serviço são normalmente criadas automaticamente pelo servidor de API e associadas a _pods_ rodando no cluster através do [controlador de admissão](/docs/reference/access-authn-authz/admission-controllers/) de `ServiceAccount`. Os tokens de contas de serviço são montados nos Pods em localizações pré-definidas e conhecidas, e permitem que processos dentro do cluster se comuniquem com o servidor de API. Contas podem ser explicitamente associadas a _pods_ utilizando o campo `serviceAccountName` na especificação do pod (`PodSpec`):
+
+{{< note >}}
+O campo `serviceAccountName` é normalmente omitido, pois seu preenchimento é feito automaticamente.
+{{< /note >}}
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx-deployment
+  namespace: default
+spec:
+  replicas: 3
+  template:
+    metadata:
+      # ...
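+      # Suposição ilustrativa: a conta de serviço "bob-the-bot" referenciada abaixo
+      # já teria sido criada neste namespace, por exemplo com
+      # `kubectl create serviceaccount bob-the-bot`.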
+ spec: + serviceAccountName: bob-the-bot + containers: + - name: nginx + image: nginx:1.14.2 +``` + +Os _tokens_ de contas de serviço são perfeitamente válidos para ser usados fora do cluster e podem ser utilizados para criar identidades para processos de longa duração que desejem comunicar-se com a API do Kubernetes. Para criar manualmente uma conta de serviço, utilize-se simplesmente o comando `kubectl create serviceaccount (NOME)`. Isso cria uma conta de serviço e um segredo associado a ela no namespace atual. + +```bash +kubectl create serviceaccount jenkins +``` + +```none +serviceaccount "jenkins" created +``` + +Verificando um segredo associado: + +```bash +kubectl get serviceaccounts jenkins -o yaml +``` + +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + # ... +secrets: +- name: jenkins-token-1yvwg +``` +O segredo criado irá armazenar a autoridade de certificado do servidor de API e um JSON Web Token (JWT) digitalmente assinado. + +```bash +kubectl get secret jenkins-token-1yvwg -o yaml +``` + +```yaml +apiVersion: v1 +data: + ca.crt: (APISERVER'S CA BASE64 ENCODED) + namespace: ZGVmYXVsdA== + token: (BEARER TOKEN BASE64 ENCODED) +kind: Secret +metadata: + # ... +type: kubernetes.io/service-account-token +``` + +{{< note >}} +Valores são codificados em base64 porque segredos são sempre codificados neste formato. +{{< /note >}} + +O JWT assinado pode ser usado como um _bearer token_ para autenticar-se como a conta de serviço. Veja [acima](#adicionando-um-bearer-token-em-uma-requisição) como o _token_ pode ser incluído em uma requisição. Normalmente esses segredos são montados no pod para um acesso interno ao cluster ao servidor de API, porém pode ser utilizado fora do cluster também. + +Contas de serviço são autenticadas com o nome de usuário `system:serviceaccount:(NAMESPACE):(SERVICEACCOUNT)` e são atribuídas aos grupos `system:serviceaccounts` e `system:serviceaccounts:(NAMESPACE)`. + +AVISO: porque os _tokens_ das contas de serviço são armazenados em segredos, qualquer usuário com acesso de leitura a esses segredos podem autenticar-se como a conta de serviço. Tome cuidado quando conceder permissões a contas de serviços e capacidade de leitura de segredos. + +### Tokens OpenID Connect + +[OpenID Connect](https://openid.net/connect/) é uma variação do framework de autorização OAuth2 que suporta provedores como Azure Active Directory, Salesforce, e Google. A principal extensão do OAuth2 é um campo adicional de _token_ de acesso chamado [ID Token](https://openid.net/specs/openid-connect-core-1_0.html#IDToken). Este _token_ é um tipo de JSON Web Token (JWT) com campos bem definidos, como usuário, e-mail e é assinado pelo servidor de autorização. + +Para identificar o usuário, o autenticador usa o `id_token` (e não `access_token`) do _bearer token_ da resposta de autorização do OAuth2 [token response](https://openid.net/specs/openid-connect-core-1_0.html#TokenResponse). Veja [acima](#adicionando-um-bearer-token-em-uma-requisição) como incluir um _token_ em uma requisição. + +{{< mermaid >}} +sequenceDiagram + participant usuário as Usuário + participant IDP as Provedor
    de Identidade + participant kube as Kubectl + participant API as API Server + + usuário ->> IDP: 1. Realizar Login no IdP + activate IDP + IDP -->> usuário: 2. Fornece access_token,
    id_token, e refresh_token + deactivate IDP + activate usuário + usuário ->> kube: 3. Entrar Kubectl
    com --token sendo id_token
    ou adiciona tokens no arquivo .kube/config + deactivate usuário + activate kube + kube ->> API: 4. Emite requisição incluindo o cabeçalho HTTP Authorization: Bearer... + deactivate kube + activate API + API ->> API: 5. O token do tipo JWT possui assinatura válida ? + API ->> API: 6. O token está expirado ? (iat+exp) + API ->> API: 7. Usuário autorizado ? + API -->> kube: 8. Autorizado: Realiza
    ação e retorna resultado
+    deactivate API
+    activate kube
+    kube --x usuário: 9. Retorna resultado
+    deactivate kube
+{{< /mermaid >}}
+
+1. Efetue login no seu provedor de identidade.
+2. Seu provedor de identidade irá fornecer um `access_token`, um `id_token` e um `refresh_token`.
+3. Ao utilizar o `kubectl`, use o seu `id_token` com a opção `--token` ou adicione o token diretamente no seu arquivo de configuração `kubeconfig`.
+4. O `kubectl` envia o seu `id_token` em um cabeçalho HTTP chamado _Authorization_ para o servidor de API.
+5. O servidor de API irá garantir que a assinatura do token JWT é válida, verificando-o em relação ao certificado mencionado na configuração.
+6. Verificação para garantir que o `id_token` não esteja expirado.
+7. Garantir que o usuário é autorizado.
+8. Uma vez autorizado, o servidor de API retorna a resposta para o `kubectl`.
+9. O `kubectl` fornece retorno ao usuário.
+
+Uma vez que todos os dados necessários para determinar sua identidade encontram-se no `id_token`, o Kubernetes não precisa realizar outra chamada para o provedor de identidade. Em um modelo onde cada requisição não possui estado, isso fornece uma solução escalável para autenticação. Isso, porém, apresenta alguns desafios:
+
+1. Kubernetes não possui uma "interface web" para disparar o processo de autenticação. Não há browser ou interface para coletar as credenciais que são necessárias para autenticar-se primeiro no seu provedor de identidade.
+2. O `id_token` não pode ser revogado; funcionando como um certificado, ele deve possuir validade curta (somente alguns minutos), o que pode tornar a experiência um pouco desconfortável, fazendo com que se requisite um novo _token_ a cada curto intervalo (os poucos minutos de validade do _token_).
+3. Para autenticar-se ao dashboard do Kubernetes, você deve executar o comando `kubectl proxy` ou um proxy reverso que consiga injetar o `id_token`.
+
+#### Configurando o Servidor de API
+
+Para habilitar o plugin de autenticação, configure as seguintes opções no servidor de API:
+
+| Parâmetro | Descrição | Exemplo | Obrigatório |
+| --------- | ----------- | ------- | ------- |
+| `--oidc-issuer-url` | URL do provedor que permite ao servidor de API descobrir chaves públicas de assinatura. Somente URLs que usam o esquema `https://` são aceitas. Isto normalmente é o endereço de descoberta do provedor sem o caminho, por exemplo "https://accounts.google.com" ou "https://login.salesforce.com". Esta URL deve apontar para o nível abaixo do caminho .well-known/openid-configuration | Se o valor da URL de descoberta é `https://accounts.google.com/.well-known/openid-configuration`, então o valor deve ser `https://accounts.google.com` | Sim |
+| `--oidc-client-id` | Identificador do cliente para o qual todos os tokens são gerados. | kubernetes | Sim |
+| `--oidc-username-claim` | Atributo do JWT a ser usado como nome de usuário. Por padrão, o valor é `sub`, que se espera ser um identificador único do usuário final. Administradores podem escolher outro atributo, como `email` ou `name`, dependendo do seu provedor de identidade. No entanto, atributos diferentes de `email` serão prefixados com a URL do emissor (issuer URL) para prevenir conflitos de nome com outros plugins. | sub | Não |
+| `--oidc-username-prefix` | Prefixo adicionado ao atributo de nome de usuário para prevenir conflitos com nomes existentes (como, por exemplo, usuários `system:`). Por exemplo, o valor `oidc:` irá criar usuários como `oidc:jane.doe`.
Se esta opção não for fornecida e `--oidc-username-claim` for um valor diferente de `email`, o prefixo padrão será `( Issuer URL )#`, onde `( Issuer URL )` é o valor da opção `--oidc-issuer-url`. O valor `-` pode ser utilizado para desabilitar todos os prefixos. | `oidc:` | Não |
+| `--oidc-groups-claim` | Atributo do JWT a ser utilizado para mapear os grupos dos usuários. Se o atributo estiver presente, ele deve ser do tipo vetor de Strings. | groups | Não |
+| `--oidc-groups-prefix` | Prefixo adicionado ao atributo de grupo para prevenir conflitos com nomes existentes (como, por exemplo, grupos `system:`). Por exemplo, o valor `oidc:` irá criar nomes de grupos como `oidc:engineering` e `oidc:infra`. | `oidc:` | Não |
+| `--oidc-required-claim` | Um par de chave=valor que descreve atributos obrigatórios no _ID Token_. Se configurado, a presença do atributo é verificada dentro do _ID Token_, junto com o valor correspondente. Repita esta opção para configurar múltiplos atributos obrigatórios. | `claim=value` | Não |
+| `--oidc-ca-file` | O caminho para o arquivo de certificado da autoridade de certificados (CA) que assinou o certificado do provedor de identidades. | `/etc/kubernetes/ssl/kc-ca.pem` | Não |
+
+É importante ressaltar que o servidor de API não é um cliente OAuth2; ao contrário, ele só pode ser configurado para confiar em um emissor. Isso permite o uso de emissores públicos, como o Google, sem confiar em credenciais emitidas por terceiros. Administradores que desejam utilizar múltiplos clientes OAuth2 devem explorar provedores que suportem o atributo `azp` (parte autorizada), que é um mecanismo para permitir que um cliente emita tokens em nome de outro.
+
+Kubernetes não oferece um provedor de identidade OpenID Connect. Pode-se utilizar provedores públicos existentes, como o Google ou [outros](https://connect2id.com/products/nimbus-oauth-openid-connect-sdk/openid-connect-providers). Ou pode-se rodar o próprio provedor de identidade no cluster, como [dex](https://dexidp.io/),
+[Keycloak](https://github.com/keycloak/keycloak),
+CloudFoundry [UAA](https://github.com/cloudfoundry/uaa), ou
+[OpenUnison](https://github.com/tremolosecurity/openunison), da Tremolo Security.
+
+Para um provedor de identidades funcionar no Kubernetes, ele deve:
+
+1. Suportar o framework [OpenID connect discovery](https://openid.net/specs/openid-connect-discovery-1_0.html); nem todos os provedores suportam.
+2. Executar TLS com cifras criptográficas não obsoletas.
+3. Possuir certificados assinados por uma autoridade certificadora (mesmo que a CA não seja comercial ou seja autoassinada).
+
+Uma nota sobre o requisito nº 3 acima. Se você instalar o seu próprio provedor de identidades (ao invés de utilizar um provedor como Google ou Microsoft), você DEVE ter o certificado web do seu provedor de identidades assinado por um certificado contendo a opção `CA` configurada para `TRUE`, mesmo que seja um certificado autoassinado. Isso deve-se à implementação do cliente TLS em Golang, que é bastante restrita quanto aos padrões em torno da validação de certificados. Se você não possui uma CA facilmente disponível, você pode usar [este script](https://github.com/dexidp/dex/blob/master/examples/k8s/gencert.sh) criado pelo time do Dex para criar uma CA simples, além de um par de chaves e um certificado assinados.
+Ou você pode usar [este script similar](https://raw.githubusercontent.com/TremoloSecurity/openunison-qs-kubernetes/master/src/main/bash/makessl.sh) o qual gera certificados SHA256 com uma vida mais longa e tamanho maior de chave. + +Instruções de configuração para sistemas específicos podem ser encontrados em: + +- [UAA](https://docs.cloudfoundry.org/concepts/architecture/uaa.html) +- [Dex](https://dexidp.io/docs/kubernetes/) +- [OpenUnison](https://www.tremolosecurity.com/orchestra-k8s/) + +#### Utilizando kubectl + +##### Opção 1 - Autenticador OIDC + +A primeira opção é utilizar-se do autenticador `oidc` do kubectl, o qual define o valor do `id_token` como um _bearer token_ para todas as requisições e irá atualizar o token quando o mesmo expirar. Após você efetuar o login no seu provedor, utilize o kubectl para adicionar os seus `id_token`, `refresh_token`, `client_id`, e `client_secret` para configurar o plugin. + +Provedores os quais não retornem um `id_token` como parte da sua resposta de _refresh token_ não são suportados por este plugin e devem utilizar a opção 2 abaixo. + +```bash +kubectl config set-credentials USER_NAME \ + --auth-provider=oidc \ + --auth-provider-arg=idp-issuer-url=( issuer url ) \ + --auth-provider-arg=client-id=( your client id ) \ + --auth-provider-arg=client-secret=( your client secret ) \ + --auth-provider-arg=refresh-token=( your refresh token ) \ + --auth-provider-arg=idp-certificate-authority=( path to your ca certificate ) \ + --auth-provider-arg=id-token=( your id_token ) +``` + +Um exemplo, executando o comando abaixo após autenticar-se no seu provedor de identidades: + +```bash +kubectl config set-credentials mmosley \ + --auth-provider=oidc \ + --auth-provider-arg=idp-issuer-url=https://oidcidp.tremolo.lan:8443/auth/idp/OidcIdP \ + --auth-provider-arg=client-id=kubernetes \ + --auth-provider-arg=client-secret=1db158f6-177d-4d9c-8a8b-d36869918ec5 \ + --auth-provider-arg=refresh-token=q1bKLFOyUiosTfawzA93TzZIDzH2TNa2SMm0zEiPKTUwME6BkEo6Sql5yUWVBSWpKUGphaWpxSVAfekBOZbBhaEW+VlFUeVRGcluyVF5JT4+haZmPsluFoFu5XkpXk5BXqHega4GAXlF+ma+vmYpFcHe5eZR+slBFpZKtQA= \ + --auth-provider-arg=idp-certificate-authority=/root/ca.pem \ + --auth-provider-arg=id-token=eyJraWQiOiJDTj1vaWRjaWRwLnRyZW1vbG8ubGFuLCBPVT1EZW1vLCBPPVRybWVvbG8gU2VjdXJpdHksIEw9QXJsaW5ndG9uLCBTVD1WaXJnaW5pYSwgQz1VUy1DTj1rdWJlLWNhLTEyMDIxNDc5MjEwMzYwNzMyMTUyIiwiYWxnIjoiUlMyNTYifQ.eyJpc3MiOiJodHRwczovL29pZGNpZHAudHJlbW9sby5sYW46ODQ0My9hdXRoL2lkcC9PaWRjSWRQIiwiYXVkIjoia3ViZXJuZXRlcyIsImV4cCI6MTQ4MzU0OTUxMSwianRpIjoiMm96US15TXdFcHV4WDlHZUhQdy1hZyIsImlhdCI6MTQ4MzU0OTQ1MSwibmJmIjoxNDgzNTQ5MzMxLCJzdWIiOiI0YWViMzdiYS1iNjQ1LTQ4ZmQtYWIzMC0xYTAxZWU0MWUyMTgifQ.w6p4J_6qQ1HzTG9nrEOrubxIMb9K5hzcMPxc9IxPx2K4xO9l-oFiUw93daH3m5pluP6K7eOE6txBuRVfEcpJSwlelsOsW8gb8VJcnzMS9EnZpeA0tW_p-mnkFc3VcfyXuhe5R3G7aa5d8uHv70yJ9Y3-UhjiN9EhpMdfPAoEB9fYKKkJRzF7utTTIPGrSaSU6d2pcpfYKaxIwePzEkT4DfcQthoZdy9ucNvvLoi1DIC-UocFD8HLs8LYKEqSxQvOcvnThbObJ9af71EwmuE21fO5KzMW20KtAeget1gnldOosPtz1G5EwvaQ401-RPQzPGMVBld0_zMCAwZttJ4knw +``` + +O qual irá produzir a configuração abaixo: + +```yaml +users: +- name: mmosley + user: + auth-provider: + config: + client-id: kubernetes + client-secret: 1db158f6-177d-4d9c-8a8b-d36869918ec5 + id-token: 
eyJraWQiOiJDTj1vaWRjaWRwLnRyZW1vbG8ubGFuLCBPVT1EZW1vLCBPPVRybWVvbG8gU2VjdXJpdHksIEw9QXJsaW5ndG9uLCBTVD1WaXJnaW5pYSwgQz1VUy1DTj1rdWJlLWNhLTEyMDIxNDc5MjEwMzYwNzMyMTUyIiwiYWxnIjoiUlMyNTYifQ.eyJpc3MiOiJodHRwczovL29pZGNpZHAudHJlbW9sby5sYW46ODQ0My9hdXRoL2lkcC9PaWRjSWRQIiwiYXVkIjoia3ViZXJuZXRlcyIsImV4cCI6MTQ4MzU0OTUxMSwianRpIjoiMm96US15TXdFcHV4WDlHZUhQdy1hZyIsImlhdCI6MTQ4MzU0OTQ1MSwibmJmIjoxNDgzNTQ5MzMxLCJzdWIiOiI0YWViMzdiYS1iNjQ1LTQ4ZmQtYWIzMC0xYTAxZWU0MWUyMTgifQ.w6p4J_6qQ1HzTG9nrEOrubxIMb9K5hzcMPxc9IxPx2K4xO9l-oFiUw93daH3m5pluP6K7eOE6txBuRVfEcpJSwlelsOsW8gb8VJcnzMS9EnZpeA0tW_p-mnkFc3VcfyXuhe5R3G7aa5d8uHv70yJ9Y3-UhjiN9EhpMdfPAoEB9fYKKkJRzF7utTTIPGrSaSU6d2pcpfYKaxIwePzEkT4DfcQthoZdy9ucNvvLoi1DIC-UocFD8HLs8LYKEqSxQvOcvnThbObJ9af71EwmuE21fO5KzMW20KtAeget1gnldOosPtz1G5EwvaQ401-RPQzPGMVBld0_zMCAwZttJ4knw + idp-certificate-authority: /root/ca.pem + idp-issuer-url: https://oidcidp.tremolo.lan:8443/auth/idp/OidcIdP + refresh-token: q1bKLFOyUiosTfawzA93TzZIDzH2TNa2SMm0zEiPKTUwME6BkEo6Sql5yUWVBSWpKUGphaWpxSVAfekBOZbBhaEW+VlFUeVRGcluyVF5JT4+haZmPsluFoFu5XkpXk5BXq + name: oidc +``` +Uma vez que seu `id_token` expire, `kubectl` irá tentar atualizar o seu `id_token` utilizando-se do seu `refresh_token` e `client_secret` armazenando os novos valores para `refresh_token` e `id_token` no seu arquivo de configuração `.kube/config`. + +##### Opção 2 - Utilize a opção `--token` + +O comando `kubectl` o permite passar o valor de um token utilizando a opção `--token`. Copie e cole o valor do seu `id_token` nesta opção: + +```bash +kubectl --token=eyJhbGciOiJSUzI1NiJ9.eyJpc3MiOiJodHRwczovL21sYi50cmVtb2xvLmxhbjo4MDQzL2F1dGgvaWRwL29pZGMiLCJhdWQiOiJrdWJlcm5ldGVzIiwiZXhwIjoxNDc0NTk2NjY5LCJqdGkiOiI2RDUzNXoxUEpFNjJOR3QxaWVyYm9RIiwiaWF0IjoxNDc0NTk2MzY5LCJuYmYiOjE0NzQ1OTYyNDksInN1YiI6Im13aW5kdSIsInVzZXJfcm9sZSI6WyJ1c2VycyIsIm5ldy1uYW1lc3BhY2Utdmlld2VyIl0sImVtYWlsIjoibXdpbmR1QG5vbW9yZWplZGkuY29tIn0.f2As579n9VNoaKzoF-dOQGmXkFKf1FMyNV0-va_B63jn-_n9LGSCca_6IVMP8pO-Zb4KvRqGyTP0r3HkHxYy5c81AnIh8ijarruczl-TK_yF5akjSTHFZD-0gRzlevBDiH8Q79NAr-ky0P4iIXS8lY9Vnjch5MF74Zx0c3alKJHJUnnpjIACByfF2SCaYzbWFMUNat-K1PaUk5-ujMBG7yYnr95xD-63n8CO8teGUAAEMx6zRjzfhnhbzX-ajwZLGwGUBT4WqjMs70-6a7_8gZmLZb2az1cZynkFRj2BaCkVT3A2RrjeEwZEtGXlMqKJ1_I2ulrOVsYx01_yD35-rw get nodes +``` + +### Token de autenticação via Webhook + +Webhook de autenticação é usado para verificar _bearer tokens_ + +* `--authentication-token-webhook-config-file` arquivo de configuração descrevendo como acessar o serviço remoto de webhook. +* `--authentication-token-webhook-cache-ttl` por quanto tempo guardar em cache decisões de autenticação. Configuração padrão definida para dois minutos. +* `--authentication-token-webhook-version` determina quando usar o apiVersion `authentication.k8s.io/v1beta1` ou `authentication.k8s.io/v1` para objetos `TokenReview` quando enviar/receber informações do webhook. Valor padrão `v1beta1`. + +O arquivo de configuração usa o formato de arquivo do [kubeconfig](/docs/concepts/configuration/organize-cluster-access-kubeconfig/). Dentro do arquivo, `clusters` refere-se ao serviço remoto e `users` refere-se ao servidor de API do webhook. Um exemplo seria: + +```yaml +# versão da API do Kubernetes +apiVersion: v1 +# tipo do objeto da API +kind: Config +# clusters refere-se ao serviço remoto +clusters: + - name: name-of-remote-authn-service + cluster: + certificate-authority: /path/to/ca.pem # CA para verificar o serviço remoto + server: https://authn.example.com/authenticate # URL para procurar o serviço remoto. Deve utilizar 'https'. 
+ +# users refere-se a configuração do webhook do servidor de API +users: + - name: name-of-api-server + user: + client-certificate: /path/to/cert.pem # certificado para ser utilizado pelo plugin de webhook + client-key: /path/to/key.pem # chave referente ao certificado + +# arquivos kubeconfig requerem um contexto. Especifique um para o servidor de API. +current-context: webhook +contexts: +- context: + cluster: name-of-remote-authn-service + user: name-of-api-server + name: webhook +``` + +Quando um cliente tenta autenticar-se com o servidor de API utilizando um _bearer token_ como discutido [acima](#adicionando-um-bearer-token-em-uma-requisição), o webhook de autenticação envia um objeto JSON serializado do tipo `TokenReview` contendo o valor do _token_ para o serviço remoto. + +Note que objetos de API do tipo _webhook_ estão sujeitos às mesmas [regras de compatibilidade de versão](/docs/concepts/overview/kubernetes-api/) como outros objetos de API Kubernetes. +Implementadores devem verificar o campo de versão da API (`apiVersion`) da requisição para garantir a correta deserialização e **devem** responder com um objeto do tipo `TokenReview` da mesma versão da requisição. + +{{< tabs name="TokenReview_request" >}} +{{% tab name="authentication.k8s.io/v1" %}} +{{< note >}} +O servidor de API Kubernetes envia por padrão revisão de tokens para a API `authentication.k8s.io/v1beta1` para fins de compatibilidade com versões anteriores. + +Para optar receber revisão de tokens de versão `authentication.k8s.io/v1`, o servidor de API deve ser inicializado com a opção `--authentication-token-webhook-version=v1`. +{{< /note >}} + +```yaml +{ + "apiVersion": "authentication.k8s.io/v1", + "kind": "TokenReview", + "spec": { + # Bearer token opaco enviado para o servidor de API + "token": "014fbff9a07c...", + + # Lista opcional de identificadores de audiência para o servidor ao qual o token foi apresentado + # Autenticadores de token sensíveis a audiência (por exemplo, autenticadores de token OIDC) + # deve-se verificar que o token foi direcionado a pelo menos um membro da lista de audiência + # e retornar a interseção desta lista a audiência válida para o token no estado da resposta + # Isto garante com que o token é válido para autenticar-se no servidor ao qual foi apresentado + # Se nenhuma audiência for especificada, o token deve ser validado para autenticar-se ao servidor de API do Kubernetes + "audiences": ["https://myserver.example.com", "https://myserver.internal.example.com"] + } +} +``` +{{% /tab %}} +{{% tab name="authentication.k8s.io/v1beta1" %}} +```yaml +{ + "apiVersion": "authentication.k8s.io/v1beta1", + "kind": "TokenReview", + "spec": { + # Bearer token opaco enviado para o servidor de API + "token": "014fbff9a07c...", + + # Lista opcional de identificadores de audiência para o servidor ao qual o token foi apresentado + # Autenticadores de token sensíveis a audiência (por exemplo, autenticadores de token OIDC) + # deve-se verificar que o token foi direcionado a pelo menos um membro da lista de audiência + # e retornar a interseção desta lista a audiência válida para o token no estado da resposta + # Isto garante com que o token é válido para autenticar-se no servidor ao qual foi apresentado + # Se nenhuma audiência for especificada, o token deve ser validado para autenticar-se ao servidor de API do Kubernetes + "audiences": ["https://myserver.example.com", "https://myserver.internal.example.com"] + } +} +``` +{{% /tab %}} +{{< /tabs >}} + +É esperado que o serviço remoto preencha o 
campo `status` da requisição para indicar o sucesso do login. +O campo `spec` do corpo de resposta é ignorado e pode ser omitido. +O serviço remoto deverá retornar uma resposta usando a mesma versão de API do objeto `TokenReview` que foi recebido. +Uma validação bem sucedida deveria retornar: + +{{< tabs name="TokenReview_response_success" >}} +{{% tab name="authentication.k8s.io/v1" %}} +```yaml +{ + "apiVersion": "authentication.k8s.io/v1", + "kind": "TokenReview", + "status": { + "authenticated": true, + "user": { + # Obrigatório + "username": "janedoe@example.com", + # Opcional + "uid": "42", + # Opcional: lista de grupos associados + "groups": ["developers", "qa"], + # Opcional: informação adicional provida pelo autenticador. + # Isto não deve conter dados confidenciais, pois pode ser registrados em logs ou em objetos de API e estarão disponíveis para webhooks de admissão + "extra": { + "extrafield1": [ + "extravalue1", + "extravalue2" + ] + } + }, + # Lista opcional de Autenticadores de token sensíveis a audiência que podem ser retornados, + # contendo as audiências da lista `spec.audiences` válido para o token apresentado. + # Se este campo for omitido, o token é considerado válido para autenticar-se no servidor de API Kubernetes + "audiences": ["https://myserver.example.com"] + } +} +``` +{{% /tab %}} +{{% tab name="authentication.k8s.io/v1beta1" %}} +```yaml +{ + "apiVersion": "authentication.k8s.io/v1beta1", + "kind": "TokenReview", + "status": { + "authenticated": true, + "user": { + # Obrigatório + "username": "janedoe@example.com", + # Opcional + "uid": "42", + # Opcional: lista de grupos associados + "groups": ["developers", "qa"], + # Opcional: informação adicional provida pelo autenticador. + # Isto não deve conter dados confidenciais, pois pode ser registrados em logs ou em objetos de API e estarão disponíveis para webhooks de admissão + "extra": { + "extrafield1": [ + "extravalue1", + "extravalue2" + ] + } + }, + # Lista opcional de Autenticadores de token sensíveis a audiência que podem ser retornados, + # contendo as audiências da lista `spec.audiences` válido para o token apresentado. + # Se este campo for omitido, o token é considerado válido para autenticar-se no servidor de API Kubernetes + "audiences": ["https://myserver.example.com"] + } +} +``` +{{% /tab %}} +{{< /tabs >}} + +Uma requisição mal sucedida retornaria: + +{{< tabs name="TokenReview_response_error" >}} +{{% tab name="authentication.k8s.io/v1" %}} +```yaml +{ + "apiVersion": "authentication.k8s.io/v1", + "kind": "TokenReview", + "status": { + "authenticated": false, + # Opcionalmente inclui detalhes sobre o porque a autenticação falhou + # Se nenhum erro é fornecido, a API irá retornar uma mensagem genérica de "Não autorizado" + # O campo de erro é ignorado quando authenticated=true. + "error": "Credenciais expiradas" + } +} +``` +{{% /tab %}} +{{% tab name="authentication.k8s.io/v1beta1" %}} +```yaml +{ + "apiVersion": "authentication.k8s.io/v1beta1", + "kind": "TokenReview", + "status": { + "authenticated": false, + # Opcionalmente inclui detalhes sobre o porque a autenticação falhou + # Se nenhum erro é fornecido, a API irá retornar uma mensagem genérica de "Não autorizado" + # O campo de erro é ignorado quando authenticated=true. + "error": "Credenciais expiradas" + } +} +``` +{{% /tab %}} +{{< /tabs >}} + +### Autenticando com Proxy + +O servidor de API pode ser configurado para identificar usuários através de valores de cabeçalho de requisição, como por exemplo `X-Remote-User`. 
+Isto é projetado para o uso em combinação com um proxy de autenticação, o qual atribui o valor do cabeçalho da requisição.
+
+* `--requestheader-username-headers` Obrigatório, não faz distinção entre caracteres maiúsculos/minúsculos. Nomes de cabeçalhos a serem verificados, em ordem, para a identidade do usuário. O primeiro cabeçalho contendo um valor será usado como o nome do usuário.
+* `--requestheader-group-headers` 1.6+. Opcional, não faz distinção entre caracteres maiúsculos/minúsculos. "X-Remote-Group" é recomendado. Nomes de cabeçalhos a serem verificados, em ordem, para os grupos do usuário. Todos os valores especificados em todos os cabeçalhos serão utilizados como nomes dos grupos do usuário.
+
+* `--requestheader-extra-headers-prefix` 1.6+. Opcional, não faz distinção entre caracteres maiúsculos/minúsculos. "X-Remote-Extra-" é recomendado. Prefixos de cabeçalhos a serem utilizados para definir informações extras sobre o usuário (normalmente utilizadas por um plugin de autorização). Todos os cabeçalhos que começam com qualquer um dos prefixos especificados têm o prefixo removido. O restante do nome do cabeçalho é transformado em letras minúsculas, decodificado de acordo com [percent-decoding](https://tools.ietf.org/html/rfc3986#section-2.1) e torna-se uma chave extra, e o valor do cabeçalho torna-se um valor extra.
+
+{{< note >}}
+Antes da versão 1.11.3 (e 1.10.7, 1.9.11), a chave extra só poderia conter caracteres que fossem [legais em rótulos de cabeçalhos HTTP](https://tools.ietf.org/html/rfc7230#section-3.2.6).
+{{< /note >}}
+
+Por exemplo, com esta configuração:
+
+```
+--requestheader-username-headers=X-Remote-User
+--requestheader-group-headers=X-Remote-Group
+--requestheader-extra-headers-prefix=X-Remote-Extra-
+```
+
+e esta requisição:
+
+```http
+GET / HTTP/1.1
+X-Remote-User: fido
+X-Remote-Group: dogs
+X-Remote-Group: dachshunds
+X-Remote-Extra-Acme.com%2Fproject: some-project
+X-Remote-Extra-Scopes: openid
+X-Remote-Extra-Scopes: profile
+```
+
+resultaria nesta informação de usuário:
+
+```yaml
+name: fido
+groups:
+- dogs
+- dachshunds
+extra:
+  acme.com/project:
+  - some-project
+  scopes:
+  - openid
+  - profile
+```
+
+Para prevenir a falsificação de cabeçalhos, o proxy de autenticação deverá apresentar um certificado de cliente válido ao servidor de API, para que possa ser validado com a autoridade de certificados (CA) especificada, antes que os cabeçalhos de requisição sejam verificados. AVISO: **não** reutilize uma autoridade de certificados (CA) que esteja sendo utilizada em um contexto diferente, a menos que você entenda os riscos e os mecanismos de proteção da utilização de uma autoridade de certificados.
+
+* `--requestheader-client-ca-file` Obrigatório. Pacote de certificados no formato PEM. Um certificado de cliente válido deve ser apresentado e validado com a autoridade de certificados do arquivo especificado antes da verificação dos cabeçalhos de requisição para os nomes de usuário.
+
+* `--requestheader-allowed-names` Opcional. Lista de valores de nomes comuns (CNs). Se especificada, um certificado de cliente válido cujo nome comum (CN) esteja na lista deve ser apresentado antes da verificação dos cabeçalhos de requisição para os nomes de usuário. Se vazia, qualquer nome comum é permitido.
+
+## Requisições anônimas
+
+Quando habilitado, requisições que não são rejeitadas por outros métodos de autenticação configurados são tratadas como requisições anônimas e recebem o nome de usuário `system:anonymous` e a filiação ao grupo `system:unauthenticated`.
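+
+Um esboço ilustrativo (assumindo um servidor de API hipotético acessível em `https://localhost:6443`, com acesso anônimo habilitado e RBAC em uso): uma requisição sem credenciais é autenticada como `system:anonymous`:
+
+```bash
+# Nenhuma credencial é enviada, então a requisição é tratada como anônima.
+# Com RBAC habilitado, a resposta típica é um erro 403 Forbidden
+# mencionando o usuário "system:anonymous".
+curl -k https://localhost:6443/api/v1/namespaces/default/pods
+```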
+
+Por exemplo, uma requisição que especifique um _bearer token_ inválido, em um servidor com autenticação por token configurada e acesso anônimo habilitado, receberia um erro de acesso não autorizado `401 Unauthorized`. Já uma requisição que não especifique nenhum _bearer token_ seria tratada como uma requisição anônima.
+
+Nas versões 1.5.1-1.5.x, o acesso anônimo é desabilitado por padrão e pode ser habilitado passando a opção `--anonymous-auth=true` durante a inicialização do servidor de API.
+
+Na versão 1.6 e acima, o acesso anônimo é habilitado por padrão se um modo de autorização diferente de `AlwaysAllow` é utilizado, e pode ser desabilitado passando a opção `--anonymous-auth=false` durante a inicialização do servidor de API.
+Começando na versão 1.6, os autorizadores _ABAC (Controle de Acesso Baseado em Atributos)_ e _RBAC (Controle de Acesso Baseado em Função)_ requerem autorização explícita do usuário `system:anonymous` e do grupo `system:unauthenticated`; portanto, regras de políticas legadas que permitam acesso ao usuário `*` e ao grupo `*` não incluem usuários anônimos.
+
+## Personificação de usuário
+
+Um usuário pode agir como outro através de cabeçalhos de personificação. Eles permitem que uma requisição sobrescreva manualmente as informações de usuário com as quais será autenticada. Por exemplo, um administrador pode utilizar esta funcionalidade para investigar um problema com uma política de autorização e, assim, temporariamente, personificar um outro usuário e ver se/como sua requisição está sendo negada.
+
+Requisições de personificação são primeiramente autenticadas como o usuário requerente e, em seguida, trocam para os detalhes de informação do usuário personificado.
+
+O fluxo é:
+
+* Um usuário faz uma chamada de API com suas credenciais _e_ cabeçalhos de personificação.
+* O servidor de API autentica o usuário.
+* O servidor de API garante que o usuário autenticado possui permissão de personificação.
+* Os detalhes de informação do usuário da requisição têm seus valores substituídos pelos detalhes de personificação.
+* A requisição é avaliada e a autorização é feita sobre os detalhes do usuário personificado.
+
+Os seguintes cabeçalhos HTTP podem ser usados para realizar uma requisição de personificação:
+
+* `Impersonate-User`: o nome do usuário em nome do qual as ações serão executadas.
+* `Impersonate-Group`: um nome de grupo em nome do qual as ações serão executadas. Pode ser especificado múltiplas vezes para fornecer múltiplos grupos. Opcional. Requer "Impersonate-User".
+* `Impersonate-Extra-( extra name )`: um cabeçalho dinâmico usado para associar campos extras ao usuário. Opcional. Requer "Impersonate-User". Para que seja preservado consistentemente, `( extra name )` deve conter somente letras minúsculas, e qualquer caractere que não seja [legal em rótulos de cabeçalhos HTTP](https://tools.ietf.org/html/rfc7230#section-3.2.6) DEVE ser utf8 e [codificado](https://tools.ietf.org/html/rfc3986#section-2.1).
+
+{{< note >}}
+Antes da versão 1.11.3 (e 1.10.7, 1.9.11), `( extra name )` só poderia conter caracteres que fossem [legais em rótulos de cabeçalhos HTTP](https://tools.ietf.org/html/rfc7230#section-3.2.6).
+{{< /note >}} + +Um exemplo de conjunto de cabeçalhos HTTP: + +```http +Impersonate-User: jane.doe@example.com +Impersonate-Group: developers +Impersonate-Group: admins +Impersonate-Extra-dn: cn=jane,ou=engineers,dc=example,dc=com +Impersonate-Extra-acme.com%2Fproject: some-project +Impersonate-Extra-scopes: view +Impersonate-Extra-scopes: development +``` + +Quando utilizando-se o `kubectl` especifique a opção `--as` para determinar o cabeçalho `Impersonate-User`, especifique a opção `--as-group` para determinar o cabeçalho `Impersonate-Group`. + +```bash +kubectl drain mynode +``` + +```none +Error from server (Forbidden): User "clark" cannot get nodes at the cluster scope. (get nodes mynode) +``` + +Especificando as opções `--as` e `--as-group`: + +```bash +kubectl drain mynode --as=superman --as-group=system:masters +``` + +```none +node/mynode cordoned +node/mynode drained +``` + +Para personificar um usuário, grupo ou especificar campos extras, o usuário efetuando a personificação deve possuir a permissão de executar o verbo "impersonate" no tipo de atributo sendo personificado ("user", "group", etc.). Para clusters com o plugin de autorização _RBAC_ habilitados, a seguinte ClusterRole abrange as regras necessárias para definir os cabeçalhos de personificação de usuário e grupo: + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: impersonator +rules: +- apiGroups: [""] + resources: ["users", "groups", "serviceaccounts"] + verbs: ["impersonate"] +``` + +Campos extras são avaliados como sub-recursos de um recurso denominado "userextras". Para permitir ao usuário que utilize os cabeçalhos de personificação para o campo extra "scopes", o usuário deve receber a seguinte permissão: + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: scopes-impersonator +rules: +# Pode definir o cabeçalho "Impersonate-Extra-scopes". +- apiGroups: ["authentication.k8s.io"] + resources: ["userextras/scopes"] + verbs: ["impersonate"] +``` +Os valores dos cabeçalhos de personificação podem também ser restringidos ao limitar o conjunto de nomes de recursos (`resourceNames`) que um recurso pode ter. + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: limited-impersonator +rules: +# Pode personificar o usuário "jane.doe@example.com" +- apiGroups: [""] + resources: ["users"] + verbs: ["impersonate"] + resourceNames: ["jane.doe@example.com"] + +# Pode assumir os grupos "developers" and "admins" +- apiGroups: [""] + resources: ["groups"] + verbs: ["impersonate"] + resourceNames: ["developers","admins"] + +# Pode personificar os campos extras "scopes" com valores "view" e "development" +- apiGroups: ["authentication.k8s.io"] + resources: ["userextras/scopes"] + verbs: ["impersonate"] + resourceNames: ["view", "development"] +``` + +## Plugins de credenciais client-go +{{< feature-state for_k8s_version="v1.11" state="beta" >}} + +Ferramentas como `kubectl` e `kubelet` utilizando-se do `k8s.io/client-go` são capazes de executar um comando externo para receber credenciais de usuário. + +Esta funcionalidade é direcionada à integração do lado cliente, com protocolos de autenticação não suportados nativamente pelo `k8s.io/client-go` como: LDAP, Kerberos, OAuth2, SAML, etc. O plugin implementa a lógica específica do protocolo e então retorna credenciais opacas para serem utilizadas. 
Quase todos os casos de usos de plugins de credenciais requerem um componente de lado do servidor com suporte para um [autenticador de token webhook](#token-de-autenticação-via-webhook) para interpretar o formato das credenciais produzidas pelo plugin cliente. + +### Exemplo de caso de uso + +Num caso de uso hipotético, uma organização executaria um serviço externo que efetuaria a troca de credenciais LDAP por tokens assinados para um usuário específico. Este serviço seria também capaz de responder requisições do [autenticador de token webhook](#token-de-autenticação-via-webhook) para validar tokens. Usuários seriam obrigados a instalar um plugin de credencial em sua estação de trabalho. + +Para autenticar na API: +* O usuário entra um comando `kubectl`. +* O plugin de credencial solicita ao usuário a entrada de credenciais LDAP e efetua troca das credenciais por um token via um serviço externo. +* O plugin de credenciais retorna um token para o client-go, o qual o utiliza como um bearer token no servidor de API. +* O servidor de API usa o [autenticador de token webhook](#token-de-autenticação-via-webhook) para submeter um objeto `TokenReview` para o serviço externo. +* O serviço externo verifica a assinatura do token e retorna o nome e grupos do usuário. + +### Configuração + +plugins de credencial são configurados através de [arquivos de configuração do kubectl](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) como parte dos campos de usuário. + +```yaml +apiVersion: v1 +kind: Config +users: +- name: my-user + user: + exec: + # Comando a ser executado. Obrigatório. + command: "example-client-go-exec-plugin" + + # Versão da API a ser utilizada quando decodificar o recurso ExecCredentials. Obrigatório + # + # A versão da API retornada pelo plugin DEVE ser a mesma versão listada aqui. + # + # Para integrar com ferramentas que suportem múltiplas versões (tal como client.authentication.k8s.io/v1alpha1), + # defina uma variável de ambiente ou passe um argumento para a ferramenta que indique qual versão o plugin de execução deve esperar. + apiVersion: "client.authentication.k8s.io/v1beta1" + + # Variáveis de ambiente a serem configuradas ao executar o plugin. Opcional + env: + - name: "FOO" + value: "bar" + + # Argumentos a serem passados ao executar o plugin. Opcional + args: + - "arg1" + - "arg2" + + # Texto exibido para o usuário quando o executável não parece estar presente. Opcional + installHint: | + example-client-go-exec-plugin é necessário para autenticar no cluster atual. Pode ser instalado via: + + Em macOS: brew install example-client-go-exec-plugin + + Em Ubuntu: apt-get install example-client-go-exec-plugin + + Em Fedora: dnf install example-client-go-exec-plugin + + ... 
+
+      # Deve-se ou não fornecer informações do cluster, que podem potencialmente conter grande quantidade de dados da CA,
+      # para esse plugin de execução, como parte da variável de ambiente KUBERNETES_EXEC_INFO
+      provideClusterInfo: true
+clusters:
+- name: my-cluster
+  cluster:
+    server: "https://172.17.4.100:6443"
+    certificate-authority: "/etc/kubernetes/ca.pem"
+    extensions:
+    - name: client.authentication.k8s.io/exec # nome de extensão reservado para configuração exclusiva do cluster
+      extension:
+        arbitrary: config
+        this: pode ser fornecido através da variável de ambiente KUBERNETES_EXEC_INFO na configuração de provideClusterInfo
+        you: ["coloque", "qualquer", "coisa", "aqui"]
+contexts:
+- name: my-cluster
+  context:
+    cluster: my-cluster
+    user: my-user
+current-context: my-cluster
+```
+
+Os caminhos relativos do comando são interpretados como relativos ao diretório do arquivo de configuração. Se
+KUBECONFIG estiver configurado para o caminho `/home/jane/kubeconfig` e o comando executado for `./bin/example-client-go-exec-plugin`,
+o binário `/home/jane/bin/example-client-go-exec-plugin` será executado.
+
+```yaml
+- name: my-user
+  user:
+    exec:
+      # Caminho relativo ao diretório do kubeconfig
+      command: "./bin/example-client-go-exec-plugin"
+      apiVersion: "client.authentication.k8s.io/v1beta1"
+```
+
+### Formatos de entrada e saída
+
+O comando executado imprime um objeto `ExecCredential` no `stdout`. `k8s.io/client-go`
+autentica na API do Kubernetes utilizando as credenciais retornadas no `status`.
+
+Ao executar uma sessão interativa, `stdin` é exposto diretamente para o plugin. Plugins devem utilizar
+uma [verificação de TTY](https://godoc.org/golang.org/x/crypto/ssh/terminal#IsTerminal) para determinar se é
+apropriado solicitar entrada do usuário interativamente.
+
+Para usar credenciais do tipo _bearer token_, o plugin retorna um token no status do objeto `ExecCredential`.
+
+```json
+{
+  "apiVersion": "client.authentication.k8s.io/v1beta1",
+  "kind": "ExecCredential",
+  "status": {
+    "token": "my-bearer-token"
+  }
+}
+```
+
+Alternativamente, um certificado de cliente e uma chave codificados em PEM podem ser retornados para serem utilizados em autenticação de cliente TLS.
+Se o plugin retornar um certificado e uma chave diferentes numa chamada subsequente, `k8s.io/client-go`
+irá fechar as conexões existentes com o servidor para forçar uma nova troca TLS.
+
+Se especificado, `clientKeyData` e `clientCertificateData` devem ambos estar presentes.
+
+`clientCertificateData` pode conter certificados intermediários adicionais a serem enviados para o servidor.
+
+```json
+{
+  "apiVersion": "client.authentication.k8s.io/v1beta1",
+  "kind": "ExecCredential",
+  "status": {
+    "clientCertificateData": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
+    "clientKeyData": "-----BEGIN RSA PRIVATE KEY-----\n...\n-----END RSA PRIVATE KEY-----"
+  }
+}
+```
+
+Opcionalmente, a resposta pode incluir a validade da credencial em formato
+de data/hora RFC3339. A presença ou ausência da validade pode ter o seguinte impacto:
+
+- Se uma validade está incluída, o _bearer token_ e as credenciais TLS são guardados em cache até
+que o tempo de expiração seja atingido, o servidor responda com um código de status HTTP 401
+ou o processo termine.
+
+- Se a validade está ausente, o _bearer token_ e as credenciais TLS são guardados em cache até
+que o servidor responda com um código de status HTTP 401 ou o processo termine.
+
+```json
+{
+  "apiVersion": "client.authentication.k8s.io/v1beta1",
+  "kind": "ExecCredential",
+  "status": {
+    "token": "my-bearer-token",
+    "expirationTimestamp": "2018-03-05T17:30:20-08:00"
+  }
+}
+```
+
+Para habilitar o plugin de execução a obter informações específicas do cluster, defina `provideClusterInfo` no campo `user.exec`
+dentro do arquivo de configuração [kubeconfig](/docs/concepts/configuration/organize-cluster-access-kubeconfig/).
+O plugin receberá então a variável de ambiente `KUBERNETES_EXEC_INFO`.
+As informações desta variável de ambiente podem ser utilizadas para executar lógicas de aquisição
+de credenciais específicas do cluster.
+O manifesto `ExecCredential` abaixo descreve um exemplo de informação de cluster.
+
+```json
+{
+  "apiVersion": "client.authentication.k8s.io/v1beta1",
+  "kind": "ExecCredential",
+  "spec": {
+    "cluster": {
+      "server": "https://172.17.4.100:6443",
+      "certificate-authority-data": "LS0t...",
+      "config": {
+        "arbitrary": "config",
+        "this": "pode ser fornecido por meio da variável de ambiente KUBERNETES_EXEC_INFO na configuração de provideClusterInfo",
+        "you": ["coloque", "qualquer", "coisa", "aqui"]
+      }
+    }
+  }
+}
+```
\ No newline at end of file
diff --git a/content/pt-br/docs/reference/access-authn-authz/bootstrap-tokens.md b/content/pt-br/docs/reference/access-authn-authz/bootstrap-tokens.md
new file mode 100644
index 0000000000000..67f23e2bb63ac
--- /dev/null
+++ b/content/pt-br/docs/reference/access-authn-authz/bootstrap-tokens.md
@@ -0,0 +1,169 @@
+---
+title: Autenticando com Tokens de Inicialização
+content_type: concept
+weight: 20
+---
+
+
+
+{{< feature-state for_k8s_version="v1.18" state="stable" >}}
+
+Os tokens de inicialização são _bearer tokens_ simples que devem ser utilizados
+ao criar novos clusters ou ao registrar novos nós em clusters existentes. Eles foram construídos
+para suportar a ferramenta [kubeadm](/docs/reference/setup-tools/kubeadm/), mas podem ser utilizados em outros contextos por usuários que desejam inicializar clusters sem utilizar o `kubeadm`.
+Foram também construídos para funcionar, via políticas RBAC, com o sistema de [Inicialização do Kubelet via TLS](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/).
+
+
+## Visão geral dos tokens de inicialização
+
+Os tokens de inicialização são definidos com um tipo específico de _secret_ (`bootstrap.kubernetes.io/token`) que existe no namespace `kube-system`. Estes _secrets_ são então lidos pelo autenticador de inicialização do servidor de API.
+Tokens expirados são removidos pelo controlador _TokenCleaner_ no gerenciador de controle (kube-controller-manager).
+Os tokens também são utilizados para criar uma assinatura para um ConfigMap específico usado no processo de descoberta, através de um controlador denominado `BootstrapSigner`.
+
+## Formato do Token
+
+Tokens de inicialização têm o formato `abcdef.0123456789abcdef`. Mais formalmente, eles devem corresponder à expressão regular `[a-z0-9]{6}\.[a-z0-9]{16}`.
+
+A primeira parte do token é um identificador ("Token ID") e é considerada informação pública.
+Ela é utilizada para se referir a um token sem vazar a parte secreta usada para autenticação.
+A segunda parte é o _secret_ do token e somente deve ser compartilhada com partes confiáveis.
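+
+Um esboço de como gerar e inspecionar esses tokens com o `kubeadm` (assumindo um cluster criado com o `kubeadm`):
+
+```bash
+# Gera um token aleatório no formato esperado, sem criá-lo no cluster
+kubeadm token generate
+
+# Cria um token no cluster e lista os tokens existentes
+kubeadm token create
+kubeadm token list
+```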
+
+## Habilitando autenticação com tokens de inicialização
+
+O autenticador de tokens de inicialização pode ser habilitado utilizando a seguinte opção no servidor de API:
+
+```
+--enable-bootstrap-token-auth
+```
+
+Quando habilitado, tokens de inicialização podem ser utilizados como credenciais _bearer token_
+para autenticar requisições no servidor de API.
+
+```http
+Authorization: Bearer 07401b.f395accd246ae52d
+```
+
+Tokens são autenticados como o usuário `system:bootstrap:<Token ID>` e são membros
+do grupo `system:bootstrappers`. Grupos adicionais podem ser
+especificados dentro do _secret_ do token.
+
+Tokens expirados podem ser removidos automaticamente ao habilitar o controlador `tokencleaner`
+do gerenciador de controle (kube-controller-manager).
+
+```
+--controllers=*,tokencleaner
+```
+
+## Formato do _secret_ dos tokens de inicialização
+
+Cada token válido possui um _secret_ no namespace `kube-system`. Você pode
+encontrar a documentação completa [aqui](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/cluster-lifecycle/bootstrap-discovery.md).
+
+Um _secret_ de token se parece com o exemplo abaixo:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  # Nome DEVE seguir o formato "bootstrap-token-<token id>"
+  name: bootstrap-token-07401b
+  namespace: kube-system
+
+# Tipo DEVE ser 'bootstrap.kubernetes.io/token'
+type: bootstrap.kubernetes.io/token
+stringData:
+  # Descrição legível. Opcional.
+  description: "The default bootstrap token generated by 'kubeadm init'."
+
+  # Identificador e segredo do token. Obrigatórios.
+  token-id: 07401b
+  token-secret: f395accd246ae52d
+
+  # Validade. Opcional.
+  expiration: 2017-03-10T03:22:11Z
+
+  # Usos permitidos.
+  usage-bootstrap-authentication: "true"
+  usage-bootstrap-signing: "true"
+
+  # Grupos adicionais para autenticar o token. Devem começar com "system:bootstrappers:"
+  auth-extra-groups: system:bootstrappers:worker,system:bootstrappers:ingress
+```
+
+O tipo do _secret_ deve ser `bootstrap.kubernetes.io/token` e o nome deve seguir o formato `bootstrap-token-<token id>`. Ele também tem que existir no namespace `kube-system`.
+
+Os membros listados em `usage-bootstrap-*` indicam qual a intenção de uso deste _secret_. O valor `true` deve ser definido para que um uso seja ativado.
+
+* `usage-bootstrap-authentication` indica que o token pode ser utilizado para autenticar no servidor de API como um _bearer token_.
+* `usage-bootstrap-signing` indica que o token pode ser utilizado para assinar o ConfigMap `cluster-info`, como descrito abaixo.
+
+O campo `expiration` controla a expiração do token. Tokens expirados são
+rejeitados quando usados para autenticação e ignorados durante a assinatura de ConfigMaps.
+O valor de expiração é codificado como um tempo absoluto UTC utilizando a RFC3339. Para automaticamente
+remover tokens expirados, basta habilitar o controlador `tokencleaner`.
+
+## Gerenciamento de tokens com kubeadm
+
+Você pode usar a ferramenta `kubeadm` para gerenciar tokens em um cluster. Veja a [documentação de tokens do kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm-token/) para mais detalhes.
+
+## Assinatura de ConfigMap
+
+Além de autenticação, os tokens podem ser utilizados para assinar um ConfigMap. Isto pode
+ser utilizado em um estágio inicial do processo de inicialização de um cluster, antes que o cliente confie
+no servidor de API. O ConfigMap assinado pode ser autenticado por um token compartilhado.
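+
+Como ilustração, o ConfigMap assinado pode ser inspecionado diretamente (esboço; em clusters criados com o `kubeadm`, ele costuma ser legível até mesmo sem autenticação):
+
+```bash
+# O ConfigMap cluster-info reside no namespace kube-public
+kubectl get configmap cluster-info --namespace=kube-public -o yaml
+```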
+ +Habilite a assinatura de ConfigMap ao habilitar o controlador `bootstrapsigner` no gerenciador de controle (kube-controller-manager). + +``` +--controllers=*,bootstrapsigner +``` + +O ConfigMap assinado é o `cluster-info` no namespace `kube-public`. +No fluxo típico, um cliente lê o ConfigMap enquanto ainda não autenticado +e ignora os erros da camada de transporte seguro (TLS). +Ele então valida o conteúdo do ConfigMap ao verificar a assinatura contida no ConfigMap. + +O ConfigMap pode se parecer com o exemplo abaixo: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-info + namespace: kube-public +data: + jws-kubeconfig-07401b: eyJhbGciOiJIUzI1NiIsImtpZCI6IjA3NDAxYiJ9..tYEfbo6zDNo40MQE07aZcQX2m3EB2rO3NuXtxVMYm9U + kubeconfig: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: + server: https://10.138.0.2:6443 + name: "" + contexts: [] + current-context: "" + kind: Config + preferences: {} + users: [] +``` + +O membro `kubeconfig` do ConfigMap é um arquivo de configuração contendo somente +as informações do cluster preenchidas. A principal informação comunicada aqui +está em `certificate-authority-data`. Isto poderá ser expandido no futuro. + +A assinatura é feita utilizando-se assinatura JWS em modo "separado" (_detached_). Para validar +a assinatura, o usuário deve codificar o conteúdo do `kubeconfig` de acordo com as regras do JWS +(codificando em base64 e descartando qualquer `=` ao final). O conteúdo codificado +é então usado para formar um JWS completo, inserindo-o entre os dois caracteres de ponto (`..`) da assinatura. Você pode +verificar o JWS utilizando o esquema `HS256` (HMAC-SHA256) com o token completo +(por exemplo: `07401b.f395accd246ae52d`) como o _secret_ compartilhado. Usuários _devem_ +verificar que o algoritmo HS256 (que é um método de assinatura simétrica) está sendo utilizado. + + +{{< warning >}} +Qualquer parte em posse de um token de inicialização pode criar uma assinatura válida +daquele token. Quando se utiliza assinatura de ConfigMap, não é recomendável compartilhar +o mesmo token com muitos clientes, uma vez que um cliente comprometido pode potencialmente realizar um ataque +"homem no meio" (_man-in-the-middle_) contra outro cliente que confia na assinatura para estabelecer a confiança na camada de transporte seguro (TLS) durante a inicialização. +{{< /warning >}} + +Consulte a seção de [detalhes de implementação do kubeadm](/docs/reference/setup-tools/kubeadm/implementation-details/) para mais informações. \ No newline at end of file diff --git a/content/pt-br/docs/reference/glossary/alternate-x509-schemes.md b/content/pt-br/docs/reference/glossary/alternate-x509-schemes.md new file mode 100644 index 0000000000000..307479595d9fc --- /dev/null +++ b/content/pt-br/docs/reference/glossary/alternate-x509-schemes.md @@ -0,0 +1,27 @@ +--- +title: Esquemas alternativos x509 +id: alternate-x509-schemes +date: 2021-03-16 +full_link: +short_description: > + X.509 é um formato padrão para certificados de chave pública, documentos digitais que associam com segurança pares de chaves criptográficas a identidades como sites, indivíduos ou organizações. + +aka: +tags: +- authentication +--- + + + +X.509 é um formato padrão para certificados de chave pública, documentos digitais que associam com segurança pares de chaves criptográficas a identidades como sites, indivíduos ou organizações. + +Introduzido pela primeira vez em 1988 junto com os padrões X.500 para serviços de diretório eletrônico, o X.509 foi adaptado para uso na Internet pelo grupo de trabalho Public-Key Infrastructure (X.509) (PKIX) da IETF.
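Como ilustração, um esboço de como inspecionar um certificado X.509 com o OpenSSL (assumindo o OpenSSL instalado e um certificado no arquivo hipotético `cert.pem`):

```shell
# Exibe todos os campos do certificado (titular, emissor, validade, extensões) em formato legível
openssl x509 -in cert.pem -noout -text

# Exibe apenas o titular, o emissor e o período de validade
openssl x509 -in cert.pem -noout -subject -issuer -dates
```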
O RFC 5280 define o perfil do certificado X.509 v3, a lista de revogação de certificado X.509 v2 (CRL) e descreve um algoritmo para a validação do caminho do certificado X.509. + +As aplicações comuns de certificados X.509 incluem: + + - SSL/TLS e HTTPS para navegação na web autenticada e criptografada + - E-mail assinado e criptografado por meio do protocolo S/MIME + - Assinatura de código + - Assinatura de documento + - Autenticação de cliente + - Identificação eletrônica emitida pelo governo diff --git a/content/pt-br/docs/reference/glossary/cloud-controller-manager.md b/content/pt-br/docs/reference/glossary/cloud-controller-manager.md new file mode 100644 index 0000000000000..622d3f842e914 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/cloud-controller-manager.md @@ -0,0 +1,21 @@ +--- +title: Gerenciador de controle de nuvem +id: cloud-controller-manager +date: 2018-04-12 +full_link: /docs/concepts/architecture/cloud-controller/ +short_description: > + Componente da camada de gerenciamento que integra Kubernetes com provedores de nuvem de terceiros. +aka: +tags: +- core-object +- architecture +- operation +--- + Um componente da {{< glossary_tooltip text="camada de gerenciamento" term_id="control-plane" >}} do Kubernetes + que incorpora a lógica de controle específica da nuvem. O gerenciador de controle de nuvem permite que você vincule seu + _cluster_ à API do seu provedor de nuvem e separe os componentes que interagem com essa plataforma de nuvem dos componentes que interagem apenas com o seu cluster. + + + +Ao desassociar a lógica de interoperabilidade entre o Kubernetes e a infraestrutura de nuvem subjacente, o componente gerenciador de controle de nuvem permite que os provedores de nuvem desenvolvam e disponibilizem recursos em um ritmo diferente em comparação com o projeto principal do Kubernetes. diff --git a/content/pt/docs/reference/glossary/cluster.md b/content/pt-br/docs/reference/glossary/cluster.md similarity index 100% rename from content/pt/docs/reference/glossary/cluster.md rename to content/pt-br/docs/reference/glossary/cluster.md diff --git a/content/pt-br/docs/reference/glossary/cncf.md b/content/pt-br/docs/reference/glossary/cncf.md new file mode 100644 index 0000000000000..f9ad2495478db --- /dev/null +++ b/content/pt-br/docs/reference/glossary/cncf.md @@ -0,0 +1,20 @@ +--- +title: Cloud Native Computing Foundation (CNCF) +id: cncf +date: 2019-05-26 +full_link: https://cncf.io/ +short_description: > + Cloud Native Computing Foundation + +aka: +tags: +- community +--- + A **Cloud Native Computing Foundation (CNCF)** constrói um ecossistema sustentável e promove uma comunidade em torno dos [projetos](https://www.cncf.io/projects/) que orquestram contêineres como parte de uma arquitetura de microsserviços. + +**Kubernetes** é um projeto CNCF. + + + +A **CNCF** é uma sub-fundação da [Linux Foundation](https://www.linuxfoundation.org/). +Sua missão é tornar a computação nativa em nuvem onipresente. diff --git a/content/pt-br/docs/reference/glossary/cni.md b/content/pt-br/docs/reference/glossary/cni.md new file mode 100644 index 0000000000000..6cb909f962849 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/cni.md @@ -0,0 +1,18 @@ +--- +title: Container network interface (CNI) +id: cni +date: 2018-05-25 +full_link: /docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#cni +short_description: > + Plugins Container network interface (CNI) são um tipo de plugin de rede em conformidade com a especificação appc/CNI.
+ + +aka: +tags: +- networking +--- + Plugins Container network interface (CNI) são um tipo de plugin de rede em conformidade com a especificação appc/CNI. + + +* Para informações sobre Kubernetes e CNI, veja ["Plugins de rede"](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#cni). diff --git a/content/pt-br/docs/reference/glossary/container-runtime.md b/content/pt-br/docs/reference/glossary/container-runtime.md new file mode 100644 index 0000000000000..8c1cb808ef116 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/container-runtime.md @@ -0,0 +1,18 @@ +--- +title: Agente de execução de contêiner +id: container-runtime +date: 2019-06-05 +full_link: /docs/setup/production-environment/container-runtimes +short_description: > + O agente de execução de contêiner é o software responsável por executar os contêineres. + +aka: +tags: +- fundamental +- workload +--- + O agente de execução (_runtime_) de contêiner é o software responsável por executar os contêineres. + + + +O Kubernetes suporta diversos agentes de execução de contêineres: {{< glossary_tooltip term_id="docker">}}, {{< glossary_tooltip term_id="containerd" >}}, {{< glossary_tooltip term_id="cri-o" >}}, e qualquer implementação do [Kubernetes CRI (Container Runtime Interface)](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/container-runtime-interface.md). diff --git a/content/pt-br/docs/reference/glossary/containerd.md b/content/pt-br/docs/reference/glossary/containerd.md new file mode 100644 index 0000000000000..0eee599829f60 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/containerd.md @@ -0,0 +1,19 @@ +--- +title: containerd +id: containerd +date: 2019-05-14 +full_link: https://containerd.io/docs/ +short_description: > + Um agente de execução de contêiner com ênfase em simplicidade, robustez e portabilidade. + +aka: +tags: +- tool +--- + Um agente de execução de contêiner com ênfase em simplicidade, robustez e portabilidade. + + + +containerd é um agente de execução de {{< glossary_tooltip text="contêiner" term_id="container" >}} +que executa como um serviço no Linux ou Windows. containerd é responsável por buscar e +armazenar as imagens de contêiner, executar contêineres, prover acesso à rede e mais. diff --git a/content/pt-br/docs/reference/glossary/control-plane.md b/content/pt-br/docs/reference/glossary/control-plane.md new file mode 100644 index 0000000000000..c65759b83bd4c --- /dev/null +++ b/content/pt-br/docs/reference/glossary/control-plane.md @@ -0,0 +1,13 @@ +--- +title: Camada de gerenciamento +id: control-plane +date: 2020-04-19 +full_link: +short_description: > + A camada de orquestração de contêineres que expõe a API e as interfaces para definir, implantar e gerenciar o ciclo de vida dos contêineres. + +aka: +tags: +- fundamental +--- + A camada de orquestração de contêineres que expõe a API e as interfaces para definir, implantar e gerenciar o ciclo de vida dos contêineres.
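Como ilustração, em um cluster criado com o kubeadm (uma suposição; a disposição varia conforme a instalação), os componentes da camada de gerenciamento normalmente executam como Pods no namespace `kube-system` e podem ser listados com:

```shell
# Lista os componentes da camada de gerenciamento (kube-apiserver, etcd, kube-scheduler, etc.)
kubectl get pods --namespace=kube-system
```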
diff --git a/content/pt/docs/reference/glossary/controller.md b/content/pt-br/docs/reference/glossary/controller.md similarity index 100% rename from content/pt/docs/reference/glossary/controller.md rename to content/pt-br/docs/reference/glossary/controller.md diff --git a/content/pt-br/docs/reference/glossary/cri-o.md b/content/pt-br/docs/reference/glossary/cri-o.md new file mode 100644 index 0000000000000..37da1679a1cdb --- /dev/null +++ b/content/pt-br/docs/reference/glossary/cri-o.md @@ -0,0 +1,23 @@ +--- +title: CRI-O +id: cri-o +date: 2019-05-14 +full_link: https://cri-o.io/#what-is-cri-o +short_description: > + Um agente de execução leve de contêineres criado especificamente para o Kubernetes. + +aka: +tags: +- tool +--- +Uma ferramenta que permite que você use agentes de execução de contêineres OCI com o CRI do Kubernetes. + + + +CRI-O é uma implementação do {{< glossary_tooltip term_id="cri" >}} +que permite usar agentes de execução de {{< glossary_tooltip text="contêiner" term_id="container" >}} +compatíveis com as [especificações](https://www.github.com/opencontainers/runtime-spec) da Open Container Initiative (OCI). + +Usar o CRI-O permite ao Kubernetes utilizar qualquer agente de execução compatível +com a OCI para executar {{< glossary_tooltip text="Pods" term_id="pod" >}} e obter imagens +de contêineres de registros remotos. diff --git a/content/pt-br/docs/reference/glossary/cri.md b/content/pt-br/docs/reference/glossary/cri.md new file mode 100644 index 0000000000000..cced4ad43064b --- /dev/null +++ b/content/pt-br/docs/reference/glossary/cri.md @@ -0,0 +1,18 @@ +--- +title: Container runtime interface (CRI) +id: cri +date: 2019-03-07 +full_link: /docs/concepts/overview/components/#container-runtime +short_description: > + Uma API para agentes de execução de contêineres se integrarem com o kubelet. + + +aka: +tags: +- fundamental +--- +A interface de execução de contêiner (CRI) é uma API para agentes de execução de +contêineres se integrarem com o kubelet em um nó. + + +Para mais informações, veja as APIs e especificações do [CRI](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/container-runtime-interface.md). diff --git a/content/pt-br/docs/reference/glossary/customresourcedefinition.md b/content/pt-br/docs/reference/glossary/customresourcedefinition.md new file mode 100755 index 0000000000000..7de24c036b4d0 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/customresourcedefinition.md @@ -0,0 +1,19 @@ +--- +title: CustomResourceDefinition +id: CustomResourceDefinition +date: 2018-04-12 +full_link: /docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/ +short_description: > + Código customizado que define um recurso a ser adicionado ao seu servidor de API Kubernetes sem a necessidade de construir um servidor customizado. + +aka: +tags: +- fundamental +- operation +- extension +--- + Código customizado que define um recurso a ser adicionado ao seu servidor de API Kubernetes sem a necessidade de construir um servidor customizado. + + + +CustomResourceDefinitions permitem que você estenda a API do Kubernetes para o seu ambiente, caso as APIs atuais não atendam aos seus requisitos.
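Como ilustração, um esboço hipotético de um CustomResourceDefinition mínimo aplicado com o kubectl; o grupo `stable.example.com` e o tipo `CronTab` são nomes apenas ilustrativos:

```shell
# Cria um CRD mínimo; depois disso, o servidor de API passa a aceitar objetos do tipo CronTab
kubectl apply -f - <<EOF
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: crontabs.stable.example.com
spec:
  group: stable.example.com
  scope: Namespaced
  names:
    plural: crontabs
    singular: crontab
    kind: CronTab
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
EOF
```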
diff --git a/content/pt-br/docs/reference/glossary/etcd.md b/content/pt-br/docs/reference/glossary/etcd.md new file mode 100644 index 0000000000000..0761a53865789 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/etcd.md @@ -0,0 +1,19 @@ +--- +title: etcd +id: etcd +date: 2018-04-12 +full_link: /docs/tasks/administer-cluster/configure-upgrade-etcd/ +short_description: > + Armazenamento do tipo chave-valor consistente e de alta disponibilidade, usado como repositório de apoio do Kubernetes para todos os dados do cluster. +aka: +tags: +- architecture +- storage +--- + Armazenamento do tipo chave-valor consistente e de alta disponibilidade, usado como repositório de apoio do Kubernetes para todos os dados do cluster. + + + +Se o seu cluster Kubernetes usa **etcd** como seu armazenamento de apoio, certifique-se de ter um plano de [backup](/docs/tasks/administer-cluster/configure-upgrade-etcd/#backing-up-an-etcd-cluster) para seus dados. + +Você pode encontrar informações detalhadas sobre o etcd na [documentação](https://etcd.io/docs/) oficial. diff --git a/content/pt-br/docs/reference/glossary/kerberos.md b/content/pt-br/docs/reference/glossary/kerberos.md new file mode 100644 index 0000000000000..7752654082ed9 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/kerberos.md @@ -0,0 +1,27 @@ +--- +title: Kerberos +id: kerberos +date: 2021-03-16 +full_link: +short_description: > + Kerberos é um protocolo de rede que usa criptografia de chave secreta para autenticar aplicativos cliente-servidor. O Kerberos solicita um tíquete criptografado por meio de uma sequência de servidores autenticados para usar os serviços. + + O protocolo recebe o nome do cão de três cabeças (Kerberos ou Cérbero) que guardava os portões de Hades na mitologia grega. + +aka: +tags: +- authentication +--- + + + +Kerberos é um protocolo de rede que usa criptografia de chave secreta para autenticar aplicativos cliente-servidor. O Kerberos solicita um tíquete criptografado por meio de uma sequência de servidores autenticados para usar os serviços. + +Kerberos foi desenvolvido pelo Project Athena, um projeto conjunto entre o Massachusetts Institute of Technology (MIT), a Digital Equipment Corporation e a IBM, que funcionou entre 1983 e 1991. + +Um servidor de autenticação usa um tíquete Kerberos para conceder acesso ao servidor e, em seguida, cria uma chave de sessão com base na senha do solicitante e em outro valor aleatório. O tíquete de concessão de tíquete (TGT) é enviado ao servidor de concessão de tíquete (TGS), que precisa usar o mesmo servidor de autenticação. + +O solicitante recebe uma chave TGS criptografada com um registro de data e hora e um tíquete de serviço, que é retornado ao solicitante e descriptografado. O solicitante envia ao TGS essas informações e encaminha a chave criptografada ao servidor para obter o serviço desejado. Se todas as ações forem tratadas corretamente, o servidor aceita o tíquete e presta o serviço ao usuário, que deve descriptografar a chave, verificar o carimbo de data/hora e entrar em contato com o centro de distribuição para obter as chaves de sessão. Essa chave de sessão é enviada ao solicitante, que descriptografa o tíquete. + +Se as chaves e o carimbo de data/hora forem válidos, a comunicação cliente-servidor continuará. O tíquete TGS tem carimbo de data/hora, o que permite solicitações simultâneas dentro do período de tempo alocado.
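Como ilustração, um esboço de uso das ferramentas de cliente do MIT Kerberos (assumindo os utilitários instalados e um _realm_ hipotético `EXEMPLO.COM`):

```shell
# Obtém um tíquete de concessão de tíquete (TGT) para o usuário
kinit usuario@EXEMPLO.COM

# Lista os tíquetes ativos e seus prazos de validade
klist

# Descarta os tíquetes ao final da sessão
kdestroy
```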
+ diff --git a/content/pt-br/docs/reference/glossary/keystone.md b/content/pt-br/docs/reference/glossary/keystone.md new file mode 100644 index 0000000000000..6a59f1d1b9098 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/keystone.md @@ -0,0 +1,16 @@ +--- +title: Keystone +id: keystone +date: 2020-08-03 +full_link: +short_description: > + Keystone é o serviço de identidade usado pelo OpenStack para autenticação (authN) e autorização de alto nível (authZ). Atualmente, ele oferece suporte a authN com base em token e autorização de serviço do usuário. Recentemente, foi reprojetado para permitir a expansão para oferecer suporte a serviços externos de proxy e mecanismos AuthN/AuthZ, como OAuth, SAML e OpenID em versões futuras. + +aka: +tags: +- authentication +--- + + +Keystone é o serviço de identidade usado pelo OpenStack para autenticação (authN) e autorização de alto nível (authZ). Atualmente, ele oferece suporte a authN com base em token e autorização de serviço do usuário. Recentemente, foi reprojetado para permitir a expansão para oferecer suporte a serviços externos de proxy e mecanismos AuthN/AuthZ, como OAuth, SAML e OpenID em versões futuras. + diff --git a/content/pt-br/docs/reference/glossary/kube-apiserver.md b/content/pt-br/docs/reference/glossary/kube-apiserver.md new file mode 100644 index 0000000000000..f5ce3dba1aa68 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/kube-apiserver.md @@ -0,0 +1,22 @@ +--- +title: API server +id: kube-apiserver +date: 2018-04-12 +full_link: /docs/concepts/overview/components/#kube-apiserver +short_description: > + O componente da camada de gerenciamento que serve a API do Kubernetes. + +aka: +- kube-apiserver +tags: +- architecture +- fundamental +--- + O servidor de API é um componente da {{< glossary_tooltip text="Camada de gerenciamento" term_id="control-plane" >}} do Kubernetes que expõe a API do Kubernetes. +O servidor de API é o _front end_ para a camada de gerenciamento do Kubernetes. + + + +A principal implementação de um servidor de API do Kubernetes é [kube-apiserver](/docs/reference/generated/kube-apiserver/). +O kube-apiserver foi projetado para ser escalonado horizontalmente, ou seja, ele pode ser escalado com a implantação de mais instâncias. +Você pode executar várias instâncias do kube-apiserver e balancear a carga de tráfego entre essas instâncias. diff --git a/content/pt-br/docs/reference/glossary/kube-controller-manager.md b/content/pt-br/docs/reference/glossary/kube-controller-manager.md new file mode 100644 index 0000000000000..0a52ec27eabb8 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/kube-controller-manager.md @@ -0,0 +1,18 @@ +--- +title: kube-controller-manager +id: kube-controller-manager +date: 2018-04-12 +full_link: /docs/reference/command-line-tools-reference/kube-controller-manager/ +short_description: > + Componente da camada de gerenciamento que executa os processos de controle. + +aka: +tags: +- architecture +- fundamental +--- + Componente da camada de gerenciamento que executa os processos de {{< glossary_tooltip text="controlador" term_id="controller" >}}. + + + +Logicamente, cada _{{< glossary_tooltip text="controlador" term_id="controller" >}}_ está em um processo separado, mas, para reduzir a complexidade, todos eles são compilados em um único binário e executados em um único processo.
diff --git a/content/pt-br/docs/reference/glossary/kube-proxy.md b/content/pt-br/docs/reference/glossary/kube-proxy.md new file mode 100644 index 0000000000000..1f9a075bd6bc5 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/kube-proxy.md @@ -0,0 +1,22 @@ +--- +title: kube-proxy +id: kube-proxy +date: 2018-04-12 +full_link: /docs/reference/command-line-tools-reference/kube-proxy/ +short_description: > + `kube-proxy` é um _proxy_ de rede executado em cada nó do _cluster_. + +aka: +tags: +- fundamental +- networking +--- + kube-proxy é um _proxy_ de rede executado em cada {{< glossary_tooltip text="nó" term_id="node" >}} no seu _cluster_, +implementando parte do conceito de {{< glossary_tooltip text="serviço" term_id="service">}} do Kubernetes. + + + +[kube-proxy](/docs/reference/command-line-tools-reference/kube-proxy/) +mantém regras de rede nos nós. Estas regras de rede permitem a comunicação de rede com seus _pods_ a partir de sessões de rede dentro ou fora de seu _cluster_. + +kube-proxy usa a camada de filtragem de pacotes do sistema operacional, se houver uma disponível. Caso contrário, o kube-proxy encaminha o tráfego ele mesmo. diff --git a/content/pt-br/docs/reference/glossary/kube-scheduler.md b/content/pt-br/docs/reference/glossary/kube-scheduler.md new file mode 100644 index 0000000000000..1030d27853a6d --- /dev/null +++ b/content/pt-br/docs/reference/glossary/kube-scheduler.md @@ -0,0 +1,17 @@ +--- +title: kube-scheduler +id: kube-scheduler +date: 2018-04-12 +full_link: /docs/reference/generated/kube-scheduler/ +short_description: > + Componente da camada de gerenciamento que observa os _pods_ recém-criados sem nenhum nó atribuído e seleciona um nó para executá-los. +aka: +tags: +- architecture +--- +Componente da camada de gerenciamento que observa os _{{< glossary_tooltip term_id="pod" text="pods" >}}_ recém-criados sem nenhum {{< glossary_tooltip term_id="node" text="nó">}} atribuído e seleciona um nó para executá-los. + + + +Os fatores levados em consideração para as decisões de agendamento incluem: +requisitos de recursos individuais e coletivos, restrições de hardware/software/políticas, especificações de afinidade e antiafinidade, localidade de dados, interferência entre cargas de trabalho e prazos. diff --git a/content/pt/docs/reference/glossary/kubelet.md b/content/pt-br/docs/reference/glossary/kubelet.md similarity index 100% rename from content/pt/docs/reference/glossary/kubelet.md rename to content/pt-br/docs/reference/glossary/kubelet.md diff --git a/content/pt-br/docs/reference/glossary/ldap.md b/content/pt-br/docs/reference/glossary/ldap.md new file mode 100644 index 0000000000000..2a15eaa6abc1c --- /dev/null +++ b/content/pt-br/docs/reference/glossary/ldap.md @@ -0,0 +1,28 @@ +--- +title: LDAP +id: ldap +date: 2021-03-16 +full_link: +short_description: > + Abreviatura para "Lightweight Directory Access Protocol". Se você deseja disponibilizar informações de diretório na Internet, esta é a maneira de fazê-lo. O LDAP é uma versão simplificada de um padrão de diretório anterior denominado X.500. + +aka: +tags: +- authentication +--- + + +Abreviatura para "Lightweight Directory Access Protocol". Se você deseja disponibilizar informações de diretório na Internet, esta é a maneira de fazê-lo. O LDAP é uma versão simplificada de um padrão de diretório anterior denominado X.500.
O que torna o LDAP tão útil é que ele funciona muito bem em redes TCP/IP (ao contrário do X.500), de modo que as informações podem ser acessadas por meio do LDAP por qualquer pessoa com uma conexão à Internet. Também é um protocolo aberto, o que significa que os diretórios podem ser armazenados em qualquer tipo de máquina (por exemplo, Windows 2000, Red Hat Linux, Mac OS X). + +Para dar uma ideia de como um diretório LDAP é organizado, aqui estão os diferentes níveis de uma hierarquia de árvore LDAP simples: + +* O diretório raiz +* Países +* Organizações +* Divisões, departamentos, etc. +* Indivíduos +* Recursos individuais, como arquivos e impressoras + +A maior parte da conectividade LDAP é feita nos bastidores, então o usuário típico provavelmente não a notará ao navegar na web. No entanto, é uma boa tecnologia para se conhecer. Se nada mais, é outro termo para impressionar seus pais. + + + + diff --git a/content/pt/docs/reference/glossary/node.md b/content/pt-br/docs/reference/glossary/node.md similarity index 100% rename from content/pt/docs/reference/glossary/node.md rename to content/pt-br/docs/reference/glossary/node.md diff --git a/content/pt/docs/reference/glossary/pod.md b/content/pt-br/docs/reference/glossary/pod.md similarity index 100% rename from content/pt/docs/reference/glossary/pod.md rename to content/pt-br/docs/reference/glossary/pod.md diff --git a/content/pt-br/docs/reference/glossary/saml.md b/content/pt-br/docs/reference/glossary/saml.md new file mode 100644 index 0000000000000..e2831797426e6 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/saml.md @@ -0,0 +1,36 @@ +--- +title: SAML +id: saml +date: 2021-03-16 +full_link: +short_description: > + SAML significa Linguagem de Marcação para Asserção de Segurança. É um padrão aberto baseado em XML para transferência de dados de identidade entre duas partes: um provedor de identidade (IdP) e um provedor de serviços (SP). + +aka: +tags: +- authentication +--- + + +SAML significa Linguagem de Marcação para Asserção de Segurança (_Security Assertion Markup Language_). É um padrão aberto baseado em XML para transferência de dados de identidade entre duas partes: um provedor de identidade (IdP) e um provedor de serviços (SP). + +* Provedor de identidade: executa a autenticação e passa a identidade do usuário e o nível de autorização para o provedor de serviços. +* Provedor de serviços: confia no provedor de identidade e autoriza o usuário fornecido a acessar o recurso solicitado. + +A autenticação de logon único SAML normalmente envolve um provedor de serviços e um provedor de identidade. O fluxo do processo geralmente envolve os estágios de estabelecimento de confiança e de fluxo de autenticação. + +Considere este exemplo: + +* Nosso provedor de identidade é o Auth0 +* Nosso provedor de serviços é um serviço fictício, Zagadat + +Nota: O provedor de identidade pode ser qualquer plataforma de gerenciamento de identidade. + +Agora, um usuário está tentando obter acesso ao Zagadat usando a autenticação SAML. + +Este é o fluxo do processo: + +1. O usuário tenta fazer login no Zagadat a partir de um navegador. +2. O Zagadat responde gerando uma solicitação SAML.
+ + diff --git a/content/pt-br/docs/reference/glossary/tls-common-name.md b/content/pt-br/docs/reference/glossary/tls-common-name.md new file mode 100644 index 0000000000000..afa5af96e0a6b --- /dev/null +++ b/content/pt-br/docs/reference/glossary/tls-common-name.md @@ -0,0 +1,18 @@ +--- +title: TLS Common Name +id: tls-common-name +date: 2021-03-16 +full_link: +short_description: > + O nome comum é normalmente composto de host + nome de domínio e será semelhante a www.seusite.com ou seusite.com. Os certificados de servidor SSL são específicos para o nome comum para o qual foram emitidos no nível do host. + +aka: +tags: +- authentication +--- + + + +O nome comum é normalmente composto de host + nome de domínio e será semelhante a www.seusite.com ou seusite.com. Os certificados de servidor SSL são específicos para o nome comum para o qual foram emitidos no nível do host. + +O nome comum deve ser igual ao endereço da Web que você acessará ao se conectar a um site seguro. Por exemplo, um certificado de servidor SSL emitido para o domínio dominio.com gerará um aviso no navegador se for usado para acessar um site chamado www.dominio.com ou secure.dominio.com, pois www.dominio.com e secure.dominio.com são diferentes de dominio.com. Você precisaria criar um CSR para o nome comum correto. \ No newline at end of file diff --git a/content/pt-br/docs/reference/glossary/uid.md b/content/pt-br/docs/reference/glossary/uid.md new file mode 100644 index 0000000000000..c5e34fd185862 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/uid.md @@ -0,0 +1,13 @@ +--- +title: UID +id: uid +date: 2021-03-16 +full_link: +short_description: > + Um identificador exclusivo (UID) é uma sequência numérica ou alfanumérica associada a uma única entidade em um determinado sistema. + +aka: +tags: +- authentication +--- +Um identificador exclusivo (UID) é uma sequência numérica ou alfanumérica associada a uma única entidade em um determinado sistema. Os UIDs tornam possível endereçar essa entidade, de modo que seja possível acessá-la e interagir com ela. Cada usuário é identificado no sistema por seu UID e os nomes de usuário geralmente são usados apenas como uma interface para humanos. \ No newline at end of file diff --git a/content/pt-br/docs/reference/glossary/username.md b/content/pt-br/docs/reference/glossary/username.md new file mode 100644 index 0000000000000..30b516eb5d5fc --- /dev/null +++ b/content/pt-br/docs/reference/glossary/username.md @@ -0,0 +1,16 @@ +--- +title: Username +id: username +date: 2021-03-16 +full_link: +short_description: > + Um nome de usuário é um nome que identifica exclusivamente alguém em um sistema de computador. + +aka: +tags: +- authentication +--- +Um nome de usuário é um nome que identifica exclusivamente alguém em um sistema de computador. Por exemplo, um computador pode ser configurado com várias contas, com nomes de usuário diferentes para cada conta. Muitos sites permitem que os usuários escolham um nome de usuário para que possam personalizar suas configurações ou configurar uma conta online. Por exemplo, seu banco pode permitir que você escolha um nome de usuário para acessar suas informações bancárias. Você pode precisar escolher um nome de usuário para postar mensagens em um determinado quadro de mensagens na web. Os serviços de e-mail, como o Hotmail, exigem que os usuários escolham um nome de usuário para usar o serviço. + + +Um nome de usuário geralmente é pareado com uma senha.
Essa combinação de nome de usuário/senha é conhecida como login e geralmente é necessária para que os usuários façam login em sites. Por exemplo, para acessar seu e-mail pela Web, é necessário inserir seu nome de usuário e senha. Depois de fazer o login, seu nome de usuário pode aparecer na tela, mas sua senha é mantida em segredo. Ao manter sua senha privada, as pessoas podem criar contas seguras para vários sites. A maioria dos nomes de usuário pode conter letras e números, mas não espaços. Quando você escolhe um nome de usuário para uma conta de e-mail, a parte antes de "@" é o seu nome de usuário. diff --git a/content/pt/docs/reference/kubectl/_index.md b/content/pt-br/docs/reference/kubectl/_index.md similarity index 100% rename from content/pt/docs/reference/kubectl/_index.md rename to content/pt-br/docs/reference/kubectl/_index.md diff --git a/content/pt/docs/reference/kubectl/cheatsheet.md b/content/pt-br/docs/reference/kubectl/cheatsheet.md similarity index 100% rename from content/pt/docs/reference/kubectl/cheatsheet.md rename to content/pt-br/docs/reference/kubectl/cheatsheet.md diff --git a/content/pt/docs/reference/tools.md b/content/pt-br/docs/reference/tools.md similarity index 100% rename from content/pt/docs/reference/tools.md rename to content/pt-br/docs/reference/tools.md diff --git a/content/pt/docs/setup/_index.md b/content/pt-br/docs/setup/_index.md similarity index 100% rename from content/pt/docs/setup/_index.md rename to content/pt-br/docs/setup/_index.md diff --git a/content/pt/docs/sitemap.md b/content/pt-br/docs/sitemap.md similarity index 100% rename from content/pt/docs/sitemap.md rename to content/pt-br/docs/sitemap.md diff --git a/content/pt/docs/tasks/_index.md b/content/pt-br/docs/tasks/_index.md similarity index 100% rename from content/pt/docs/tasks/_index.md rename to content/pt-br/docs/tasks/_index.md diff --git a/content/pt/docs/templates/feature-state-alpha.txt b/content/pt-br/docs/templates/feature-state-alpha.txt similarity index 100% rename from content/pt/docs/templates/feature-state-alpha.txt rename to content/pt-br/docs/templates/feature-state-alpha.txt diff --git a/content/pt/docs/templates/feature-state-beta.txt b/content/pt-br/docs/templates/feature-state-beta.txt similarity index 100% rename from content/pt/docs/templates/feature-state-beta.txt rename to content/pt-br/docs/templates/feature-state-beta.txt diff --git a/content/pt/docs/templates/feature-state-deprecated.txt b/content/pt-br/docs/templates/feature-state-deprecated.txt similarity index 100% rename from content/pt/docs/templates/feature-state-deprecated.txt rename to content/pt-br/docs/templates/feature-state-deprecated.txt diff --git a/content/pt/docs/templates/feature-state-stable.txt b/content/pt-br/docs/templates/feature-state-stable.txt similarity index 100% rename from content/pt/docs/templates/feature-state-stable.txt rename to content/pt-br/docs/templates/feature-state-stable.txt diff --git a/content/id/docs/templates/index.md b/content/pt-br/docs/templates/index.md similarity index 100% rename from content/id/docs/templates/index.md rename to content/pt-br/docs/templates/index.md diff --git a/content/pt/docs/tutorials/_index.md b/content/pt-br/docs/tutorials/_index.md similarity index 93% rename from content/pt/docs/tutorials/_index.md rename to content/pt-br/docs/tutorials/_index.md index a488f84388248..bc39fd817a79b 100644 --- a/content/pt/docs/tutorials/_index.md +++ b/content/pt-br/docs/tutorials/_index.md @@ -21,7 +21,7 @@ Antes de iniciar um tutorial, é
interessante que vocẽ salve a página de [Glo * [Introdução ao Kubernetes (edX)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x#) é um curso gratuíto da edX que te guia no entendimento do Kubernetes, seus conceitos, bem como na execução de tarefas mais simples. -* [Hello Minikube](/docs/tutorials/hello-minikube/) é um "Hello World" que te permite testar rapidamente o Kubernetes em sua estação com o uso do Minikube +* [Olá, Minikube!](/pt/docs/tutorials/hello-minikube/) é um "Hello World" que te permite testar rapidamente o Kubernetes em sua estação com o uso do Minikube ## Configuração diff --git a/content/pt-br/docs/tutorials/hello-minikube.md b/content/pt-br/docs/tutorials/hello-minikube.md new file mode 100644 index 0000000000000..0db5d20ddcea5 --- /dev/null +++ b/content/pt-br/docs/tutorials/hello-minikube.md @@ -0,0 +1,258 @@ +--- +title: Olá, Minikube! +content_type: tutorial +weight: 5 +menu: + main: + title: "Iniciar" + weight: 10 + post: > +

    Pronto para meter a mão na massa? Vamos criar um cluster Kubernetes simples e executar uma aplicação exemplo.

+card: + name: tutorials + weight: 10 +--- + + + +Este tutorial mostra como executar uma aplicação exemplo no Kubernetes utilizando o [Minikube](https://minikube.sigs.k8s.io) e o [Katacoda](https://www.katacoda.com). O Katacoda disponibiliza um ambiente Kubernetes gratuito e acessível via navegador. + +{{< note >}} +Você também consegue seguir os passos desse tutorial instalando o Minikube localmente. Para instruções de instalação, acesse: [iniciando com minikube](https://minikube.sigs.k8s.io/docs/start/). +{{< /note >}} + +## Objetivos + +* Instalar uma aplicação exemplo no minikube. +* Executar a aplicação. +* Visualizar os logs da aplicação. + +## Antes de você iniciar + +Este tutorial disponibiliza uma imagem de contêiner que utiliza o NGINX para retornar todas as requisições. + + + +## Criando um cluster do Minikube + +1. Clique no botão abaixo **para iniciar o terminal do Katacoda**. + + {{< kat-button >}} + +{{< note >}} +Se você instalou o Minikube localmente, execute: `minikube start`. +{{< /note >}} + +2. Abra o painel do Kubernetes em um navegador: + + ```shell + minikube dashboard + ``` + +3. Apenas no ambiente do Katacoda: Na parte superior do terminal, clique em **Preview Port 30000**. + +## Criando um Deployment + +Um [*Pod*](/docs/concepts/workloads/pods/) Kubernetes consiste em um ou mais contêineres agrupados para fins de administração e gerenciamento de rede. O Pod desse tutorial possui apenas um contêiner. Um [*Deployment*](/docs/concepts/workloads/controllers/deployment/) Kubernetes verifica a saúde do seu Pod e reinicia o contêiner do Pod caso ele seja finalizado. Deployments são a maneira recomendada de gerenciar a criação e o escalonamento dos Pods. + +1. Usando o comando `kubectl create`, crie um Deployment que gerencia um Pod. O Pod executa um contêiner baseado na imagem Docker disponibilizada. + + ```shell + kubectl create deployment hello-node --image=k8s.gcr.io/echoserver:1.4 + ``` + +2. Visualizando o Deployment: + + ```shell + kubectl get deployments + ``` + + A saída será semelhante a: + + ``` + NAME READY UP-TO-DATE AVAILABLE AGE + hello-node 1/1 1 1 1m + ``` + +3. Visualizando o Pod: + + ```shell + kubectl get pods + ``` + + A saída será semelhante a: + + ``` + NAME READY STATUS RESTARTS AGE + hello-node-5f76cf6ccf-br9b5 1/1 Running 0 1m + ``` + +4. Visualizando os eventos do cluster: + + ```shell + kubectl get events + ``` + +5. Visualizando a configuração do `kubectl`: + + ```shell + kubectl config view + ``` + +{{< note >}} +Para mais informações sobre o comando `kubectl`, veja o [kubectl overview](/docs/reference/kubectl/overview/). +{{< /note >}} + +## Criando um serviço + +Por padrão, um Pod só é acessível utilizando o seu endereço IP interno no cluster Kubernetes. Para disponibilizar o contêiner `hello-node` fora da rede virtual do Kubernetes, você deve expor o Pod como um [*serviço*](/docs/concepts/services-networking/service/) Kubernetes. + +1. Expondo o Pod usando o comando `kubectl expose`: + + ```shell + kubectl expose deployment hello-node --type=LoadBalancer --port=8080 + ``` + + O parâmetro `--type=LoadBalancer` indica que você deseja expor o seu serviço fora do cluster Kubernetes. + + A aplicação dentro da imagem `k8s.gcr.io/echoserver` "escuta" apenas na porta TCP 8080. Se você usou + `kubectl expose` para expor uma porta diferente, os clientes não conseguirão se conectar a essa outra porta. + +2.
Visualizando o serviço que você acabou de criar: + + ```shell + kubectl get services + ``` + + A saída será semelhante a: + + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + hello-node LoadBalancer 10.108.144.78 8080:30369/TCP 21s + kubernetes ClusterIP 10.96.0.1 443/TCP 23m + ``` + + Em provedores de nuvem que fornecem serviços de balanceamento de carga para o Kubernetes, um IP externo seria provisionado para acessar o serviço. No Minikube, o tipo `LoadBalancer` torna o serviço acessível por meio do comando `minikube service`. + +3. Execute o comando a seguir: + + ```shell + minikube service hello-node + ``` + +4. (**Apenas no ambiente do Katacoda**) Clique no sinal de mais e então clique em **Select port to view on Host 1**. + +5. (**Apenas no ambiente do Katacoda**) Observe o número da porta com 5 dígitos exibido ao lado de `8080` na saída do serviço. Este número de porta é gerado aleatoriamente e pode ser diferente para você. Digite seu número na caixa de texto do número da porta e clique em **Display Port**. Usando o exemplo anterior, você digitaria `30369`. + +Isso abre uma janela do navegador, acessa o seu aplicativo e mostra o retorno da requisição. + +## Habilitando Complementos (addons) + +O Minikube inclui um conjunto integrado de {{< glossary_tooltip text="complementos" term_id="addons" >}} que podem ser habilitados, desabilitados e executados no ambiente Kubernetes local. + +1. Listando os complementos suportados atualmente: + + ```shell + minikube addons list + ``` + + A saída será semelhante a: + + ``` + addon-manager: enabled + dashboard: enabled + default-storageclass: enabled + efk: disabled + freshpod: disabled + gvisor: disabled + helm-tiller: disabled + ingress: disabled + ingress-dns: disabled + logviewer: disabled + metrics-server: disabled + nvidia-driver-installer: disabled + nvidia-gpu-device-plugin: disabled + registry: disabled + registry-creds: disabled + storage-provisioner: enabled + storage-provisioner-gluster: disabled + ``` + +2. Habilitando um complemento, por exemplo, `metrics-server`: + + ```shell + minikube addons enable metrics-server + ``` + + A saída será semelhante a: + + ``` + metrics-server was successfully enabled + ``` + +3. Visualizando os Pods e os Serviços que você acabou de criar: + + ```shell + kubectl get pod,svc -n kube-system + ``` + + A saída será semelhante a: + + ``` + NAME READY STATUS RESTARTS AGE + pod/coredns-5644d7b6d9-mh9ll 1/1 Running 0 34m + pod/coredns-5644d7b6d9-pqd2t 1/1 Running 0 34m + pod/metrics-server-67fb648c5 1/1 Running 0 26s + pod/etcd-minikube 1/1 Running 0 34m + pod/influxdb-grafana-b29w8 2/2 Running 0 26s + pod/kube-addon-manager-minikube 1/1 Running 0 34m + pod/kube-apiserver-minikube 1/1 Running 0 34m + pod/kube-controller-manager-minikube 1/1 Running 0 34m + pod/kube-proxy-rnlps 1/1 Running 0 34m + pod/kube-scheduler-minikube 1/1 Running 0 34m + pod/storage-provisioner 1/1 Running 0 34m + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/metrics-server ClusterIP 10.96.241.45 80/TCP 26s + service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP 34m + service/monitoring-grafana NodePort 10.99.24.54 80:30002/TCP 26s + service/monitoring-influxdb ClusterIP 10.111.169.94 8083/TCP,8086/TCP 26s + ``` + +4.
Desabilitando o complemento `metrics-server`: + + ```shell + minikube addons disable metrics-server + ``` + + A saída será semelhante a: + + ``` + metrics-server was successfully disabled + ``` + +## Removendo os recursos do Minikube + +Agora você pode remover todos os recursos criados no seu cluster: + +```shell +kubectl delete service hello-node +kubectl delete deployment hello-node +``` +(**Opcional**) Pare a máquina virtual (VM) do Minikube: + +```shell +minikube stop +``` +(**Opcional**) Remova a VM do Minikube: + +```shell +minikube delete +``` + +## Próximos passos + +* Aprender mais sobre [Deployment objects](/docs/concepts/workloads/controllers/deployment/). +* Aprender mais sobre [Deploying applications](/docs/tasks/run-application/run-stateless-application-deployment/). +* Aprender mais sobre [Service objects](/docs/concepts/services-networking/service/). + diff --git a/content/pt/docs/tutorials/kubernetes-basics/_index.html b/content/pt-br/docs/tutorials/kubernetes-basics/_index.html similarity index 71% rename from content/pt/docs/tutorials/kubernetes-basics/_index.html rename to content/pt-br/docs/tutorials/kubernetes-basics/_index.html index 90f89ac3daa21..b397afba37866 100644 --- a/content/pt/docs/tutorials/kubernetes-basics/_index.html +++ b/content/pt-br/docs/tutorials/kubernetes-basics/_index.html @@ -24,7 +24,7 @@

    Básico do Kubernetes

    -

    Este tutorial fornece instruções básicas sobre o sistema de orquestração de cluster do Kubernetes. Cada módulo contém algumas informações básicas sobre os principais recursos e conceitos do Kubernetes e inclui um tutorial online interativo. Esses tutoriais interativos permitem que você mesmo gerencie um cluster simples e seus aplicativos em contêineres.

    +

    Este tutorial fornece instruções básicas sobre o sistema de orquestração de cluster do Kubernetes. Cada módulo contém algumas informações básicas sobre os principais recursos e conceitos do Kubernetes e inclui um tutorial online interativo. Esses tutoriais interativos permitem que você mesmo gerencie um cluster simples e seus aplicativos em contêineres.

    Usando os tutoriais interativos, você pode aprender a:

    • Implante um aplicativo em contêiner em um cluster.
    • @@ -46,7 +46,7 @@

      O que o Kubernetes pode fazer por você?


    - +

    Módulos básicos do Kubernetes

    @@ -54,25 +54,25 @@

    Módulos básicos do Kubernetes

    @@ -82,17 +82,17 @@

    Módulos básicos do Kubernetes

    diff --git a/content/pt/docs/tutorials/kubernetes-basics/create-cluster/_index.md b/content/pt-br/docs/tutorials/kubernetes-basics/create-cluster/_index.md similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/create-cluster/_index.md rename to content/pt-br/docs/tutorials/kubernetes-basics/create-cluster/_index.md diff --git a/content/pt/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html b/content/pt-br/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html similarity index 84% rename from content/pt/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html rename to content/pt-br/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html index 9be46e849db35..5ef10a9920ee8 100644 --- a/content/pt/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html +++ b/content/pt-br/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html @@ -25,7 +25,7 @@
    diff --git a/content/pt/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html b/content/pt-br/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html similarity index 57% rename from content/pt/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html rename to content/pt-br/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html index fd5025ab45277..8301e8890c4f4 100644 --- a/content/pt/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html +++ b/content/pt-br/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html @@ -29,16 +29,16 @@

    Objetivos

    Clusters do Kubernetes

- O Kubernetes coordena um cluster altamente disponível de computadores conectados para funcionar como uma única unidade. - As abstrações no Kubernetes permitem implantar aplicativos em contêineres em um cluster sem amarrá-los especificamente a máquinas individuais. - Para fazer uso desse novo modelo de implantação, os aplicativos precisam ser empacotados de uma forma que os desacople dos hosts individuais: eles precisam ser colocados em contêineres. Os aplicativos em contêineres são mais flexíveis e disponíveis do que nos modelos de implantação anteriores, nos quais os aplicativos eram instalados diretamente em máquinas específicas como pacotes profundamente integrados ao host. + O Kubernetes coordena um cluster com alta disponibilidade de computadores conectados para funcionar como uma única unidade. + As abstrações no Kubernetes permitem implantar aplicativos em contêineres em um cluster sem amarrá-los especificamente às máquinas individuais. + Para fazer uso desse novo modelo de implantação, os aplicativos precisam ser empacotados de uma forma que os desacople dos hosts individuais: eles precisam ser empacotados em contêineres. Os aplicativos em contêineres são mais flexíveis e disponíveis do que nos modelos de implantação anteriores, nos quais os aplicativos eram instalados diretamente em máquinas específicas como pacotes profundamente integrados ao host. O Kubernetes automatiza a distribuição e o agendamento de contêineres de aplicativos em um cluster de maneira mais eficiente. O Kubernetes é uma plataforma de código aberto e está pronto para produção.

    Um cluster Kubernetes consiste em dois tipos de recursos:

      -
    • O Master coordena o cluster
    • -
    • Os Nodes são os trabalhadores que executam aplicativos
    • +
    • A Camada de gerenciamento (Control Plane) coordena o cluster
    • +
    • Os Nós (Nodes) são os nós de processamento que executam aplicativos

    @@ -75,22 +75,22 @@

    Diagrama de Cluster

    -

    O mestre é responsável por gerenciar o cluster. O mestre coordena todas as atividades em seu cluster, como programação de aplicativos, manutenção do estado desejado dos aplicativos, escalonamento de aplicativos e lançamento de novas atualizações.

    -

    Um nó é uma VM ou um computador físico que atua como uma máquina de trabalho em um cluster Kubernetes. Cada nó tem um Kubelet, que é um agente para gerenciar o nó e se comunicar com o mestre do Kubernetes. O nó também deve ter ferramentas para lidar com operações de contêiner, como containerd ou Docker. Um cluster Kubernetes que lida com o tráfego de produção deve ter no mínimo três nós.

    +

    A camada de gerenciamento é responsável por gerenciar o cluster. A camada de gerenciamento coordena todas as atividades em seu cluster, como programação de aplicativos, manutenção do estado desejado dos aplicativos, escalonamento de aplicativos e lançamento de novas atualizações.

    +

    Um nó é uma VM ou um computador físico que atua como um nó de processamento em um cluster Kubernetes. Cada nó tem um Kubelet, que é um agente para gerenciar o nó e se comunicar com a camada de gerenciamento do Kubernetes. O nó também deve ter ferramentas para lidar com operações de contêiner, como containerd ou Docker. Um cluster Kubernetes que lida com o tráfego de produção deve ter no mínimo três nós.

    -

    Os mestres gerenciam o cluster e os nós que são usados ​​para hospedar os aplicativos em execução.

    +

A camada de gerenciamento gerencia o cluster e os nós que são usados para hospedar os aplicativos em execução.

    -

    Ao implantar aplicativos no Kubernetes, você diz ao mestre para iniciar os contêineres de aplicativos. O mestre agenda os contêineres para serem executados nos nós do cluster. Os nós se comunicam com o mestre usando a API Kubernetes , que o mestre expõe. Os usuários finais também podem usar a API Kubernetes diretamente para interagir com o cluster.

    +

Ao implantar aplicativos no Kubernetes, você diz à camada de gerenciamento para iniciar os contêineres de aplicativos. A camada de gerenciamento agenda os contêineres para serem executados nos nós do cluster. Os nós se comunicam com a camada de gerenciamento usando a API do Kubernetes, que a camada de gerenciamento expõe. Os usuários finais também podem usar a API do Kubernetes diretamente para interagir com o cluster.

    -

    Um cluster Kubernetes pode ser implantado em máquinas físicas ou virtuais. Para começar o desenvolvimento do Kubernetes, você pode usar o Minikube. O Minikube é uma implementação leve do Kubernetes que cria uma VM em sua máquina local e implanta um cluster simples contendo apenas um nó. O Minikube está disponível para sistemas Linux, macOS e Windows. O Minikube CLI fornece operações básicas de inicialização para trabalhar com seu cluster, incluindo iniciar, parar, status e excluir. Para este tutorial, no entanto, você usará um terminal online fornecido com o Minikube pré-instalado.

    +

Um cluster Kubernetes pode ser implantado em máquinas físicas ou virtuais. Para começar o desenvolvimento do Kubernetes, você pode usar o Minikube. O Minikube é uma implementação leve do Kubernetes que cria uma VM em sua máquina local e implanta um cluster simples contendo apenas um nó. O Minikube está disponível para sistemas Linux, macOS e Windows. A linha de comando (CLI) do Minikube fornece operações básicas de inicialização para trabalhar com seu cluster, incluindo iniciar, parar, status e excluir. Para este tutorial, no entanto, você usará um terminal online fornecido com o Minikube pré-instalado.

    Agora que você sabe o que é Kubernetes, vamos para o tutorial online e iniciar nosso primeiro cluster!

    @@ -100,7 +100,7 @@

    Diagrama de Cluster

    diff --git a/content/pt/docs/tutorials/kubernetes-basics/deploy-app/_index.md b/content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/_index.md similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/deploy-app/_index.md rename to content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/_index.md diff --git a/content/pt/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html b/content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html similarity index 88% rename from content/pt/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html rename to content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html index cbc22b0d60950..96f73e9250cd6 100644 --- a/content/pt/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html +++ b/content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html @@ -37,7 +37,7 @@
    diff --git a/content/pt/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html b/content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html similarity index 93% rename from content/pt/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html rename to content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html index a4f60e374cb60..1d927cf038167 100644 --- a/content/pt/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html +++ b/content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html @@ -31,7 +31,7 @@

    Implantações do Kubernetes

Assim que o seu cluster Kubernetes estiver em execução você pode implementar seu aplicativo em contêiners nele. Para fazer isso, você precisa criar uma configuração do tipo Deployment do Kubernetes. O Deployment define como criar e atualizar instâncias do seu aplicativo. Depois de criar um Deployment, o Master do Kubernetes - agenda as instâncias do aplicativo incluídas nesse Deployment para ser executado em nós individuais do CLuster. + agenda as instâncias do aplicativo incluídas nesse Deployment para serem executadas em nós individuais do Cluster.

    Depois que as instâncias do aplicativo são criadas, um Controlador do Kubernetes Deployment monitora continuamente essas instâncias. @@ -93,7 +93,7 @@

    Implantar seu primeiro aplicativo no Kubernetes

- Para sua primeira implantação, você usará um aplicativo Node.js empacotado em um contêiner Docker.(Se você ainda não tentou criar um aplicativo Node.js e implantá-lo usando um contêiner, você pode fazer isso primeiro seguindo as instruções do tutorial Hello Minikube). + Para sua primeira implantação, você usará um aplicativo Node.js empacotado em um contêiner Docker. (Se você ainda não tentou criar um aplicativo Node.js e implantá-lo usando um contêiner, você pode fazer isso primeiro seguindo as instruções do tutorial Olá, Minikube!).

    Agora que você sabe o que são implantações (Deployment), vamos para o tutorial online e implantar nosso primeiro aplicativo!

    @@ -103,7 +103,7 @@

    Implantar seu primeiro aplicativo no Kubernetes

    diff --git a/content/pt-br/docs/tutorials/kubernetes-basics/explore/_index.md b/content/pt-br/docs/tutorials/kubernetes-basics/explore/_index.md new file mode 100644 index 0000000000000..c95e5366763d1 --- /dev/null +++ b/content/pt-br/docs/tutorials/kubernetes-basics/explore/_index.md @@ -0,0 +1,4 @@ +--- +title: Explore seu aplicativo +weight: 30 +--- diff --git a/content/pt-br/docs/tutorials/kubernetes-basics/explore/explore-interactive.html b/content/pt-br/docs/tutorials/kubernetes-basics/explore/explore-interactive.html new file mode 100644 index 0000000000000..d4d93e7f7d917 --- /dev/null +++ b/content/pt-br/docs/tutorials/kubernetes-basics/explore/explore-interactive.html @@ -0,0 +1,41 @@ +--- +title: Tutorial Interativo - Explorando seu aplicativo +weight: 20 +--- + + + + + + + + + + + +
    + +
    + +
    +
    + +
+ Para interagir com o Terminal, por favor, use a versão para desktop ou tablet. +
    + +
    +
    +
    + + +
    + +
    + + + diff --git a/content/pt-br/docs/tutorials/kubernetes-basics/explore/explore-intro.html b/content/pt-br/docs/tutorials/kubernetes-basics/explore/explore-intro.html new file mode 100644 index 0000000000000..c9720995dc13c --- /dev/null +++ b/content/pt-br/docs/tutorials/kubernetes-basics/explore/explore-intro.html @@ -0,0 +1,143 @@ +--- +title: Visualizando Pods e Nós (Nodes) +weight: 10 +--- + + + + + + + + + + +
    + +
    + +
    + +
    +

    Objetivos

    +
      +
    • Aprenda sobre Pods do Kubernetes.
    • +
    • Aprenda sobre Nós do Kubernetes.
    • +
    • Solucionar problemas de aplicativos implantados no Kubernetes.
    • +
    +
    + +
    +

    Kubernetes Pods

    +

    Quando você criou um Deployment no Módulo 2, o Kubernetes criou um Pod para hospedar a instância do seu aplicativo. Um Pod é uma abstração do Kubernetes que representa um grupo de um ou mais contêineres de aplicativos (como Docker) e alguns recursos compartilhados para esses contêineres. Esses recursos incluem:

    +
      +
    • Armazenamento compartilhado, como Volumes
    • +
    • Rede, como um endereço IP único no cluster
    • +
    • Informações sobre como executar cada contêiner, como a versão da imagem do contêiner ou portas específicas a serem usadas
    • +
    +

Um Pod define um "host lógico" específico para o aplicativo e pode conter diferentes contêineres que, na maioria dos casos, são fortemente acoplados. Por exemplo, um Pod pode incluir o contêiner com seu aplicativo Node.js, bem como um outro contêiner que alimenta os dados a serem publicados pelo servidor web Node.js. Os contêineres de um Pod compartilham um endereço IP e intervalo de portas; são sempre alocados juntos, agendados juntos e executados em um contexto compartilhado no mesmo Nó.

    + +

    Pods são a unidade atômica na plataforma Kubernetes. Quando criamos um Deployment no Kubernetes, esse Deployment cria Pods com contêineres dentro dele (em vez de você criar contêineres diretamente). Cada Pod está vinculado ao nó onde está programado (scheduled) e lá permanece até o encerramento (de acordo com a política de reinicialização) ou exclusão. Em caso de falha do nó, Pods idênticos são programados em outros nós disponíveis no cluster.
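Um esboço rápido de como observar esse vínculo entre Pod e nó, assumindo um cluster em execução e um Deployment já criado (os nomes dos Pods são apenas ilustrativos):

```bash
# Lista os Pods com informações adicionais, incluindo o nó (coluna NODE)
# onde cada Pod foi agendado e o seu endereço IP no cluster
kubectl get pods -o wide

# Se um nó falhar, o Deployment (via ReplicaSet) criará um Pod substituto
# em outro nó disponível; o comando acima permite acompanhar essa mudança
```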


    Sumário:

• Pods
• Nós (Nodes)
• Principais comandos do Kubectl

    + Um Pod é um grupo de um ou mais contêineres de aplicativos (como Docker) que inclui armazenamento compartilhado (volumes), endereço IP e informações sobre como executá-los. +


    Visão geral sobre os Pods


    Nós (Nodes)

    +

Um Pod sempre será executado em um Nó. Um Nó é uma máquina de processamento em um cluster Kubernetes e pode ser uma máquina física ou virtual. Cada Nó é gerenciado pelo Control Plane. Um Nó pode possuir múltiplos Pods e o Control Plane do Kubernetes gerencia automaticamente o agendamento dos Pods nos nós do cluster. Para o agendamento automático dos Pods, o Control Plane leva em consideração os recursos disponíveis em cada Nó.

    + +

    Cada Nó do Kubernetes executa pelo menos:

• Kubelet, o processo responsável pela comunicação entre o Control Plane e o Nó; gerencia os Pods e os contêineres rodando em uma máquina.
• Um runtime de contêiner (por exemplo, o Docker), responsável por baixar a imagem do contêiner de um registro de imagens (por exemplo, o Docker Hub), extrair o contêiner e executar a aplicação.

    Os contêineres só devem ser agendados juntos em um único Pod se estiverem fortemente acoplados e precisarem compartilhar recursos, como disco e IP.


    Visão Geral sobre os Nós


    Solucionar problemas usando o comando kubectl

    +

No Módulo 2, você usou a ferramenta de linha de comando Kubectl. Você pode continuar utilizando o Kubectl no Módulo 3 para obter informações sobre o Deployment realizado e seus recursos. As operações mais comuns podem ser realizadas com os comandos abaixo:

• kubectl get - listar recursos
• kubectl describe - mostrar informações detalhadas sobre um recurso
• kubectl logs - mostrar os logs de um contêiner em um Pod
• kubectl exec - executar um comando em um contêiner em um Pod

Você pode usar esses comandos para verificar quando o Deployment foi realizado, qual seu status atual, onde os Pods estão rodando e quais são as suas configurações.
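Como ilustração, uma sequência típica de verificação poderia ser a seguinte (o nome my-pod é hipotético; substitua pelo nome real retornado por kubectl get pods):

```bash
# Lista os Deployments e Pods existentes
kubectl get deployments
kubectl get pods

# Mostra detalhes de um Pod específico (eventos, contêineres, portas)
kubectl describe pod my-pod

# Exibe os logs do contêiner dentro do Pod
kubectl logs my-pod

# Executa um comando dentro do contêiner do Pod
kubectl exec my-pod -- ls /
```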

    + +

    Agora que sabemos mais sobre os componentes de um cluster Kubernetes e o comando kubectl, vamos explorar a nossa aplicação.


Um nó é uma máquina de trabalho (worker) do Kubernetes e pode ser uma VM ou uma máquina física, dependendo do cluster. Vários Pods podem ser executados em um nó.

    + + + diff --git a/content/pt-br/docs/tutorials/kubernetes-basics/expose/_index.md b/content/pt-br/docs/tutorials/kubernetes-basics/expose/_index.md new file mode 100644 index 0000000000000..c8f0d50a0ec34 --- /dev/null +++ b/content/pt-br/docs/tutorials/kubernetes-basics/expose/_index.md @@ -0,0 +1,4 @@ +--- +title: Exponha publicamente seu aplicativo +weight: 40 +--- diff --git a/content/pt-br/docs/tutorials/kubernetes-basics/expose/expose-interactive.html b/content/pt-br/docs/tutorials/kubernetes-basics/expose/expose-interactive.html new file mode 100644 index 0000000000000..cf24ae985ee8b --- /dev/null +++ b/content/pt-br/docs/tutorials/kubernetes-basics/expose/expose-interactive.html @@ -0,0 +1,38 @@ +--- +title: Tutorial Interativo - Expondo seu aplicativo +weight: 20 +--- + + + + + + + + + + + +
+ Para interagir com o terminal, favor utilizar a versão desktop/tablet
    + + + diff --git a/content/pt-br/docs/tutorials/kubernetes-basics/expose/expose-intro.html b/content/pt-br/docs/tutorials/kubernetes-basics/expose/expose-intro.html new file mode 100644 index 0000000000000..4e66601116119 --- /dev/null +++ b/content/pt-br/docs/tutorials/kubernetes-basics/expose/expose-intro.html @@ -0,0 +1,103 @@ +--- +title: Utilizando um serviço para expor seu aplicativo +weight: 10 +--- + + + + + + + + + +

    Objetivos

• Aprenda sobre um Serviço no Kubernetes
• Entenda como os objetos labels e LabelSelector se relacionam a um Serviço
• Exponha uma aplicação externamente ao cluster Kubernetes usando um Serviço

    Visão Geral de Serviços Kubernetes

    + +

    Pods Kubernetes são efêmeros. Na verdade, Pods possuem um ciclo de vida. Quando um nó de processamento morre, os Pods executados no nó também são perdidos. A partir disso, o ReplicaSet pode dinamicamente retornar o cluster ao estado desejado através da criação de novos Pods para manter sua aplicação em execução. Como outro exemplo, considere um backend de processamento de imagens com 3 réplicas. Estas réplicas são intercambiáveis; o sistema front-end não deveria se importar com as réplicas backend ou ainda se um Pod é perdido ou recriado. Dito isso, cada Pod em um cluster Kubernetes tem um único endereço IP, mesmo Pods no mesmo nó, então há necessidade de ter uma forma de reconciliar automaticamente mudanças entre Pods de modo que sua aplicação continue funcionando.
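Um pequeno experimento que demonstra esse comportamento, assumindo um Deployment já em execução (o nome do Pod abaixo é meramente ilustrativo):

```bash
# Observa os Pods atuais e anota o nome de um deles
kubectl get pods

# Exclui um dos Pods para simular uma falha
kubectl delete pod nginx-app-5c7588df-abcde

# O ReplicaSet detecta a diferença em relação ao estado desejado
# e cria automaticamente um novo Pod (com novo nome e novo IP)
kubectl get pods
```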

    + +

    Um serviço no Kubernetes é uma abstração que define um conjunto lógico de Pods e uma política pela qual acessá-los. Serviços permitem um baixo acoplamento entre os Pods dependentes. Um serviço é definido usando YAML (preferencialmente) ou JSON, como todos objetos Kubernetes. O conjunto de Pods selecionados por um Serviço é geralmente determinado por um seletor de rótulos LabelSelector (veja abaixo o motivo pelo qual você pode querer um Serviço sem incluir um seletor selector na especificação spec).
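Um esboço mínimo de definição de Serviço em YAML, aplicado via kubectl (os valores são de exemplo; o rótulo app: nginx-app é uma suposição sobre os rótulos dos seus Pods):

```bash
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: meu-servico
spec:
  selector:
    app: nginx-app        # seleciona os Pods com este rótulo
  ports:
    - protocol: TCP
      port: 80            # porta exposta pelo serviço
      targetPort: 8080    # porta do contêiner que recebe o tráfego
EOF
```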

    + +

    Embora cada Pod tenha um endereço IP único, estes IPs não são expostos externamente ao cluster sem um Serviço. Serviços permitem que suas aplicações recebam tráfego. Serviços podem ser expostos de formas diferentes especificando um tipo type na especificação do serviço ServiceSpec:

• ClusterIP (padrão) - Expõe o serviço sob um endereço IP interno no cluster. Este tipo torna o serviço alcançável somente de dentro do cluster.
• NodePort - Expõe o serviço sob a mesma porta em cada nó selecionado no cluster usando NAT. Torna o serviço acessível externamente ao cluster usando <NodeIP>:<NodePort>. Superconjunto de ClusterIP.
• LoadBalancer - Cria um balanceador de carga externo no provedor de nuvem atual (se suportado) e atribui um endereço IP fixo e externo ao serviço. Superconjunto de NodePort.
• ExternalName - Expõe o serviço usando um nome arbitrário (especificado através de externalName na especificação spec), retornando um registro CNAME com o nome. Nenhum proxy é utilizado. Este tipo requer kube-dns v1.7 ou mais recente.

    Mais informações sobre diferentes tipos de Serviços podem ser encontradas no tutorial Utilizando IP de origem. Também confira Conectando aplicações com serviços.
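Na prática, um serviço também pode ser criado a partir de um Deployment existente com kubectl expose (o nome nginx-app é hipotético):

```bash
# Cria um serviço do tipo NodePort para o Deployment nginx-app,
# tornando-o acessível externamente via <NodeIP>:<NodePort>
kubectl expose deployment/nginx-app --type=NodePort --port=80

# Mostra o serviço criado e a porta do nó que foi alocada
kubectl get services
kubectl describe service nginx-app
```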

    +

    Adicionalmente, note que existem alguns casos de uso com serviços que envolvem a não definição de selector em spec. Serviços criados sem selector também não criarão objetos Endpoints correspondentes. Isto permite usuários mapear manualmente um serviço a endpoints específicos. Outra possibilidade na qual pode não haver seletores é ao se utilizar estritamente type: ExternalName.
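Um esboço desse caso de uso, mapeando manualmente um serviço sem selector para um endpoint externo (endereço IP e portas meramente ilustrativos):

```bash
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: servico-externo
spec:
  ports:                    # sem "selector": nenhum Endpoints é criado automaticamente
    - protocol: TCP
      port: 80
      targetPort: 9376
---
apiVersion: v1
kind: Endpoints
metadata:
  name: servico-externo     # deve ter o mesmo nome do serviço
subsets:
  - addresses:
      - ip: 192.0.2.42      # endereço de exemplo (fora do cluster)
    ports:
      - port: 9376
EOF
```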


    Resumo

• Expõe Pods ao tráfego externo
• Balanceamento de carga do tráfego entre múltiplos Pods
• Uso de rótulos (labels)

    Um serviço Kubernetes é uma camada de abstração que define um conjunto lógico de Pods e habilita a exposição ao tráfego externo, balanceamento de carga e descoberta de serviço para esses Pods.


    Serviços e Rótulos


    Um serviço roteia tráfego entre um conjunto de Pods. Serviço é a abstração que permite pods morrerem e se replicarem no Kubernetes sem impactar sua aplicação. A descoberta e o roteamento entre Pods dependentes (tal como componentes frontend e backend dentro de uma aplicação) são controlados por serviços Kubernetes.

    +

Serviços relacionam um conjunto de Pods usando rótulos (labels) e seletores, uma primitiva de agrupamento que permite operações lógicas sobre objetos Kubernetes. Rótulos são pares chave/valor anexados a objetos e podem ser usados de inúmeras formas:

• Designar objetos para desenvolvimento, teste e produção
• Adicionar tags de versão
• Classificar um objeto usando tags

Rótulos podem ser anexados a objetos no momento de sua criação ou posteriormente. Eles podem ser modificados a qualquer momento. Vamos agora expor sua aplicação usando um serviço e aplicar alguns rótulos.
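Por exemplo (o nome do Pod e os rótulos abaixo são hipotéticos):

```bash
# Aplica um novo rótulo a um Pod existente
kubectl label pod nginx-app-5c7588df-abcde versao=v1

# Lista apenas os objetos que possuem o rótulo informado
kubectl get pods -l versao=v1
kubectl get services -l app=nginx-app
```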

    + + + diff --git a/content/pt-br/docs/tutorials/kubernetes-basics/scale/_index.md b/content/pt-br/docs/tutorials/kubernetes-basics/scale/_index.md new file mode 100644 index 0000000000000..9e6d5b418e236 --- /dev/null +++ b/content/pt-br/docs/tutorials/kubernetes-basics/scale/_index.md @@ -0,0 +1,4 @@ +--- +title: Escale seu aplicativo +weight: 50 +--- diff --git a/content/pt-br/docs/tutorials/kubernetes-basics/scale/scale-interactive.html b/content/pt-br/docs/tutorials/kubernetes-basics/scale/scale-interactive.html new file mode 100644 index 0000000000000..a4ce38ded1e71 --- /dev/null +++ b/content/pt-br/docs/tutorials/kubernetes-basics/scale/scale-interactive.html @@ -0,0 +1,40 @@ +--- +title: Tutorial Interativo - Escalando seu aplicativo +weight: 20 +--- + + + + + + + + + + + +
+ Para interagir com o terminal, favor utilizar a versão desktop/tablet
    + + + diff --git a/content/pt-br/docs/tutorials/kubernetes-basics/scale/scale-intro.html b/content/pt-br/docs/tutorials/kubernetes-basics/scale/scale-intro.html new file mode 100644 index 0000000000000..351f4e01fef59 --- /dev/null +++ b/content/pt-br/docs/tutorials/kubernetes-basics/scale/scale-intro.html @@ -0,0 +1,121 @@ +--- +title: Executando múltiplas instâncias de seu aplicativo +weight: 10 +--- + + + + + + + + + +

    Objetivos

• Escalar uma aplicação usando kubectl.

    Escalando uma aplicação

    + +

Nos módulos anteriores nós criamos um Deployment, e então o expusemos publicamente através de um serviço (Service). O Deployment criou apenas um único Pod para executar nossa aplicação. Quando o tráfego aumentar, nós precisaremos escalar a aplicação para suportar a demanda de usuários.

    + +

O escalonamento é obtido pela mudança do número de réplicas em um Deployment.


    Resumo:

• Escalando um Deployment

Você pode criar, desde o início, um Deployment com múltiplas instâncias usando o parâmetro --replicas do comando kubectl create deployment
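Um exemplo (imagem e nome do Deployment meramente ilustrativos), assumindo kubectl v1.19 ou mais recente, versão em que o parâmetro --replicas passou a estar disponível em kubectl create deployment:

```bash
# Cria um Deployment já com 3 réplicas
kubectl create deployment nginx-app --image=nginx --replicas=3

# Confirma o número de réplicas desejadas, atuais e disponíveis
kubectl get deployments
```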


    Visão geral sobre escalonamento


    Escalar um Deployment garantirá que novos Pods serão criados e agendados para nós de processamento com recursos disponíveis. O escalonamento aumentará o número de Pods para o novo estado desejado. O Kubernetes também suporta o auto-escalonamento (autoscaling) de Pods, mas isso está fora do escopo deste tutorial. Escalar para zero também é possível, e isso terminará todos os Pods do Deployment especificado.
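Um esboço de como escalar um Deployment existente (o nome nginx-app é hipotético):

```bash
# Aumenta o Deployment para 4 réplicas
kubectl scale deployments/nginx-app --replicas=4

# Acompanha os novos Pods sendo criados e agendados nos nós
kubectl get pods -o wide

# Escalar para zero também é possível e encerra todos os Pods do Deployment
kubectl scale deployments/nginx-app --replicas=0
```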

    + +

    Executar múltiplas instâncias de uma aplicação irá requerer uma forma de distribuir o tráfego entre todas elas. Serviços possuem um balanceador de carga integrado que distribuirá o tráfego de rede entre todos os Pods de um Deployment exposto. Serviços irão monitorar continuamente os Pods em execução usando endpoints para garantir que o tráfego seja enviado apenas para Pods disponíveis.
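Para observar esse balanceamento (nomes hipotéticos), o objeto Endpoints do serviço lista os IPs dos Pods que estão prontos para receber tráfego:

```bash
# Lista os endpoints associados ao serviço; apenas Pods disponíveis aparecem aqui
kubectl get endpoints nginx-app

# O describe do serviço também mostra os endpoints atuais
kubectl describe service nginx-app
```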


    O Escalonamento é obtido pela mudança do número de réplicas em um Deployment.


Quando você tiver múltiplas instâncias de uma aplicação em execução, será capaz de fazer atualizações graduais sem indisponibilidade. Nós cobriremos isso no próximo módulo. Agora, vamos ao terminal online escalar nossa aplicação.

    + + + diff --git a/content/pt/examples/admin/logging/fluentd-sidecar-config.yaml b/content/pt-br/examples/admin/logging/fluentd-sidecar-config.yaml similarity index 100% rename from content/pt/examples/admin/logging/fluentd-sidecar-config.yaml rename to content/pt-br/examples/admin/logging/fluentd-sidecar-config.yaml diff --git a/content/pt/examples/admin/logging/two-files-counter-pod-agent-sidecar.yaml b/content/pt-br/examples/admin/logging/two-files-counter-pod-agent-sidecar.yaml similarity index 100% rename from content/pt/examples/admin/logging/two-files-counter-pod-agent-sidecar.yaml rename to content/pt-br/examples/admin/logging/two-files-counter-pod-agent-sidecar.yaml diff --git a/content/pt/examples/admin/logging/two-files-counter-pod-streaming-sidecar.yaml b/content/pt-br/examples/admin/logging/two-files-counter-pod-streaming-sidecar.yaml similarity index 100% rename from content/pt/examples/admin/logging/two-files-counter-pod-streaming-sidecar.yaml rename to content/pt-br/examples/admin/logging/two-files-counter-pod-streaming-sidecar.yaml diff --git a/content/pt/examples/admin/logging/two-files-counter-pod.yaml b/content/pt-br/examples/admin/logging/two-files-counter-pod.yaml similarity index 100% rename from content/pt/examples/admin/logging/two-files-counter-pod.yaml rename to content/pt-br/examples/admin/logging/two-files-counter-pod.yaml diff --git a/content/pt/examples/debug/counter-pod.yaml b/content/pt-br/examples/debug/counter-pod.yaml similarity index 100% rename from content/pt/examples/debug/counter-pod.yaml rename to content/pt-br/examples/debug/counter-pod.yaml diff --git a/content/pt/includes/index.md b/content/pt-br/includes/index.md similarity index 100% rename from content/pt/includes/index.md rename to content/pt-br/includes/index.md diff --git a/content/pt/partners/_index.html b/content/pt-br/partners/_index.html similarity index 100% rename from content/pt/partners/_index.html rename to content/pt-br/partners/_index.html diff --git a/content/pt/docs/concepts/cluster-administration/_index.md b/content/pt/docs/concepts/cluster-administration/_index.md deleted file mode 100755 index 75c4425176ad2..0000000000000 --- a/content/pt/docs/concepts/cluster-administration/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: "Administração de Cluster" -weight: 100 ---- - diff --git a/content/pt/docs/concepts/scheduling/_index.md b/content/pt/docs/concepts/scheduling/_index.md deleted file mode 100644 index 577dbb8c8789a..0000000000000 --- a/content/pt/docs/concepts/scheduling/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: "Escalonamento" -weight: 90 ---- - diff --git a/content/pt/docs/reference/glossary/control-plane.md b/content/pt/docs/reference/glossary/control-plane.md deleted file mode 100644 index 0465d5a2b8805..0000000000000 --- a/content/pt/docs/reference/glossary/control-plane.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Ambiente de gerenciamento -id: control-plane -date: 2020-04-19 -full_link: -short_description: > - A camada de orquestração de contêiner que expõe a API e as interfaces para definir, implantar e gerenciar o ciclo de vida dos contêineres. - -aka: -tags: -- fundamental ---- - A camada de orquestração de contêiner que expõe a API e as interfaces para definir, implantar e gerenciar o ciclo de vida dos contêineres. 
diff --git a/content/pt/docs/templates/index.md b/content/pt/docs/templates/index.md deleted file mode 100644 index 9d7bccd143f5f..0000000000000 --- a/content/pt/docs/templates/index.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -headless: true - -resources: -- src: "*alpha*" - title: "alpha" -- src: "*beta*" - title: "beta" -- src: "*deprecated*" - title: "deprecated" -- src: "*stable*" - title: "stable" ---- diff --git a/content/ru/docs/contribute/intermediate.md b/content/ru/docs/contribute/intermediate.md index ba3b06511f9d0..303a0877e5f75 100644 --- a/content/ru/docs/contribute/intermediate.md +++ b/content/ru/docs/contribute/intermediate.md @@ -217,8 +217,8 @@ PR объединяется, когда у него есть комментар ```bash origin git@github.com:/website.git (fetch) origin git@github.com:/website.git (push) - upstream https://github.com/kubernetes/website (fetch) - upstream https://github.com/kubernetes/website (push) + upstream https://github.com/kubernetes/website.git (fetch) + upstream https://github.com/kubernetes/website.git (push) ``` ### Работа в локальном репозитории diff --git a/content/ru/docs/reference/kubectl/cheatsheet.md b/content/ru/docs/reference/kubectl/cheatsheet.md index d2be7e9c0c1a0..02a8a9bc4af1f 100644 --- a/content/ru/docs/reference/kubectl/cheatsheet.md +++ b/content/ru/docs/reference/kubectl/cheatsheet.md @@ -186,6 +186,9 @@ kubectl get pods --show-labels JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' \ && kubectl get nodes -o jsonpath="$JSONPATH" | grep "Ready=True" +# Вывод декодированных секретов без внешних инструментов +kubectl get secret my-secret -o go-template='{{range $k,$v := .data}}{{"### "}}{{$k}}{{"\n"}}{{$v|base64decode}}{{"\n\n"}}{{end}}' + # Вывести все секреты, используемые сейчас в поде. kubectl get pods -o json | jq '.items[].spec.containers[].env[]?.valueFrom.secretKeyRef.name' | grep -v null | sort | uniq diff --git a/content/ru/docs/reference/kubectl/docker-cli-to-kubectl.md b/content/ru/docs/reference/kubectl/docker-cli-to-kubectl.md index 99a9e20d8c86c..4d834098c6c05 100644 --- a/content/ru/docs/reference/kubectl/docker-cli-to-kubectl.md +++ b/content/ru/docs/reference/kubectl/docker-cli-to-kubectl.md @@ -34,12 +34,20 @@ kubectl: ```shell # запустить под, в котором работает nginx -kubectl run --image=nginx nginx-app --port=80 --env="DOMAIN=cluster" +kubectl create deployment --image=nginx nginx-app ``` ``` deployment "nginx-app" created ``` +```shell +# add env to nginx-app +kubectl set env deployment/nginx-app DOMAIN=cluster +``` +``` +deployment.apps/nginx-app env updated +``` + {{< note >}} Команды `kubectl` выводят тип и имя созданного или измененного ресурса, который затем может быть использован в последующих командах. После создания объекта Deployment можно открыть новый сервис Service. 
{{< /note >}} @@ -260,7 +268,7 @@ nginx-app 1/1 1 1 2m ``` ```shell -kubectl get po -l run=nginx-app +kubectl get po -l app=nginx-app ``` ``` NAME READY STATUS RESTARTS AGE @@ -274,7 +282,7 @@ deployment "nginx-app" deleted ``` ```shell -kubectl get po -l run=nginx-app +kubectl get po -l app=nginx-app # Return nothing ``` diff --git a/content/vi/community/static/cncf-code-of-conduct.md b/content/vi/community/static/cncf-code-of-conduct.md index 9d7008e902c7b..12c5472142bde 100644 --- a/content/vi/community/static/cncf-code-of-conduct.md +++ b/content/vi/community/static/cncf-code-of-conduct.md @@ -23,8 +23,8 @@ Quy tắc ứng xử này áp dụng cả trong không gian dự án và trong k Các trường hợp lạm dụng, quấy rối hoặc hành vi không thể chấp nhận được trong Kubernetes có thể được báo cáo bằng cách liên hệ với [Ủy ban Quy tắc ứng xử Kubernetes](https://git.k8s.io/community/committee-code-of-conduct) thông qua . Đối với các dự án khác, vui lòng liên hệ với người bảo trì dự án CNCF hoặc hòa giải viên của chúng tôi, Mishi Choudhary . -Quy tắc ứng xử này được điều chỉnh từ Giao ước cộng tác viên (http://contributor-covenant.org), phiên bản 1.2.0, có sẵn tại -http://contributor-covenant.org/version/1/2/0/ +Quy tắc ứng xử này được điều chỉnh từ Giao ước cộng tác viên (https://contributor-covenant.org), phiên bản 1.2.0, có sẵn tại +https://contributor-covenant.org/version/1/2/0/ ### Quy tắc ứng xử sự kiện CNCF diff --git a/content/vi/docs/reference/glossary/api-group.md b/content/vi/docs/reference/glossary/api-group.md index ce2cfd45327b0..66c215e0a67fd 100644 --- a/content/vi/docs/reference/glossary/api-group.md +++ b/content/vi/docs/reference/glossary/api-group.md @@ -2,7 +2,7 @@ title: API Group id: api-group date: 2019-12-16 -full_link: /docs/concepts/overview/kubernetes-api/#api-groups +full_link: /docs/concepts/overview/kubernetes-api/#api-groups-and-versioning short_description: > Một tập những đường dẫn tương đối đến Kubernetes API. @@ -18,4 +18,4 @@ Một tập những đường dẫn tương đối đến Kubernetes API. Bạn có thể cho phép hay vô hiệu từng API group bằng cách thay đổi cấu hình trên API server của mình. Đồng thời bạn cũng có thể vô hiệu hay kích hoạt các đường dẫn cho những tài nguyên cụ thể. API group đơn giản hóa việc mở rộng Kubernetes API. Nó được chỉ định dưới dạng REST và trong trường `apiVersion` của một đối tượng đã được chuyển hóa. -- Đọc thêm về [API Group](/docs/concepts/overview/kubernetes-api/#api-groups). \ No newline at end of file +- Đọc thêm về [API Group](/docs/concepts/overview/kubernetes-api/#api-groups-and-versioning). \ No newline at end of file diff --git a/content/vi/docs/reference/kubectl/cheatsheet.md b/content/vi/docs/reference/kubectl/cheatsheet.md index 38e51750e63f4..079b36a42b6a9 100644 --- a/content/vi/docs/reference/kubectl/cheatsheet.md +++ b/content/vi/docs/reference/kubectl/cheatsheet.md @@ -1,381 +1,381 @@ ---- -title: kubectl Cheat Sheet -reviewers: -- ngtuna -content_type: concept -card: - name: reference - weight: 30 ---- - - - -Xem thêm: [Kubectl Overview](/docs/reference/kubectl/overview/) và [JsonPath Guide](/docs/reference/kubectl/jsonpath). - -Trang này là trang tổng quan của lệnh `kubectl`. 
- - - - - -# kubectl - Cheat Sheet - -## Kubectl Autocomplete - -### BASH - -```bash -source <(kubectl completion bash) # thiết lập autocomplete trong bash vào shell hiện tại, gói bash-completion nên được cài đặt trước tiên -echo "source <(kubectl completion bash)" >> ~/.bashrc # thêm vĩnh viễn autocomplete vào trong bash shell -``` - -Bạn có thể dùng một alias cho `kubectl` cũng hoạt động với completion: - -```bash -alias k=kubectl -complete -F __start_kubectl k -``` - -### ZSH - -```bash -source <(kubectl completion zsh) # thiết lập autocomplete trong zsh vào shell hiện tại -echo "if [ $commands[kubectl] ]; then source <(kubectl completion zsh); fi" >> ~/.zshrc # thêm vĩnh viễn autocomplete vào trong zsh shell -``` - -## Ngữ cảnh và cấu hình kubectl - -Thiết lập cụm Kubernetes nào mà `kubectl` sẽ giao tiếp với và sửa đổi thông tin cấu hình. -Xem tài liệu [Xác thực giữa các cụm với kubeconfig](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) -để biết thông tin chi tiết của tệp cấu hình. - -```bash -kubectl config view # Hiển thị các thiết lập kubeconfig đã được merged - -# sử dụng nhiều tệp kubeconfig cùng một lúc và xem cấu hình hợp nhất -KUBECONFIG=~/.kube/config:~/.kube/kubconfig2 - -kubectl config view - -# lấy mật khẩu cho người dùng e2e -kubectl config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}' - -kubectl config view -o jsonpath='{.users[].name}' # hiển thị người dùng đầu tiên -kubectl config view -o jsonpath='{.users[*].name}' # lấy danh sách người dùng -kubectl config get-contexts # hiển thị danh sách các ngữ cảnh -kubectl config current-context # hiển thị ngữ cảnh hiện tại -kubectl config use-context my-cluster-name # thiết lập ngữ cảnh mặc định cho my-cluster-name - -# thêm một cụm mới vào kubeconf hỗ trợ xác thực cơ bản -kubectl config set-credentials kubeuser/foo.kubernetes.com --username=kubeuser --password=kubepassword - -# lưu vĩnh viễn namespace cho tất cả các lệnh kubectl tiếp theo trong ngữ cảnh đó -kubectl config set-context --current --namespace=ggckad-s2 - -# thiết lập ngữ cảnh sử dụng tên người dùng và namespace cụ thể -kubectl config set-context gce --user=cluster-admin --namespace=foo \ - && kubectl config use-context gce - -kubectl config unset users.foo # xóa người dùng foo -``` - -## Apply -`apply` quản lý các ứng dụng thông qua các tệp định nghĩa tài nguyên Kubernetes. Nó tạo và cập nhật các tài nguyên trong một cụm thông qua việc chạy `kubectl apply`. Đây là cách được đề xuất để quản lý các ứng dụng Kubernetes trong thực tế. Xem thêm [Kubectl Book](https://kubectl.docs.kubernetes.io). - -## Tạo một đối tượng - -Kubernetes manifests có thể được định nghĩa trong tệp json hoặc yaml. Phần mở rộng `.yaml`, -`.yml`, và `.json` có thể được dùng. 
- -```bash -kubectl apply -f ./my-manifest.yaml # tạo tài nguyên -kubectl apply -f ./my1.yaml -f ./my2.yaml # tạo từ nhiều tệp -kubectl apply -f ./dir # tạo tài nguyên từ tất cả các tệp manifest trong thư mục dir -kubectl apply -f https://git.io/vPieo # tạo tài nguyên từ url -kubectl create deployment nginx --image=nginx # tạo một deployment nginx -kubectl explain pods,svc # lấy thông tin pod và service manifest - -# Tạo nhiều đối tượng YAML từ stdin -cat < pod.yaml - -kubectl attach my-pod -i # Đính kèm với container đang chạy -kubectl port-forward my-pod 5000:6000 # Lắng nghe trên cổng 5000 của máy local và chuyển tiếp sang cổng 6000 trên pod my-pod -kubectl exec my-pod -- ls / # Chạy lệnh trong một pod (trường hợp 1 container) -kubectl exec my-pod -c my-container -- ls / # Chạy lệnh trong pod (trường hợp nhiều container) -kubectl top pod POD_NAME --containers # Hiển thị số liệu của pod và container chạy trong nó -``` - -## Tương tác với các nodes và cụm - -```bash -kubectl cordon my-node # Đánh dấu my-node là không thể lập lịch -kubectl drain my-node # Gỡ my-node ra khỏi cụm để chuẩn bị cho việc bảo trì -kubectl uncordon my-node # Đánh dấu my-node có thể lập lịch trở lại -kubectl top node my-node # Hiển thị số liệu của node -kubectl cluster-info # Hiển thị địa chỉ master và các services -kubectl cluster-info dump # Kết xuất trạng thái hiện tại của cụm ra ngoài stdout -kubectl cluster-info dump --output-directory=/path/to/cluster-state # Kết xuất trạng thái hiện tại của cụm vào /path/to/cluster-state - -kubectl taint nodes foo dedicated=special-user:NoSchedule -``` - -### Các loại tài nguyên - -Liệt kê tất cả các loại tài nguyên được hỗ trợ cùng với tên viết tắt của chúng, [API group](/docs/concepts/overview/kubernetes-api/#api-groups), cho dù chúng là [namespaced](/docs/concepts/overview/working-with-objects/namespaces), và [Kind](/docs/concepts/overview/working-with-objects/kubernetes-objects): - -```bash -kubectl api-resources -``` - -Các hoạt động khác để khám phá các tài nguyên API: - -```bash -kubectl api-resources --namespaced=true # Tất cả các tài nguyên được đặt tên -kubectl api-resources --namespaced=false # Tất cả các tài nguyên không được đặt tên -kubectl api-resources -o name # Tất cả các tài nguyên với đầu ra đơn giản (chỉ gồm tên tài nguyên) -kubectl api-resources -o wide # Tất cả các tài nguyên với đầu ra mở rộng -kubectl api-resources --verbs=list,get # Tất cả các tài nguyên hỗ trợ yêu cầu "list" và "get" -kubectl api-resources --api-group=extensions # Tất cả tài nguyên trong nhóm API "tiện ích mở rộng" -``` - -### Định dạng đầu ra - -Để xuất thông tin chi tiết ra cửa sổ terminal của bạn theo một định dạng cụ thể, bạn có thể thêm các cờ `-o` hoặc `--output` vào lệnh `kubectl` được hỗ trợ. - -Định dạng đầu ra | Mô tả ---------------| ----------- -`-o=custom-columns=` | In một bảng bằng danh sách, các cột tùy chỉnh được phân tách bằng dấu phẩy -`-o=custom-columns-file=` | In một bảng bằng cách sử dụng mẫu cột tùy chỉnh trong tệp `` -`-o=json` | Xuất ra một đối tượng API theo định dạng JSON -`-o=jsonpath=