diff --git a/.github/workflows/4testing_repo.yaml b/.github/workflows/4testing_repo.yaml index 218e98d..53571c9 100644 --- a/.github/workflows/4testing_repo.yaml +++ b/.github/workflows/4testing_repo.yaml @@ -14,7 +14,7 @@ jobs: - name: Set chart version run: | wget https://download.onlyoffice.com/charts/4testing/index.yaml -P /tmp - LATEST_VERSION=$(awk '/docs-shards:/{f=1};f{print}' /tmp/index.yaml | awk '/version:/ {print $2;}' | head -1) + LATEST_VERSION=$(awk '/docs-shards:/{f=1};f{print}' /tmp/index.yaml | awk '/^ version:/ {print $2;}' | head -1) NEW_VERSION=$(awk '/version:/ {print $2;}' Chart.yaml | head -1) if [[ "$LATEST_VERSION" == *"$NEW_VERSION"* ]]; then RC=${LATEST_VERSION: -1} diff --git a/CHANGELOG.md b/CHANGELOG.md index edeb5b2..63b60b8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 2.0.0 + +### New Features + +* Added request routing based on Docs version during updates +* Added a signal control processing handler as the first process in Docs containers + +### Changes + +* Requests load balancing mechanism has been redesigned +* Pod remains in `Terminating` status while documents are being edited on it until `terminationGracePeriodSeconds` expires +* Released ONLYOFFICE Docs [v8.2.0](https://github.com/ONLYOFFICE/DocumentServer/blob/master/CHANGELOG.md#820) + ## 1.0.1 ### Changes diff --git a/Chart.yaml b/Chart.yaml index 8dbbccd..fe3846c 100644 --- a/Chart.yaml +++ b/Chart.yaml @@ -4,12 +4,6 @@ description: Helm chart for installing ONLYOFFICE Docs Shards in Kubernetes type: application -version: 1.0.1 +version: 2.0.0 -appVersion: 8.1.3 - -dependencies: -- name: ingress-nginx - version: 4.11.1 - repository: https://kubernetes.github.io/ingress-nginx - condition: ingress-nginx.enabled +appVersion: 8.2.0 diff --git a/LICENSES/LICENSE-APACHE2.0 b/LICENSES/LICENSE-APACHE2.0 deleted file mode 100644 index 657d417..0000000 --- a/LICENSES/LICENSE-APACHE2.0 +++ /dev/null @@ -1,193 +0,0 @@ -Apache License - 
Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at diff --git a/README.md b/README.md index 4be1f40..43baccc 100644 --- a/README.md +++ b/README.md @@ -7,20 +7,18 @@ ONLYOFFICE Docs for Kubernetes * [1. Add Helm repositories](#1-add-helm-repositories) * [2. Install Persistent Storage](#2-install-persistent-storage) * [3. Deploy Redis](#3-deploy-redis) - * [4. Configure dependent charts](#4-configure-dependent-charts) - + [4.1 Configure ingress-nginx/kubernetes subchart](#41-configure-ingress-nginxkubernetes-subchart) - * [5. Deploy StatsD exporter](#5-deploy-statsd-exporter) - + [5.1 Add Helm repositories](#51-add-helm-repositories) - + [5.2 Installing Prometheus](#52-installing-prometheus) - + [5.3 Installing StatsD exporter](#53-installing-statsd-exporter) - * [6. Make changes to Node-config configuration files](#6-make-changes-to-Node-config-configuration-files) - + [6.1 Create a ConfigMap containing a json file](#61-create-a-configmap-containing-a-json-file) - + [6.2 Specify parameters when installing ONLYOFFICE Docs](#62-specify-parameters-when-installing-onlyoffice-docs) - * [7. Add custom Fonts](#7-add-custom-fonts) - * [8. Add Plugins](#8-add-plugins) - * [9. 
Change interface themes](#9-change-interface-themes) - + [9.1 Create a ConfigMap containing a json file](#91-create-a-configmap-containing-a-json-file) - + [9.2 Specify parameters when installing ONLYOFFICE Docs](#92-specify-parameters-when-installing-onlyoffice-docs) + * [4. Deploy StatsD exporter](#4-deploy-statsd-exporter) + + [4.1 Add Helm repositories](#41-add-helm-repositories) + + [4.2 Installing Prometheus](#42-installing-prometheus) + + [4.3 Installing StatsD exporter](#43-installing-statsd-exporter) + * [5. Make changes to Node-config configuration files](#5-make-changes-to-Node-config-configuration-files) + + [5.1 Create a ConfigMap containing a json file](#51-create-a-configmap-containing-a-json-file) + + [5.2 Specify parameters when installing ONLYOFFICE Docs](#52-specify-parameters-when-installing-onlyoffice-docs) + * [6. Add custom Fonts](#6-add-custom-fonts) + * [7. Add Plugins](#7-add-plugins) + * [8. Change interface themes](#8-change-interface-themes) + + [8.1 Create a ConfigMap containing a json file](#81-create-a-configmap-containing-a-json-file) + + [8.2 Specify parameters when installing ONLYOFFICE Docs](#82-specify-parameters-when-installing-onlyoffice-docs) - [Deploy ONLYOFFICE Docs](#deploy-onlyoffice-docs) * [1. Deploy the ONLYOFFICE Docs license](#1-deploy-the-onlyoffice-docs-license) + [1.1 Create secret](#11-create-secret) @@ -31,7 +29,12 @@ ONLYOFFICE Docs for Kubernetes * [5. 
Configuration and installation details](#5-configuration-and-installation-details) * [5.1 Example deployment (optional)](#51-example-deployment-optional) * [5.2 Metrics deployment (optional)](#52-metrics-deployment-optional) - * [5.3 Expose ONLYOFFICE Docs via HTTPS](#53-expose-onlyoffice-docs-via-https) + * [5.3 Expose ONLYOFFICE Docs](#53-expose-onlyoffice-docs) + + [5.3.1 Expose ONLYOFFICE Docs via Service (HTTP Only)](#531-expose-onlyoffice-docs-via-service-http-only) + + [5.3.2 Expose ONLYOFFICE Docs via Ingress](#532-expose-onlyoffice-docs-via-ingress) + + [5.3.2.1 Installing the Kubernetes Nginx Ingress Controller](#5321-installing-the-kubernetes-nginx-ingress-controller) + + [5.3.2.2 Expose ONLYOFFICE Docs via HTTP](#5322-expose-onlyoffice-docs-via-http) + + [5.3.2.3 Expose ONLYOFFICE Docs via HTTPS](#5323-expose-onlyoffice-docs-via-https) * [6. Scale ONLYOFFICE Docs (optional)](#6-scale-onlyoffice-docs-optional) + [6.1 Horizontal Pod Autoscaling](#61-horizontal-pod-autoscaling) + [6.2 Manual scaling](#62-manual-scaling) @@ -39,8 +42,6 @@ ONLYOFFICE Docs for Kubernetes * [8. Update ONLYOFFICE Docs license (optional)](#8-update-onlyoffice-docs-license-optional) * [9. ONLYOFFICE Docs installation test (optional)](#9-onlyoffice-docs-installation-test-optional) * [10. Access to the info page (optional)](#10-access-to-the-info-page-optional) - * [11. Deploy ONLYOFFICE Docs with your own dependency (optional)](#11-deploy-onlyoffice-docs-with-your-own-dependency-optional) - * [11.1 Use your own nginx-ingress controller](#111-use-your-own-nginx-ingress-controller) - [Using Grafana to visualize metrics (optional)](#using-grafana-to-visualize-metrics-optional) * [1. 
Deploy Grafana](#1-deploy-grafana) + [1.1 Deploy Grafana without installing ready-made dashboards](#11-deploy-grafana-without-installing-ready-made-dashboards) @@ -117,41 +118,11 @@ Note: Set the `metrics.enabled=true` to enable exposing Redis metrics to be gath See more details about installing Redis via Helm [here](https://github.com/bitnami/charts/tree/main/bitnami/redis). -### 4. Configure dependent charts +### 4. Deploy StatsD exporter -ONLYOFFICE Docs use ingress-nginx by kubernetes as dependencies chart. Bundle nginx-ingress+Redis is used to implement balancing in sharded mode. You can manage the configuration of dependent chart, or disable it to use your own nginx-ingress controller. +*This step is optional. You can skip step [#4](#4-deploy-statsd-exporter) entirely if you don't want to run StatsD exporter* -If you want to manage the configuration of ingress-nginx controller dependent chart, please check section [#4.1](#41-configure-ingress-nginxkubernetes-subchart) - -(Optional) Also, you can use your own ingress-nginx controller, for more information please refer to step [#11](#11-deploy-onlyoffice-docs-with-your-own-dependency-optional) - -#### 4.1 Configure ingress-nginx/kubernetes subchart - -ingress-nginx/kubernetes subchart is **enabled by default** - -Docs working in high scalability mode (more than 1 shard) only with enabled ingress-nginx controller by kubernetes. 
- -### Ingress-nginx subchart parameters - -Some overridden values ​​for the ingress-nginx/Kubernetes subchart can be found in the table below: - -| Parameter | Description | Default | -|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| -| `ingress-nginx.enabled` | Define that to enable or disable ingress-nginx subchart during deployment | `true` | -| `ingress-nginx.controller.replicaCount` | Number of deployed controller replicas | `2` | -| `ingress-nginx.namespaceOverride` | Override the ingress-nginx deployment namespace | `default` | -| `ingress-nginx.controller.allowSnippetAnnotations` | This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected | `true` | -| `ingress-nginx.service.annotations` | Annotations to be added to the external controller service. See controller.service.internal.annotations for annotations to be added to the internal controller service. | `{}` | -| `ingress-nginx.controller.extraVolumeMounts` | Additional volumeMounts to the controller main container. Note: These parameters are used to add configuration to allow custom balancing. For more information please check values.yaml | `[]` | -| `ingress-nginx.controller.extraVolumes` | Additional volumes to the controller pod. Note: These parameters are used to add configuration to allow custom balancing. For more information please check values.yaml | `[]` | - -See more details about installing ingress-nginx via Helm [here](https://github.com/kubernetes/ingress-nginx/tree/main/charts/ingress-nginx). - -### 5. 
Deploy StatsD exporter - -*This step is optional. You can skip step [#5](#5-deploy-statsd-exporter) entirely if you don't want to run StatsD exporter* - -#### 5.1 Add Helm repositories +#### 4.1 Add Helm repositories ```bash $ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts @@ -159,7 +130,7 @@ $ helm repo add kube-state-metrics https://kubernetes.github.io/kube-state-metri $ helm repo update ``` -#### 5.2 Installing Prometheus +#### 4.2 Installing Prometheus To install Prometheus to your cluster, run the following command: @@ -172,7 +143,7 @@ To change the scrape interval, specify the `server.global.scrape_interval` param See more details about installing Prometheus via Helm [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus). -#### 5.3 Installing StatsD exporter +#### 4.3 Installing StatsD exporter To install StatsD exporter to your cluster, run the following command: @@ -187,11 +158,11 @@ See more details about installing Prometheus StatsD exporter via Helm [here](htt To allow the StatsD metrics in ONLYOFFICE Docs, follow step [5.2](#52-metrics-deployment-optional) -### 6. Make changes to Node-config configuration files +### 5. Make changes to Node-config configuration files -*This step is optional. You can skip step [#6](#6-make-changes-to-node-config-configuration-files) entirely if you don't need to make changes to the configuration files* +*This step is optional. 
You can skip step [#5](#5-make-changes-to-node-config-configuration-files) entirely if you don't need to make changes to the configuration files* -#### 6.1 Create a ConfigMap containing a json file +#### 5.1 Create a ConfigMap containing a json file In order to create a ConfigMap from a file that contains the `local-production-linux.json` structure, you need to run the following command: @@ -202,33 +173,33 @@ $ kubectl create configmap custom-local-config \ Note: Any name except `local-config` can be used instead of `custom-local-config`. -#### 6.2 Specify parameters when installing ONLYOFFICE Docs +#### 5.2 Specify parameters when installing ONLYOFFICE Docs When installing ONLYOFFICE Docs, specify the `extraConf.configMap=custom-local-config` and `extraConf.filename=local-production-linux.json` parameters -Note: If you need to add a configuration file after the ONLYOFFICE Docs is already installed, you need to execute step [6.1](#61-create-a-configmap-containing-a-json-file) +Note: If you need to add a configuration file after the ONLYOFFICE Docs is already installed, you need to execute step [5.1](#51-create-a-configmap-containing-a-json-file) and then run the `helm upgrade documentserver onlyoffice/docs-shards --set extraConf.configMap=custom-local-config --set extraConf.filename=local-production-linux.json` command or `helm upgrade documentserver -f ./values.yaml onlyoffice/docs-shards` if the parameters are specified in the `values.yaml` file. -### 7. Add custom Fonts +### 6. Add custom Fonts -*This step is optional. You can skip step [#7](#7-add-custom-fonts) entirely if you don't need to add your fonts* +*This step is optional. You can skip step [#6](#6-add-custom-fonts) entirely if you don't need to add your fonts* In order to add fonts to images, you need to rebuild the images. Refer to the relevant steps in [this](https://github.com/ONLYOFFICE/Docker-Docs#building-onlyoffice-docs) manual. 
Then specify your images when installing the ONLYOFFICE Docs. -### 8. Add Plugins +### 7. Add Plugins -*This step is optional. You can skip step [#8](#8-add-plugins) entirely if you don't need to add plugins* +*This step is optional. You can skip step [#7](#7-add-plugins) entirely if you don't need to add plugins* In order to add plugins to images, you need to rebuild the images. Refer to the relevant steps in [this](https://github.com/ONLYOFFICE/Docker-Docs#building-onlyoffice-docs) manual. Then specify your images when installing the ONLYOFFICE Docs. -### 9. Change interface themes +### 8. Change interface themes -*This step is optional. You can skip step [#9](#9-change-interface-themes) entirely if you don't need to change the interface themes* +*This step is optional. You can skip step [#8](#8-change-interface-themes) entirely if you don't need to change the interface themes* -#### 9.1 Create a ConfigMap containing a json file +#### 8.1 Create a ConfigMap containing a json file To create a ConfigMap with a json file that contains the interface themes, you need to run the following command: @@ -239,11 +210,11 @@ $ kubectl create configmap custom-themes \ Note: Instead of `custom-themes` and `custom-themes.json` you can use any other names. -#### 9.2 Specify parameters when installing ONLYOFFICE Docs +#### 8.2 Specify parameters when installing ONLYOFFICE Docs When installing ONLYOFFICE Docs, specify the `extraThemes.configMap=custom-themes` and `extraThemes.filename=custom-themes.json` parameters. 
-Note: If you need to add interface themes after the ONLYOFFICE Docs is already installed, you need to execute step [6.1](#61-create-a-configmap-containing-a-json-file) +Note: If you need to add interface themes after the ONLYOFFICE Docs is already installed, you need to execute step [5.1](#51-create-a-configmap-containing-a-json-file) and then run the `helm upgrade documentserver onlyoffice/docs-shards --set extraThemes.configMap=custom-themes --set extraThemes.filename=custom-themes.json` command or `helm upgrade documentserver -f ./values.yaml onlyoffice/docs-shards` if the parameters are specified in the `values.yaml` file. @@ -278,8 +249,7 @@ To deploy ONLYOFFICE Docs with the release name `documentserver`: ```bash $ helm install documentserver onlyoffice/docs-shards ``` - -The command deploys ONLYOFFICE Docs on the Kubernetes cluster in the default configuration. The [Parameters](#4-parameters) section lists the parameters that can be configured during installation. +The command deploys ONLYOFFICE Docs on the Kubernetes cluster in the default configuration. The [Parameters](#4-parameters) section lists the parameters that can be configured during installation. ### 3. Uninstall ONLYOFFICE Docs @@ -364,10 +334,10 @@ The `helm delete` command removes all the Kubernetes components associated with | `requestFilteringAgent.allowMetaIPAddress` | Defines if it is allowed to connect meta address or not | `false` | | `requestFilteringAgent.allowIPAddressList` | Defines the list of IP addresses allowed to connect. 
This values are preferred than `requestFilteringAgent.denyIPAddressList` | `[]` | | `requestFilteringAgent.denyIPAddressList` | Defines the list of IP addresses allowed to connect | `[]` | -| `documentserver.terminationGracePeriodSeconds` | The time to terminate gracefully during which the Pod will have the Terminating status | `60` | +| `documentserver.terminationGracePeriodSeconds` | The time to terminate gracefully during which the Pod will have the Terminating status | `10800` | +| `documentserver.terminationGraceTimeSeconds` | The time to terminate gracefully in seconds, which remains for turning off the shard and assembling documents open on it until the termination grace period is fully completed. Cannot be greater than `documentserver.terminationGracePeriodSeconds` | `600` | | `documentserver.keysRedisDBNum` | The number of the database for storing the balancing results | `1` | | `documentserver.KeysExpireTime` | The time in seconds after which the key will be deleted from the balancing database. by default 172800 mean 48 hours | `172800` | -| `documentserver.ingressCustomConfigMapsNamespace` | Define where custom controller configmaps will be deployed | `default` | | `documentserver.annotations` | Defines annotations that will be additionally added to Documentserver Deployment | `{}` | | `documentserver.podAnnotations` | Map of annotations to add to the Documentserver deployment pods | `rollme: "{{ randAlphaNum 5 | quote }}"` | | `documentserver.replicas` | Number of Documentserver replicas to deploy. If the `documentserver.autoscaling.enabled` parameter is enabled, it is ignored. 
| `3` | @@ -390,7 +360,7 @@ The `helm delete` command removes all the Kubernetes components associated with | `documentserver.autoscaling.customMetricsType` | Custom, additional or external autoscaling metrics for the documentserver deployment | `[]` | | `documentserver.autoscaling.behavior` | Configuring Documentserver deployment scaling behavior policies for the `scaleDown` and `scaleUp` fields | `{}` | | `documentserver.initContainers.image.repository` | Documentserver add-shardkey initContainer image repository | `onlyoffice/docs-utils` | -| `documentserver.initContainers.image.tag` | Documentserver add-shardkey initContainer image tag | `8.1.3-1` | +| `documentserver.initContainers.image.tag` | Documentserver add-shardkey initContainer image tag | `8.2.0-2` | | `documentserver.initContainers.image.pullPolicy` | Documentserver add-shardkey initContainer image pull policy | `IfNotPresent` | | `documentserver.initContainers.containerSecurityContext.enabled` | Configure a Security Context for Documentserver add-shardkey initContainer container in Pod | `false` | | `documentserver.initContainers.resources.requests.memory` | The requested Memory for the Documentserver add-shardkey initContainer | `256Mi` | @@ -404,7 +374,7 @@ The `helm delete` command removes all the Kubernetes components associated with | Parameter | Description | Default | |-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| | `documentserver.docservice.image.repository` | Docservice container image repository* | `onlyoffice/docs-docservice-de` | -| `documentserver.docservice.image.tag` | Docservice container image tag | `8.1.3-1` | +| `documentserver.docservice.image.tag` | Docservice container image tag | `8.2.0-2` | | 
`documentserver.docservice.image.pullPolicy` | Docservice container image pull policy | `IfNotPresent` | | `documentserver.docservice.containerSecurityContext.enabled`| Enable security context for the Docservice container | `false` | | `documentserver.docservice.containerPorts.http` | Define docservice container port | `8000` | @@ -434,7 +404,7 @@ The `helm delete` command removes all the Kubernetes components associated with | `documentserver.proxy.infoAllowedExistingSecret` | Name of existing secret to use for info auth password. Used if `proxy.infoAllowedUser` is set. Must contain the key specified in `proxy.infoAllowedSecretKeyName`. If set to, it takes priority over the `proxy.infoAllowedPassword` | `""` | | `documentserver.proxy.welcomePage.enabled` | Defines whether the welcome page will be displayed | `true` | | `documentserver.proxy.image.repository` | Docservice Proxy container image repository* | `onlyoffice/docs-proxy-de` | -| `documentserver.proxy.image.tag` | Docservice Proxy container image tag | `8.1.3-1` | +| `documentserver.proxy.image.tag` | Docservice Proxy container image tag | `8.2.0-2` | | `documentserver.proxy.image.pullPolicy` | Docservice Proxy container image pull policy | `IfNotPresent` | | `documentserver.proxy.containerSecurityContext.enabled` | Enable security context for the Proxy container | `false` | | `documentserver.proxy.containerPorts.http` | proxy container port | `8888` | @@ -452,7 +422,7 @@ The `helm delete` command removes all the Kubernetes components associated with |-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| | `documentserver.converter.count` | The mumber of Converter containers in the Documentserver Pod | `3` | | 
`documentserver.converter.image.repository` | Converter container image repository* | `onlyoffice/docs-converter-de` | -| `documentserver.converter.image.tag` | Converter container image tag | `8.1.3-1` | +| `documentserver.converter.image.tag` | Converter container image tag | `8.2.0-2` | | `documentserver.converter.image.pullPolicy` | Converter container image pull policy | `IfNotPresent` | | `documentserver.converter.containerSecurityContext.enabled` | Enable security context for the Converter container | `false` | | `documentserver.converter.resources.requests.memory` | The requested Memory for the Converter container | `256Mi` | @@ -506,7 +476,7 @@ List of parameters for broker inside the documentserver pod | `example.nodeSelector` | Node labels for Example Pods assignment. If set to, it takes priority over the `nodeSelector` | `{}` | | `example.tolerations` | Tolerations for Example Pods assignment. If set to, it takes priority over the `tolerations` | `[]` | | `example.image.repository` | Example container image name | `onlyoffice/docs-example` | -| `example.image.tag` | Example container image tag | `8.1.3-1` | +| `example.image.tag` | Example container image tag | `8.2.0-2` | | `example.image.pullPolicy` | Example container image pull policy | `IfNotPresent` | | `example.containerSecurityContext.enabled` | Enable security context for the Example container | `false` | | `example.dsUrl` | ONLYOFFICE Docs external address. It should be changed only if it is necessary to check the operation of the conversion in Example (e.g. http://\/) | `/` | @@ -515,11 +485,78 @@ List of parameters for broker inside the documentserver pod | `example.extraConf.configMap` | The name of the ConfigMap containing the json file that override the default values. 
See an example of creation [here](https://github.com/ONLYOFFICE/Kubernetes-Docs?tab=readme-ov-file#71-create-a-configmap-containing-a-json-file) | `""` | | `example.extraConf.filename` | The name of the json file that contains custom values. Must be the same as the `key` name in `example.extraConf.ConfigMap` | `local.json` | +### Balancer parameters + +| Parameter | Description | Default | +|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +| `customBalancer.workerConnections` | Set worker connections count for balancer container | `16384` | +| `customBalancer.workerProcesses` | Set worker processes count for balancer container | `1` | +| `customBalancer.terminationGracePeriodSeconds` | The time to terminate gracefully during which the balancer Pod will have the Terminating status | `30` | +| `customBalancer.annotations` | Defines annotations that will be additionally added to Balancer Deployment | `{}` | +| `customBalancer.autoscaling.enabled` | Enable or disable autoscaling for balancer replicas | `false` | +| `customBalancer.autoscaling.annotations` | Defines annotations that will be additionally added to balancer deployment HPA | `{}` | +| `customBalancer.autoscaling.minReplicas` | Balancer deployment autoscaling minimum number of replicas | `2` | +| `customBalancer.autoscaling.maxReplicas` | Balancer deployment autoscaling maximum number of replicas | `4` | +| `customBalancer.autoscaling.targetCPU.enabled` | Enable autoscaling of balancer deployment by CPU usage percentage | `true` | +| `customBalancer.autoscaling.targetCPU.utilizationPercentage`| Balancer deployment autoscaling target CPU percentage | `70` | +| `customBalancer.autoscaling.targetMemory.enabled` | Enable 
autoscaling of balancer deployment by memory usage percentage | `false` | +| `customBalancer.autoscaling.targetMemory.utilizationPercentage`| Balancer deployment autoscaling target memory percentage | `70` | +| `customBalancer.autoscaling.customMetricsType` | Custom, additional or external autoscaling metrics for the balancer deployment | `[]` | +| `customBalancer.autoscaling.behavior` | Configuring Balancer deployment scaling behavior policies for the `scaleDown` and `scaleUp` fields | `{}` | +| `customBalancer.startupProbe.enabled` | Enable startupProbe for balancer container | `true` | +| `customBalancer.startupProbe.httpGet.path` | Checking the path for startupProbe | `/balancer-healthcheck` | +| `customBalancer.startupProbe.httpGet.port` | Checking the port for startupProbe | `80` | +| `customBalancer.startupProbe.failureThreshold` | Failure threshold for startupProbe | `30` | +| `customBalancer.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `customBalancer.readinessProbe.enabled` | Enable readinessProbe for balancer container | `true` | +| `customBalancer.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `2` | +| `customBalancer.readinessProbe.httpGet.path` | Checking the path for readinessProbe | `/balancer-healthcheck` | +| `customBalancer.readinessProbe.httpGet.port` | Checking the port for readinessProbe | `80` | +| `customBalancer.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `customBalancer.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `customBalancer.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `3` | +| `customBalancer.livenessProbe.enabled` | Enable livenessProbe for balancer container | `true` | +| `customBalancer.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `customBalancer.livenessProbe.httpGet.path` | Checking the path for livenessProbe | `/balancer-healthcheck` | +| 
`customBalancer.livenessProbe.httpGet.port` | Checking the port for livenessProbe | `80` | +| `customBalancer.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `customBalancer.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `customBalancer.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `3` | +| `customBalancer.resources.requests` | The requested resources for the balancer container | `{}` | +| `customBalancer.resources.limits` | The resources limits for the balancer container | `{}` | +| `customBalancer.containerSecurityContext.enabled` | Enable security context for the Balancer container | `false` | +| `customBalancer.containerSecurityContext.runAsUser` | User ID for the Balancer container | `101` | +| `customBalancer.containerSecurityContext.runAsGroup` | Group ID for the Balancer container | `101` | +| `customBalancer.containerSecurityContext.runAsNonRoot` | Require that the container will run with a user with UID other than 0 | `true` | +| `customBalancer.containerSecurityContext.allowPrivilegeEscalation` | Controls whether a process can gain more privileges than its parent process | `false` | +| `customBalancer.containerSecurityContext.seccompProfile` | Defines the Seccomp profile for the Balancer container | `RuntimeDefault` | +| `customBalancer.containerSecurityContext.capabilities` | Defines the privileges granted to the process | `["ALL"]` | +| `customBalancer.customPodAntiAffinity` | Prohibiting the scheduling of balancer Pods relative to other Pods containing the specified labels on the same node | `{}` | +| `customBalancer.podAffinity` | Pod affinity rules for balancer Pods scheduling by nodes relative to other Pods | `{}` | +| `customBalancer.nodeAffinity` | Node affinity rules for balancer Pods scheduling by nodes | `{}` | +| `customBalancer.nodeSelector` | Node labels for balancer Pods assignment | `{}` | +| `customBalancer.tolerations` | Tolerations for balancer Pods 
assignment | `[]` | +| `customBalancer.image.repository` | Specify balancer image repository | `onlyoffice/docs-balancer` | +| `customBalancer.image.tag` | Specify balancer image tag | `8.2.0-2` | +| `customBalancer.image.pullPolicy` | Balancer image pull policy | `IfNotPresent` | +| `customBalancer.replicas` | Number of balancer replicas to deploy. If the `customBalancer.autoscaling.enabled` parameter is enabled, it is ignored | `3` | +| `customBalancer.containerPorts` | Balancer container port | `80` | +| `customBalancer.service.annotations` | Map of annotations to add to the ONLYOFFICE Docs balancer service | `{}` | +| `customBalancer.service.existing` | The name of an existing service for balancer. If not set, a service named `docs-balancer` will be created | `""` | +| `customBalancer.service.type` | Balancer service type | `ClusterIP` | +| `customBalancer.service.port` | Balancer service port | `80` | +| `customBalancer.service.sessionAffinity` | Session Affinity for ONLYOFFICE Docs balancer service | `""` | +| `customBalancer.service.sessionAffinityConfig` | Configuration for ONLYOFFICE Docs balancer service Session Affinity | `{}` | +| `customBalancer.updateStrategy.type` | Balancer deployment update strategy type | `RollingUpdate` | +| `customBalancer.updateStrategy.rollingUpdate.maxUnavailable`| Maximum number of Balancer Pods unavailable during the update process | `25%` | +| `customBalancer.updateStrategy.rollingUpdate.maxSurge` | Maximum number of Balancer Pods created over the desired number of Pods | `25%` | +| `customBalancer.podAnnotations` | Map of annotations to add to the Balancer deployment Pod | `rollme: "{{ randAlphaNum 5 \| quote }}"` | + ### Ingress parameters  | Parameter | Description | Default | 
|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| -| `ingress.enabled` | Enable the creation of an ingress for the ONLYOFFICE Docs | `true` | +| `ingress.enabled` | Enable the creation of an ingress for the ONLYOFFICE Docs | `false` | | `ingress.annotations` | Map of annotations to add to the Ingress. If set to, it takes priority over the `commonAnnotations` | `nginx.ingress.kubernetes.io/proxy-body-size: 100m` | | `ingress.ingressClassName` | Used to reference the IngressClass that should be used to implement this Ingress | `nginx` | | `ingress.host` | Ingress hostname for the ONLYOFFICE Docs ingress | `""` | @@ -544,7 +581,7 @@ List of parameters for broker inside the documentserver pod | `grafanaDashboard.job.nodeSelector` | Node labels for Grafana Dashboard Job Pod assignment. If set to, it takes priority over the `nodeSelector` | `{}` | | `grafanaDashboard.job.tolerations` | Tolerations for Grafana Dashboard Job Pod assignment. 
If set to, it takes priority over the `tolerations` | `[]` | | `grafanaDashboard.job.image.repository` | Job by Grafana Dashboard ONLYOFFICE Docs image repository | `onlyoffice/docs-utils` | -| `grafanaDashboard.job.image.tag` | Job by Grafana Dashboard ONLYOFFICE Docs image tag | `8.1.3-1` | +| `grafanaDashboard.job.image.tag` | Job by Grafana Dashboard ONLYOFFICE Docs image tag | `8.2.0-2` | | `grafanaDashboard.job.image.pullPolicy` | Job by Grafana Dashboard ONLYOFFICE Docs image pull policy | `IfNotPresent` | | `grafanaDashboard.job.containerSecurityContext.enabled` | Enable security context for the Grafana Dashboard container | `false` | | `grafanaDashboard.job.resources.requests` | The requested resources for the job Grafana Dashboard container | `{}` | @@ -562,7 +599,7 @@ List of parameters for broker inside the documentserver pod | `tests.nodeSelector` | Node labels for Test Pod assignment. If set to, it takes priority over the `nodeSelector` | `{}` | | `tests.tolerations` | Tolerations for Test Pod assignment. 
If set to, it takes priority over the `tolerations` | `[]` | | `tests.image.repository` | Test container image name | `onlyoffice/docs-utils` | -| `tests.image.tag` | Test container image tag | `8.1.3-1` | +| `tests.image.tag` | Test container image tag | `8.2.0-2` | | `tests.image.pullPolicy` | Test container image pull policy | `IfNotPresent` | | `tests.containerSecurityContext.enabled` | Enable security context for the Test container | `false` | | `tests.resources.requests` | The requested resources for the test container | `{}` | @@ -616,7 +653,87 @@ If you want to use Grafana to visualize metrics, set `grafana.enabled` to `true` $ helm install documentserver onlyoffice/docs-shards --set grafana.enabled=true --set grafana.ingress.enabled=true ``` -### 5.3 Expose ONLYOFFICE Docs via HTTPS +### 5.3 Expose ONLYOFFICE Docs + +#### 5.3.1 Expose ONLYOFFICE Docs via Service (HTTP Only) + +*You should skip step[#5.3.1](#531-expose-onlyoffice-docs-via-service-http-only) if you are going to expose ONLYOFFICE Docs via HTTPS* + +This type of exposure has the least overheads of performance, it creates a loadbalancer to get access to ONLYOFFICE Docs. +Use this type of exposure if you use external TLS termination, and don't have another WEB application in the k8s cluster. + +To expose ONLYOFFICE Docs via service, set the `customBalancer.service.type` parameter to LoadBalancer: + +```bash +$ helm install documentserver onlyoffice/docs-shards --set customBalancer.service.type=LoadBalancer,customBalancer.service.port=80 + +``` + +Run the following command to get the `documentserver` service IP: + +```bash +$ kubectl get service documentserver -o jsonpath="{.status.loadBalancer.ingress[*].ip}" +``` + +After that, ONLYOFFICE Docs will be available at `http://DOCUMENTSERVER-SERVICE-IP/`. 
+ +If the service IP is empty, try getting the `documentserver` service hostname: + +```bash +$ kubectl get service documentserver -o jsonpath="{.status.loadBalancer.ingress[*].hostname}" +``` + +In this case, ONLYOFFICE Docs will be available at `http://DOCUMENTSERVER-SERVICE-HOSTNAME/`. + + +#### 5.3.2 Expose ONLYOFFICE Docs via Ingress + +#### 5.3.2.1 Installing the Kubernetes Nginx Ingress Controller + +To install the Nginx Ingress Controller to your cluster, run the following command: + +```bash +$ helm install nginx-ingress ingress-nginx/ingress-nginx --set controller.publishService.enabled=true,controller.replicaCount=2 +``` + +Note: To install Nginx Ingress with the same parameters and to enable exposing ingress-nginx metrics to be gathered by Prometheus, run the following command: + +```bash +$ helm install nginx-ingress -f https://raw.githubusercontent.com/ONLYOFFICE/Kubernetes-Docs-Shards/master/sources/ingress_values.yaml ingress-nginx/ingress-nginx +``` + +See more detail about installing Nginx Ingress via Helm [here](https://github.com/kubernetes/ingress-nginx/tree/master/charts/ingress-nginx). + +#### 5.3.2.2 Expose ONLYOFFICE Docs Shards via HTTP + +*You should skip step[5.3.2.2](#5322-expose-onlyoffice-docs-via-http) if you are going to expose ONLYOFFICE Docs via HTTPS* + +This type of exposure has more overheads of performance compared with exposure via service, it also creates a loadbalancer to get access to ONLYOFFICE Docs. +Use this type if you use external TLS termination and when you have several WEB applications in the k8s cluster. You can use the one set of ingress instances and the one loadbalancer for those. It can optimize the entry point performance and reduce your cluster payments, cause providers can charge a fee for each loadbalancer. 
+ +To expose ONLYOFFICE Docs via ingress HTTP, set the `ingress.enabled` parameter to true: + +```bash +$ helm install documentserver onlyoffice/docs-shards --set ingress.enabled=true +``` + +Run the following command to get the `documentserver` ingress IP: + +```bash +$ kubectl get ingress documentserver -o jsonpath="{.status.loadBalancer.ingress[*].ip}" +``` + +After that, ONLYOFFICE Docs Shards will be available at `http://DOCUMENTSERVER-INGRESS-IP/`. + +If the ingress IP is empty, try getting the `documentserver` ingress hostname: + +```bash +$ kubectl get ingress documentserver -o jsonpath="{.status.loadBalancer.ingress[*].hostname}" +``` + +In this case, ONLYOFFICE Docs will be available at `http://DOCUMENTSERVER-INGRESS-HOSTNAME/`. + +#### 5.3.2.3 Expose ONLYOFFICE Docs via HTTPS This type of exposure allows you to enable internal TLS termination for ONLYOFFICE Docs. @@ -649,7 +766,7 @@ $ kubectl get ingress documentserver -o jsonpath="{.status.loadBalancer.ingress[ Associate the `documentserver` ingress IP or hostname with your domain name through your DNS provider. -After that, ONLYOFFICE Docs will be available at `https://your-domain-name/`. +After that, ONLYOFFICE Docs Shards will be available at `https://your-domain-name/`. ### 6. Scale ONLYOFFICE Docs (optional) @@ -757,51 +874,13 @@ Generally the Pods / Nodes / Load Balancer addresses will actually be the client In this case the access to the info page will be available to everyone. You can further limit the access to the `info` page using Nginx [Basic Authentication](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html) which you can turn on by setting `documentserver.proxy.infoAllowedUser` parameter value and by setting the password using `documentserver.proxy.infoAllowedPassword` parameter, alternatively you can use the existing secret with password by setting its name with `documentserver.proxy.infoAllowedExistingSecret` parameter. -### 11. 
Deploy ONLYOFFICE Docs with your own dependency (optional) - -### 11.1 Use your own nginx-ingress controller - -**Note:** ONLYOFFICE Docs support **only** nginx-ingress controller [by the kubernetes](https://github.com/kubernetes/ingress-nginx). - -If you want to deploy ONLYOFFICE Docs in cluster where already exist nginx-ingress controller, please follow the step below. - -**First of all** is to render two configMaps templates with `helm template` command, and apply them. This configMaps are needed for normal functioning of balancing requests between Docs shards. - -**Note:** These config maps must be located in the same namespace as your deployment nginx-ingress controller. To ensure that the generated config maps will be deployed in the same namespace as your nginx-ingress controller, please set the parameter `documentserver.ingressCustomConfigMapsNamespace` if needed. - -**Note:** When creating configMaps manually, check and change if necessary the parameters for connecting to Redis. - -> All available Redis connections parameters present [here](#4-parameters) with the `connections.` prefix - -```bash -helm template docs onlyoffice/docs-shards --set documentserver.ingressCustomConfigMapsNamespace= --show-only templates/configmaps/balancer-snippet.yaml --show-only templates/configmaps/balancer-lua.yaml --dry-run=server > ./ingressConfigMaps.yaml -``` - -**The second step**, apply configMaps that you create with command below: - -```bash -$ kubectl apply -f ./ingressConfigMaps.yaml -``` - -**The third step**, you need to update your nginx-ingress controller deployment with new parameters.That will add volumes with the necessary configmaps that you just created. 
Follow the commands: - -```bash -$ helm upgrade ingress-nginx --repo https://kubernetes.github.io/ingress-nginx -n -f https://raw.githubusercontent.com/ONLYOFFICE/Kubernetes-Docs-Shards/master/sources/ingress_values.yaml -``` - -**Now**, when your nginx-ingress controller if configure, you can deploy ONLYOFFICE Docs with command: - -```bash -$ helm install docs onlyoffice/docs-shards --set ingress-nginx.enabled=false -``` - ## Using Grafana to visualize metrics (optional) *This step is optional. You can skip this section if you don't want to install Grafana* ### 1. Deploy Grafana -Note: It is assumed that step [#6.2](#62-installing-prometheus) has already been completed. +Note: It is assumed that step [#4.2](#42-installing-prometheus) has already been completed. #### 1.1 Deploy Grafana without installing ready-made dashboards @@ -824,7 +903,7 @@ $ helm install grafana bitnami/grafana \ To install ready-made Grafana dashboards, set the `grafana.enabled` and `grafana.dashboard.enabled` parameters to `true`. If ONLYOFFICE Docs is already installed you need to run the `helm upgrade documentserver onlyoffice/docs-shards --set grafana.enabled=true --set grafana.dashboard.enabled=true` command or `helm upgrade documentserver -f ./values.yaml onlyoffice/docs-shards` if the parameters are specified in the [values.yaml](values.yaml) file. As a result, ready-made dashboards in the `JSON` format will be downloaded from the Grafana [website](https://grafana.com/grafana/dashboards), -the necessary edits will be made to them and configmap will be created from them. A dashboard will also be added to visualize metrics coming from the ONLYOFFICE Docs (it is assumed that step [#6](#6-deploy-statsd-exporter) has already been completed). +the necessary edits will be made to them and configmap will be created from them. A dashboard will also be added to visualize metrics coming from the ONLYOFFICE Docs (it is assumed that step [#4](#4-deploy-statsd-exporter) has already been completed). 
#### 1.2.2 Installing Grafana diff --git a/sources/ingress_values.yaml b/sources/ingress_values.yaml index 769bf27..89c9a25 100644 --- a/sources/ingress_values.yaml +++ b/sources/ingress_values.yaml @@ -1,19 +1,10 @@ controller: - allowSnippetAnnotations: true - extraVolumeMounts: - - name: custom-balancer - mountPath: /etc/nginx/custom_balancer.conf - subPath: custom_balancer.conf - - name: balancer-lua - mountPath: /etc/nginx/lua/balancer.lua - subPath: balancer.lua - extraVolumes: - - name: custom-balancer - configMap: - name: balancer-snippet - - name: balancer-lua - configMap: - name: balancer-lua publishService: enabled: true replicaCount: 2 + metrics: + enabled: true + service: + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '10254' diff --git a/sources/scripts/add_shardkey.py b/sources/scripts/add_shardkey.py index ffdb7d6..e300d68 100644 --- a/sources/scripts/add_shardkey.py +++ b/sources/scripts/add_shardkey.py @@ -1,6 +1,7 @@ import os import sys import logging +from kubernetes import client redisConnectorName = os.environ.get('REDIS_CONNECTOR_NAME') redisHost = os.environ.get('REDIS_SERVER_HOST') @@ -19,10 +20,32 @@ shardKey = os.environ.get('DEFAULT_SHARD_KEY') epIP = os.environ.get('SHARD_IP') epPort = os.environ.get('SHARD_PORT') +dsVersion = os.environ.get('APP_VERSION') + '-' + os.environ.get('DS_VERSION_HASH') ipShard = epIP + ':' + epPort +add_annotations = {"ds-ver-hash": dsVersion} total_result = {} +k8s_host = os.environ["KUBERNETES_SERVICE_HOST"] +api_server = f'https://{k8s_host}' +pathCrt = '/run/secrets/kubernetes.io/serviceaccount/ca.crt' +pathToken = '/run/secrets/kubernetes.io/serviceaccount/token' +pathNS = '/run/secrets/kubernetes.io/serviceaccount/namespace' + +with open(pathToken, "r") as f_tok: + token = f_tok.read() + +with open(pathNS, "r") as f_ns: + ns = f_ns.read() + +configuration = client.Configuration() +configuration.ssl_ca_cert = pathCrt +configuration.host = api_server +configuration.verify_ssl = True 
+configuration.debug = False +configuration.api_key = {"authorization": "Bearer " + token} +client.Configuration.set_default(configuration) +v1 = client.CoreV1Api() def init_logger(name): logger = logging.getLogger(name) @@ -50,10 +73,10 @@ def get_redis_status(): ) rc.ping() except Exception as msg_redis: - logger_test_ds.error(f'Failed to check the availability of the Redis Standalone... {msg_redis}\n') + logger_endpoints_ds.error(f'Failed to check the availability of the Redis Standalone... {msg_redis}\n') total_result['CheckRedis'] = 'Failed' else: - logger_test_ds.info('Successful connection to Redis Standalone') + logger_endpoints_ds.info('Successful connection to Redis Standalone') return rc.ping() @@ -72,10 +95,10 @@ def get_redis_cluster_status(): ) rc.ping() except Exception as msg_redis: - logger_test_ds.error(f'Failed to check the availability of the Redis Cluster... {msg_redis}\n') + logger_endpoints_ds.error(f'Failed to check the availability of the Redis Cluster... {msg_redis}\n') total_result['CheckRedis'] = 'Failed' else: - logger_test_ds.info('Successful connection to Redis Cluster') + logger_endpoints_ds.info('Successful connection to Redis Cluster') return rc.ping() @@ -97,10 +120,10 @@ def get_redis_sentinel_status(): ) rc.ping() except Exception as msg_redis: - logger_test_ds.error(f'Failed to check the availability of the Redis Sentinel... {msg_redis}\n') + logger_endpoints_ds.error(f'Failed to check the availability of the Redis Sentinel... 
{msg_redis}\n') total_result['CheckRedis'] = 'Failed' else: - logger_test_ds.info('Successful connection to Redis Sentinel') + logger_endpoints_ds.info('Successful connection to Redis Sentinel') return rc.ping() @@ -109,35 +132,47 @@ def add_redis_key(): rc.set(shardKey, ipShard) rc.append(ipShard, f' {shardKey}') test_key = rc.get(shardKey).decode('utf-8') - logger_test_ds.info(f'Shard Key Endpoint: {shardKey} = {test_key}') except Exception as msg_check_redis: - logger_test_ds.error(f'Error when trying to write a ShardKey to Redis... {msg_check_redis}\n') + logger_endpoints_ds.error(f'Error when trying to write a ShardKey to Redis... {msg_check_redis}\n') total_result['CheckRedis'] = 'Failed' else: - logger_test_ds.info('The ShardKey was successfully recorded to Redis\n') + logger_endpoints_ds.info(f'ShardKey {shardKey} = {test_key} was successfully recorded to Redis\n') rc.close() +def patch_pod(): + try: + patch = v1.patch_namespaced_pod(shardKey, ns, {"metadata": {"annotations": add_annotations}}) + except Exception as msg_patch_pod: + logger_endpoints_ds.error(f'Error when adding an annotation to the Pod... 
{msg_patch_pod}') + total_result['PatchPod'] = 'Failed' + else: + logger_endpoints_ds.info(f'The {add_annotations} annotation has been successfully added to the Pod\n') + + def init_redis(): - logger_test_ds.info('Checking Redis availability...') + logger_endpoints_ds.info('Checking Redis availability...') if redisConnectorName == 'redis' and not os.environ.get('REDIS_CLUSTER_NODES'): if get_redis_status() is True: add_redis_key() + patch_pod() elif redisConnectorName == 'redis' and os.environ.get('REDIS_CLUSTER_NODES'): if get_redis_cluster_status() is True: add_redis_key() + patch_pod() elif redisConnectorName == 'ioredis': if get_redis_sentinel_status() is True: add_redis_key() + patch_pod() def total_status(): if 'Failed' in total_result.values(): - logger_test_ds.error('Recording of "ShardKey" in Redis failed') + logger_endpoints_ds.error('Recording of "ShardKey" in Redis failed') sys.exit(1) -init_logger('test') -logger_test_ds = logging.getLogger('test.ds') +init_logger('endpoints') +logger_endpoints_ds = logging.getLogger('endpoints.ds') init_redis() total_status() diff --git a/sources/scripts/balancer-lua.conf b/sources/scripts/balancer-lua.conf new file mode 100644 index 0000000..5c81e16 --- /dev/null +++ b/sources/scripts/balancer-lua.conf @@ -0,0 +1,216 @@ +access_by_lua_block { + local WOPISrc = ngx.var.arg_WOPISrc + local shardkey = ngx.var.arg_shardkey + local service_name = ngx.var.service_name + local redis = require "resty.redis" + local red = redis:new() + local cjson = require("cjson.safe") + local configuration = require("configuration") + local docs_balancer = require("docs_balancer") + local request_uri = ngx.var.request_uri + local requested_version = request_uri:match("/([%d%.%-]+%-[^/]+)/") + local service_port = {{ .Values.documentserver.proxy.containerPorts.http }} + + local function redis_del(key) + local ok, err = red:del(string.format("%s", key)) + if not ok then + ngx.say("failed to del: ", err) + return + end + end + + local 
function redis_get(wopi) + local user_data_response = red:get(string.format('%s', wopi)) + return user_data_response + end + + local function redis_set(wopi, endpoint) + local response = red:setnx(string.format('%s', wopi), endpoint) + if response == 1 then + print(string.format("DEBUG: --> New api key %s was set in Redis", wopi)) + return true + end + end + + local function redis_set_ipkey(wopi, endpoint) + local wopi_final = (string.format(" %s", wopi)) + local ok, err = red:append(string.format('%s', endpoint), wopi_final) + if not ok then + ngx.say("failed to set: ",err) + return + end + end + + local function redis_expire(wopi, expire) + local ok, err = red:expire(string.format('%s', wopi), expire) + if not ok then + ngx.say("failed to set ttl: ",err) + return + end + end + + local function get_endpoints(backends, upstream) + for _, new_backend in ipairs(backends) do + if new_backend.name == upstream then + local new_endpoints=(new_backend.endpoints) + return new_endpoints + end + end + end + + local function table_contains(tbl, p, x) + local found = false + for _, v in pairs(tbl) do + local endpoint_string = (string.format("%s:%s", v, p)) + if endpoint_string == x then + local found = true + return found + end + end + return found + end + + local function check_endpoint(endpoint) + local docs_upstream = ngx.var.proxy_upstream_name + local backends_data = configuration.get_backends_data() + local backends = cjson.decode(backends_data) + local endpoints = backends + local endpoints_table = {} + for _, endpoint in ipairs(endpoints) do + table.insert(endpoints_table, endpoint.address) + end + print(cjson.encode(endpoints_table)) + local result = table_contains(endpoints_table, service_port, endpoint) + return result + end + + local function get_docs_mode(wopi) + if string.match(wopi, "http://") or string.match(wopi, "https://") then + return "wopi" + else + return "api" + end + end + + local function get_api_arg() + if WOPISrc then + return WOPISrc + end + if 
shardkey then + return shardkey + end + end + + local function handle_api_key(arg) + if shardkey then + return shardkey + end + if WOPISrc then + local WOPIDecoded = (ngx.unescape_uri(arg)) + local WOPIkey = WOPIDecoded:gsub("%s+", "") + return WOPIkey + end + end + + local function init_redis_connection() + red:set_timeouts(1000, 1000, 1000) -- 1 sec + local ok, err = red:connect({{ .Values.connections.redisHost | quote }}, {{ .Values.connections.redisPort }}) + if not ok then + ngx.say("1: failed to connect: ",err) + return + end + + {{- if eq .Values.connections.redisNoPass false }} + local res, err = red:auth({{ include "ds.redis.pass" . | quote }}) + if not res then + ngx.say("failed to authenticate: ", err) + return + end + {{- end }} + end + + local function api_js_exist() + if string.match(request_uri, "api%.js$") then + print("DEBUG: api.js requested") + return true + else + return false + end + end + + local API_ARG = get_api_arg() + + if API_ARG then + local API_KEY = handle_api_key(API_ARG) + + -- init redis connection and then select the db index + init_redis_connection() + + {{- if ne .Values.documentserver.keysRedisDBNum "0" }} + red:select({{ .Values.documentserver.keysRedisDBNum }}) + {{- end }} + + local exist_endpoint = tostring(redis_get(API_KEY)) + print(exist_endpoint) + if exist_endpoint == 'userdata: NULL' then + local new_custom_endpoint = docs_balancer.balance_ep() + if redis_set(API_KEY, new_custom_endpoint) then + redis_set_ipkey(API_KEY, new_custom_endpoint) + redis_expire(API_KEY, {{ .Values.documentserver.keysExpireTime }}) + ngx.var.custom_endpoint = new_custom_endpoint + else + print("DEBUG: --> Looks like parallel request was made, get endpoint from Redis") + ngx.var.custom_endpoint = tostring(redis_get(API_KEY)) + end + else + local endpoint_found = check_endpoint(exist_endpoint) + print(endpoint_found) + if endpoint_found == false then + print(string.format("ENDPOINT WILL BE REMOVE:%s", exist_endpoint)) + local placeholder = 
tostring(red:get(string.format("del_%s", exist_endpoint))) + if placeholder == 'userdata: NULL' then + local default_expire = {{ .Values.documentserver.terminationGracePeriodSeconds }} + local placeholder_expire = default_expire + 10 + local set_placeholder = red:set(string.format("del_%s", exist_endpoint), "placeholder") + local set_placeholder_expire = red:expire(string.format("del_%s", exist_endpoint), placeholder_expire) + local keys = tostring(redis_get(exist_endpoint)) + red:init_pipeline() + for i in string.gmatch(keys, "%S+") do + red:expire(string.format('%s', i), default_expire) + end + local results, err = red:commit_pipeline() + if not results then + ngx.say("failed to commit the pipelined requests: ", err) + end + -- Set expire for endpoint key that consist all wopi that also will be removed after expire + local set_endpoint_expire = redis_expire(exist_endpoint, default_expire) + local set_wopi_expire = redis_expire(API_KEY, default_expire) + print("DEBUG: --> Keys remove process is started, send request to upstream") + ngx.var.custom_endpoint = exist_endpoint + else + print("DEBUG: --> Process that reshard keys already exist, send request to upstream") + ngx.var.custom_endpoint = exist_endpoint + end + else + print("DEGUB: --> Endpoint exist, just go forward...") + ngx.var.custom_endpoint = exist_endpoint + if WOPISrc then + redis_expire(API_KEY, {{ .Values.documentserver.keysExpireTime }}) + end + end +end +red:close() +end + +} + +set $docs_shardkey $arg_shardkey; +set $docs_wopisrc $arg_WOPISrc; + +if ($docs_shardkey) { + proxy_pass http://$custom_endpoint; +} + +if ($docs_wopisrc) { + proxy_pass http://$custom_endpoint; +} diff --git a/sources/scripts/remove_shardkey.py b/sources/scripts/remove_shardkey.py index e712703..4ee02b1 100644 --- a/sources/scripts/remove_shardkey.py +++ b/sources/scripts/remove_shardkey.py @@ -2,6 +2,7 @@ import sys import subprocess import logging +import time redisConnectorName = os.environ.get('REDIS_CONNECTOR_NAME') 
redisHost = os.environ.get('REDIS_SERVER_HOST') @@ -22,6 +23,9 @@ epPort = os.environ.get('SHARD_PORT') ipShard = epIP + ':' + epPort +grace_period = int(os.environ.get('TERMINATION_GRACE_PERIOD')) +grace_time = int(os.environ.get('TERMINATION_GRACE_TIME')) + total_result = {} @@ -51,10 +55,10 @@ def get_redis_status(): ) rc.ping() except Exception as msg_redis: - logger_test_ds.error('Failed to check the availability of the Redis Standalone... {}\n'.format(msg_redis)) + logger_endpoints_ds.error('Failed to check the availability of the Redis Standalone... {}\n'.format(msg_redis)) total_result['CheckRedis'] = 'Failed' else: - logger_test_ds.info('Successful connection to Redis Standalone') + logger_endpoints_ds.info('Successful connection to Redis Standalone') return rc.ping() @@ -73,10 +77,10 @@ def get_redis_cluster_status(): ) rc.ping() except Exception as msg_redis: - logger_test_ds.error('Failed to check the availability of the Redis Cluster... {}\n'.format(msg_redis)) + logger_endpoints_ds.error('Failed to check the availability of the Redis Cluster... {}\n'.format(msg_redis)) total_result['CheckRedis'] = 'Failed' else: - logger_test_ds.info('Successful connection to Redis Cluster') + logger_endpoints_ds.info('Successful connection to Redis Cluster') return rc.ping() @@ -98,10 +102,10 @@ def get_redis_sentinel_status(): ) rc.ping() except Exception as msg_redis: - logger_test_ds.error('Failed to check the availability of the Redis Sentinel... {}\n'.format(msg_redis)) + logger_endpoints_ds.error('Failed to check the availability of the Redis Sentinel... 
{}\n'.format(msg_redis)) total_result['CheckRedis'] = 'Failed' else: - logger_test_ds.info('Successful connection to Redis Sentinel') + logger_endpoints_ds.info('Successful connection to Redis Sentinel') return rc.ping() @@ -116,17 +120,17 @@ def clear_shard_key(): pipe.execute() rc.delete(ipShard) except Exception as msg_check_redis: - logger_test_ds.error('Error when trying to delete keys belonging to the {sk} shard from Redis... {em}\n'.format(sk=shardKey, em=msg_check_redis)) + logger_endpoints_ds.error('Error when trying to delete keys belonging to the {sk} shard from Redis... {em}\n'.format(sk=shardKey, em=msg_check_redis)) total_result['CheckRedis'] = 'Failed' else: - logger_test_ds.info('Keys belonging to {} have been successfully deleted from Redis\n'.format(shardKey)) - rc.close() + logger_endpoints_ds.info('Keys belonging to {} have been successfully deleted from Redis\n'.format(shardKey)) else: - logger_test_ds.info('Endpoint shard {} was not found in Redis\n'.format(shardKey)) + logger_endpoints_ds.info('Endpoint shard {} was not found in Redis\n'.format(shardKey)) + rc.close() def clear_redis(): - logger_test_ds.info('Checking Redis availability...') + logger_endpoints_ds.info('Checking Redis availability...') if redisConnectorName == 'redis' and not os.environ.get('REDIS_CLUSTER_NODES'): if get_redis_status() is True: clear_shard_key() @@ -138,24 +142,64 @@ def clear_redis(): clear_shard_key() +def get_connect_count(): + try: + connect_count = ["/bin/bash", "-c", "curl http://localhost:8000/internal/connections/edit -s"] + connect_count_process = subprocess.Popen(connect_count, stdout=subprocess.PIPE) + connect_count_result = int(connect_count_process.communicate()[0]) + total_result['GetConnectCount'] = 'Success' + if connect_count_result == 0: + return True + else: + return False + except Exception as msg_get_connect_count: + logger_endpoints_ds.error('Failed when trying to get the number of connections... 
{}\n'.format(msg_get_connect_count)) + total_result['GetConnectCount'] = 'Failed' + return False + + def shutdown_shard(): - shutdown_cmd = ["/bin/bash", "-c", "curl http://localhost:8000/internal/cluster/inactive -X PUT -s"] - process = subprocess.Popen(shutdown_cmd, stdout=subprocess.PIPE) - shutdown_result = process.communicate()[0].decode('utf-8') - if shutdown_result == "true": - clear_redis() + try: + shutdown_cmd = ["/bin/bash", "-c", "curl http://localhost:8000/internal/cluster/inactive -X PUT -s"] + process = subprocess.Popen(shutdown_cmd, stdout=subprocess.PIPE) + shutdown_result = process.communicate()[0].decode('utf-8') + except Exception as msg_url: + logger_endpoints_ds.error('Failed to check the availability of the DocumentServer... {}\n'.format(msg_url)) + total_result['ShutdownDS'] = 'Failed' else: - logger_test_ds.error('The {} shard could not be disabled'.format(shardKey)) - sys.exit(1) + if shutdown_result == "true": + clear_redis() + build_status = open('/scripts/results/status.txt', 'w') + build_status.write('Completed') + build_status.close() + else: + logger_endpoints_ds.error('The {} shard could not be disabled'.format(shardKey)) + sys.exit(1) + + +def prepare_for_shutdown_shard(): + current_grace_period = grace_period + current_grace_time = grace_time + while True: + if get_connect_count() is True: + shutdown_shard() + break + else: + if current_grace_period < current_grace_time: + shutdown_shard() + break + else: + current_grace_period -= 1 + time.sleep(1) def total_status(): if 'Failed' in total_result.values(): - logger_test_ds.error('Could not clear Redis of keys belonging to {}'.format(shardKey)) + logger_endpoints_ds.error('Could not clear Redis of keys belonging to {}'.format(shardKey)) sys.exit(1) -init_logger('test') -logger_test_ds = logging.getLogger('test.ds') -shutdown_shard() +init_logger('endpoints') +logger_endpoints_ds = logging.getLogger('endpoints.ds') +prepare_for_shutdown_shard() total_status() diff --git 
a/templates/RBAC/balancer-role.yaml b/templates/RBAC/balancer-role.yaml new file mode 100644 index 0000000..6b69ace --- /dev/null +++ b/templates/RBAC/balancer-role.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: ds-balancer-role + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "2" + {{- if .Values.commonAnnotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: +- apiGroups: [""] + resources: ["endpoints", "pods"] + verbs: ["get", "watch", "list"] diff --git a/templates/RBAC/balancer-rolebinding.yaml b/templates/RBAC/balancer-rolebinding.yaml new file mode 100644 index 0000000..8d6acc7 --- /dev/null +++ b/templates/RBAC/balancer-rolebinding.yaml @@ -0,0 +1,23 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ds-balancer-rolebinding + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "3" + {{- if .Values.commonAnnotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: +- kind: ServiceAccount + name: ds-balancer-sa + namespace: {{ include "ds.namespace" . 
| quote }} +roleRef: + kind: Role + name: ds-balancer-role + apiGroup: rbac.authorization.k8s.io diff --git a/templates/RBAC/docs-role.yaml b/templates/RBAC/docs-role.yaml new file mode 100644 index 0000000..d551d4d --- /dev/null +++ b/templates/RBAC/docs-role.yaml @@ -0,0 +1,21 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: ds-docs-role + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "2" + {{- if .Values.commonAnnotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["patch"] +{{- end }} diff --git a/templates/RBAC/docs-rolebinding.yaml b/templates/RBAC/docs-rolebinding.yaml new file mode 100644 index 0000000..c7a4a24 --- /dev/null +++ b/templates/RBAC/docs-rolebinding.yaml @@ -0,0 +1,25 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ds-docs-rolebinding + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "3" + {{- if .Values.commonAnnotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: +- kind: ServiceAccount + name: ds-docs-sa + namespace: {{ include "ds.namespace" . 
| quote }} +roleRef: + kind: Role + name: ds-docs-role + apiGroup: rbac.authorization.k8s.io +{{- end }} diff --git a/templates/_helpers.tpl b/templates/_helpers.tpl index bdf1b0e..dc676d7 100644 --- a/templates/_helpers.tpl +++ b/templates/_helpers.tpl @@ -88,7 +88,7 @@ Get the PVC name {{- end -}} {{/* -Return true if a pvc object should be created +Return true if a pvc object for ds-service-files should be created */}} {{- define "ds.pvc.create" -}} {{- if empty .Values.persistence.existingClaim }} @@ -140,18 +140,20 @@ Return true if a secret object should be created for jwt Get the service name for ds */}} {{- define "ds.svc.name" -}} -{{- if .Values.service.existing -}} - {{- printf "%s" (tpl .Values.service.existing $) -}} +{{- if not (empty .Values.customBalancer.service.existing) }} + {{- printf "%s" (tpl .Values.customBalancer.service.existing $) -}} +{{- else if empty .Values.customBalancer.service.existing }} + {{- printf "docs-balancer" -}} {{- else }} {{- printf "documentserver" -}} {{- end -}} {{- end -}} {{/* -Return true if a service object should be created for ds +Return true if a balancer service object should be created for ds */}} -{{- define "ds.svc.create" -}} -{{- if empty .Values.service.existing }} +{{- define "balancer.svc.create" -}} +{{- if empty .Values.customBalancer.service.existing }} {{- true -}} {{- end -}} {{- end -}} diff --git a/templates/configmaps/balancer-lua.yaml b/templates/configmaps/balancer-lua.yaml index d568353..e270fe3 100644 --- a/templates/configmaps/balancer-lua.yaml +++ b/templates/configmaps/balancer-lua.yaml @@ -1,402 +1,17 @@ -# All content from balancer.lua config -# was taken from the main Kubernetes repository and modified by ONLYOFFICE -# for more information please check NOTICE by link below -# https://github.com/ONLYOFFICE/Kubernetes-Docs-Shards/blob/master/NOTICE - -{{- if index .Values "ingress-nginx" "enabled" }} apiVersion: v1 kind: ConfigMap metadata: - name: balancer-lua - namespace: {{ 
.Values.documentserver.ingressCustomConfigMapsNamespace }} + name: balancer-lua-config + namespace: {{ include "ds.namespace" . | quote }} {{- if .Values.commonLabels }} labels: {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} {{- end }} annotations: - helm.sh/resource-policy: keep - helm.sh/hook: pre-install - helm.sh/hook-weight: "1" + helm.sh/hook: pre-install, pre-upgrade + helm.sh/hook-weight: "-1" {{- if .Values.commonAnnotations }} {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} data: - balancer.lua: | - local ngx_balancer = require("ngx.balancer") - local cjson = require("cjson.safe") - local util = require("util") - local dns_lookup = require("util.dns").lookup - local configuration = require("configuration") - local round_robin = require("balancer.round_robin") - local chash = require("balancer.chash") - local chashsubset = require("balancer.chashsubset") - local sticky_balanced = require("balancer.sticky_balanced") - local sticky_persistent = require("balancer.sticky_persistent") - local ewma = require("balancer.ewma") - local string = string - local ipairs = ipairs - local table = table - local getmetatable = getmetatable - local tostring = tostring - local pairs = pairs - local math = math - local ngx = ngx - - -- measured in seconds - -- for an Nginx worker to pick up the new list of upstream peers - -- it will take + BACKENDS_SYNC_INTERVAL - local BACKENDS_SYNC_INTERVAL = 1 - - local DEFAULT_LB_ALG = "round_robin" - local IMPLEMENTATIONS = { - round_robin = round_robin, - chash = chash, - chashsubset = chashsubset, - sticky_balanced = sticky_balanced, - sticky_persistent = sticky_persistent, - ewma = ewma, - } - - local PROHIBITED_LOCALHOST_PORT = configuration.prohibited_localhost_port or '10246' - local PROHIBITED_PEER_PATTERN = "^127.*:" .. PROHIBITED_LOCALHOST_PORT .. 
"$" - - local _M = {} - local balancers = {} - local backends_with_external_name = {} - local backends_last_synced_at = 0 - - local function get_implementation(backend) - local name = backend["load-balance"] or DEFAULT_LB_ALG - - if backend["sessionAffinityConfig"] and - backend["sessionAffinityConfig"]["name"] == "cookie" then - if backend["sessionAffinityConfig"]["mode"] == "persistent" then - name = "sticky_persistent" - else - name = "sticky_balanced" - end - - elseif backend["upstreamHashByConfig"] and - backend["upstreamHashByConfig"]["upstream-hash-by"] then - if backend["upstreamHashByConfig"]["upstream-hash-by-subset"] then - name = "chashsubset" - else - name = "chash" - end - end - - local implementation = IMPLEMENTATIONS[name] - if not implementation then - ngx.log(ngx.WARN, backend["load-balance"], " is not supported, ", - "falling back to ", DEFAULT_LB_ALG) - implementation = IMPLEMENTATIONS[DEFAULT_LB_ALG] - end - - return implementation - end - - local function resolve_external_names(original_backend) - local backend = util.deepcopy(original_backend) - local endpoints = {} - for _, endpoint in ipairs(backend.endpoints) do - local ips = dns_lookup(endpoint.address) - for _, ip in ipairs(ips) do - table.insert(endpoints, { address = ip, port = endpoint.port }) - end - end - backend.endpoints = endpoints - return backend - end - - local function format_ipv6_endpoints(endpoints) - local formatted_endpoints = {} - for _, endpoint in ipairs(endpoints) do - local formatted_endpoint = endpoint - if not endpoint.address:match("^%d+.%d+.%d+.%d+$") then - formatted_endpoint.address = string.format("[%s]", endpoint.address) - end - table.insert(formatted_endpoints, formatted_endpoint) - end - return formatted_endpoints - end - - local function is_backend_with_external_name(backend) - local serv_type = backend.service and backend.service.spec - and backend.service.spec["type"] - return serv_type == "ExternalName" - end - - local function sync_backend(backend) - 
if not backend.endpoints or #backend.endpoints == 0 then - balancers[backend.name] = nil - return - end - - if is_backend_with_external_name(backend) then - backend = resolve_external_names(backend) - end - - backend.endpoints = format_ipv6_endpoints(backend.endpoints) - - local implementation = get_implementation(backend) - local balancer = balancers[backend.name] - - if not balancer then - balancers[backend.name] = implementation:new(backend) - return - end - - -- every implementation is the metatable of its instances (see .new(...) functions) - -- here we check if `balancer` is the instance of `implementation` - -- if it is not then we deduce LB algorithm has changed for the backend - if getmetatable(balancer) ~= implementation then - ngx.log(ngx.INFO, - string.format("LB algorithm changed from %s to %s, resetting the instance", - balancer.name, implementation.name)) - balancers[backend.name] = implementation:new(backend) - return - end - - balancer:sync(backend) - end - - local function sync_backends_with_external_name() - for _, backend_with_external_name in pairs(backends_with_external_name) do - sync_backend(backend_with_external_name) - end - end - - local function sync_backends() - local raw_backends_last_synced_at = configuration.get_raw_backends_last_synced_at() - if raw_backends_last_synced_at <= backends_last_synced_at then - return - end - - local backends_data = configuration.get_backends_data() - if not backends_data then - balancers = {} - return - end - - local new_backends, err = cjson.decode(backends_data) - if not new_backends then - ngx.log(ngx.ERR, "could not parse backends data: ", err) - return - end - - local balancers_to_keep = {} - for _, new_backend in ipairs(new_backends) do - if is_backend_with_external_name(new_backend) then - local backend_with_external_name = util.deepcopy(new_backend) - backends_with_external_name[backend_with_external_name.name] = backend_with_external_name - else - sync_backend(new_backend) - end - 
balancers_to_keep[new_backend.name] = true - end - - for backend_name, _ in pairs(balancers) do - if not balancers_to_keep[backend_name] then - balancers[backend_name] = nil - backends_with_external_name[backend_name] = nil - end - end - backends_last_synced_at = raw_backends_last_synced_at - end - - local function route_to_alternative_balancer(balancer) - if balancer.is_affinitized(balancer) then - -- If request is already affinitized to a primary balancer, keep the primary balancer. - return false - end - - if not balancer.alternative_backends then - return false - end - - -- TODO: support traffic shaping for n > 1 alternative backends - local backend_name = balancer.alternative_backends[1] - if not backend_name then - ngx.log(ngx.ERR, "empty alternative backend") - return false - end - - local alternative_balancer = balancers[backend_name] - if not alternative_balancer then - ngx.log(ngx.ERR, "no alternative balancer for backend: ", - tostring(backend_name)) - return false - end - - if alternative_balancer.is_affinitized(alternative_balancer) then - -- If request is affinitized to an alternative balancer, instruct caller to - -- switch to alternative. - return true - end - - -- Use traffic shaping policy, if request didn't have affinity set. - local traffic_shaping_policy = alternative_balancer.traffic_shaping_policy - if not traffic_shaping_policy then - ngx.log(ngx.ERR, "traffic shaping policy is not set for balancer ", - "of backend: ", tostring(backend_name)) - return false - end - - local target_header = util.replace_special_char(traffic_shaping_policy.header, - "-", "_") - local header = ngx.var["http_" .. 
target_header] - if header then - if traffic_shaping_policy.headerValue - and #traffic_shaping_policy.headerValue > 0 then - if traffic_shaping_policy.headerValue == header then - return true - end - elseif traffic_shaping_policy.headerPattern - and #traffic_shaping_policy.headerPattern > 0 then - local m, err = ngx.re.match(header, traffic_shaping_policy.headerPattern) - if m then - return true - elseif err then - ngx.log(ngx.ERR, "error when matching canary-by-header-pattern: '", - traffic_shaping_policy.headerPattern, "', error: ", err) - return false - end - elseif header == "always" then - return true - elseif header == "never" then - return false - end - end - - local target_cookie = traffic_shaping_policy.cookie - local cookie = ngx.var["cookie_" .. target_cookie] - if cookie then - if cookie == "always" then - return true - elseif cookie == "never" then - return false - end - end - - local weightTotal = 100 - if traffic_shaping_policy.weightTotal ~= nil and traffic_shaping_policy.weightTotal > 100 then - weightTotal = traffic_shaping_policy.weightTotal - end - if math.random(weightTotal) <= traffic_shaping_policy.weight then - return true - end - - return false - end - - local function get_balancer_by_upstream_name(upstream_name) - return balancers[upstream_name] - end - - local function get_balancer() - if ngx.ctx.balancer then - return ngx.ctx.balancer - end - - local backend_name = ngx.var.proxy_upstream_name - - local balancer = balancers[backend_name] - if not balancer then - return nil - end - - if route_to_alternative_balancer(balancer) then - local alternative_backend_name = balancer.alternative_backends[1] - ngx.var.proxy_alternative_upstream_name = alternative_backend_name - - balancer = balancers[alternative_backend_name] - end - - ngx.ctx.balancer = balancer - - return balancer - end - - function _M.init_worker() - -- when worker starts, sync non ExternalName backends without delay - sync_backends() - -- we call sync_backends_with_external_name 
in timer because for endpoints that require - -- DNS resolution it needs to use socket which is not available in - -- init_worker phase - local ok, err = ngx.timer.at(0, sync_backends_with_external_name) - if not ok then - ngx.log(ngx.ERR, "failed to create timer: ", err) - end - - ok, err = ngx.timer.every(BACKENDS_SYNC_INTERVAL, sync_backends) - if not ok then - ngx.log(ngx.ERR, "error when setting up timer.every for sync_backends: ", err) - end - ok, err = ngx.timer.every(BACKENDS_SYNC_INTERVAL, sync_backends_with_external_name) - if not ok then - ngx.log(ngx.ERR, "error when setting up timer.every for sync_backends_with_external_name: ", - err) - end - end - - function _M.rewrite() - local balancer = get_balancer() - if not balancer then - ngx.status = ngx.HTTP_SERVICE_UNAVAILABLE - return ngx.exit(ngx.status) - end - end - - function _M.balance() - local balancer = get_balancer() - if not balancer then - return - end - - local peer = balancer:balance() - if not peer then - ngx.log(ngx.WARN, "no peer was returned, balancer: " .. 
balancer.name) - return - end - - if peer:match(PROHIBITED_PEER_PATTERN) then - ngx.log(ngx.ERR, "attempted to proxy to self, balancer: ", balancer.name, ", peer: ", peer) - return - end - - ngx_balancer.set_more_tries(1) - if (ngx.var.service_name == {{ default "documentserver" .Values.service.existing | quote }} and (ngx.var.arg_WOPISrc or ngx.var.arg_shardkey)) then - return peer - else - local ok, err = ngx_balancer.set_current_peer(peer) - if not ok then - ngx.log(ngx.ERR, "error while setting current upstream peer ", peer, - ": ", err) - end - end - end - - function _M.log() - local balancer = get_balancer() - if not balancer then - return - end - - if not balancer.after_balance then - return - end - - balancer:after_balance() - end - - setmetatable(_M, {__index = { - get_implementation = get_implementation, - sync_backend = sync_backend, - route_to_alternative_balancer = route_to_alternative_balancer, - get_balancer = get_balancer, - get_balancer_by_upstream_name = get_balancer_by_upstream_name, - }}) - - return _M -{{- end }} +{{ (tpl (.Files.Glob "sources/scripts/balancer-lua.conf").AsConfig . ) | indent 2 }} diff --git a/templates/configmaps/balancer-snippet.yaml b/templates/configmaps/balancer-snippet.yaml deleted file mode 100644 index 1e27b90..0000000 --- a/templates/configmaps/balancer-snippet.yaml +++ /dev/null @@ -1,232 +0,0 @@ -{{- if index .Values "ingress-nginx" "enabled" }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: balancer-snippet - namespace: {{ .Values.documentserver.ingressCustomConfigMapsNamespace }} - {{- if .Values.commonLabels }} - labels: - {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} - {{- end }} - annotations: - helm.sh/resource-policy: keep - helm.sh/hook: pre-install - helm.sh/hook-weight: "1" - {{- if .Values.commonAnnotations }} - {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -data: - custom_balancer.conf: | - access_by_lua_block { - if ngx.var.service_name == {{ default "documentserver" .Values.service.existing | quote }} then - local WOPISrc = ngx.var.arg_WOPISrc - local shardkey = ngx.var.arg_shardkey - local service_name = ngx.var.service_name - local redis = require "resty.redis" - local red = redis:new() - local cjson = require("cjson.safe") - - local function redis_del(key) - local ok, err = red:del(string.format("%s", key)) - if not ok then - ngx.say("failed to del: ", err) - return - end - end - - local function redis_get(wopi) - local user_data_response = red:get(string.format('%s', wopi)) - return user_data_response - end - - local function redis_set(wopi, endpoint) - local response = red:setnx(string.format('%s', wopi), endpoint) - if response == 1 then - print(string.format("DEBUG: --> New api key %s was set in Redis", wopi)) - return true - end - end - - local function redis_set_ipkey(wopi, endpoint) - -- add spaces before add value in key - -- keys should be like " wopi1 wopi2 wopi3" - -- if dont make the space it will be "wopi1wopi2wopi3" - - local wopi_final = (string.format(" %s", wopi)) - local ok, err = red:append(string.format('%s', endpoint), wopi_final) - if not ok then - ngx.say("failed to set: ",err) - return - end - end - - local function redis_expire(wopi, expire) - local ok, err = red:expire(string.format('%s', wopi), expire) - if not ok then - ngx.say("failed to set ttl: ",err) - return - end - end - - local function get_endpoints(backends, upstream) - for _, new_backend in ipairs(backends) do - if new_backend.name == upstream then - local new_endpoints=(new_backend.endpoints) - return new_endpoints - end 
- end - end - - local function table_contains(tbl, p, x) - local found = false - for _, v in pairs(tbl) do - local endpoint_string = (string.format("%s:%s", v, p)) - if endpoint_string == x then - local found = true - return found - end - end - return found - end - - local function check_endpoint(endpoint) - local configuration = require("configuration") - local docs_upstream = ngx.var.proxy_upstream_name - local service_port = ngx.var.service_port - local backends_data = configuration.get_backends_data() - local backends = cjson.decode(backends_data) - local endpoints = get_endpoints(backends, docs_upstream) - local endpoints_table = {} - - for _, endpoint in ipairs(endpoints) do - table.insert(endpoints_table, endpoint.address) - end - print(cjson.encode(endpoints_table)) - - local result = table_contains(endpoints_table, service_port, endpoint) - - return result - end - - local function get_docs_mode(wopi) - if string.match(wopi, "http://") or string.match(wopi, "https://") then - return "wopi" - else - return "api" - end - end - - local function get_api_arg() - if WOPISrc then - return WOPISrc - end - if shardkey then - return shardkey - end - end - - local function handle_api_key(arg) - if shardkey then - return shardkey - end - if WOPISrc then - local WOPIDecoded = (ngx.unescape_uri(arg)) - local WOPIkey = WOPIDecoded:gsub("%s+", "") - return WOPIkey - end - end - - local API_ARG = get_api_arg() - - if API_ARG then - local API_KEY = handle_api_key(API_ARG) - red:set_timeouts(1000, 1000, 1000) -- 1 sec - local ok, err = red:connect({{ .Values.connections.redisHost | quote }}, {{ .Values.connections.redisPort }}) - if not ok then - ngx.say("1: failed to connect: ",err) - return - end - - {{- if eq .Values.connections.redisNoPass false }} - local res, err = red:auth({{ include "ds.redis.pass" . 
| quote }}) - if not res then - ngx.say("failed to authenticate: ", err) - return - end - {{- end }} - - {{- if ne .Values.documentserver.keysRedisDBNum "0" }} - red:select({{ .Values.documentserver.keysRedisDBNum }}) - {{- end }} - - local exist_endpoint = tostring(redis_get(API_KEY)) - print(exist_endpoint) - if exist_endpoint == 'userdata: NULL' then - local new_custom_endpoint = balancer.balance() - if redis_set(API_KEY, new_custom_endpoint) then - redis_set_ipkey(API_KEY, new_custom_endpoint) - redis_expire(API_KEY, {{ .Values.documentserver.keysExpireTime }}) - ngx.var.custom_endpoint = new_custom_endpoint - else - print("DEBUG: --> Looks like parallel request was made, get endpoint from Redis") - ngx.var.custom_endpoint = tostring(redis_get(API_KEY)) - end - else - local endpoint_found = check_endpoint(exist_endpoint) - print(endpoint_found) - if endpoint_found == false then - print(string.format("ENDPOINT WILL BE REMOVE:%s", exist_endpoint)) - local placeholder = tostring(red:get(string.format("del_%s", exist_endpoint))) - if placeholder == 'userdata: NULL' then - local default_expire = {{ .Values.documentserver.terminationGracePeriodSeconds }} - local placeholder_expire = default_expire + 10 - local set_placeholder = red:set(string.format("del_%s", exist_endpoint), "placeholder") - local set_placeholder_expire = red:expire(string.format("del_%s", exist_endpoint), placeholder_expire) - local keys = tostring(redis_get(exist_endpoint)) - red:init_pipeline() - for i in string.gmatch(keys, "%S+") do - red:expire(string.format('%s', i), default_expire) - end - local results, err = red:commit_pipeline() - if not results then - ngx.say("failed to commit the pipelined requests: ", err) - end - -- Set expire for endpoint key that consist all wopi that also will be removed after expire - local set_endpoint_expire = redis_expire(exist_endpoint, default_expire) - local set_wopi_expire = redis_expire(API_KEY, default_expire) - print("DEBUG: --> Keys remove process is 
started, send request to upstream") - ngx.var.custom_endpoint = exist_endpoint - else - print("DEBUG: --> Process that reshard keys already exist, send request to upstream") - ngx.var.custom_endpoint = exist_endpoint - end - else - print("DEGUB: --> Endpoint exist, just go forward...") - ngx.var.custom_endpoint = exist_endpoint - if WOPISrc then - redis_expire(API_KEY, {{ .Values.documentserver.keysExpireTime }}) - end - end - end - end - print(ngx.var.custom_endpoint) - red:close() - end - } - - if ($service_name = {{ default "documentserver" .Values.service.existing }}) { - set $docs_shardkey $arg_shardkey; - } - - if ($service_name = {{ default "documentserver" .Values.service.existing }}) { - set $docs_wopisrc $arg_WOPISrc; - } - - if ($docs_shardkey) { - proxy_pass http://$custom_endpoint; - } - - if ($docs_wopisrc) { - proxy_pass http://$custom_endpoint; - } -{{- end }} diff --git a/templates/configmaps/docs-balancer.yaml b/templates/configmaps/docs-balancer.yaml new file mode 100644 index 0000000..b888814 --- /dev/null +++ b/templates/configmaps/docs-balancer.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: docs-balancer + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} + {{- end }} + annotations: + helm.sh/hook: pre-install, pre-upgrade + helm.sh/hook-weight: "-1" + {{- if .Values.commonAnnotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + DS_EP_NAME: "documentserver" + BALANCER_WORKER_CONNECTIONS: {{ .Values.customBalancer.workerConnections | quote }} + BALANCER_WORKER_PROCESSES: {{ .Values.customBalancer.workerProcesses | quote }} + DS_POD_LABEL: "app=documentserver" + SHARD_PORT: {{ .Values.documentserver.proxy.containerPorts.http | quote }} diff --git a/templates/configmaps/pre-stop.yaml b/templates/configmaps/pre-stop.yaml new file mode 100644 index 0000000..90177da --- /dev/null +++ b/templates/configmaps/pre-stop.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: pre-stop + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + pre_stop.sh: |- + #!/bin/sh + until cat /scripts/results/status.txt + do + echo "waiting for the build to complete" + sleep 5 + done diff --git a/templates/deployments/docs-balancer.yaml b/templates/deployments/docs-balancer.yaml new file mode 100644 index 0000000..0563e59 --- /dev/null +++ b/templates/deployments/docs-balancer.yaml @@ -0,0 +1,119 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: docs-balancer + namespace: {{ include "ds.namespace" . | quote }} + labels: + app: docs-balancer + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.customBalancer.annotations }} + {{- $dictValue := default .Values.commonAnnotations .Values.customBalancer.annotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" $dictValue "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.customBalancer.autoscaling.enabled }} + replicas: {{ .Values.customBalancer.replicas }} + {{- end }} + selector: + matchLabels: + app: docs-balancer + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . | trim | nindent 6 }} + {{- end }} + {{- if .Values.customBalancer.updateStrategy }} + strategy: {{- include "ds.update.strategyType" .Values.customBalancer.updateStrategy }} + {{- end }} + template: + metadata: + labels: + app: docs-balancer + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . | trim | nindent 8 }} + {{- end }} + {{- if .Values.customBalancer.podAnnotations }} + annotations: + {{- range $key, $value := .Values.customBalancer.podAnnotations }} + {{ $key }}: {{ tpl $value $ }} + {{- end }} + {{- end }} + spec: + serviceAccountName: ds-balancer-sa + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + affinity: + podAntiAffinity: + {{- if eq .Values.podAntiAffinity.type "soft" }} + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - docs-balancer + topologyKey: {{ .Values.podAntiAffinity.topologyKey }} + weight: {{ .Values.podAntiAffinity.weight }} + {{- else if eq .Values.podAntiAffinity.type "hard" }} + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - docs-balancer + topologyKey: {{ .Values.podAntiAffinity.topologyKey }} + {{- end }} + {{- with 
.Values.customBalancer.customPodAntiAffinity }} + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.customBalancer.podAffinity }} + podAffinity: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.customBalancer.nodeAffinity }} + nodeAffinity: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- if or .Values.nodeSelector .Values.customBalancer.nodeSelector }} + nodeSelector: {{ toYaml (default .Values.nodeSelector .Values.customBalancer.nodeSelector) | nindent 8 }} + {{- end }} + {{- if or .Values.tolerations .Values.customBalancer.tolerations }} + tolerations: {{ toYaml (default .Values.tolerations .Values.customBalancer.tolerations) | nindent 8 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.customBalancer.terminationGracePeriodSeconds }} + containers: + - name: docs-balancer + image: {{ .Values.customBalancer.image.repository }}:{{ .Values.customBalancer.image.tag }} + imagePullPolicy: {{ .Values.customBalancer.image.pullPolicy }} + {{- if .Values.customBalancer.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.customBalancer.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + ports: + - containerPort: {{ .Values.customBalancer.containerPorts.http }} + resources: {{ toYaml .Values.customBalancer.resources | nindent 12 }} + {{- if .Values.customBalancer.startupProbe.enabled }} + startupProbe: {{- omit .Values.customBalancer.startupProbe "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.customBalancer.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.customBalancer.readinessProbe "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.customBalancer.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.customBalancer.livenessProbe "enabled" | toYaml | nindent 12 }} + {{- end }} + envFrom: + - configMapRef: + name: docs-balancer + 
volumeMounts: + - name: balancer-lua-config + mountPath: /etc/nginx/mnt_config + volumes: + - name: balancer-lua-config + configMap: + name: balancer-lua-config diff --git a/templates/deployments/documentserver.yaml b/templates/deployments/documentserver.yaml index 045734d..1f38655 100644 --- a/templates/deployments/documentserver.yaml +++ b/templates/deployments/documentserver.yaml @@ -114,6 +114,8 @@ spec: fieldPath: status.podIP - name: SHARD_PORT value: {{ .Values.documentserver.proxy.containerPorts.http | quote }} + - name: APP_VERSION + value: {{ regexFind "[0-9.]+" .Values.documentserver.docservice.image.tag | quote }} envFrom: - configMapRef: name: documentserver @@ -135,7 +137,7 @@ spec: lifecycle: preStop: exec: - command: ["/bin/sh", "-c", "sleep {{ .Values.documentserver.terminationGracePeriodSeconds }}"] + command: ["/bin/sh", "-c", "/scripts/pre_stop.sh"] ports: - containerPort: {{ .Values.documentserver.proxy.containerPorts.http }} {{- if .Values.documentserver.proxy.startupProbe.enabled }} @@ -148,7 +150,6 @@ spec: livenessProbe: {{- omit .Values.documentserver.proxy.livenessProbe "enabled" | toYaml | nindent 12 }} {{- end }} resources: {{ toYaml .Values.documentserver.proxy.resources | nindent 12 }} - {{- if or .Values.documentserver.proxy.infoAllowedIP .Values.documentserver.proxy.infoAllowedUser }} env: - name: DEFAULT_SHARD_KEY valueFrom: @@ -167,7 +168,6 @@ spec: name: {{ template "ds.info.secretName" . 
}} key: {{ .Values.documentserver.proxy.infoAllowedSecretKeyName }} {{- end }} - {{- end }} envFrom: - configMapRef: name: documentserver @@ -176,6 +176,11 @@ spec: mountPath: /var/lib/{{ .Values.product.name }}/documentserver/App_Data/cache/files/{{ .Values.documentserver.docservice.image.tag }} - name: ds-service-files mountPath: /var/lib/{{ .Values.product.name }}/documentserver/App_Data/cache/files + - name: pre-stop-scripts + mountPath: /scripts/pre_stop.sh + subPath: pre_stop.sh + - name: pre-stop-status + mountPath: /scripts/results {{- if .Values.extraThemes.configMap }} - name: custom-themes mountPath: /var/www/{{ .Values.product.name }}/documentserver/web-apps/apps/common/main/resources/themes/{{ .Values.extraThemes.filename }} @@ -220,13 +225,19 @@ spec: fieldPath: status.podIP - name: SHARD_PORT value: {{ .Values.documentserver.proxy.containerPorts.http | quote }} + - name: TERMINATION_GRACE_PERIOD + value: {{ .Values.documentserver.terminationGracePeriodSeconds | quote }} - name: REDIS_SERVER_DB_KEYS_NUM value: {{ .Values.documentserver.keysRedisDBNum | quote }} + - name: REDIS_SERVER_DB_DS_VERSION + value: {{ .Values.documentserver.dsVersionHashRedisDBNum | quote }} - name: REDIS_SERVER_PWD valueFrom: secretKeyRef: name: {{ template "ds.redis.secretName" . }} key: {{ .Values.connections.redisSecretKeyName }} + - name: TERMINATION_GRACE_TIME + value: {{ .Values.documentserver.terminationGraceTimeSeconds | quote }} envFrom: - secretRef: name: {{ template "ds.jwt.secretName" . 
}} @@ -245,6 +256,8 @@ spec: {{- if not .Values.license.existingClaim }} readOnly: true {{- end }} + - name: pre-stop-status + mountPath: /scripts/results {{- if .Values.extraConf.configMap }} - name: custom-file mountPath: /etc/{{ .Values.product.name }}/documentserver/{{ .Values.extraConf.filename }} @@ -272,7 +285,7 @@ spec: lifecycle: preStop: exec: - command: ["/bin/sh", "-c", "sleep {{ $context.Values.documentserver.terminationGracePeriodSeconds }}"] + command: ["/bin/sh", "-c", "/scripts/pre_stop.sh"] env: - name: DEFAULT_SHARD_KEY valueFrom: @@ -301,6 +314,11 @@ spec: {{- if not $context.Values.license.existingClaim }} readOnly: true {{- end }} + - name: pre-stop-scripts + mountPath: /scripts/pre_stop.sh + subPath: pre_stop.sh + - name: pre-stop-status + mountPath: /scripts/results {{- if $context.Values.extraConf.configMap }} - name: custom-file mountPath: /etc/{{ $context.Values.product.name }}/documentserver/{{ $context.Values.extraConf.filename }} @@ -323,7 +341,7 @@ spec: lifecycle: preStop: exec: - command: ["/bin/sh", "-c", "sleep {{ .Values.documentserver.terminationGracePeriodSeconds }}"] + command: ["/bin/sh", "-c", "/scripts/pre_stop.sh"] ports: - containerPort: {{ .Values.documentserver.postgresql.containerPorts.tcp }} env: @@ -343,6 +361,11 @@ spec: - name: createdb mountPath: /docker-entrypoint-initdb.d/createdb.sql subPath: createdb.sql + - name: pre-stop-scripts + mountPath: /scripts/pre_stop.sh + subPath: pre_stop.sh + - name: pre-stop-status + mountPath: /scripts/results - name: rabbitmq image: {{ .Values.documentserver.rabbitmq.image.repository }}:{{ .Values.documentserver.rabbitmq.image.tag }} @@ -354,12 +377,20 @@ spec: lifecycle: preStop: exec: - command: ["/bin/sh", "-c", "sleep {{ .Values.documentserver.terminationGracePeriodSeconds }}"] + command: ["/bin/sh", "-c", "/scripts/pre_stop.sh"] ports: - containerPort: {{ .Values.documentserver.rabbitmq.containerPorts.amqp }} + volumeMounts: + - name: pre-stop-scripts + mountPath: 
/scripts/pre_stop.sh + subPath: pre_stop.sh + - name: pre-stop-status + mountPath: /scripts/results volumes: - name: ds-files emptyDir: {} + - name: pre-stop-status + emptyDir: {} - name: ds-service-files persistentVolumeClaim: claimName: {{ template "ds.pvc.name" . }} @@ -400,3 +431,7 @@ spec: configMap: name: remove-shardkey defaultMode: 0755 + - name: pre-stop-scripts + configMap: + name: pre-stop + defaultMode: 0755 diff --git a/templates/hpa/docs-balancer.yaml b/templates/hpa/docs-balancer.yaml new file mode 100644 index 0000000..b24dd56 --- /dev/null +++ b/templates/hpa/docs-balancer.yaml @@ -0,0 +1,46 @@ +{{- if .Values.customBalancer.autoscaling.enabled }} +apiVersion: {{ .Capabilities.APIVersions.Has "autoscaling/v2" | ternary "autoscaling/v2" "autoscaling/v2beta2" }} +kind: HorizontalPodAutoscaler +metadata: + name: balancer-hpa + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.customBalancer.autoscaling.annotations }} + {{- $dictValue := default .Values.commonAnnotations .Values.customBalancer.autoscaling.annotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" $dictValue "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: docs-balancer + minReplicas: {{ .Values.customBalancer.autoscaling.minReplicas }} + maxReplicas: {{ .Values.customBalancer.autoscaling.maxReplicas }} + metrics: + {{- if .Values.customBalancer.autoscaling.targetCPU.enabled }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.customBalancer.autoscaling.targetCPU.utilizationPercentage }} + {{- end }} + {{- if .Values.customBalancer.autoscaling.targetMemory.enabled }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ 
.Values.customBalancer.autoscaling.targetMemory.utilizationPercentage }} + {{- end }} + {{- with .Values.customBalancer.autoscaling.customMetricsType }} + {{- toYaml . | nindent 2 }} + {{- end }} + {{- with .Values.customBalancer.autoscaling.behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/templates/ingresses/documentserver.yaml b/templates/ingresses/documentserver.yaml index b779294..e235c0b 100644 --- a/templates/ingresses/documentserver.yaml +++ b/templates/ingresses/documentserver.yaml @@ -9,9 +9,6 @@ metadata: {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} {{- end }} annotations: - nginx.ingress.kubernetes.io/configuration-snippet: | - set $custom_endpoint ''; - include /etc/nginx/custom_balancer.conf; {{- if or .Values.commonAnnotations .Values.ingress.annotations }} {{- $dictValue := default .Values.commonAnnotations .Values.ingress.annotations }} {{- include "ds.annotations.commonAnnotations" ( dict "keyName" $dictValue "context" $ ) | nindent 4 }} @@ -44,5 +41,5 @@ spec: service: name: {{ template "ds.svc.name" . }} port: - number: {{ .Values.service.port }} + number: {{ .Values.customBalancer.service.port }} {{- end }} diff --git a/templates/serviceaccount/docs-balancer.yaml b/templates/serviceaccount/docs-balancer.yaml new file mode 100644 index 0000000..11c8178 --- /dev/null +++ b/templates/serviceaccount/docs-balancer.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ds-balancer-sa + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "1" + {{- if .Values.commonAnnotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: true diff --git a/templates/services/docs-balancer.yaml b/templates/services/docs-balancer.yaml new file mode 100644 index 0000000..375bafe --- /dev/null +++ b/templates/services/docs-balancer.yaml @@ -0,0 +1,36 @@ +{{- if eq (include "balancer.svc.create" .) "true" }} +kind: Service +apiVersion: v1 +metadata: + name: docs-balancer + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.customBalancer.service.annotations }} + {{- $dictValue := default .Values.commonAnnotations .Values.customBalancer.service.annotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" $dictValue "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + app: docs-balancer + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + ports: + - name: http + protocol: TCP + port: {{ .Values.customBalancer.service.port }} + targetPort: {{ .Values.customBalancer.containerPorts.http }} + type: {{ .Values.customBalancer.service.type }} + {{- if .Values.customBalancer.service.sessionAffinity }} + sessionAffinity: {{ .Values.customBalancer.service.sessionAffinity }} + {{- if .Values.customBalancer.service.sessionAffinityConfig }} + {{- with .Values.customBalancer.service.sessionAffinityConfig }} + sessionAffinityConfig: + {{- toYaml . 
| nindent 4 }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/templates/services/documentserver.yaml b/templates/services/documentserver.yaml index ec793ba..710a1b8 100644 --- a/templates/services/documentserver.yaml +++ b/templates/services/documentserver.yaml @@ -1,4 +1,3 @@ -{{- if eq (include "ds.svc.create" .) "true" }} kind: Service apiVersion: v1 metadata: @@ -33,4 +32,3 @@ spec: {{- end }} {{- end }} {{- end }} -{{- end }} diff --git a/values.yaml b/values.yaml index d0a5cb0..1fa5242 100644 --- a/values.yaml +++ b/values.yaml @@ -1,29 +1,3 @@ -## Dependencies charts parameters - -## ingress-nginx.enabled parameter for manage ingress controller subchart condition -ingress-nginx: - enabled: true - namespaceOverride: default - controller: - replicaCount: 2 - allowSnippetAnnotations: true - extraVolumeMounts: - - name: custom-balancer - mountPath: /etc/nginx/custom_balancer.conf - subPath: custom_balancer.conf - - name: balancer-lua - mountPath: /etc/nginx/lua/balancer.lua - subPath: balancer.lua - extraVolumes: - - name: custom-balancer - configMap: - name: balancer-snippet - - name: balancer-lua - configMap: - name: balancer-lua - service: - annotations: {} - ## Default values for Onlyoffice Docs ## product.name Specifies name of the product @@ -123,11 +97,12 @@ commonAnnotations: {} ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ serviceAccount: ## serviceAccount.create Enable ServiceAccount creation - create: false + ## If you want to use your ServiceAccount, then make sure that the role it is binding with has the verb `patch` for the resource `pods` + create: true ## serviceAccount.name Name of the ServiceAccount to be used ## If not set and `serviceAccount.create` is `true` the name will be taken from .Release.Name ## If not set and `serviceAccount.create` is `false` the name will be "default" - name: "" + name: "ds-docs-sa" ## serviceAccount.annotations Map of annotations to add to the 
ServiceAccount ## If set to, it takes priority over the `commonAnnotations` ## You can also use `tpl` as the value for the key @@ -136,7 +111,7 @@ serviceAccount: ## Used only if `serviceAccount.create` is `true` ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#opt-out-of-api-credential-automounting automountServiceAccountToken: true -## Persistence parameters for forgotten and error files +## Persistence parameters for forgotten and error files. Also define parameters for PVC which is used as storage for caching static controller requests persistence: ## persistence.existingClaim Name of an existing PVC to use ## If not specified, a PVC named "ds-service-files" will be created @@ -178,9 +153,6 @@ tolerations: [] imagePullSecrets: "" ## Onlyoffice Docs service parameters service: - ## service.existing The name of an existing service for ONLYOFFICE Docs. If not set, a service named `documentserver` will be created - ## ref: https://github.com/ONLYOFFICE/Kubernetes-Docs/blob/master/templates/services/documentserver.yaml - existing: "" ## service.annotations Map of annotations to add to the ONLYOFFICE Docs service ## If set to, it takes priority over the `commonAnnotations` ## You can also use `tpl` as the value for the key @@ -309,14 +281,14 @@ requestFilteringAgent: ## documentserver: ## documentserver.terminationGracePeriodSeconds The time to terminate gracefully during which the Pod will have the Terminating status - terminationGracePeriodSeconds: 60 + terminationGracePeriodSeconds: 10800 + ## documentserver.terminationGraceTimeSeconds The time to terminate gracefully in seconds, which remains for turning off the shard and assembling documents open on it until the termination grace period is fully completed + ## Cannot be more than `documentserver.terminationGracePeriodSeconds` + terminationGraceTimeSeconds: 600 ## documentserver.keysRedisDBNum The number of the database for storing the balancing results 
keysRedisDBNum: "1" ## documentserver.KeysExpireTime The time in seconds after which the key will be deleted from the balancing database. 172800 mean 48 hours keysExpireTime: 172800 - ## documentserver.ingressCustomConfigMapsNamespace define where custom controller configmaps will be deployed - ## Should be the same ns where controller is deployed - ingressCustomConfigMapsNamespace: default ## documentserver.annotations Defines annotations that will be additionally added to Documentserver Deployment ## If set to, it takes priority over the `commonAnnotations` ## You can also use `tpl` as the value for the key @@ -457,7 +429,7 @@ documentserver: ## documentserver.initContainers.image.repository Documentserver add-shardkey initContainer image repository repository: onlyoffice/docs-utils ## documentserver.initContainers.image.tag Documentserver add-shardkey initContainer image tag - tag: 8.1.3-1 + tag: 8.2.0-2 ## documentserver.initContainers.image.pullPolicy Documentserver add-shardkey initContainer image pull policy pullPolicy: IfNotPresent ## Configure a Security Context for Documentserver add-shardkey initContainer container in Pod @@ -511,7 +483,7 @@ documentserver: ## https://github.com/ONLYOFFICE/Kubernetes-Docs#4-parameters repository: onlyoffice/docs-docservice-de ## documentserver.docservice.image.tag docservice container image tag - tag: 8.1.3-1 + tag: 8.2.0-2 ## documentserver.docservice.image.pullPolicy docservice container image pull policy pullPolicy: IfNotPresent ## Configure a Security Context for the Docservice container @@ -650,7 +622,7 @@ documentserver: ## https://github.com/ONLYOFFICE/Kubernetes-Docs#4-parameters repository: onlyoffice/docs-proxy-de ## documentserver.proxy.image.tag proxy container image tag - tag: 8.1.3-1 + tag: 8.2.0-2 ## documentserver.proxy.image.pullPolicy proxy container image pull policy pullPolicy: IfNotPresent ## Configure a Security Context for the Proxy container @@ -745,7 +717,7 @@ documentserver: ## 
https://github.com/ONLYOFFICE/Kubernetes-Docs#4-parameters repository: onlyoffice/docs-converter-de ## documentserver.converter.image.tag converter container image tag - tag: 8.1.3-1 + tag: 8.2.0-2 ## documentserver.converter.image.pullPolicy converter container image pull policy pullPolicy: IfNotPresent ## Configure a Security Context for the Converter container @@ -953,7 +925,7 @@ example: ## example.image.repository example container image name repository: onlyoffice/docs-example ## example.image.tag example container image tag - tag: 8.1.3-1 + tag: 8.2.0-2 ## example.image.pullPolicy example container image pull policy pullPolicy: IfNotPresent ## Configure a Security Context for the Example container @@ -1009,11 +981,260 @@ example: ## Must be the same as the `key` name in `example.extraConf.ConfigMap` filename: local.json +## customBalancer settings. After 8.2.0 is used as default balancer instead of ingress-nginx +customBalancer: + ## customBalancer.workerConnections set worker connections count for balancer container + workerConnections: 16384 + ## customBalancer.workerProcesses set worker processes count for balancer container + workerProcesses: 1 + ## customBalancer.replicas Number of balancer replicas to deploy + ## If the `customBalancer.autoscaling.enabled` parameter is enabled, it is ignored + replicas: 3 + ## customBalancer.updateStrategy used to replace old Pods by new ones + updateStrategy: + ## customBalancer.updateStrategy.type balancer deployment update strategy type + ## Allowed values: `RollingUpdate` or `Recreate` + ## It is recommended to use the `RollingUpdate` type + type: RollingUpdate + # customBalancer.updateStrategy.rollingUpdate Used only when `customBalancer.updateStrategy.type=RollingUpdate` + rollingUpdate: + # customBalancer.updateStrategy.rollingUpdate.maxUnavailable Maximum number of Balancer Pods unavailable during the update process + maxUnavailable: 25% + # customBalancer.updateStrategy.rollingUpdate.maxSurge Maximum 
number of Balancer Pods created over the desired number of Pods + maxSurge: 25% + ## customBalancer.terminationGracePeriodSeconds The time to terminate gracefully during which the balancer Pod will have the Terminating status + terminationGracePeriodSeconds: 30 + ## customBalancer.annotations Defines annotations that will be additionally added to Balancer Deployment + ## If set to, it takes priority over the `commonAnnotations` + ## You can also use `tpl` as the value for the key + annotations: {} + ## customBalancer.podAnnotations Map of annotations to add to the Balancer deployment Pod + podAnnotations: + rollme: "{{ randAlphaNum 5 | quote }}" + ## customBalancer.customPodAntiAffinity Prohibiting the scheduling of balancer Pods relative to other Pods containing the specified labels on the same node + ## Example: + ## customPodAntiAffinity: + ## requiredDuringSchedulingIgnoredDuringExecution: + ## - labelSelector: + ## matchExpressions: + ## - key: app + ## operator: In + ## values: + ## - example + ## topologyKey: kubernetes.io/hostname + customPodAntiAffinity: {} + ## Pod affinity rules for balancer Pods scheduling by nodes relative to other Pods + ## Pod affinity allow you to constrain which nodes balancer Pods can be scheduled on based on the labels of Pods already running on that node + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Example: + ## podAffinity: + ## requiredDuringSchedulingIgnoredDuringExecution: + ## - labelSelector: + ## matchExpressions: + ## - key: app + ## operator: In + ## values: + ## - store + ## topologyKey: topology.kubernetes.io/zone + ## + podAffinity: {} + ## Node affinity rules for balancer Pods scheduling by nodes + ## Node affinity allow you to constrain which nodes balancer Pod can be scheduled on based on node labels + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Example: + ## nodeAffinity: + ## 
preferredDuringSchedulingIgnoredDuringExecution: + ## - weight: 100 + ## preference: + ## matchExpressions: + ## - key: kubernetes.io/name + ## operator: In + ## values: + ## - name1 + ## - name2 + ## + nodeAffinity: {} + ## customBalancer.nodeSelector Node labels for balancer Pods assignment + ## If set to, it takes priority over the `nodeSelector` + nodeSelector: {} + ## customBalancer.tolerations Tolerations for balancer Pods assignment + ## If set to, it takes priority over the `tolerations` + tolerations: [] + ## customBalancer.autoscaling.enabled Enable or disable autoscaling for balancer replicas + autoscaling: + enabled: false + ## customBalancer.autoscaling.annotations Defines annotations that will be additionally added to balancer deployment HPA + ## If set to, it takes priority over the `commonAnnotations` + ## You can also use `tpl` as the value for the key + annotations: {} + ## customBalancer.autoscaling.minReplicas balancer deployment autoscaling minimum number of replicas + minReplicas: 2 + ## customBalancer.autoscaling.maxReplicas balancer deployment autoscaling maximum number of replicas + maxReplicas: 4 + targetCPU: + ## customBalancer.autoscaling.targetCPU.enabled Enable autoscaling of balancer deployment by CPU usage percentage + enabled: true + ## customBalancer.autoscaling.targetCPU.utilizationPercentage balancer deployment autoscaling target CPU percentage + utilizationPercentage: 70 + targetMemory: + ## customBalancer.autoscaling.targetMemory.enabled Enable autoscaling of balancer deployment by memory usage percentage + enabled: false + ## customBalancer.autoscaling.targetMemory.utilizationPercentage balancer deployment autoscaling target memory percentage + utilizationPercentage: 70 + ## customBalancer.autoscaling.customMetricsType Custom, additional or external autoscaling metrics for the balancer deployment + ## ref: 
https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#autoscaling-on-multiple-metrics-and-custom-metrics + ## Example: + ## customMetricsType: + ## - type: Object + ## object: + ## metric: + ## name: requests-per-second + ## describedObject: + ## apiVersion: networking.k8s.io/v1 + ## kind: Ingress + ## name: main-route + ## target: + ## type: Value + ## value: 2k + customMetricsType: [] + ## customBalancer.autoscaling.behavior Configuring Balancer deployment scaling behavior policies for the `scaleDown` and `scaleUp` fields + ## If not set the default values are used: + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#default-behavior + ## Example: + ## behavior: + ## scaleDown: + ## stabilizationWindowSeconds: 300 + ## policies: + ## - type: Pods + ## value: 4 + ## periodSeconds: 60 + ## scaleUp: + ## stabilizationWindowSeconds: 0 + ## policies: + ## - type: Percent + ## value: 70 + ## periodSeconds: 15 + ## selectPolicy: Max + behavior: {} + ## Probe used for the balancer container: startup, readiness and liveness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## The parameters below for startup probes are used only when `customBalancer.startupProbe.enabled=true` + startupProbe: + ## customBalancer.startupProbe.enabled Enable startupProbe for balancer container + enabled: true + httpGet: + ## customBalancer.startupProbe.httpGet.path Checking the path for startupProbe + path: /balancer-healthcheck + ## customBalancer.startupProbe.httpGet.port Checking the port for startupProbe + port: 80 + ## customBalancer.startupProbe.failureThreshold Failure threshold for startupProbe + failureThreshold: 30 + ## customBalancer.startupProbe.periodSeconds Period seconds for startupProbe + periodSeconds: 10 + ## The parameters below for readiness probes are used only when `customBalancer.readinessProbe.enabled=true` + readinessProbe: + ## 
customBalancer.readinessProbe.enabled Enable readinessProbe for balacer container + enabled: true + ## customBalancer.readinessProbe.failureThreshold Failure threshold for readinessProbe + failureThreshold: 2 + httpGet: + ## customBalancer.readinessProbe.httpGet.path Checking the path for readinessProbe + path: /balancer-healthcheck + ## customBalancer.readinessProbe.httpGet.port Checking the port for readinessProbe + port: 80 + ## customBalancer.readinessProbe.periodSeconds Period seconds for readinessProbe + periodSeconds: 10 + ## customBalancer.readinessProbe.successThreshold Success threshold for readinessProbe + successThreshold: 1 + ## customBalancer.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + timeoutSeconds: 3 + ## The parameters below for liveness probes are used only when `customBalancer.livenessProbe.enabled=true` + livenessProbe: + ## customBalancer.livenessProbe.enabled Enable livenessProbe for balancer container + enabled: true + ## customBalancer.livenessProbe.failureThreshold Failure threshold for livenessProbe + failureThreshold: 3 + httpGet: + ## customBalancer.livenessProbe.httpGet.path Checking the path for livenessProbe + path: /balancer-healthcheck + ## customBalancer.livenessProbe.httpGet.port Checking the port for livenessProbe + port: 80 + ## customBalancer.livenessProbe.periodSeconds Period seconds for livenessProbe + periodSeconds: 10 + ## customBalancer.livenessProbe.successThreshold Success threshold for livenessProbe + successThreshold: 1 + ## customBalancer.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + timeoutSeconds: 3 + ## balancer container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## customBalancer.resources.requests The requested resources for the balancer container + ## customBalancer.resources.limits The resources limits for the balancer container + resources: + requests: + memory: "256Mi" + cpu: "100m" + 
limits: + memory: "4Gi" + cpu: "4000m" + ## Configure a Security Context for the Balancer container + containerSecurityContext: + ## customBalancer.containerSecurityContext.enabled Enable security context for the Balancer container + enabled: false + ## customBalancer.containerSecurityContext.runAsUser User ID for the Balancer container + runAsUser: 101 + ## customBalancer.containerSecurityContext.runAsGroup Group ID for the Balancer container + runAsGroup: 101 + ## customBalancer.containerSecurityContext.runAsNonRoot Require that the container will run with a user with UID other than 0 + runAsNonRoot: true + ## customBalancer.containerSecurityContext.allowPrivilegeEscalation Controls whether a process can gain more privileges than its parent process + allowPrivilegeEscalation: false + ## customBalancer.containerSecurityContext.seccompProfile Defines the Seccomp profile for the Balancer container + seccompProfile: + type: RuntimeDefault + ## customBalancer.containerSecurityContext.capabilities Defines the privileges granted to the process + capabilities: + drop: ["ALL"] + ## customBalancer.image + image: + ## customBalancer.image.repository specify balancer image repository + repository: onlyoffice/docs-balancer + ## customBalancer.image.tag specify balancer image tag + tag: 8.2.0-2 + ## customBalancer.image.pullPolicy balancer image pull policy + pullPolicy: IfNotPresent + ## customBalancer.containerPorts balancer container port + containerPorts: + http: 80 + ## customBalancer.service specify parameters for balancer service + service: + ## customBalancer.annotations Map of annotations to add to the ONLYOFFICE Docs balancer service + ## If set to, it takes priority over the `commonAnnotations` + ## You can also use `tpl` as the value for the key + annotations: {} + ## customBalancer.service.existing The name of an existing service for balancer. 
If not set, a service named `docs-balancer` will be created + existing: "" + ## customBalancer.service.type balancer service type + type: ClusterIP + ## customBalancer.service.port balancer service port + port: "80" + ## customBalancer.service.sessionAffinity Session Affinity for ONLYOFFICE Docs balancer service + ## If not set, `None` will be set as the default value + ## ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity + sessionAffinity: "" + ## customBalancer.service.sessionAffinityConfig Configuration for ONLYOFFICE Docs balancer service Session Affinity + ## Used if the `customBalancer.service.sessionAffinity` is set + ## ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-stickiness-timeout + ## Example: + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 900 + sessionAffinityConfig: {} + ## Onlyoffice Docs ingress parameters ## ingress: ## ingress.enabled Enable the creation of an ingress for the ONLYOFFICE Docs - enabled: true + enabled: false ## ingress.annotations Map of annotations to add to the Ingress ## If set to, it takes priority over the `commonAnnotations` ## You can also use `tpl` as the value for the key @@ -1093,7 +1314,7 @@ grafanaDashboard: ## grafanaDashboard.job.image.repository Job by Grafana Dashboard ONLYOFFICE Docs image repository repository: onlyoffice/docs-utils ## grafanaDashboard.job.image.tag Job by Grafana Dashboard ONLYOFFICE Docs image tag - tag: 8.1.3-1 + tag: 8.2.0-2 ## grafanaDashboard.job.image.pullPolicy Job by Grafana Dashboard ONLYOFFICE Docs image pull policy pullPolicy: IfNotPresent ## Configure a Security Context for the Grafana Dashboard container @@ -1159,7 +1380,7 @@ tests: ## tests.image.repository test container image name repository: onlyoffice/docs-utils ## tests.image.tag test container image tag - tag: 8.1.3-1 + tag: 8.2.0-2 ## tests.image.pullPolicy test container image pull policy pullPolicy: IfNotPresent ## Configure a Security Context for 
the Test container