diff --git a/ansible/playbooks/roles/rook/tasks/main.yml b/ansible/playbooks/roles/rook/tasks/main.yml
index 88501d8280..8ef3e7319a 100644
--- a/ansible/playbooks/roles/rook/tasks/main.yml
+++ b/ansible/playbooks/roles/rook/tasks/main.yml
@@ -21,48 +21,36 @@
         - "{{ rook_helm_chart_file_name }}"
         - "{{ rook_helm_cluster_chart_file_name }}"
 
-    - name: Create custom configuration for operator Helm chart file (operator-custom-chart-values.yml)
-      when: specification.operator_chart_values is defined
+    - name: Fail when configuration for operator or cluster chart is not defined
+      fail:
+        msg: "You need to configure operator_chart_values and cluster_chart_values when rook is enabled"
+      when:
+        - specification.operator_chart_values is not defined or
+          specification.cluster_chart_values is not defined
+
+    - name: Create configuration for operator Helm chart file (operator-custom-chart-values.yml)
       copy:
         content: "{{ specification.operator_chart_values }}"
         dest: "{{ download_directory }}/operator-custom-chart-values.yml"
         mode: preserve
 
-    - name: Create custom configuration for cluster Helm chart file (cluster-custom-chart-values.yml)
-      when: specification.cluster_chart_values is defined
+    - name: Create configuration for cluster Helm chart file (cluster-custom-chart-values.yml)
       copy:
         content: "{{ specification.cluster_chart_values }}"
         dest: "{{ download_directory }}/cluster-custom-chart-values.yml"
         mode: preserve
 
     - name: Install Rook operator using Helm chart with values from operator-custom-chart-values.yml
-      when: specification.operator_chart_values is defined
       command: |
         helm -n {{ specification.rook_namespace }} upgrade --install \
           -f {{ download_directory }}/operator-custom-chart-values.yml \
           {{ rook_helm_chart_name }} \
           {{ download_directory }}/{{ rook_helm_chart_file_name }} --create-namespace
 
-    - name: Install Rook operator using Helm chart with default values
-      when: not specification.operator_chart_values is defined
-      command: |
-        helm -n {{ specification.rook_namespace }} upgrade --install \
-          {{ rook_helm_chart_name }} \
-          {{ download_directory }}/{{ rook_helm_chart_file_name }} --create-namespace
-
     - name: Create Rook cluster with values from cluster-custom-chart-values.yml
-      when: specification.cluster_chart_values is defined
       command: |
         helm -n {{ specification.rook_namespace }} upgrade --install \
           --set operatorNamespace={{ specification.rook_namespace }} \
           -f {{ download_directory }}/cluster-custom-chart-values.yml \
           {{ rook_helm_cluster_chart_name }} \
           {{ download_directory }}/{{ rook_helm_cluster_chart_file_name }} --create-namespace
-
-    - name: Create Rook cluster with default values
-      when: not specification.cluster_chart_values is defined
-      command: |
-        helm -n {{ specification.rook_namespace }} upgrade --install \
-          --set operatorNamespace={{ specification.rook_namespace }} \
-          {{ rook_helm_cluster_chart_name }} \
-          {{ download_directory }}/{{ rook_helm_cluster_chart_file_name }} --create-namespace
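The guard above uses a `fail` task behind a negated two-part `when`. A minimal sketch of the same pre-flight check written with `ansible.builtin.assert`, which reports the specific failing condition in its output; this is only an alternative form, not part of the change:

```yaml
# Sketch only: an assert-based equivalent of the fail-plus-when guard above.
# Each condition is evaluated separately, so the error names the missing key.
- name: Assert that operator and cluster chart values are defined
  ansible.builtin.assert:
    that:
      - specification.operator_chart_values is defined
      - specification.cluster_chart_values is defined
    fail_msg: "You need to configure operator_chart_values and cluster_chart_values when rook is enabled"
    quiet: true
```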
diff --git a/docs/home/howto/kubernetes/PERSISTENT_STORAGE.md b/docs/home/howto/kubernetes/PERSISTENT_STORAGE.md
index 9754d17ead..f6aed34e51 100644
--- a/docs/home/howto/kubernetes/PERSISTENT_STORAGE.md
+++ b/docs/home/howto/kubernetes/PERSISTENT_STORAGE.md
@@ -104,7 +104,9 @@ specification:
   rook_namespace: your-rook-namespace
 ```
 
-The key `specification.enabled` must be set to true to install Rook/Ceph component. This will install Rook/Ceph with default values. To override default values provided by Rook you need to add to `configuration/rook` keys:
+The key `specification.enabled` must be set to true to install the Rook/Ceph component. The Epiphany configuration file provides
+a set of parameters that are used to install Rook/Ceph with default values. To override the default values provided by Rook,
+adjust the following `configuration/rook` keys:
 
 - `specification.operator_chart_values` - to override Rook Operator Helm Chart default values
 - `specification.cluster_chart_values` - to override Rook Cluster Helm Chart default values
@@ -121,6 +123,7 @@ specification:
 ...
 ```
 Values nested below the `operator_chart_values` and `cluster_chart_values` keys are respectively Helm Chart values for Rook Operator and Rook Ceph Cluster.
+It is important to ensure that the operator and cluster chart values match the configuration of your cluster.
 
 More information about Helm Chart values may be found:
 - [Helm Operator](https://github.com/rook/rook/blob/master/Documentation/helm-operator.md)
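Together with the new `fail` task, both documented keys are effectively required once Rook is enabled. A minimal sketch of a `configuration/rook` document that sets them, overriding one value per chart (the overridden keys are illustrative, picked from the defaults in the next file; whether partial values are merged with those defaults or replace them depends on how Epiphany merges this configuration kind, so starting from a full copy of the defaults is the safe option):

```yaml
kind: configuration/rook
title: "Kubernetes Rook Config"
name: default
specification:
  rook_namespace: rook-ceph
  enabled: true
  operator_chart_values:
    logLevel: DEBUG            # illustrative operator chart override
  cluster_chart_values:
    operatorNamespace: rook-ceph
    cephClusterSpec:
      mon:
        count: 3               # illustrative cluster chart override
```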
diff --git a/schema/common/defaults/configuration/rook.yml b/schema/common/defaults/configuration/rook.yml
index f61ce6d3f9..7a322cf70e 100644
--- a/schema/common/defaults/configuration/rook.yml
+++ b/schema/common/defaults/configuration/rook.yml
@@ -3,5 +3,437 @@ kind: configuration/rook
 title: "Kubernetes Rook Config"
 name: default
 specification:
-  rook_namespace: rook-ceph
-  enabled: false
+  rook_namespace: rook-ceph
+  enabled: false
+  operator_chart_values:
+    image:
+      repository: rook/ceph
+      tag: VERSION
+      pullPolicy: IfNotPresent
+    crds:
+      enabled: true
+    resources:
+      limits:
+        cpu: 500m
+        memory: 256Mi
+      requests:
+        cpu: 100m
+        memory: 128Mi
+    nodeSelector: {}
+    tolerations: []
+    unreachableNodeTolerationSeconds: 5
+    currentNamespaceOnly: false
+    annotations: {}
+    logLevel: INFO
+    rbacEnable: true
+    pspEnable: true
+    csi:
+      enableRbdDriver: true
+      enableCephfsDriver: true
+      enableGrpcMetrics: false
+      enableCephfsSnapshotter: true
+      enableRBDSnapshotter: true
+      enablePluginSelinuxHostMount: false
+      rbdFSGroupPolicy: "ReadWriteOnceWithFSType"
+      cephFSFSGroupPolicy: "ReadWriteOnceWithFSType"
+      enableOMAPGenerator: false
+      provisionerReplicas: 2
+      allowUnsupportedVersion: false
+      csiRBDProvisionerResource: |
+        - name: csi-provisioner
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 100m
+            limits:
+              memory: 256Mi
+              cpu: 200m
+        - name: csi-resizer
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 100m
+            limits:
+              memory: 256Mi
+              cpu: 200m
+        - name: csi-attacher
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 100m
+            limits:
+              memory: 256Mi
+              cpu: 200m
+        - name: csi-snapshotter
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 100m
+            limits:
+              memory: 256Mi
+              cpu: 200m
+        - name: csi-rbdplugin
+          resource:
+            requests:
+              memory: 512Mi
+              cpu: 250m
+            limits:
+              memory: 1Gi
+              cpu: 500m
+        - name: liveness-prometheus
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 50m
+            limits:
+              memory: 256Mi
+              cpu: 100m
+      csiRBDPluginResource: |
+        - name: driver-registrar
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 50m
+            limits:
+              memory: 256Mi
+              cpu: 100m
+        - name: csi-rbdplugin
+          resource:
+            requests:
+              memory: 512Mi
+              cpu: 250m
+            limits:
+              memory: 1Gi
+              cpu: 500m
+        - name: liveness-prometheus
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 50m
+            limits:
+              memory: 256Mi
+              cpu: 100m
+      csiCephFSProvisionerResource: |
+        - name: csi-provisioner
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 100m
+            limits:
+              memory: 256Mi
+              cpu: 200m
+        - name: csi-resizer
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 100m
+            limits:
+              memory: 256Mi
+              cpu: 200m
+        - name: csi-attacher
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 100m
+            limits:
+              memory: 256Mi
+              cpu: 200m
+        - name: csi-cephfsplugin
+          resource:
+            requests:
+              memory: 512Mi
+              cpu: 250m
+            limits:
+              memory: 1Gi
+              cpu: 500m
+        - name: liveness-prometheus
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 50m
+            limits:
+              memory: 256Mi
+              cpu: 100m
+      csiCephFSPluginResource: |
+        - name: driver-registrar
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 50m
+            limits:
+              memory: 256Mi
+              cpu: 100m
+        - name: csi-cephfsplugin
+          resource:
+            requests:
+              memory: 512Mi
+              cpu: 250m
+            limits:
+              memory: 1Gi
+              cpu: 500m
+        - name: liveness-prometheus
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 50m
+            limits:
+              memory: 256Mi
+              cpu: 100m
+      forceCephFSKernelClient: true
+      volumeReplication:
+        enabled: false
+      csiAddons:
+        enabled: false
+    enableDiscoveryDaemon: false
+    cephCommandsTimeoutSeconds: "15"
+    enableSelinuxRelabeling: true
+    hostpathRequiresPrivileged: false
+    disableDeviceHotplug: false
+    discoverDaemonUdev:
+    enableOBCWatchOperatorNamespace: true
+    admissionController:
+    monitoring:
+      enabled: false
+  cluster_chart_values:
+    operatorNamespace: rook-ceph
+    toolbox:
+      enabled: false
+      image: rook/ceph:VERSION
+      tolerations: []
+      affinity: {}
+      resources:
+        limits:
+          cpu: "500m"
+          memory: "1Gi"
+        requests:
+          cpu: "100m"
+          memory: "128Mi"
+    monitoring:
+      enabled: false
+      createPrometheusRules: false
+      rulesNamespaceOverride:
+    pspEnable: true
+    cephClusterSpec:
+      cephVersion:
+        image: quay.io/ceph/ceph:v16.2.7
+        allowUnsupported: false
+      dataDirHostPath: /var/lib/rook
+      skipUpgradeChecks: false
+      continueUpgradeAfterChecksEvenIfNotHealthy: false
+      waitTimeoutForHealthyOSDInMinutes: 10
+      mon:
+        count: 3
+        allowMultiplePerNode: false
+      mgr:
+        count: 2
+        allowMultiplePerNode: false
+        modules:
+          - name: pg_autoscaler
+            enabled: true
+      dashboard:
+        enabled: true
+        ssl: true
+      crashCollector:
+        disable: false
+      cleanupPolicy:
+        confirmation: ""
+        sanitizeDisks:
+          method: quick
+          dataSource: zero
+          iteration: 1
+        allowUninstallWithVolumes: false
+      resources:
+        mgr:
+          limits:
+            cpu: "1000m"
+            memory: "1Gi"
+          requests:
+            cpu: "500m"
+            memory: "512Mi"
+        mon:
+          limits:
+            cpu: "2000m"
+            memory: "2Gi"
+          requests:
+            cpu: "1000m"
+            memory: "1Gi"
+        osd:
+          limits:
+            cpu: "2000m"
+            memory: "4Gi"
+          requests:
+            cpu: "1000m"
+            memory: "4Gi"
+        prepareosd:
+          limits:
+            cpu: "500m"
+            memory: "200Mi"
+          requests:
+            cpu: "500m"
+            memory: "50Mi"
+        mgr-sidecar:
+          limits:
+            cpu: "500m"
+            memory: "100Mi"
+          requests:
+            cpu: "100m"
+            memory: "40Mi"
+        crashcollector:
+          limits:
+            cpu: "500m"
+            memory: "60Mi"
+          requests:
+            cpu: "100m"
+            memory: "60Mi"
+        logcollector:
+          limits:
+            cpu: "500m"
+            memory: "1Gi"
+          requests:
+            cpu: "100m"
+            memory: "100Mi"
+        cleanup:
+          limits:
+            cpu: "500m"
+            memory: "1Gi"
+          requests:
+            cpu: "500m"
+            memory: "100Mi"
+      removeOSDsIfOutAndSafeToRemove: false
+      storage: # cluster level storage configuration and selection
+        useAllNodes: true
+        useAllDevices: true
+      disruptionManagement:
+        managePodBudgets: true
+        osdMaintenanceTimeout: 30
+        pgHealthCheckTimeout: 0
+        manageMachineDisruptionBudgets: false
+        machineDisruptionBudgetNamespace: openshift-machine-api
+      healthCheck:
+        daemonHealth:
+          mon:
+            disabled: false
+            interval: 45s
+          osd:
+            disabled: false
+            interval: 60s
+          status:
+            disabled: false
+            interval: 60s
+        livenessProbe:
+          mon:
+            disabled: false
+          mgr:
+            disabled: false
+          osd:
+            disabled: false
+    ingress:
+      dashboard: {}
+    cephBlockPools:
+      - name: ceph-blockpool
+        spec:
+          failureDomain: host
+          replicated:
+            size: 3
+        storageClass:
+          enabled: true
+          name: ceph-block
+          isDefault: true
+          reclaimPolicy: Delete
+          allowVolumeExpansion: true
+          mountOptions: []
+          parameters:
+            imageFormat: "2"
+            imageFeatures: layering
+            csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+            csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+            csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+            csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+            csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+            csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+            csi.storage.k8s.io/fstype: ext4
+    cephFileSystems:
+      - name: ceph-filesystem
+        spec:
+          metadataPool:
+            replicated:
+              size: 3
+          dataPools:
+            - failureDomain: host
+              replicated:
+                size: 3
+              name: data0
+          metadataServer:
+            activeCount: 1
+            activeStandby: true
+            resources:
+              limits:
+                cpu: "2000m"
+                memory: "4Gi"
+              requests:
+                cpu: "1000m"
+                memory: "4Gi"
+        storageClass:
+          enabled: true
+          isDefault: false
+          name: ceph-filesystem
+          pool: data0
+          reclaimPolicy: Delete
+          allowVolumeExpansion: true
+          mountOptions: []
+          parameters:
+            csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+            csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+            csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+            csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+            csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
+            csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+            csi.storage.k8s.io/fstype: ext4
+    cephFileSystemVolumeSnapshotClass:
+      enabled: false
+      name: ceph-filesystem
+      isDefault: true
+      deletionPolicy: Delete
+      annotations: {}
+      labels: {}
+      parameters: {}
+    cephBlockPoolsVolumeSnapshotClass:
+      enabled: false
+      name: ceph-block
+      isDefault: false
+      deletionPolicy: Delete
+      annotations: {}
+      labels: {}
+      parameters: {}
+    cephObjectStores:
+      - name: ceph-objectstore
+        spec:
+          metadataPool:
+            failureDomain: host
+            replicated:
+              size: 3
+          dataPool:
+            failureDomain: host
+            erasureCoded:
+              dataChunks: 2
+              codingChunks: 1
+          preservePoolsOnDelete: true
+          gateway:
+            port: 80
+            resources:
+              limits:
+                cpu: "2000m"
+                memory: "2Gi"
+              requests:
+                cpu: "1000m"
+                memory: "1Gi"
+            instances: 1
+          healthCheck:
+            bucket:
+              interval: 60s
+        storageClass:
+          enabled: true
+          name: ceph-bucket
+          reclaimPolicy: Delete
+          parameters:
+            region: us-east-1
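The defaults above consume every node and raw device (`useAllNodes: true`, `useAllDevices: true`), which is exactly the case the docs change warns about: the chart values must match your cluster. A hedged sketch of narrowing that selection through `cluster_chart_values`, using the standard Rook CephCluster storage selection fields; the node and device names are placeholders:

```yaml
# Sketch only: restrict OSD provisioning to named nodes and devices
# instead of the catch-all defaults. Node and device names below are
# placeholders and must match your actual Kubernetes nodes and disks.
cluster_chart_values:
  cephClusterSpec:
    storage:
      useAllNodes: false
      useAllDevices: false
      nodes:
        - name: "kube-node-1"   # placeholder node name
          devices:
            - name: "sdb"       # placeholder raw device
        - name: "kube-node-2"
          devices:
            - name: "sdb"
```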