diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 050af174e8..2f7b60ecab 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,7 +5,7 @@ repos: name: OPA fmt description: Formats Rego policy using opa fmt entry: opa fmt - args: [-w] + args: [ -w ] language: system files: (\.rego)$ @@ -13,7 +13,7 @@ repos: name: OPA check description: Check syntax of staged Rego files entry: opa check - args: [-S, "./bundle/compliance"] + args: [ -S, './bundle/compliance' ] pass_filenames: false language: system files: (\.rego)$ @@ -22,7 +22,7 @@ repos: name: OPA test description: Runs OPA unit tests on rego source files entry: opa test - args: [-b, "./bundle"] + args: [ -b, './bundle' ] pass_filenames: false language: system @@ -37,12 +37,10 @@ repos: rev: v0.32.2 hooks: - id: markdownlint - args: [ - "--disable", - MD013, # Line length can be ignored for now - MD033, # Allow inline HTML - MD046, # Allow code blocks to be fenced with backticks - MD041, # Allow multiple top level headers - "--", - ] + args: [ '--disable', + MD013, # Line length can be ignored for now + MD033, # Allow inline HTML + MD046, # Allow code blocks to be fenced with backticks + MD041, # Allow multiple top level headers + '--' ] files: \.(md|markdown)$ diff --git a/README.md b/README.md index fc5afede5e..3207d12490 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ # Cloud Security Posture - Rego policies -[![CIS K8S](https://img.shields.io/badge/CIS-Kubernetes%20(73.6%25)-326CE5?logo=Kubernetes)](RULES.md#k8s-cis-benchmark) -[![CIS EKS](https://img.shields.io/badge/CIS-Amazon%20EKS%20(59.6%25)-FF9900?logo=Amazon+EKS)](RULES.md#eks-cis-benchmark) -[![CIS AWS](https://img.shields.io/badge/CIS-AWS%20(3.2%25)-232F3E?logo=Amazon+AWS)](RULES.md#aws-cis-benchmark) +[![CIS K8S](https://img.shields.io/badge/CIS-Kubernetes%20(74%25)-326CE5?logo=Kubernetes)](RULES.md#k8s-cis-benchmark) +[![CIS EKS](https://img.shields.io/badge/CIS-Amazon%20EKS%20(60%25)-FF9900?logo=Amazon+EKS)](RULES.md#eks-cis-benchmark) +[![CIS AWS](https://img.shields.io/badge/CIS-AWS%20(3%25)-232F3E?logo=Amazon+AWS)](RULES.md#aws-cis-benchmark) ![Coverage Badge](https://img.shields.io/endpoint?url=https://gist.githubusercontent.com/oren-zohar/a7160df46e48dff45b24096de9302d38/raw/csp-security-policies_coverage.json) diff --git a/RULES.md b/RULES.md index 34d0d39b9b..54f30f3aa5 100644 --- a/RULES.md +++ b/RULES.md @@ -154,7 +154,6 @@ | [3.2.7](bundle/compliance/cis_eks/rules/cis_3_2_7) | 3.2 | Ensure that the --make-iptables-util-chains argument is set to true | :white_check_mark: | Automated | | [3.2.8](bundle/compliance/cis_eks/rules/cis_3_2_8) | 3.2 | Ensure that the --hostname-override argument is not set | :white_check_mark: | Manual | | [3.2.9](bundle/compliance/cis_eks/rules/cis_3_2_9) | 3.2 | Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture | :white_check_mark: | Automated | -| 3.3.1 | 3.3 | Prefer using Container-Optimized OS when possible | :x: | Manual | | 4.1.1 | 4.1 | Ensure that the cluster-admin role is only used where required | :x: | Manual | | 4.1.2 | 4.1 | Minimize access to secrets | :x: | Manual | | 4.1.3 | 4.1 | Minimize wildcard use in Roles and ClusterRoles | :x: | Manual | @@ -168,21 +167,22 @@ | [4.2.5](bundle/compliance/cis_eks/rules/cis_4_2_5) | 4.2 | Minimize the admission of containers with allowPrivilegeEscalation | :white_check_mark: | Automated | | [4.2.6](bundle/compliance/cis_eks/rules/cis_4_2_6) | 4.2 | Minimize the admission of root 
containers | :white_check_mark: | Automated | | [4.2.7](bundle/compliance/cis_eks/rules/cis_4_2_7) | 4.2 | Minimize the admission of containers with the NET_RAW capability | :white_check_mark: | Automated | -| [4.2.8](bundle/compliance/cis_eks/rules/cis_4_2_8) | 4.2 | Minimize the admission of containers with added capabilities | :white_check_mark: | Manual | +| [4.2.8](bundle/compliance/cis_eks/rules/cis_4_2_8) | 4.2 | Minimize the admission of containers with added capabilities | :white_check_mark: | Automated | | [4.2.9](bundle/compliance/cis_eks/rules/cis_4_2_9) | 4.2 | Minimize the admission of containers with capabilities assigned | :white_check_mark: | Manual | | 4.3.1 | 4.3 | Ensure latest CNI version is used | :x: | Manual | -| 4.3.2 | 4.3 | Ensure that all Namespaces have Network Policies defined | :x: | Manual | +| 4.3.2 | 4.3 | Ensure that all Namespaces have Network Policies defined | :x: | Automated | | 4.4.1 | 4.4 | Prefer using secrets as files over secrets as environment variables | :x: | Manual | | 4.4.2 | 4.4 | Consider external secret storage | :x: | Manual | +| 4.5.1 | 4.5 | Configure Image Provenance using ImagePolicyWebhook admission controller | :x: | Manual | | 4.6.1 | 4.6 | Create administrative boundaries between resources using namespaces | :x: | Manual | | 4.6.2 | 4.6 | Apply Security Context to Your Pods and Containers | :x: | Manual | -| 4.6.3 | 4.6 | The default namespace should not be used | :x: | Manual | +| 4.6.3 | 4.6 | The default namespace should not be used | :x: | Automated | | [5.1.1](bundle/compliance/cis_eks/rules/cis_5_1_1) | 5.1 | Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider | :white_check_mark: | Manual | | 5.1.2 | 5.1 | Minimize user access to Amazon ECR | :x: | Manual | | 5.1.3 | 5.1 | Minimize cluster access to read-only for Amazon ECR | :x: | Manual | | 5.1.4 | 5.1 | Minimize Container Registries to only those approved | :x: | Manual | -| 5.2.1 | 5.2 | Prefer using managed identities for workloads | :x: | Manual | -| [5.3.1](bundle/compliance/cis_eks/rules/cis_5_3_1) | 5.3 | Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS | :white_check_mark: | Manual | +| 5.2.1 | 5.2 | Prefer using dedicated EKS Service Accounts | :x: | Manual | +| [5.3.1](bundle/compliance/cis_eks/rules/cis_5_3_1) | 5.3 | Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS | :white_check_mark: | Automated | | [5.4.1](bundle/compliance/cis_eks/rules/cis_5_4_1) | 5.4 | Restrict Access to the Control Plane Endpoint | :white_check_mark: | Manual | | [5.4.2](bundle/compliance/cis_eks/rules/cis_5_4_2) | 5.4 | Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled | :white_check_mark: | Manual | | [5.4.3](bundle/compliance/cis_eks/rules/cis_5_4_3) | 5.4 | Ensure clusters are created with Private Nodes | :white_check_mark: | Manual | diff --git a/bundle/compliance/cis_aws/rules/cis_1_8/data.yaml b/bundle/compliance/cis_aws/rules/cis_1_8/data.yaml index de1c7f0df5..0bf8bba7a4 100644 --- a/bundle/compliance/cis_aws/rules/cis_1_8/data.yaml +++ b/bundle/compliance/cis_aws/rules/cis_1_8/data.yaml @@ -1,48 +1,38 @@ metadata: - id: 0674190c-677c-5f17-bcc8-f60e913eb9d6 + id: 328079dd-6af7-5967-97cf-b6db063dd90f name: Ensure IAM password policy requires minimum length of 14 or greater - profile_applicability: "* Level 1" - description: Password policies are, in part, used to enforce password complexity - requirements. 
IAM password policies can be used to ensure password are at - least a given length. It is recommended that the password policy require a - minimum password length 14. - version: "1.0" - rationale: Setting a password complexity policy increases account resiliency - against brute force login attempts. - audit: > - Perform the following to ensure the password policy is configured as - prescribed: + rule_number: '1.8' + profile_applicability: '* Level 1' + description: |- + Password policies are, in part, used to enforce password complexity requirements. + IAM password policies can be used to ensure password are at least a given length. + It is recommended that the password policy require a minimum password length 14. + rationale: |- + Setting a password complexity policy increases account resiliency against brute force login attempts. + audit: |- + Perform the following to ensure the password policy is configured as prescribed: **From Console:** 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) - 2. Go to IAM Service on the AWS Console - 3. Click on Account Settings on the Left Pane - 4. Ensure "Minimum password length" is set to 14 or greater. **From Command Line:** - ``` aws iam get-account-password-policy - ``` Ensure the output of the above command includes "MinimumPasswordLength": 14 (or higher) - remediation: > + remediation: |- Perform the following to set the password policy as prescribed: **From Console:** 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) - 2. Go to IAM Service on the AWS Console - 3. Click on Account Settings on the Left Pane - 4. Set "Minimum password length" to `14` or greater. - 5. Click "Apply password policy" **From Command Line:** @@ -50,17 +40,18 @@ metadata: aws iam update-account-password-policy --minimum-password-length 14 ``` Note: All commands starting with "aws iam update-account-password-policy" can be combined into a single command. - impact: "" - default_value: "" + impact: None + default_value: '' references: - - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html - - https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy - tags: - - CIS - - AWS - - CIS 1.8 - - Identity and Access Management + - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html + - https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy section: Identity and Access Management + version: '1.0' + tags: + - CIS + - CIS_AWS + - CIS 1.8 + - Identity and Access Management benchmark: name: CIS Amazon Web Services Foundations version: v1.5.0 diff --git a/bundle/compliance/cis_aws/rules/cis_1_9/data.yaml b/bundle/compliance/cis_aws/rules/cis_1_9/data.yaml index c6e2811caa..0e0cfeed3c 100644 --- a/bundle/compliance/cis_aws/rules/cis_1_9/data.yaml +++ b/bundle/compliance/cis_aws/rules/cis_1_9/data.yaml @@ -1,71 +1,57 @@ metadata: - id: 1e20e1e4-0104-5c4c-a4be-afd48eaa46f6 + id: b7a0af34-6b0b-5d6c-ade0-40e78890db00 name: Ensure IAM password policy prevents password reuse - profile_applicability: "* Level 1" - description: IAM password policies can prevent the reuse of a given password by - the same user. It is recommended that the password policy prevent the reuse - of passwords. 
- version: "1.0" + rule_number: '1.9' + profile_applicability: '* Level 1' + description: |- + IAM password policies can prevent the reuse of a given password by the same user. + It is recommended that the password policy prevent the reuse of passwords. rationale: Preventing password reuse increases account resiliency against brute force login attempts. - audit: > - Perform the following to ensure the password policy is configured as - prescribed: + audit: |- + Perform the following to ensure the password policy is configured as prescribed: **From Console:** 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) - 2. Go to IAM Service on the AWS Console - 3. Click on Account Settings on the Left Pane - 4. Ensure "Prevent password reuse" is checked - 5. Ensure "Number of passwords to remember" is set to 24 **From Command Line:** - ``` - aws iam get-account-password-policy - ``` - Ensure the output of the above command includes "PasswordReusePrevention": 24 - remediation: > + remediation: |- Perform the following to set the password policy as prescribed: **From Console:** 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) - 2. Go to IAM Service on the AWS Console - 3. Click on Account Settings on the Left Pane - 4. Check "Prevent password reuse" - 5. Set "Number of passwords to remember" is set to `24` **From Command Line:** - ``` aws iam update-account-password-policy --password-reuse-prevention 24 ``` - Note: All commands starting with "aws iam update-account-password-policy" can be combined into a single command. - impact: "" - default_value: "" + impact: None + default_value: '' references: - - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html - - https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy - tags: - - CIS - - AWS - - CIS 1.9 - - Identity and Access Management + - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html + - https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy section: Identity and Access Management + version: '1.0' + tags: + - CIS + - CIS_AWS + - CIS 1.9 + - Identity and Access Management benchmark: name: CIS Amazon Web Services Foundations version: v1.5.0 diff --git a/bundle/compliance/cis_eks/rules/cis_2_1_1/data.yaml b/bundle/compliance/cis_eks/rules/cis_2_1_1/data.yaml index 893da5bf8c..167f9688d9 100644 --- a/bundle/compliance/cis_eks/rules/cis_2_1_1/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_2_1_1/data.yaml @@ -1,41 +1,39 @@ metadata: - id: 433ae1b7-0411-54e2-8b06-f62be93b4089 - name: Enable audit Logs (Manual) - profile_applicability: | - * Level 1 - description: | - The audit logs are part of the EKS managed Kubernetes control plane logs - that are managed by Amazon EKS. - Amazon EKS is integrated with AWS CloudTrail, a service that provides a - record of actions taken by a user, role, or an AWS service in Amazon EKS. CloudTrail - captures all API calls for Amazon EKS as events. The calls captured include calls from the - Amazon EKS console and code calls to the Amazon EKS API operations. - rationale: | - Exporting logs and metrics to a dedicated, persistent datastore such as - CloudTrail ensures - availability of audit data following a cluster security event, and provides a central location - for analysis of log and metric data collated from multiple sources. 
- audit: | + id: 5b3b7bd3-eff9-5f9b-8456-2c880cbdb0b0 + name: Enable audit Logs + rule_number: 2.1.1 + profile_applicability: '* Level 1' + description: |- + The audit logs are part of the EKS managed Kubernetes control plane logs that are managed by Amazon EKS. + Amazon EKS is integrated with AWS CloudTrail, a service that provides a record of actions taken by a user, role, or an AWS service in Amazon EKS. + CloudTrail captures all API calls for Amazon EKS as events. + The calls captured include calls from the Amazon EKS console and code calls to the Amazon EKS API operations. + rationale: |- + Exporting logs and metrics to a dedicated, persistent datastore such as CloudTrail ensures availability of audit data following a cluster security event, and provides a central location for analysis of log and metric data collated from multiple sources. + audit: |- Perform the following to determine if CloudTrail is enabled for all regions: + **Via the Management Console** - 1. Sign in to the AWS Management Console and open the EKS console at - https://console.aws.amazon.com/eks + + 1. Sign in to the AWS Management Console and open the EKS console at https://console.aws.amazon.com/eks 2. Click on Cluster Name of the cluster you are auditing 3. Click Logging - You will see Control Plane Logging info - ``` - API Server Enabled/Disabled - Audit Enabled/Disabled - Authenticator Enabled/Disabled - Controller Manager Enabled/Disabled - Scheduler Enabled/Disabled + You will see Control Plane Logging info + + ``` + API Server Audit Authenticator + Enabled/Disabled Enabled/Disabled Enabled/Disabled + + Controller Manager Scheduler + Enabled/Disabled Enabled/Disabled ``` 4. Ensure all 5 choices are set to Enabled - remediation: | + remediation: |- Perform the following to determine if CloudTrail is enabled for all regions: + **Via The Management Console** - 1. Sign in to the AWS Management Console and open the EKS console at - https://console.aws.amazon.com/eks + + 1. Sign in to the AWS Management Console and open the EKS console at https://console.aws.amazon.com/eks 2. Click on Cluster Name of the cluster you are auditing 3. Click Logging 4. Select Manage Logging from the button on the right hand side @@ -43,40 +41,29 @@ metadata: 6. Click Save Changes **Via CLI** - ``` - aws --region "${REGION_CODE}" eks describe-cluster --name "${CLUSTER_NAME}" - - -query 'cluster.logging.clusterLogging[?enabled==true].types - ``` - impact: > - Audit logs will be created on the master nodes, which will consume disk - space. - Care should be taken to avoid generating too large volumes of log information as this could impact the - available of the cluster nodes. S3 lifecycle features can be used to manage the accumulation - and management of logs over time. + + `aws --region "${REGION_CODE}" eks describe-cluster --name "${CLUSTER_NAME}" --query 'cluster.logging.clusterLogging[?enabled==true].types` + impact: |- + Audit logs will be created on the master nodes, which will consume disk space. Care should be taken to avoid generating too large volumes of log information as this could impact the available of the cluster nodes. + S3 lifecycle features can be used to manage the accumulation and management of logs over time. + See the following AWS resource for more information on these features: http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html - default_value: > - By default, cluster control plane logs aren't sent to CloudWatch Logs. ... 
- When you enable a log type, the logs are sent with a log verbosity level of 2 . - To enable or disable control plane - logs with the console. Open the Amazon EKS console at - https://console.aws.amazon.com/eks/home#/clusters . - Amazon EKS Information in CloudTrail CloudTrail is enabled on your AWS account when - you create the account. When activity occurs in Amazon EKS, that activity is recorded in a - CloudTrail event along with other AWS service events in Event history. - references: | - 1. https://kubernetes.io/docs/tasks/debug-application-cluster/audit/ - 2. https://aws.github.io/aws-eks-best-practices/detective/ - 3. https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html - 4. https://docs.aws.amazon.com/eks/latest/userguide/logging-using-cloudtrail.html + default_value: | + By default, cluster control plane logs aren't sent to CloudWatch Logs. ... When you enable a log type, the logs are sent with a log verbosity level of 2 . To enable or disable control plane logs with the console. Open the Amazon EKS console at https://console.aws.amazon.com/eks/home#/clusters . Amazon EKS Information in CloudTrail CloudTrail is enabled on your AWS account when you create the account. When activity occurs in Amazon EKS, that activity is recorded in a CloudTrail event along with other AWS service events in Event history. + references: + - https://kubernetes.io/docs/tasks/debug-application-cluster/audit/ + - https://aws.github.io/aws-eks-best-practices/detective/ + - https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + - https://docs.aws.amazon.com/eks/latest/userguide/logging-using-cloudtrail.html section: Logging - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 2.1.1 - Logging benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_3_1_1/data.yaml b/bundle/compliance/cis_eks/rules/cis_3_1_1/data.yaml index bceb451d29..d8d4b14292 100644 --- a/bundle/compliance/cis_eks/rules/cis_3_1_1/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_3_1_1/data.yaml @@ -1,59 +1,61 @@ metadata: - id: fd3e93e3-c1c6-5dd7-a582-cf6ab65ea2f2 - name: Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Manual) - profile_applicability: | - * Level 1 - description: | - If `kubelet` is running, and if it is using a file-based kubeconfig file, - ensure that the proxy - kubeconfig file has permissions of `644` or more restrictive. - rationale: | - The `kubelet` kubeconfig file controls various parameters of the `kubelet` - service in the worker node. + id: 8f843abe-1720-5ee5-a3ae-a1fd773d2bec + name: Ensure that the kubeconfig file permissions are set to 644 or more restrictive + rule_number: 3.1.1 + profile_applicability: '* Level 1' + description: |- + If `kubelet` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `644` or more restrictive. + rationale: |- + The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system. - It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes - ConfigMap instead of a file. In this case, there is no proxy kubeconfig file. 
- audit: | + + It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. + In this case, there is no proxy kubeconfig file. + audit: |- SSH to the worker nodes + To check to see if the Kubelet Service is running: ``` sudo systemctl status kubelet ``` The output should return `Active: active (running) since..` + Run the following command on each node to find the appropriate kubeconfig file: + ``` ps -ef | grep kubelet ``` - The output of the above command should return something similar to `--kubeconfig - /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file. + The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file. + Run this command to obtain the kubeconfig file permissions: + ``` stat -c %a /var/lib/kubelet/kubeconfig ``` The output of the above command gives you the kubeconfig file's permissions. - Verify that if a file is specified and it exists, the permissions are 644 or more restrictive. - remediation: | - Run the below command (based on the file location on your system) on the - each worker node. + + Verify that if a file is specified and it exists, the permissions are `644` or more restrictive. + remediation: |- + Run the below command (based on the file location on your system) on the each worker + node. For example, ``` chmod 644 ``` - impact: | - None. + impact: None. default_value: | See the AWS EKS documentation for the default value. - references: | - 1. https://kubernetes.io/docs/admin/kube-proxy/ + references: + - https://kubernetes.io/docs/admin/kube-proxy/ section: Worker Node Configuration Files - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 3.1.1 - Worker Node Configuration Files benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_3_1_2/data.yaml b/bundle/compliance/cis_eks/rules/cis_3_1_2/data.yaml index 3644c2bdc3..3c0d31ddd7 100644 --- a/bundle/compliance/cis_eks/rules/cis_3_1_2/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_3_1_2/data.yaml @@ -1,56 +1,58 @@ metadata: - id: 22c14de0-5c3b-5b14-b1ba-9cc831b48147 - name: Ensure that the kubelet kubeconfig file ownership is set to root:root (Manual) - profile_applicability: | - * Level 1 - description: | - If `kubelet` is running, ensure that the file ownership of its kubeconfig file - is set to `root:root`. - rationale: | - The kubeconfig file for `kubelet` controls various parameters for the `kubelet` - service in the - worker node. You should set its file ownership to maintain the integrity of the file. + id: be9d43ae-4355-51ed-8ea7-dfd9b30e9d07 + name: Ensure that the kubelet kubeconfig file ownership is set to root:root + rule_number: 3.1.2 + profile_applicability: '* Level 1' + description: |- + If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`. + rationale: |- + The kubeconfig file for `kubelet` controls various parameters for the `kubelet` service in the worker node. + You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`. 
- audit: | + audit: |- SSH to the worker nodes + To check to see if the Kubelet Service is running: ``` sudo systemctl status kubelet ``` The output should return `Active: active (running) since..` + Run the following command on each node to find the appropriate kubeconfig file: + ``` ps -ef | grep kubelet ``` - The output of the above command should return something similar to `--kubeconfig` - `/var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file. + The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file. + Run this command to obtain the kubeconfig file ownership: + ``` stat -c %U:%G /var/lib/kubelet/kubeconfig ``` - The output of the above command gives you the kubeconfig file's ownership. Verify that the - ownership is set to `root:root`. - remediation: | - Run the below command (based on the file location on your system) on the - each worker node. + + The output of the above command gives you the kubeconfig file's ownership. + Verify that the ownership is set to `root:root`. + remediation: |- + Run the below command (based on the file location on your system) on the each worker node. For example, + ``` chown root:root ``` - impact: | - None + impact: None default_value: | See the AWS EKS documentation for the default value. - references: | - 1. https://kubernetes.io/docs/admin/kube-proxy/ + references: + - https://kubernetes.io/docs/admin/kube-proxy/ section: Worker Node Configuration Files - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 3.1.2 - Worker Node Configuration Files benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 - id: cis_eks \ No newline at end of file + id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_3_1_3/data.yaml b/bundle/compliance/cis_eks/rules/cis_3_1_3/data.yaml index 2432397338..5786029f07 100644 --- a/bundle/compliance/cis_eks/rules/cis_3_1_3/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_3_1_3/data.yaml @@ -1,56 +1,57 @@ metadata: - id: cd1f19c9-381b-5009-bc19-be33b52322d8 - name: Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual) - profile_applicability: | - * Level 1 - description: | - Ensure that if the kubelet refers to a configuration file with the `--config` - argument, that file has permissions of 644 or more restrictive. - rationale: | - The kubelet reads various parameters, including security settings, from a config file - specified by the `--config` argument. If this file is specified you should restrict its file - permissions to maintain the integrity of the file. The file should be writable by only the - administrators on the system. - audit: | + id: e11693e6-8f19-5ddb-b1d3-4adcb5595adf + name: Ensure that the kubelet configuration file has permissions set to 644 or more + restrictive + rule_number: 3.1.3 + profile_applicability: '* Level 1' + description: |- + Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive. + rationale: |- + The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. + If this file is specified you should restrict its file permissions to maintain the integrity of the file. + The file should be writable by only the administrators on the system. 
+ audit: |- First, SSH to the relevant worker node: + To check to see if the Kubelet Service is running: ``` sudo systemctl status kubelet ``` The output should return `Active: active (running) since..` + Run the following command on each node to find the appropriate Kubelet config file: + ``` ps -ef | grep kubelet ``` - The output of the above command should return something similar to `--config` - `/etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet - config file. + The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file. + Run the following command: + ``` stat -c %a /etc/kubernetes/kubelet/kubelet-config.json ``` - The output of the above command is the Kubelet config file's permissions. Verify that the - permissions are `644` or more restrictive. - remediation: | - Run the following command (using the config file location identied in the - Audit step) + The output of the above command is the Kubelet config file's permissions. + Verify that the permissions are `644` or more restrictive. + remediation: |- + Run the following command (using the config file location identied in the Audit step) + ``` chmod 644 /etc/kubernetes/kubelet/kubelet-config.json ``` - impact: | - None + impact: None. default_value: | See the AWS EKS documentation for the default value. - references: | - 1. https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ + references: + - https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ section: Worker Node Configuration Files - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 3.1.3 - Worker Node Configuration Files benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_3_1_4/data.yaml b/bundle/compliance/cis_eks/rules/cis_3_1_4/data.yaml index 1d2c948d84..61b5f6aa35 100644 --- a/bundle/compliance/cis_eks/rules/cis_3_1_4/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_3_1_4/data.yaml @@ -1,57 +1,56 @@ metadata: - id: 09e032a7-9ac1-5964-a1da-cdb45e3f8a61 - name: Ensure that the kubelet configuration file ownership is set to root:root (Manual) - profile_applicability: | - * Level 1 - description: | - Ensure that if the kubelet refers to a configuration file with the `--config` - argument, that file is owned by `root:root`. - rationale: > - The kubelet reads various parameters, including security settings, from a - config file - specified by the `--config` argument. If this file is specified you should restrict its file - permissions to maintain the integrity of the file. The file should be writable by only the - administrators on the system. - audit: | + id: 7dd9fde2-7160-5313-be4c-a26f9d705808 + name: Ensure that the kubelet configuration file ownership is set to root:root + rule_number: 3.1.4 + profile_applicability: '* Level 1' + description: |- + Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root. + rationale: |- + The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. + If this file is specified you should restrict its file permissions to maintain the integrity of the file. + The file should be writable by only the administrators on the system. 
+ audit: |- First, SSH to the relevant worker node: + To check to see if the Kubelet Service is running: ``` sudo systemctl status kubelet ``` The output should return `Active: active (running) since..` + Run the following command on each node to find the appropriate Kubelet config file: + ``` ps -ef | grep kubelet ``` - The output of the above command should return something similar to `--config` - `/etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet - config file. + The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file. + Run the following command: + ``` stat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json ``` - The output of the above command is the Kubelet config file's ownership. Verify that the - ownership is set to `root:root` - remediation: | - Run the following command (using the config file location identified in the - Audit step) + The output of the above command is the Kubelet config file's ownership. + Verify that the ownership is set to `root:root` + remediation: |- + Run the following command (using the config file location identified in the Audit step) + ``` chown root:root /etc/kubernetes/kubelet/kubelet-config.json ``` - impact: | - None + impact: None default_value: | See the AWS EKS documentation for the default value. - references: | - 1. https://kubernetes.io/docs/admin/kube-proxy/ + references: + - https://kubernetes.io/docs/admin/kube-proxy/ section: Worker Node Configuration Files - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 3.1.4 - Worker Node Configuration Files benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_3_2_1/data.yaml b/bundle/compliance/cis_eks/rules/cis_3_2_1/data.yaml index a816d8a385..8505ee9cf7 100644 --- a/bundle/compliance/cis_eks/rules/cis_3_2_1/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_3_2_1/data.yaml @@ -1,100 +1,105 @@ metadata: - id: d98fd08b-bafa-5314-8aa7-c5cf5e319739 - name: Ensure that the --anonymous-auth argument is set to false (Automated) - profile_applicability: | - * Level 1 - description: | - Disable anonymous requests to the Kubelet server. - rationale: | - When enabled, requests that are not rejected by other configured - authentication methods - are treated as anonymous requests. These requests are then served by the Kubelet server. + id: 340b6012-9394-569d-ac5a-74745dae70cb + name: Ensure that the --anonymous-auth argument is set to false + rule_number: 3.2.1 + profile_applicability: '* Level 1' + description: Disable anonymous requests to the Kubelet server. + rationale: |- + When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. + These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests. - audit: | + audit: |- **Audit Method 1:** - If using a Kubelet configuration file, check that there is an entry for - `authentication: anonymous: enabled` set to `false`. + + If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`. 
+ First, SSH to the relevant node: + Run the following command on each node to find the appropriate Kubelet config file: + ``` ps -ef | grep kubelet ``` - The output of the above command should return something similar to - `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet - config file. + The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file. + Open the Kubelet config file: ``` sudo more /etc/kubernetes/kubelet/kubelet-config.json ``` - Verify that the `"authentication": { "anonymous": { "enabled": false }` argument is - set to `false`. + + Verify that the `"authentication": { "anonymous": { "enabled": false }` argument is set to `false`. + + **Audit Method 2:** - If using the api configz endpoint consider searching for the status of `authentication...` - `"anonymous":{"enabled":false}` by extracting the live configuration from the nodes - running kubelet. - Set the local proxy port and the following variables and provide proxy port number and - node name; + + If using the api configz endpoint consider searching for the status of `authentication... + "anonymous":{"enabled":false}` by extracting the live configuration from the nodes running kubelet. + + Set the local proxy port and the following variables and provide proxy port number and node name; `HOSTNAME_PORT="localhost-and-port-number"` - `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of` - `"kubectl get nodes"` + `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of "kubectl get nodes"` ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` - remediation: | + remediation: |- **Remediation Method 1:** - If modifying the Kubelet config file, edit the kubelet-config.json file - `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to `false` + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false + ``` "authentication": { "anonymous": { "enabled": false ``` + **Remediation Method 2:** - If using executable arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node - and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + ``` --anonymous-auth=false ``` + **Remediation Method 3:** - If using the api configz endpoint consider searching for the status of - `"authentication.*anonymous":{"enabled":false}"` by extracting the live configuration - from the nodes running kubelet. 
- **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a Live - Cluster, and then rerun the curl statement from audit process to check for kubelet - configuration changes + + If using the api configz endpoint consider searching for the status of `"authentication.*anonymous":{"enabled":false}"` by extracting the live configuration from the nodes running kubelet. + + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` + **For all three remediations:** - Based on your system, restart the kubelet service and check status + Based on your system, restart the `kubelet` service and check status + ``` systemctl daemon-reload systemctl restart kubelet.service - systemctl status kubelet -l + systemctl status kubelet -l ``` - impact: | - Anonymous requests will be rejected. + impact: Anonymous requests will be rejected. default_value: | See the EKS documentation for the default value. - references: | - 1. https://kubernetes.io/docs/admin/kubelet/ - 2. https://kubernetes.io/docs/admin/kubelet-authentication-authorization/#kubelet-authentication - 3. https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/ + references: + - https://kubernetes.io/docs/admin/kubelet/ + - https://kubernetes.io/docs/admin/kubelet-authentication-authorization/#kubelet-authentication + - https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/ section: Kubelet - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 3.2.1 - Kubelet benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_3_2_10/data.yaml b/bundle/compliance/cis_eks/rules/cis_3_2_10/data.yaml index 79ab3ddd0f..9176a0601f 100644 --- a/bundle/compliance/cis_eks/rules/cis_3_2_10/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_3_2_10/data.yaml @@ -1,72 +1,73 @@ metadata: - id: efb2ffa2-28a9-558e-aba3-f58243cb3bbb - name: Ensure that the --rotate-certificates argument is not set to false (Manual) - profile_applicability: | - * Level 2 - description: | - Enable kubelet client certificate rotation. - rationale: | - The `--rotate-certificates` setting causes the kubelet to rotate its client - certificates by - creating new CSRs as its existing credentials expire. This automated periodic rotation - ensures that the there is no downtime due to expired certificates and thus addressing - availability in the CIA security triad. - **Note**: This recommendation only applies if you let kubelets get their certificates from the - API server. In case your kubelet certificates come from an outside authority/tool (e.g. + id: 4dda78b5-7d6e-52fb-8faa-998ac97fa4ef + name: Ensure that the --rotate-certificates argument is not set to false + rule_number: 3.2.10 + profile_applicability: '* Level 2' + description: Enable kubelet client certificate rotation. 
+ rationale: |- + The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. + This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA security triad. + + **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. + In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself. - **Note**: This feature also require the `RotateKubeletClientCertificate` feature gate to be - enabled (which is the default since Kubernetes v1.7) - audit: | + + **Note:** This feature also require the `RotateKubeletClientCertificate` feature gate to be enabled (which is the default since Kubernetes v1.7) + audit: |- **Audit Method 1:** - If using a Kubelet configuration file, check that there is an entry for `--rotate-` - `certificates` set to `false`. + + If using a Kubelet configuration file, check that there is an entry for `--rotate-certificates` set to `false`. + First, SSH to the relevant node: + Run the following command on each node to find the appropriate Kubelet config file: + ``` ps -ef | grep kubelet ``` - The output of the above command should return something similar to `--config` - `/etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet - config file. + The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file. + Open the Kubelet config file: ``` cat /etc/kubernetes/kubelet/kubelet-config.json ``` + Verify that the `RotateCertificate` argument is not present, or is set to `true`. - If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet - config file specified by `--config`, that file does not contain `rotateCertificates: false`. - remediation: | + + If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet config file specified by `--config`, that file does not contain `rotateCertificates: false`. + remediation: |- **Remediation Method 1:** - If modifying the Kubelet config file, edit the kubelet-config.json file - `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to `false` + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false + ``` "RotateCertificate":true ``` + **Remediation Method 2:** - If using executable arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node - and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + ``` --RotateCertificate=true ``` - impact: | - None + impact: None default_value: | See the Amazon EKS documentation for the default value. - references: | - 1. https://github.com/kubernetes/kubernetes/pull/41912 - 2. https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration - 3. https://kubernetes.io/docs/imported/release/notes/ - 4. 
https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/ - 5. https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/ + references: + - https://github.com/kubernetes/kubernetes/pull/41912 + - https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration + - https://kubernetes.io/docs/imported/release/notes/ + - https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/ + - https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/ section: Kubelet - version: "1.0" + version: '1.0' tags: - - CIS - - EKS - - CIS 3.2.10 - - Kubelet + - CIS + - CIS_EKS + - CIS 3.2.10 + - Kubelet benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_3_2_11/data.yaml b/bundle/compliance/cis_eks/rules/cis_3_2_11/data.yaml index f320b4e1a2..8e4e03791b 100644 --- a/bundle/compliance/cis_eks/rules/cis_3_2_11/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_3_2_11/data.yaml @@ -1,104 +1,108 @@ metadata: - id: c6bcde81-4ca4-5f6f-9ef0-1116b4a02f4c - name: Ensure that the RotateKubeletServerCertificate argument is set to true (Manual) - profile_applicability: | - * Level 1 - description: | - Enable kubelet server certificate rotation. - rationale: > - `RotateKubeletServerCertificate` causes the kubelet to both request a serving - certificate - after bootstrapping its client credentials and rotate the certificate as its existing credentials - expire. This automated periodic rotation ensures that the there are no downtimes due to - expired certificates and thus addressing availability in the CIA security triad. - Note: This recommendation only applies if you let kubelets get their certificates from the - API server. In case your kubelet certificates come from an outside authority/tool (e.g. + id: 3be81f29-6e8b-5393-bb91-425a2b2a388e + name: Ensure that the RotateKubeletServerCertificate argument is set to true + rule_number: 3.2.11 + profile_applicability: '* Level 1' + description: Enable kubelet server certificate rotation. + rationale: |- + `RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. + This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad. + + Note: This recommendation only applies if you let kubelets get their certificates from the API server. + In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself. - audit: | + audit: |- **Audit Method 1:** - If using a Kubelet configuration file, check that there is an entry for - `RotateKubeletServerCertificate` is set to `true`. + + If using a Kubelet configuration file, check that there is an entry for `RotateKubeletServerCertificate` is set to `true`. + First, SSH to the relevant node: + Run the following command on each node to find the appropriate Kubelet config file: + ``` ps -ef | grep kubelet ``` - The output of the above command should return something similar to `--config` - `/etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet - config file. 
+ The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file. + Open the Kubelet config file: ``` cat /etc/kubernetes/kubelet/kubelet-config.json ``` + Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`. + **Audit Method 2:** - If using the api configz endpoint consider searching for the status of - `"RotateKubeletServerCertificate":true` by extracting the live configuration from the - nodes running kubelet. - Set the local proxy port and the following variables and provide proxy port number and - node name; + + If using the api configz endpoint consider searching for the status of `"RotateKubeletServerCertificate":true` by extracting the live configuration from the nodes running kubelet. + + Set the local proxy port and the following variables and provide proxy port number and node name; `HOSTNAME_PORT="localhost-and-port-number"` - `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of` - `"kubectl get nodes"` + `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of "kubectl get nodes"` ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` - remediation: | + remediation: |- **Remediation Method 1:** - If modifying the Kubelet config file, edit the kubelet-config.json file - `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to `true` + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true + ``` "RotateKubeletServerCertificate":true ``` + **Remediation Method 2:** - If using a Kubelet config file, edit the file to set `RotateKubeletServerCertificate` to - `true`. - If using executable arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node - and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + If using a Kubelet config file, edit the file to set `RotateKubeletServerCertificate to true`. + + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + ``` --rotate-kubelet-server-certificate=true ``` + **Remediation Method 3:** - If using the api configz endpoint consider searching for the status of - `"RotateKubeletServerCertificate"`: by extracting the live configuration from the nodes - running kubelet. - **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a Live - Cluster, and then rerun the curl statement from audit process to check for kubelet - configuration changes + + If using the api configz endpoint consider searching for the status of `"RotateKubeletServerCertificate":` by extracting the live configuration from the nodes running kubelet. 
+ + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` + **For all three remediations:** Based on your system, restart the `kubelet` service and check status + ``` systemctl daemon-reload systemctl restart kubelet.service systemctl status kubelet -l ``` - impact: | - None + impact: None default_value: | See the Amazon EKS documentation for the default value. - references: | - 1. https://github.com/kubernetes/kubernetes/pull/45059 - 2. https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration + references: + - https://github.com/kubernetes/kubernetes/pull/45059 + - https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration section: Kubelet - version: "1.0" + version: '1.0' tags: - - CIS - - EKS - - CIS 3.2.11 - - Kubelet + - CIS + - CIS_EKS + - CIS 3.2.11 + - Kubelet benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_3_2_2/data.yaml b/bundle/compliance/cis_eks/rules/cis_3_2_2/data.yaml index d25f132dd7..dd3876d124 100644 --- a/bundle/compliance/cis_eks/rules/cis_3_2_2/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_3_2_2/data.yaml @@ -1,103 +1,107 @@ metadata: - id: 737e58cc-60c3-5785-b2e6-7d1bb2ca1cca - name: Ensure that the --authorization-mode argument is not set to Always Allow (Automated) - profile_applicability: | - * Level 1 - description: | - Do not allow all requests. Enable explicit authorization. - rationale: | - Kubelets, by default, allow all authenticated requests (even anonymous ones) - without - needing explicit authorization checks from the apiserver. You should restrict this behavior - and only allow explicitly authorized requests. - audit: | + id: 78ba925d-bea7-5b17-b0db-9ee592faf10c + name: Ensure that the --authorization-mode argument is not set to AlwaysAllow + rule_number: 3.2.2 + profile_applicability: '* Level 1' + description: "Do not allow all requests.\nEnable explicit authorization." + rationale: |- + Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. + You should restrict this behavior and only allow explicitly authorized requests. + audit: |- **Audit Method 1:** - If using a Kubelet configuration file, check that there is an entry for `"authentication":` - `"webhook": "enabled"` set to `true`. + + If using a Kubelet configuration file, check that there is an entry for `"authentication": "webhook": "enabled"` set to `true`. + First, SSH to the relevant node: + Run the following command on each node to find the appropriate Kubelet config file: + ``` ps -ef | grep kubelet ``` - The output of the above command should return something similar to `--config` - `/etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet - config file. 
+ The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file. + Open the Kubelet config file: ``` sudo more /etc/kubernetes/kubelet/kubelet-config.json ``` - Verify that the `"authentication": {"webhook": { "enabled": is set to true.` - If the `"authentication": {"mode": { argument` is present check that it is not set to - `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by - `--config`, and that file sets `"authentication": {"mode": { ` to something other than - `AlwaysAllow`. + Verify that the `"authentication": {"webhook": { "enabled": is set to true`. + + + If the `"authentication": {"mode": {` argument is present check that it is not set to `AlwaysAllow`. + If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `"authentication": {"mode": {` to something other than `AlwaysAllow`. + **Audit Method 2:** - If using the api configz endpoint consider searching for the status of `authentication...` - `"webhook":{"enabled":true}` by extracting the live configuration from the nodes running - kubelet. - Set the local proxy port and the following variables and provide proxy port number and - node name; + + If using the api configz endpoint consider searching for the status of `authentication... + "webhook":{"enabled":true}` by extracting the live configuration from the nodes running kubelet. + + Set the local proxy port and the following variables and provide proxy port number and node name; `HOSTNAME_PORT="localhost-and-port-number"` - `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of` - `"kubectl get nodes"` + `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of "kubectl get nodes"` ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` - remediation: | + remediation: |- **Remediation Method 1:** - If modifying the Kubelet config file, edit the kubelet-config.json file - `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false - ``` - "authentication"... "webhook":{"enabled":true - ``` + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false + + ``` + "authentication"... + "webhook":{"enabled":true + ``` + **Remediation Method 2:** - If using executable arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node - and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + ``` --authorization-mode=Webhook ``` + **Remediation Method 3:** - If using the api configz endpoint consider searching for the status of - `"authentication.*webhook":{"enabled":true"` by extracting the live configuration from - the nodes running kubelet. 
- **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a Live - Cluster, and then rerun the curl statement from audit process to check for kubelet - configuration changes + + If using the api configz endpoint consider searching for the status of `"authentication.*webhook":{"enabled":true"` by extracting the live configuration from the nodes running kubelet. + + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` + **For all three remediations:** - Based on your system, restart the kubelet service and check status + Based on your system, restart the `kubelet` service and check status + ``` systemctl daemon-reload systemctl restart kubelet.service systemctl status kubelet -l ``` - impact: | - Unauthorized requests will be denied. + impact: Unauthorized requests will be denied. default_value: | See the EKS documentation for the default value. - references: | - 1. https://kubernetes.io/docs/admin/kubelet/ - 2. https://kubernetes.io/docs/admin/kubelet-authentication-authorization/#kubelet-authentication - 3. https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/ + references: + - https://kubernetes.io/docs/admin/kubelet/ + - https://kubernetes.io/docs/admin/kubelet-authentication-authorization/#kubelet-authentication + - https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/ section: Kubelet - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 3.2.2 - Kubelet benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_3_2_3/data.yaml b/bundle/compliance/cis_eks/rules/cis_3_2_3/data.yaml index 95dd775de6..b3e69be157 100644 --- a/bundle/compliance/cis_eks/rules/cis_3_2_3/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_3_2_3/data.yaml @@ -1,109 +1,108 @@ metadata: - id: 46014c5a-c573-5357-b567-964e0139dcd3 - name: Ensure that the --client-ca-file argument is set as appropriate (Manual) - profile_applicability: | - * Level 1 - description: | - Enable Kubelet authentication using certificates. - rationale: > - The connections from the apiserver to the kubelet are used for fetching logs - for pods, - attaching (through kubectl) to running pods, and using the kubelet’s port-forwarding - functionality. These connections terminate at the kubelet’s HTTPS endpoint. By default, the - apiserver does not verify the kubelet’s serving certificate, which makes the connection - subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public - networks. Enabling Kubelet certificate authentication ensures that the apiserver could - authenticate the Kubelet before submitting any requests. 
- audit: | + id: 789d5b9b-b96f-5bc0-8e37-dd12530eae65 + name: Ensure that the --client-ca-file argument is set as appropriate + rule_number: 3.2.3 + profile_applicability: '* Level 1' + description: Enable Kubelet authentication using certificates. + rationale: |- + The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet’s port-forwarding functionality. + These connections terminate at the kubelet’s HTTPS endpoint. + By default, the apiserver does not verify the kubelet’s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. + Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests. + audit: |- **Audit Method 1:** - If using a Kubelet configuration file, check that there is an entry for `"x509":` - `{"clientCAFile:"` set to the location of the client certificate authority file. + + If using a Kubelet configuration file, check that there is an entry for `"x509": {"clientCAFile:"` set to the location of the client certificate authority file. + First, SSH to the relevant node: + Run the following command on each node to find the appropriate Kubelet config file: + ``` ps -ef | grep kubelet ``` - The output of the above command should return something similar to `--config` - `/etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet - config file. + The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file. + Open the Kubelet config file: ``` sudo more /etc/kubernetes/kubelet/kubelet-config.json ``` - Verify that the `"x509": {"clientCAFile:"` argument exists and is set to the location of the - client certificate authority file. - If the `"x509": {"clientCAFile:"` argument is not present, check that there is a Kubelet - config file specified by `--config`, and that the file sets `"authentication": { "x509":` - `{"clientCAFile:"` to the location of the client certificate authority file. + + Verify that the `"x509": {"clientCAFile:"` argument exists and is set to the location of the client certificate authority file. + + If the `"x509": {"clientCAFile:"` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `"authentication": { "x509": {"clientCAFile:"` to the location of the client certificate authority file. **Audit Method 2:** - If using the api configz endpoint consider searching for the status of `authentication..` - `x509":("clientCAFile":"/etc/kubernetes/pki/ca.crt` by extracting the live - configuration from the nodes running kubelet. - Set the local proxy port and the following variables and provide proxy port number and - node name; + + If using the api configz endpoint consider searching for the status of `authentication.. + x509":("clientCAFile":"/etc/kubernetes/pki/ca.crt` by extracting the live configuration from the nodes running kubelet. 
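A rough sketch of that configz query, assuming the response nests the configuration under a `kubeletconfig` key (as current kubelets do) and that `jq` is available; the node selection below is only an example:

```bash
# Pull the live kubelet configuration for the first node and print the
# configured client CA file; kubectl get --raw avoids a long-lived proxy.
NODE_NAME=$(kubectl get nodes -o jsonpath='{.items[0].metadata.name}')
kubectl get --raw "/api/v1/nodes/${NODE_NAME}/proxy/configz" \
  | jq -r '.kubeletconfig.authentication.x509.clientCAFile'
# A compliant node prints the path of its client CA bundle rather than
# null or an empty string.
```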
+ + Set the local proxy port and the following variables and provide proxy port number and node name; `HOSTNAME_PORT="localhost-and-port-number"` - `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of` - `"kubectl get nodes"` + `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of "kubectl get nodes"` ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` - remediation: | + remediation: |- **Remediation Method 1:** - If modifying the Kubelet config file, edit the kubelet-config.json file - `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false - ``` - "authentication": { "x509": {"clientCAFile:" to the location of the client CA - file. + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false + ``` + "authentication": { "x509": {"clientCAFile:" to the location of the client CA file. + ``` + **Remediation Method 2:** - If using executable arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node - and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + ``` --client-ca-file= ``` + **Remediation Method 3:** - If using the api configz endpoint consider searching for the status of - `"authentication.*x509":("clientCAFile":"/etc/kubernetes/pki/ca.crt"` by - extracting the live configuration from the nodes running kubelet. - **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a Live - Cluster, and then rerun the curl statement from audit process to check for kubelet - configuration changes + + If using the api configz endpoint consider searching for the status of `"authentication.*x509":("clientCAFile":"/etc/kubernetes/pki/ca.crt"` by extracting the live configuration from the nodes running kubelet. + + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` + **For all three remediations:** - Based on your system, restart the kubelet service and check status + Based on your system, restart the `kubelet` service and check status + ``` systemctl daemon-reload systemctl restart kubelet.service systemctl status kubelet -l ``` - impact: | - You require TLS to be configured on apiserver as well as kubelets. + ``` + impact: You require TLS to be configured on apiserver as well as kubelets. 
default_value: | See the EKS documentation for the default value. - references: | - 1. https://kubernetes.io/docs/admin/kubelet/ - 2. https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/ - 3. https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/ + references: + - https://kubernetes.io/docs/admin/kubelet/ + - https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/ + - https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/ section: Kubelet - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 3.2.3 - Kubelet benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_3_2_4/data.yaml b/bundle/compliance/cis_eks/rules/cis_3_2_4/data.yaml index 9d40ade65a..5b9fd5ead4 100644 --- a/bundle/compliance/cis_eks/rules/cis_3_2_4/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_3_2_4/data.yaml @@ -1,67 +1,68 @@ metadata: - id: 8c4d1ae0-5f42-5ad0-a95d-27824a774951 - name: Ensure that the --read-only-port is secured (Manual) - profile_applicability: | - * Level 1 - description: | - Disable the read-only port. - rationale: > - The Kubelet process provides a read-only API in addition to the main Kubelet - API. - Unauthenticated access is provided to this read-only API which could possibly retrieve - potentially sensitive information about the cluster. - audit: | - If using a Kubelet configuration file, check that there is an entry for - `authentication: anonymous: enabled` set to `0`. + id: 4fc4a2f3-bd99-517f-abca-2a35540432d0 + name: Ensure that the --read-only-port is secured + rule_number: 3.2.4 + profile_applicability: '* Level 1' + description: Disable the read-only port. + rationale: |- + The Kubelet process provides a read-only API in addition to the main Kubelet API. + Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster. + audit: |- + If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `0`. + First, SSH to the relevant node: + Run the following command on each node to find the appropriate Kubelet config file: + ``` ps -ef | grep kubelet ``` - The output of the above command should return something similar to `--config` - `/etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet - config file. + The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file. + Open the Kubelet config file: ``` cat /etc/kubernetes/kubelet/kubelet-config.json ``` - Verify that the `--read-only-port argument` exists and is set to `0`. - If the `--read-only-port` argument is not present, check that there is a Kubelet config file - specified by `--config`. Check that if there is a `readOnlyPort` entry in the file, it is set to `0`. - remediation: | - If modifying the Kubelet config file, edit the kubelet-config.json file - `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to `false` + + Verify that the `--read-only-port` argument exists and is set to `0`. + + If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
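One way to combine the flag check and the config-file check, sketched under the assumption that the config file lives at the example path used in this rule and that `jq` is installed:

```bash
# Check the command-line flag first, then the config file entry.
ps -ef | grep '[k]ubelet' | grep -o -- '--read-only-port=[0-9]*' \
  || echo "--read-only-port flag not set"
sudo jq '.readOnlyPort' /etc/kubernetes/kubelet/kubelet-config.json
# The rule is satisfied when any value that is present evaluates to 0.
```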
+ Check that if there is a `readOnlyPort` entry in the file, it is set to `0`. + remediation: |- + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false + ``` readOnlyPort to 0 ``` - If using executable arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node - and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + ``` --read-only-port=0 ``` + For all three remediations: Based on your system, restart the `kubelet` service and check status + ``` systemctl daemon-reload systemctl restart kubelet.service systemctl status kubelet -l ``` - impact: > - Removal of the read-only port will require that any service which made use - of it will need to be re-configured to use the main Kubelet API. + impact: |- + Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API. default_value: | See the Amazon EKS documentation for the default value. - references: | - 1. https://kubernetes.io/docs/admin/kubelet/ + references: + - https://kubernetes.io/docs/admin/kubelet/ section: Kubelet - version: "1.0" + version: '1.0' tags: - - CIS - - EKS - - CIS 3.2.4 - - Kubelet + - CIS + - CIS_EKS + - CIS 3.2.4 + - Kubelet benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_3_2_5/data.yaml b/bundle/compliance/cis_eks/rules/cis_3_2_5/data.yaml index ee6370a05c..a58d4e00f6 100644 --- a/bundle/compliance/cis_eks/rules/cis_3_2_5/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_3_2_5/data.yaml @@ -1,106 +1,110 @@ metadata: - id: e9322670-79b9-5177-9fc0-388712f1df07 - name: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual) - profile_applicability: | - * Level 1 - description: | - Do not disable timeouts on streaming connections. - rationale: > - Setting idle timeouts ensures that you are protected against - Denial-of-Service attacks, - inactive connections and running out of ephemeral ports. - **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be - too high for your environment. Setting this as appropriate would additionally ensure that - such streaming connections are timed out after serving legitimate use cases. - audit: | + id: 863f315d-739a-5995-843c-1ab6f9e2bac4 + name: Ensure that the --streaming-connection-idle-timeout argument is not set to + 0 + rule_number: 3.2.5 + profile_applicability: '* Level 1' + description: Do not disable timeouts on streaming connections. + rationale: |- + Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports. + + + **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. + Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases. 
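As an illustration (not part of the benchmark procedure), the effective setting can be read from a node, assuming the example config path used throughout this rule and `jq`:

```bash
# Print the streaming idle timeout, falling back to a note about the
# kubelet's built-in default when the key is absent.
sudo jq -r '.streamingConnectionIdleTimeout // "not set (kubelet default 4h0m0s)"' \
  /etc/kubernetes/kubelet/kubelet-config.json
# Any non-zero duration such as "4h0m0s" or "30m" passes; "0" or "0s" fails.
```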
+ audit: |- **Audit Method 1:** - If using a Kubelet configuration file, check that there is an entry for - `streamingConnectionIdleTimeout` is not set to `0`. + + If using a Kubelet configuration file, check that there is an entry for `streamingConnectionIdleTimeout` is not set to `0`. + First, SSH to the relevant node: + Run the following command on each node to find the appropriate Kubelet config file: + ``` ps -ef | grep kubelet ``` - The output of the above command should return something similar to `--config` - `/etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet - config file. + The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file. + Open the Kubelet config file: ``` cat /etc/kubernetes/kubelet/kubelet-config.json ``` + Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`. - If the argument is not present, and there is a Kubelet config file specified by `--config`, - check that it does not set `streamingConnectionIdleTimeout` to 0. + + If the argument is not present, and there is a Kubelet config file specified by `--config`, check that it does not set `streamingConnectionIdleTimeout` to 0. + **Audit Method 2:** - If using the api configz endpoint consider searching for the status of - `"streamingConnectionIdleTimeout":"4h0m0s"` by extracting the live configuration from - the nodes running kubelet. - Set the local proxy port and the following variables and provide proxy port number and - node name; + + If using the api configz endpoint consider searching for the status of `"streamingConnectionIdleTimeout":"4h0m0s"` by extracting the live configuration from the nodes running kubelet. + + Set the local proxy port and the following variables and provide proxy port number and node name; `HOSTNAME_PORT="localhost-and-port-number"` - `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of` - `"kubectl get nodes"` + `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of "kubectl get nodes"` ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` - remediation: | + remediation: |- **Remediation Method 1:** - If modifying the Kubelet config file, edit the kubelet-config.json file - `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non- - zero value in the format of #h#m#s + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s + ``` - "streamingConnectionIdleTimeout": + "streamingConnectionIdleTimeout": ``` + **Remediation Method 2:** - If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a - value other than 0. - If using executable arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node - and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0. 
+ + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + ``` --streaming-connection-idle-timeout=4h0m0s ``` + **Remediation Method 3:** - If using the api configz endpoint consider searching for the status of - `"streamingConnectionIdleTimeout":` by extracting the live configuration from the nodes - running kubelet. - **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a Live - Cluster, and then rerun the curl statement from audit process to check for kubelet - configuration changes + + If using the api configz endpoint consider searching for the status of `"streamingConnectionIdleTimeout":` by extracting the live configuration from the nodes running kubelet. + + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` + **For all three remediations:** Based on your system, restart the `kubelet` service and check status + ``` systemctl daemon-reload systemctl restart kubelet.service systemctl status kubelet -l ``` - impact: | - Long-lived connections could be interrupted. + impact: Long-lived connections could be interrupted. default_value: | See the EKS documentation for the default value. - references: | - 1. https://kubernetes.io/docs/admin/kubelet/ - 2. https://github.com/kubernetes/kubernetes/pull/18552 + references: + - https://kubernetes.io/docs/admin/kubelet/ + - https://github.com/kubernetes/kubernetes/pull/18552 section: Kubelet - version: "1.0" + version: '1.0' tags: - - CIS - - EKS - - CIS 3.2.5 - - Kubelet + - CIS + - CIS_EKS + - CIS 3.2.5 + - Kubelet benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_3_2_6/data.yaml b/bundle/compliance/cis_eks/rules/cis_3_2_6/data.yaml index 46189827ba..e1408fad79 100644 --- a/bundle/compliance/cis_eks/rules/cis_3_2_6/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_3_2_6/data.yaml @@ -1,103 +1,108 @@ metadata: - id: 136ee092-7486-59d2-8f77-760b743c8a72 - name: Ensure that the --protect-kernel-defaults argument is set to true (Automated) - profile_applicability: | - * Level 1 - description: | - Protect tuned kernel parameters from overriding kubelet default kernel + id: bce28b5e-2f2c-5c57-960e-5385341b2f2b + name: Ensure that the --protect-kernel-defaults argument is set to true + rule_number: 3.2.6 + profile_applicability: '* Level 1' + description: Protect tuned kernel parameters from overriding kubelet default kernel parameter values. - rationale: > - Kernel parameters are usually tuned and hardened by the system - administrators before - putting the systems into production. These parameters protect the kernel and the system. 
- Your kubelet kernel defaults that rely on such parameters should be appropriately set to - match the desired secured system state. Ignoring this could potentially lead to running - pods with undesired kernel behavior. - audit: | + rationale: |- + Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. + These parameters protect the kernel and the system. + Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. + Ignoring this could potentially lead to running pods with undesired kernel behavior. + audit: |- **Audit Method 1:** - If using a Kubelet configuration file, check that there is an entry for - `protectKernelDefaults` is set to `true`. + + If using a Kubelet configuration file, check that there is an entry for `protectKernelDefaults` is set to `true`. + First, SSH to the relevant node: + Run the following command on each node to find the appropriate Kubelet config file: + ``` ps -ef | grep kubelet ``` - The output of the above command should return something similar to `--config` - `/etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet - config file. + The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file. + Open the Kubelet config file: ``` cat /etc/kubernetes/kubelet/kubelet-config.json ``` + Verify that the `--protect-kernel-defaults=true`. - If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet - config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`. + + If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`. + **Audit Method 2:** - If using the api configz endpoint consider searching for the status of - `"protectKernelDefaults"` by extracting the live configuration from the nodes running - kubelet. - Set the local proxy port and the following variables and provide proxy port number and - node name; + + If using the api configz endpoint consider searching for the status of `"protectKernelDefaults"` by extracting the live configuration from the nodes running kubelet. 
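For example, the live values could be collected from every node in one pass; this sketch assumes the configz response is wrapped in a `kubeletconfig` object, `jq` is available locally, and the proxy port is a placeholder:

```bash
# Report protectKernelDefaults for each node via the configz endpoint.
kubectl proxy --port=8001 &
PROXY_PID=$!
sleep 2
for NODE in $(kubectl get nodes -o jsonpath='{.items[*].metadata.name}'); do
  VALUE=$(curl -sSL "http://localhost:8001/api/v1/nodes/${NODE}/proxy/configz" \
    | jq -r '.kubeletconfig.protectKernelDefaults')
  echo "${NODE}: protectKernelDefaults=${VALUE}"
done
kill "${PROXY_PID}"
# Every node should report true; null means the setting is absent and the
# kubelet default (false) applies, which does not satisfy this rule.
```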
+ + Set the local proxy port and the following variables and provide proxy port number and node name; `HOSTNAME_PORT="localhost-and-port-number"` - `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of` - `"kubectl get nodes"` + `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of "kubectl get nodes"` ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` - remediation: | - Remediation Method 1: - If modifying the Kubelet config file, edit the kubelet-config.json file - `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to `true` + remediation: |- + **Remediation Method 1:** + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true + ``` - "protectKernelDefaults": + "protectKernelDefaults": ``` + **Remediation Method 2:** + If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. - If using executable arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node - and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + ``` ----protect-kernel-defaults=true ``` + **Remediation Method 3:** - If using the api configz endpoint consider searching for the status of - `"protectKernelDefaults":` by extracting the live configuration from the nodes running - kubelet. - **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a Live - Cluster, and then rerun the curl statement from audit process to check for kubelet - configuration changes + + If using the api configz endpoint consider searching for the status of `"protectKernelDefaults":` by extracting the live configuration from the nodes running kubelet. + + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` + **For all three remediations:** - Based on your system, restart the kubelet service and check status + Based on your system, restart the `kubelet` service and check status + ``` systemctl daemon-reload systemctl restart kubelet.service systemctl status kubelet -l ``` - impact: | - You would have to re-tune kernel parameters to match kubelet parameters. + impact: You would have to re-tune kernel parameters to match kubelet parameters. default_value: | See the EKS documentation for the default value. - references: | - 1. 
https://kubernetes.io/docs/admin/kubelet/ + references: + - https://kubernetes.io/docs/admin/kubelet/ section: Kubelet - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 3.2.6 - Kubelet benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_3_2_7/data.yaml b/bundle/compliance/cis_eks/rules/cis_3_2_7/data.yaml index e3a8898403..3eb8a65519 100644 --- a/bundle/compliance/cis_eks/rules/cis_3_2_7/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_3_2_7/data.yaml @@ -1,107 +1,107 @@ metadata: - id: 7d7d7235-406a-5d9b-9401-7034758b2e8b - name: Ensure that the --make-iptables-util-chains argument is set to true (Automated) - profile_applicability: | - * Level 1 - description: | - Allow Kubelet to manage iptables. - rationale: > - Kubelets can automatically manage the required changes to iptables based on - how you choose your networking options for the pods. - It is recommended to let kubelets manage - the changes to iptables. This ensures that the iptables configuration remains in sync with - pods networking configuration. Manually configuring iptables with dynamic pod network - configuration changes might hamper the communication between pods/containers and to - the outside world. You might have iptables rules too restrictive or too open. - audit: | + id: bfe7e23b-0a7f-5530-a986-0b06fab6f274 + name: Ensure that the --make-iptables-util-chains argument is set to true + rule_number: 3.2.7 + profile_applicability: '* Level 1' + description: Allow Kubelet to manage iptables. + rationale: |- + Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. + It is recommended to let kubelets manage the changes to iptables. + This ensures that the iptables configuration remains in sync with pods networking configuration. + Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. + You might have iptables rules too restrictive or too open. + audit: |- **Audit Method 1:** - If using a Kubelet configuration file, check that there is an entry for - `makeIPTablesUtilChains` set to `true`. + + If using a Kubelet configuration file, check that there is an entry for `makeIPTablesUtilChains` set to `true`. + First, SSH to the relevant node: + Run the following command on each node to find the appropriate Kubelet config file: + ``` ps -ef | grep kubelet ``` - The output of the above command should return something similar to `--config` - `/etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet - config file. + The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file. + Open the Kubelet config file: ``` cat /etc/kubernetes/kubelet/kubelet-config.json ``` + Verify that if the `makeIPTablesUtilChains` argument exists then it is set to `true`. - If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config - file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to - `false`. + + If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`. 
+ **Audit Method 2:** - If using the api configz endpoint consider searching for the status of `authentication...` - `"makeIPTablesUtilChains":true` by extracting the live configuration from the nodes - running kubelet. - Set the local proxy port and the following variables and provide proxy port number and - node name; + + If using the api configz endpoint consider searching for the status of `authentication... + "makeIPTablesUtilChains":true` by extracting the live configuration from the nodes running kubelet. + + Set the local proxy port and the following variables and provide proxy port number and node name; `HOSTNAME_PORT="localhost-and-port-number"` - `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of` - `"kubectl get nodes"` + `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of "kubectl get nodes"` ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` - remediation: | + remediation: |- **Remediation Method 1:** - If modifying the Kubelet config file, edit the kubelet-config.json file - `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to `false` + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false + ``` "makeIPTablesUtilChains": true ``` + **Remediation Method 2:** - If using executable arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node - and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + ``` --make-iptables-util-chains:true ``` + **Remediation Method 3:** - If using the api configz endpoint consider searching for the status of - `"makeIPTablesUtilChains": true` by extracting the live configuration from the nodes - running kubelet. - **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a Live - Cluster, and then rerun the curl statement from audit process to check for kubelet - configuration changes + + If using the api configz endpoint consider searching for the status of `"makeIPTablesUtilChains": true` by extracting the live configuration from the nodes running kubelet. 
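A compact version of that lookup, using `kubectl get --raw` instead of a local proxy; the node name is only a placeholder, and the `kubeletconfig` wrapper and `jq` are assumptions:

```bash
# Query one node's live config and print makeIPTablesUtilChains.
NODE_NAME=ip-192.168.31.226.ec2.internal   # example node name
kubectl get --raw "/api/v1/nodes/${NODE_NAME}/proxy/configz" \
  | jq '.kubeletconfig.makeIPTablesUtilChains'
# true (or null, since the kubelet defaults to true) satisfies the rule;
# false does not.
```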
+ + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` - For all three remediations: + + **For all three remediations:** Based on your system, restart the `kubelet` service and check status + ``` systemctl daemon-reload systemctl restart kubelet.service - systemctl status kubelet -l - ``` - impact: | - Kubelet would manage the iptables on the system and keep it in sync. If you - are using any - - other iptables management solution, then there might be some conflicts. + systemctl status kubelet -l + impact: |- + Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts. default_value: | See the Amazon EKS documentation for the default value. - references: | - 1. https://kubernetes.io/docs/admin/kubelet/ - 2. https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/ + references: + - https://kubernetes.io/docs/admin/kubelet/ + - https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/ section: Kubelet - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 3.2.7 - Kubelet benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_3_2_8/data.yaml b/bundle/compliance/cis_eks/rules/cis_3_2_8/data.yaml index 844ed5affa..90c6fc32c4 100644 --- a/bundle/compliance/cis_eks/rules/cis_3_2_8/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_3_2_8/data.yaml @@ -1,74 +1,76 @@ metadata: - id: 98524674-3de3-5424-9afa-ca763edca62a - name: Ensure that the --hostname-override argument is not set (Manual) - profile_applicability: | - * Level 1 - description: | - Do not override node hostnames. - rationale: > - Overriding hostnames could potentially break TLS setup between the kubelet - and the apiserver. - Additionally, with overridden hostnames, it becomes increasingly difficult to - associate logs with a particular node and process them for security analytics. Hence, you - should setup your kubelet nodes with resolvable FQDNs and avoid overriding the - hostnames with IPs. - audit: | + id: 06ee416f-aadc-5ac5-807e-39c16c52e985 + name: Ensure that the --hostname-override argument is not set + rule_number: 3.2.8 + profile_applicability: '* Level 1' + description: Do not override node hostnames. + rationale: |- + Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. + Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. + Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. + audit: |- **Audit Method 1:** - If using a Kubelet configuration file, check that there is an entry for `--hostname-override` - is not set or does not exist. 
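Because this setting is only configurable as a command-line flag, a simple process check on each node is usually enough; a minimal sketch:

```bash
# Flag the node as non-compliant if --hostname-override appears in the
# kubelet's command line.
if ps -ef | grep '[k]ubelet' | grep -q -- '--hostname-override'; then
  echo "non-compliant: --hostname-override is set"
else
  echo "compliant: --hostname-override is not set"
fi
```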
+ + If using a Kubelet configuration file, check that there is an entry for `--hostname-override` is not set or does not exist. + First, SSH to the relevant node: + Run the following command on each node to find the appropriate Kubelet config file: + ``` ps -ef | grep kubelet ``` - The output of the above command should return something similar to `--config` - `/etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet - config file. + The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file. + Open the Kubelet config file: ``` cat /etc/kubernetes/kubelet/kubelet-config.json ``` + Verify that `--hostname-override` argument does not exist. + **Note** This setting is not configurable via the Kubelet config file. - remediation: | + remediation: |- **Remediation Method 1:** - If modifying the Kubelet config file, edit the kubelet-config.json file - `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to null + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to null + ``` "hostname-override" ``` + **Remediation Method 2:** - If using executable arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node - and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + ``` --hostname-override ``` + **For all remediations:** Based on your system, restart the `kubelet` service and check status + ``` systemctl daemon-reload systemctl restart kubelet.service systemctl status kubelet -l ``` - impact: > - Some cloud providers may require this flag to ensure that hostname matches - names issued - by the cloud provider. In these environments, this recommendation should not apply. + impact: |- + Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. In these environments, this recommendation should not apply. default_value: | See the Amazon EKS documentation for the default value. - references: | - 1. https://kubernetes.io/docs/admin/kubelet/ - 2. https://github.com/kubernetes/kubernetes/issues/22063 - 3. 
https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/ + references: + - https://kubernetes.io/docs/admin/kubelet/ + - https://github.com/kubernetes/kubernetes/issues/22063 + - https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/ section: Kubelet - version: "1.0" + version: '1.0' tags: - - CIS - - EKS - - CIS 3.2.8 - - Kubelet + - CIS + - CIS_EKS + - CIS 3.2.8 + - Kubelet benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_3_2_9/data.yaml b/bundle/compliance/cis_eks/rules/cis_3_2_9/data.yaml index e4b5f658cd..0469fc4860 100644 --- a/bundle/compliance/cis_eks/rules/cis_3_2_9/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_3_2_9/data.yaml @@ -1,109 +1,110 @@ metadata: - id: d4eef2c6-599a-57d1-99dd-490d3745161d - name: Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated) - profile_applicability: | - * Level 2 - description: > - Security relevant information should be captured. The `--eventRecordQPS` flag - on the - Kubelet can be used to limit the rate at which events are gathered. Setting this too low - could result in relevant events not being logged, however the unlimited setting of `0` could - result in a denial of service on the kubelet. - rationale: > + id: 7b8b0bbb-e4ca-5279-a60a-7e1957f1c0ee + name: |- + Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture + rule_number: 3.2.9 + profile_applicability: '* Level 2' + description: |- + Security relevant information should be captured. + The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. + Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet. + rationale: |- It is important to capture all events and not restrict event creation. - Events are an important - source of security information and analytics that ensure that your environment is - consistently monitored using the event data. - audit: | + Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data. + audit: |- **Audit Method 1:** - If using a Kubelet configuration file, check that there is an entry for `eventRecordQPS` set to `5` - or a value equal to or greater than 0. + + If using a Kubelet configuration file, check that there is an entry for `eventRecordQPS` set to `5` or a value equal to or greater than 0. + First, SSH to the relevant node: + Run the following command on each node to find the appropriate Kubelet config file: + ``` ps -ef | grep kubelet ``` - The output of the above command should return something similar to `--config` - `/etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet - config file. + The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file. + Open the Kubelet config file: ``` cat /etc/kubernetes/kubelet/kubelet-config.json ``` - Review the value set for the `--eventRecordQPS` argument and determine whether this has - been set to an appropriate level for the cluster. The value of `0` can be used to ensure that all - events are captured. 
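For instance, the configured value can be read from the example config path with `jq` (illustrative only):

```bash
# Print eventRecordQPS; null means the key is absent and the kubelet
# default (5) applies.
sudo jq '.eventRecordQPS' /etc/kubernetes/kubelet/kubelet-config.json
# 0 captures every event (with some denial-of-service risk); small positive
# values rate-limit event creation. Judge the output against your cluster's
# monitoring needs.
```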
- If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file - specified by `--config` and review the value in this location. + + Review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. + The value of `0` can be used to ensure that all events are captured. + + If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location. + **Audit Method 2:** - If using the api configz endpoint consider searching for the status of `eventRecordQPS` by - extracting the live configuration from the nodes running kubelet. - Set the local proxy port and the following variables and provide proxy port number and - node name; + + If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet. + + Set the local proxy port and the following variables and provide proxy port number and node name; `HOSTNAME_PORT="localhost-and-port-number"` - `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of` - `"kubectl get nodes"` + `NODE_NAME="The-Name-Of-Node-To-Extract-Configuration" from the output of "kubectl get nodes"` ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` - remediation: | + remediation: |- **Remediation Method 1:** - If modifying the Kubelet config file, edit the kubelet-config.json file - `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a - value greater or equal to 0 + + If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0 + ``` "eventRecordQPS": 5 ``` + **Remediation Method 2:** - If using executable arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node - and add the below parameter at the end of the `KUBELET_ARGS` variable string. + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string. + ``` --eventRecordQPS=5 ``` + **Remediation Method 3:** - If using the api configz endpoint consider searching for the status of `"eventRecordQPS"` by - extracting the live configuration from the nodes running kubelet. - **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a Live - Cluster, and then rerun the curl statement from audit process to check for kubelet - configuration changes + + If using the api configz endpoint consider searching for the status of `"eventRecordQPS"` by extracting the live configuration from the nodes running kubelet. 
+ + **See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes ``` kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) - export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from - "kubectl get nodes") + export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" ``` + **For all three remediations:** Based on your system, restart the `kubelet` service and check status + ``` systemctl daemon-reload systemctl restart kubelet.service systemctl status kubelet -l ``` - impact: > - Setting this parameter to `0` could result in a denial of service condition - due to excessive - events being created. The cluster's event processing and storage systems should be scaled - to handle expected event loads. + impact: |- + Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads. default_value: | See the Amazon EKS documentation for the default value. - references: > - 1. https://kubernetes.io/docs/admin/kubelet/ - 2. https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go - 3. https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/ + references: + - https://kubernetes.io/docs/admin/kubelet/ + - https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go + - https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/ section: Kubelet - version: "1.0" + version: '1.0' tags: - - CIS - - EKS - - CIS 3.2.9 - - Kubelet + - CIS + - CIS_EKS + - CIS 3.2.9 + - Kubelet benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_4_2_1/data.yaml b/bundle/compliance/cis_eks/rules/cis_4_2_1/data.yaml index 0c12d5a553..3b2d2aaf75 100644 --- a/bundle/compliance/cis_eks/rules/cis_4_2_1/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_4_2_1/data.yaml @@ -1,38 +1,40 @@ metadata: - id: 1319beca-0436-5d6e-8d4a-1ee1bea35311 - name: Minimize the admission of privileged containers (Automated) - profile_applicability: | - * Level 1 - description: | - Do not generally permit containers to be run with the - `securityContext.privileged` flag set to true. - rationale: > - Privileged containers have access to all Linux Kernel capabilities and - devices. A container - running with full privileges can do almost everything that the host can do. This flag exists - to allow special use-cases, like manipulating the network stack and accessing devices. - There should be at least one PodSecurityPolicy (PSP) defined which does not permit - privileged containers. - If you need to run privileged containers, this should be defined in a separate PSP and you - should carefully check RBAC controls to ensure that only limited service accounts and - users are given permission to access that PSP. 
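On clusters that still serve the PodSecurityPolicy API, a one-line overview can complement the per-policy checks below (a sketch, not part of the rule data):

```bash
# List every PSP together with its privileged setting.
kubectl get psp -o custom-columns='NAME:.metadata.name,PRIVILEGED:.spec.privileged'
# At least one policy should show <none> or false in the PRIVILEGED column.
```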
- audit: | + id: 9be37569-9f32-51b7-978c-556a2d7d207a + name: Minimize the admission of privileged containers + rule_number: 4.2.1 + profile_applicability: '* Level 1' + description: |- + Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`. + rationale: |- + Privileged containers have access to all Linux Kernel capabilities and devices. + A container running with full privileges can do almost everything that the host can do. + This flag exists to allow special use-cases, like manipulating the network stack and accessing devices. + + + There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers. + + + If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP. + audit: |- Get the set of PSPs with the following command: + ``` kubectl get psp ``` + For each PSP, check whether privileged is enabled: + ``` kubectl get psp -o json ``` + Verify that there is at least one PSP which does not return `true`. + `kubectl get psp -o=jsonpath='{.spec.privileged}'` - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - `.spec.privileged` field is omitted or set to `false`. - impact: > - Pods defined with `spec.containers[].securityContext.privileged: true` will - not be permitted. + remediation: |- + Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is omitted or set to `false`. + impact: 'Pods defined with `spec.containers[].securityContext.privileged: true` + will not be permitted.' default_value: | By default, when you provision an EKS cluster, a pod security policy called eks.privileged is automatically created. The manifest for that policy appears below: @@ -71,17 +73,17 @@ metadata: volumes: - '*' ``` - references: | - 1. https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies - 2. https://aws.github.io/aws-eks-best-practices/pods/#restrict-the-containers-that-can-run-as-privileged + references: + - https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies + - https://aws.github.io/aws-eks-best-practices/pods/#restrict-the-containers-that-can-run-as-privileged section: Pod Security Policies - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 4.2.1 - Pod Security Policies benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_4_2_2/data.yaml b/bundle/compliance/cis_eks/rules/cis_4_2_2/data.yaml index e447a82e8d..0cd93adfa1 100644 --- a/bundle/compliance/cis_eks/rules/cis_4_2_2/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_4_2_2/data.yaml @@ -1,49 +1,48 @@ metadata: - id: 29f995c1-1553-5200-9c08-530032d4ed1d - name: Minimize the admission of containers wishing to share the hostprocess ID namespace (Automated) - profile_applicability: | - * Level 1 - description: | - Do not generally permit containers to be run with the `hostPID` flag set to - `true`. - rationale: > - A container running in the host's PID namespace can inspect processes - running outside the - container. If the container also has access to ptrace capabilities this can be used to escalate - privileges outside of the container. 
- There should be at least one PodSecurityPolicy (PSP) defined which does not permit - containers to share the host PID namespace. - If you need to run containers which require hostPID, this should be defined in a separate - PSP and you should carefully check RBAC controls to ensure that only limited service - accounts and users are given permission to access that PSP. - audit: | + id: f7c69dc3-fbdd-5a04-b4fe-38550e586f7e + name: Minimize the admission of containers wishing to share the host process ID + namespace + rule_number: 4.2.2 + profile_applicability: '* Level 1' + description: Do not generally permit containers to be run with the `hostPID` flag + set to true. + rationale: |- + A container running in the host's PID namespace can inspect processes running outside the container. + If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container. + + There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace. + + If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP. + audit: |- Get the set of PSPs with the following command: + ``` kubectl get psp ``` + For each PSP, check whether privileged is enabled: + ``` kubectl get psp -o=jsonpath='{.spec.hostPID}' ``` + Verify that there is at least one PSP which does not return true. - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - `.spec.hostPID` field is omitted or set to false. - impact: | - Pods defined with `spec.hostPID: true` will not be permitted unless they are - run under a specific PSP. + remediation: |- + Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false. + impact: |- + Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP. default_value: | By default, PodSecurityPolicies are not defined. - references: | - 1. https://kubernetes.io/docs/concepts/policy/pod-security-policy + references: + - https://kubernetes.io/docs/concepts/policy/pod-security-policy section: Pod Security Policies - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 4.2.2 - Pod Security Policies benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_4_2_3/data.yaml b/bundle/compliance/cis_eks/rules/cis_4_2_3/data.yaml index e1fa5037ce..11c7f17c47 100644 --- a/bundle/compliance/cis_eks/rules/cis_4_2_3/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_4_2_3/data.yaml @@ -1,48 +1,46 @@ metadata: - id: 1b727ec0-12aa-5598-ae7b-3123f5d76610 - name: Minimize the admission of containers wishing to share the hostIPC namespace (Automated) - profile_applicability: | - * Level 1 - description: | - Do not generally permit containers to be run with the hostIPC flag set to - true. - rationale: > - A container running in the host's IPC namespace can use IPC to interact with - processes - outside the container. - There should be at least one PodSecurityPolicy (PSP) defined which does not permit - containers to share the host IPC namespace. 
- If you have a requirement to containers which require hostIPC, this should be defined in a - separate PSP and you should carefully check RBAC controls to ensure that only limited - service accounts and users are given permission to access that PSP. - audit: | + id: c44797fd-3b88-51d4-b4e7-164695073447 + name: Minimize the admission of containers wishing to share the host IPC namespace + rule_number: 4.2.3 + profile_applicability: '* Level 1' + description: Do not generally permit containers to be run with the `hostIPC` flag + set to true. + rationale: |- + A container running in the host's IPC namespace can use IPC to interact with processes outside the container. + + There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace. + + If you have a requirement to containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP. + audit: |- Get the set of PSPs with the following command: + ``` kubectl get psp ``` + For each PSP, check whether privileged is enabled: + ``` kubectl get psp -o=jsonpath='{.spec.hostIPC}' ``` + Verify that there is at least one PSP which does not return true. - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - `.spec.hostIPC` field is omitted or set to false. - impact: | - Pods defined with `spec.hostIPC: true` will not be permitted unless they are - run under a specific PSP. + remediation: |- + Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false. + impact: |- + Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP. default_value: | By default, PodSecurityPolicies are not defined. - references: | - 1. https://kubernetes.io/docs/concepts/policy/pod-security-policy + references: + - https://kubernetes.io/docs/concepts/policy/pod-security-policy section: Pod Security Policies - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 4.2.3 - Pod Security Policies benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_4_2_4/data.yaml b/bundle/compliance/cis_eks/rules/cis_4_2_4/data.yaml index 9f1195ddad..c425a1838c 100644 --- a/bundle/compliance/cis_eks/rules/cis_4_2_4/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_4_2_4/data.yaml @@ -1,48 +1,46 @@ metadata: - id: 4b4dc57c-c46d-5db2-822d-ad702b0dd1d1 - name: Minimize the admission of containers wishing to share the host network namespace (Automated) - profile_applicability: | - * Level 1 - description: | - Do not generally permit containers to be run with the `hostNetwork` flag set - to true. - rationale: | - A container running in the host's network namespace could access the local - loopback - device, and could access network traffic to and from other pods. - There should be at least one PodSecurityPolicy (PSP) defined which does not permit - containers to share the host network namespace. - If you have need to run containers which require hostNetwork, this should be defined in a - separate PSP and you should carefully check RBAC controls to ensure that only limited - service accounts and users are given permission to access that PSP. 
- audit: | + id: fca03b1c-e00c-5552-84f7-5be94cbfc600 + name: Minimize the admission of containers wishing to share the host network namespace + rule_number: 4.2.4 + profile_applicability: '* Level 1' + description: Do not generally permit containers to be run with the `hostNetwork` + flag set to true. + rationale: |- + A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods. + + There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace. + + If you have need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP. + audit: |- Get the set of PSPs with the following command: + ``` kubectl get psp ``` + For each PSP, check whether privileged is enabled: + ``` kubectl get psp -o=jsonpath='{.spec.hostNetwork}' ``` + Verify that there is at least one PSP which does not return true. - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - `.spec.hostNetwork` field is omitted or set to false. - impact: > - Pods defined with `spec.hostNetwork: true` will not be permitted unless they - are run under a specific PSP. + remediation: |- + Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false. + impact: |- + Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP. default_value: | By default, PodSecurityPolicies are not defined. - references: | - 1. https://kubernetes.io/docs/concepts/policy/pod-security-policy + references: + - https://kubernetes.io/docs/concepts/policy/pod-security-policy section: Pod Security Policies - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 4.2.4 - Pod Security Policies benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_4_2_5/data.yaml b/bundle/compliance/cis_eks/rules/cis_4_2_5/data.yaml index c62795ab26..a4a8a58e08 100644 --- a/bundle/compliance/cis_eks/rules/cis_4_2_5/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_4_2_5/data.yaml @@ -1,50 +1,48 @@ metadata: - id: 2eaebbf1-4cd6-5e02-9f8d-fc2be36f3c86 - name: Minimize the admission of containers with allowPrivilegeEscalation (Automated) - profile_applicability: | - * Level 1 - description: | - Do not generally permit containers to be run with the - `allowPrivilegeEscalation` flag set to true. - rationale: > - A container running with the `allowPrivilegeEscalation` flag set to `true` may - have - processes that can gain more privileges than their parent. - There should be at least one PodSecurityPolicy (PSP) defined which does not permit - containers to allow privilege escalation. The option exists (and is defaulted to true) to - permit setuid binaries to run. - If you have need to run containers which use setuid binaries or require privilege escalation, - this should be defined in a separate PSP and you should carefully check RBAC controls to - ensure that only limited service accounts and users are given permission to access that - PSP. 
- audit: | + id: d6fa5f41-3671-563a-ba35-a7494223273a + name: Minimize the admission of containers with allowPrivilegeEscalation + rule_number: 4.2.5 + profile_applicability: '* Level 1' + description: Do not generally permit containers to be run with the `allowPrivilegeEscalation` + flag set to true. + rationale: |- + A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent. + + There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. + The option exists (and is defaulted to true) to permit setuid binaries to run. + + + If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP. + audit: |- Get the set of PSPs with the following command: + ``` kubectl get psp ``` + For each PSP, check whether privileged is enabled: + ``` kubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}' ``` + Verify that there is at least one PSP which does not return true. - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - `.spec.allowPrivilegeEscalation` field is omitted or set to false. - impact: > - Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted - unless they are run under a specific PSP. + remediation: |- + Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false. + impact: |- + Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP. default_value: | By default, PodSecurityPolicies are not defined. - references: | - 1. https://kubernetes.io/docs/concepts/policy/pod-security-policy + references: + - https://kubernetes.io/docs/concepts/policy/pod-security-policy section: Pod Security Policies - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 4.2.5 - Pod Security Policies benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_4_2_6/data.yaml b/bundle/compliance/cis_eks/rules/cis_4_2_6/data.yaml index edfcfc57bc..70ee260056 100644 --- a/bundle/compliance/cis_eks/rules/cis_4_2_6/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_4_2_6/data.yaml @@ -1,50 +1,48 @@ metadata: - id: dcccab00-07f8-55e8-bfa8-0f82d362e4ce - name: Minimize the admission of root containers (Automated) - profile_applicability: | - * Level 2 - description: | - Do not generally permit containers to be run as the root user. - rationale: > - Containers may run as any Linux user. Containers which run as the root user, - whilst - constrained by Container Runtime security features still have a escalated likelihood of - container breakout. + id: 77bc9d3b-7f54-5149-8430-9c0222184f32 + name: Minimize the admission of root containers + rule_number: 4.2.6 + profile_applicability: '* Level 2' + description: Do not generally permit containers to be run as the root user. + rationale: |- + Containers may run as any Linux user. + Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout. 
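The host namespace and privilege escalation rules above (4.2.2 through 4.2.5) all map to boolean fields on the same PSP spec. A hedged sketch, with an assumed policy name and the other required fields kept permissive:

```
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted-host-isolation   # illustrative name
spec:
  hostPID: false                    # rule 4.2.2: no sharing of the host PID namespace
  hostIPC: false                    # rule 4.2.3: no sharing of the host IPC namespace
  hostNetwork: false                # rule 4.2.4: no sharing of the host network namespace
  allowPrivilegeEscalation: false   # rule 4.2.5: no privilege escalation
  seLinux:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
```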
+ Ideally, all containers should run as a defined non-UID 0 user. - There should be at least one PodSecurityPolicy (PSP) defined which does not permit root - users in a container. - If you need to run root containers, this should be defined in a separate PSP and you should - carefully check RBAC controls to ensure that only limited service accounts and users are - given permission to access that PSP. - audit: | + + There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container. + + + If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP. + audit: |- Get the set of PSPs with the following command: + ``` kubectl get psp ``` + For each PSP, check whether running containers as root is enabled: + ``` kubectl get psp -o=jsonpath='{.spec.runAsUser.rule}' ``` - Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the - range of UIDs not including 0. - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of - UIDs not including 0. - impact: | - Pods with containers which run as the root user will not be permitted. + + Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0. + remediation: |- + Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0. + impact: Pods with containers which run as the root user will not be permitted. default_value: | By default, PodSecurityPolicies are not defined. - references: | - 1. https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies + references: + - https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies section: Pod Security Policies - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 4.2.6 - Pod Security Policies benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_4_2_7/data.yaml b/bundle/compliance/cis_eks/rules/cis_4_2_7/data.yaml index 88068f8330..949d1ca369 100644 --- a/bundle/compliance/cis_eks/rules/cis_4_2_7/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_4_2_7/data.yaml @@ -1,52 +1,50 @@ metadata: - id: a3cd45ae-ba6b-564a-bd85-810efbbe5cea - name: Minimize the admission of containers with the NET_RAW capability (Automated) - profile_applicability: | - * Level 1 - description: | - Do not generally permit containers with the potentially dangerous NET_RAW + id: 13c821af-7d0a-5269-855d-2a87134ed4ee + name: Minimize the admission of containers with the NET_RAW capability + rule_number: 4.2.7 + profile_applicability: '* Level 1' + description: Do not generally permit containers with the potentially dangerous NET_RAW capability. - rationale: > - Containers run with a default set of capabilities as assigned by the - Container Runtime. By - default this can include potentially dangerous capabilities. With Docker as the container - runtime the NET_RAW capability is enabled which may be misused by malicious - containers. 
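For rule 4.2.6 the relevant control is the `runAsUser` stanza; a minimal sketch (policy name assumed) that rejects containers running as UID 0:

```
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted-nonroot          # illustrative name
spec:
  runAsUser:
    rule: MustRunAsNonRoot          # rule 4.2.6: containers may not run as root
    # alternatively: rule: MustRunAs with a UID range that excludes 0
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
```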
+ rationale: |- + Containers run with a default set of capabilities as assigned by the Container Runtime. + By default this can include potentially dangerous capabilities. + With Docker as the container runtime the NET_RAW capability is enabled which may be misused by malicious containers. + Ideally, all containers should drop this capability. - There should be at least one PodSecurityPolicy (PSP) defined which prevents containers - with the NET_RAW capability from launching. - If you need to run containers with this capability, this should be defined in a separate PSP - and you should carefully check RBAC controls to ensure that only limited service accounts - and users are given permission to access that PSP. - audit: | + + There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with the NET_RAW capability from launching. + + If you need to run containers with this capability, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP. + audit: |- Get the set of PSPs with the following command: + ``` kubectl get psp ``` + For each PSP, check whether NET_RAW is disabled: + ``` kubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}' ``` + Verify that there is at least one PSP which returns NET_RAW or ALL. - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - `.spec.requiredDropCapabilities` is set to include either `NET_RAW` or `ALL`. - impact: | - Pods with containers which run with the NET_RAW capability will not be - permitted. + remediation: |- + Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.requiredDropCapabilities` is set to include either `NET_RAW` or `ALL`. + impact: Pods with containers which run with the NET_RAW capability will not be permitted. default_value: | By default, PodSecurityPolicies are not defined. - references: | - 1. https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies - 2. https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/ + references: + - https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies + - https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/ section: Pod Security Policies - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 4.2.7 - Pod Security Policies benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_4_2_8/data.yaml b/bundle/compliance/cis_eks/rules/cis_4_2_8/data.yaml index cd78694768..73fdc9b097 100644 --- a/bundle/compliance/cis_eks/rules/cis_4_2_8/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_4_2_8/data.yaml @@ -1,48 +1,42 @@ metadata: - id: a7e49914-1cab-599f-8d8a-bce7d808dc66 - name: Minimize the admission of containers with added capabilities (Automated) - profile_applicability: | - * Level 1 - description: | - Do not generally permit containers with capabilities assigned beyond the - default set. - rationale: > - Containers run with a default set of capabilities as assigned by the - Container Runtime. - Capabilities outside this set can be added to containers which could expose them to risks of - container breakout attacks. 
- There should be at least one PodSecurityPolicy (PSP) defined which prevents containers - with capabilities beyond the default set from launching. - If you need to run containers with additional capabilities, this should be defined in a - separate PSP and you should carefully check RBAC controls to ensure that only limited - service accounts and users are given permission to access that PSP. - audit: | + id: 9870ae7a-8c40-5f47-a482-355a720b01db + name: Minimize the admission of containers with added capabilities + rule_number: 4.2.8 + profile_applicability: '* Level 1' + description: Do not generally permit containers with capabilities assigned beyond + the default set. + rationale: |- + Containers run with a default set of capabilities as assigned by the Container Runtime. + Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks. + + There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching. + + If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP. + audit: |- Get the set of PSPs with the following command: + ``` kubectl get psp ``` - Verify that there are no PSPs present which have `allowedCapabilities` set to anything - other than an empty array. - remediation: | - Ensure that `allowedCapabilities` is not present in PSPs for the cluster - unless it is set to an - empty array. - impact: > - Pods with containers which require capabilities outwith the default set will - not be permitted. + + Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array. + remediation: |- + Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array. + impact: Pods with containers which require capabilities outwith the default set + will not be permitted. default_value: | By default, PodSecurityPolicies are not defined. - references: | - 1. https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies - 2. https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/ + references: + - https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies + - https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/ section: Pod Security Policies - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 4.2.8 - Pod Security Policies benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_4_2_9/data.yaml b/bundle/compliance/cis_eks/rules/cis_4_2_9/data.yaml index 4b686f67c5..570d7a2733 100644 --- a/bundle/compliance/cis_eks/rules/cis_4_2_9/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_4_2_9/data.yaml @@ -1,46 +1,43 @@ metadata: - id: 26dba6c5-6823-56ac-8ea2-40a977bc6a5e - name: Minimize the admission of containers with capabilities assigned (Manual) - profile_applicability: | - * Level 2 - description: | - Do not generally permit containers with capabilities - rationale: > - Containers run with a default set of capabilities as assigned by the - Container Runtime. 
+   id: b28f5d7c-3db2-58cf-8704-b8e922e236b7
+   name: Minimize the admission of containers with capabilities assigned
+   rule_number: 4.2.9
+   profile_applicability: '* Level 2'
+   description: Do not generally permit containers with capabilities
+   rationale: |-
+     Containers run with a default set of capabilities as assigned by the Container Runtime.
      Capabilities are parts of the rights generally granted on a Linux system to the root user.
-     In many cases applications running in containers do not require any capabilities to operate,
-     so from the perspective of the principal of least privilege use of capabilities should be
-     minimized.
-   audit: |
+
+     In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege use of capabilities should be minimized.
+   audit: |-
      Get the set of PSPs with the following command:
+
      ```
      kubectl get psp
      ```
+
      For each PSP, check whether capabilities have been forbidden:
+
      ```
      kubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'
      ```
-   remediation: >
-     Review the use of capabilities in applications running on your cluster.
-     Where a namespace
-     contains applications which do not require any Linux capabilities to operate consider adding
-     a PSP which forbids the admission of containers which do not drop all capabilities.
-   impact: |
-     Pods with containers require capabilities to operate will not be permitted.
+   remediation: |-
+     Review the use of capabilities in applications running on your cluster.
+     Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.
+   impact: Pods with containers which require capabilities to operate will not be permitted.
    default_value: |
      By default, PodSecurityPolicies are not defined.
-   references: |
-     1. https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies
-     2. https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/
+   references:
+   - https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies
+   - https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/
    section: Pod Security Policies
-   version: "1.0"
+   version: '1.0'
    tags:
    - CIS
-   - EKS
+   - CIS_EKS
    - CIS 4.2.9
    - Pod Security Policies
    benchmark:
-     name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark
+     name: CIS Amazon Elastic Kubernetes Service (EKS)
      version: v1.0.1
      id: cis_eks
diff --git a/bundle/compliance/cis_eks/rules/cis_5_1_1/data.yaml b/bundle/compliance/cis_eks/rules/cis_5_1_1/data.yaml
index 8b99db0740..5de1566784 100644
--- a/bundle/compliance/cis_eks/rules/cis_5_1_1/data.yaml
+++ b/bundle/compliance/cis_eks/rules/cis_5_1_1/data.yaml
@@ -1,66 +1,63 @@ metadata:
-   id: a8592cf4-d913-5906-8499-dc1d5a410e2f
-   name: Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider (Manual)
-   profile_applicability: |
-     * Level 1
-   description: |
-     Scan images being deployed to Amazon EKS for vulnerabilities.
-   rationale: >
-     Vulnerabilities in software packages can be exploited by hackers or
-     malicious users to
-     obtain unauthorized access to local cloud resources. Amazon ECR and other third party
-     products allow images to be scanned for known vulnerabilities.
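Rules 4.2.7 through 4.2.9 all concern Linux capabilities and can be illustrated with a single sketch that drops everything and adds nothing; the name is assumed and the other required fields are again left permissive:

```
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted-capabilities     # illustrative name
spec:
  requiredDropCapabilities:
    - ALL                           # dropping ALL also covers NET_RAW (rules 4.2.7 and 4.2.9)
  # allowedCapabilities is deliberately omitted (rule 4.2.8), so no capabilities can be added
  seLinux:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
```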
- audit: > - Please follow AWS ECS or your 3rd party image scanning provider's guidelines - for enabling Image Scanning. - remediation: | + id: d062432b-6804-5626-9953-1de08c4f8ccb + name: Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third + party provider + rule_number: 5.1.1 + profile_applicability: '* Level 1' + description: Scan images being deployed to Amazon EKS for vulnerabilities. + rationale: |- + Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. + Amazon ECR and other third party products allow images to be scanned for known vulnerabilities. + audit: |- + Please follow AWS ECS or your 3rd party image scanning provider's guidelines for enabling Image Scanning. + remediation: |- To utilize AWS ECR for Image scanning please follow the steps below: + To create a repository configured for scan on push (AWS CLI) ``` - aws ecr create-repository --repository-name $REPO_NAME --image-scanning- - configuration scanOnPush=true --region $REGION_CODE + aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE ``` + To edit the settings of an existing repository (AWS CLI) ``` - aws ecr put-image-scanning-configuration --repository-name $REPO_NAME -- - image-scanning-configuration scanOnPush=true --region $REGION_CODE + aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE ``` + Use the following steps to start a manual image scan using the AWS Management Console. - 1. Open the Amazon ECR console at - https://console.aws.amazon.com/ecr/repositories. + + 1. Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories. 2. From the navigation bar, choose the Region to create your repository in. 3. In the navigation pane, choose Repositories. 4. On the Repositories page, choose the repository that contains the image to scan. 5. On the Images page, select the image to scan and then choose Scan. - impact: | + impact: |- If you are utilizing AWS ECR - The following are common image scan failures. You can view errors like this in the Amazon - ECR console by displaying the image details or through the API or AWS CLI by using the - DescribeImageScanFindings API. - UnsupportedImageError You may get an UnsupportedImageError error when attempting - to scan an image that was built using an operating system that Amazon ECR doesn't - support image scanning for. Amazon ECR supports package vulnerability scanning for - major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, - Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built - from the Docker scratch image. - An UNDEFINED severity level is returned You may receive a scan finding that has a severity - level of UNDEFINED. The following are the common causes for this: + + The following are common image scan failures. You can view errors like this in the Amazon ECR console by displaying the image details or through the API or AWS CLI by using the DescribeImageScanFindings API. + + UnsupportedImageError + You may get an UnsupportedImageError error when attempting to scan an image that was built using an operating system that Amazon ECR doesn't support image scanning for. 
Amazon ECR supports package vulnerability scanning for major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built from the Docker scratch image. + + An UNDEFINED severity level is returned + You may receive a scan finding that has a severity level of UNDEFINED. The following are the common causes for this: + The vulnerability was not assigned a priority by the CVE source. + The vulnerability was assigned a priority that Amazon ECR did not recognize. - To determine the severity and description of a vulnerability, you can view the CVE directly - from the source. + + To determine the severity and description of a vulnerability, you can view the CVE directly from the source. default_value: | Images are not scanned by Default. - references: | - 1. https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html + references: + - https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html section: Image Registry and Image Scanning - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 5.1.1 - Image Registry and Image Scanning benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_5_3_1/data.yaml b/bundle/compliance/cis_eks/rules/cis_5_3_1/data.yaml index fead4502d9..1bdf0ede63 100644 --- a/bundle/compliance/cis_eks/rules/cis_5_3_1/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_5_3_1/data.yaml @@ -1,55 +1,47 @@ metadata: - id: fa91cef9-6c40-5644-b39d-d17e0f66d507 - name: Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS (Automated) - profile_applicability: | - * Level 1 - description: > - Encrypt Kubernetes secrets, stored in etcd, using secrets encryption feature - during - Amazon EKS cluster creation. - rationale: | + id: 8b6bfb52-9051-5863-baef-2c4eaa6a5eb1 + name: Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) + managed in AWS KMS + rule_number: 5.3.1 + profile_applicability: '* Level 1' + description: |- + Encrypt Kubernetes secrets, stored in etcd, using secrets encryption feature during Amazon EKS cluster creation. + rationale: |- Kubernetes can store secrets that pods can access via a mounted volume. - Today, - Kubernetes secrets are stored with Base64 encoding, but encrypting is the recommended - approach. Amazon EKS clusters version 1.13 and higher support the capability of - encrypting your Kubernetes secrets using AWS Key Management Service (KMS) Customer - Managed Keys (CMK). The only requirement is to enable the encryption provider support - during EKS cluster creation. - Use AWS Key Management Service (KMS) keys to provide envelope encryption of - Kubernetes secrets stored in Amazon EKS. Implementing envelope encryption is - considered a security best practice for applications that store sensitive data and is part of a - defense in depth security strategy. - Application-layer Secrets Encryption provides an additional layer of security for sensitive - data, such as user defined Secrets and Secrets required for the operation of the cluster, - such as service account keys, which are all stored in etcd. - Using this functionality, you can use a key, that you manage in AWS KMS, to encrypt data at - the application layer. This protects against attackers in the event that they manage to gain - access to etcd. 
- audit: | - For Amazon EKS clusters with Secrets Encryption enabled, look for - 'encryptionConfig' - configuration when you run: + Today, Kubernetes secrets are stored with Base64 encoding, but encrypting is the recommended approach. + Amazon EKS clusters version 1.13 and higher support the capability of encrypting your Kubernetes secrets using AWS Key Management Service (KMS) Customer Managed Keys (CMK). + The only requirement is to enable the encryption provider support during EKS cluster creation. + + Use AWS Key Management Service (KMS) keys to provide envelope encryption of Kubernetes secrets stored in Amazon EKS. + Implementing envelope encryption is considered a security best practice for applications that store sensitive data and is part of a defense in depth security strategy. + + Application-layer Secrets Encryption provides an additional layer of security for sensitive data, such as user defined Secrets and Secrets required for the operation of the cluster, such as service account keys, which are all stored in etcd. + + + Using this functionality, you can use a key, that you manage in AWS KMS, to encrypt data at the application layer. + This protects against attackers in the event that they manage to gain access to etcd. + audit: |- + For Amazon EKS clusters with Secrets Encryption enabled, look for 'encryptionConfig' configuration when you run: + ``` aws eks describe-cluster --name="" ``` - remediation: > - Enable 'Secrets Encryption' during Amazon EKS cluster creation as described - in the links - within the 'References' section. - impact : "" + remediation: |- + Enable 'Secrets Encryption' during Amazon EKS cluster creation as described in the links within the 'References' section. + impact: None default_value: | By default, Application-layer Secrets Encryption is not enabled. - references: | - 1. https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html - 2. https://eksworkshop.com/beginner/191_secrets/ + references: + - https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html + - https://eksworkshop.com/beginner/191_secrets/ section: AWS Key Management Service (KMS) - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 5.3.1 - AWS Key Management Service (KMS) benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_5_4_1/data.yaml b/bundle/compliance/cis_eks/rules/cis_5_4_1/data.yaml index ca415a360a..d0e0ae9dc3 100644 --- a/bundle/compliance/cis_eks/rules/cis_5_4_1/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_5_4_1/data.yaml @@ -1,123 +1,112 @@ metadata: - id: f0d65fdb-035c-5e08-b4f8-0a1356c4005c - name: Restrict Access to the Control Plane Endpoint (Manual) - profile_applicability: | - * Level 1 - description: > - Enable Endpoint Private Access to restrict access to the cluster's control - plane to only an allowlist of authorized IPs. - rationale: | - Authorized networks are a way of specifying a restricted range of IP - addresses that are - permitted to access your cluster's control plane. Kubernetes Engine uses both Transport - Layer Security (TLS) and authentication to provide secure access to your cluster's control - plane from the public internet. This provides you the flexibility to administer your cluster - from anywhere; however, you might want to further restrict access to a set of IP addresses - that you control. 
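If the cluster is provisioned with eksctl rather than the raw AWS CLI, the secrets encryption setting from rule 5.3.1 can also be declared in the cluster config file. The sketch below is an assumption about eksctl's config schema rather than text from the benchmark, and the cluster name, region and key ARN are placeholders:

```
# eksctl cluster config: envelope encryption of Kubernetes secrets with a KMS CMK
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
  name: dev-cluster                                              # placeholder name
  region: us-east-1                                              # placeholder region
secretsEncryption:
  keyARN: arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID  # placeholder CMK ARN
```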
You can set this restriction by specifying an authorized network. - Restricting access to an authorized network can provide additional security benefits for - your container cluster, including: - * Better protection from outsider attacks: Authorized networks provide an additional - layer of security by limiting external access to a specific set of addresses you - designate, such as those that originate from your premises. This helps protect access - to your cluster in the case of a vulnerability in the cluster's authentication or - authorization mechanism. - * Better protection from insider attacks: Authorized networks help protect your - cluster from accidental leaks of master certificates from your company's premises. - Leaked certificates used from outside Amazon EC2 and outside the authorized IP - ranges (for example, from addresses outside your company) are still denied access. - audit: | + id: ef45d674-b20a-509a-8d95-15142c89c3d0 + name: Restrict Access to the Control Plane Endpoint + rule_number: 5.4.1 + profile_applicability: '* Level 1' + description: |- + Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs. + rationale: |- + Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. + Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. + This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. + You can set this restriction by specifying an authorized network. + + Restricting access to an authorized network can provide additional security benefits for your container cluster, including: + + - Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. + This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism. + - Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. + Leaked certificates used from outside Amazon EC2 and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access. + audit: |- Input: + ``` aws eks describe-cluster \ - --region \ - --name + --region \ + --name ``` Output: + ``` - ... - "endpointPublicAccess": false, - "endpointPrivateAccess": true, - "publicAccessCidrs": [ - "203.0.113.5/32" - ] - ... + ... + "endpointPublicAccess": false, + "endpointPrivateAccess": true, + "publicAccessCidrs": [ + "203.0.113.5/32" + ] + ... ``` - remediation: | - Complete the following steps using the AWS CLI version 1.18.10 or later. You - can check - your current version with aws --version. To install or upgrade the AWS CLI, see Installing - the AWS CLI. + remediation: |- + Complete the following steps using the AWS CLI version 1.18.10 or later. + You can check your current version with aws --version. + To install or upgrade the AWS CLI, see Installing the AWS CLI. + Update your cluster API server endpoint access with the following AWS CLI command. - Substitute your cluster name and desired endpoint access values. 
If you set - endpointPublicAccess=true, then you can (optionally) enter single CIDR block, or a comma- - separated list of CIDR blocks for publicAccessCidrs. The blocks cannot include reserved - addresses. If you specify CIDR blocks, then the public API server endpoint will only receive - requests from the listed blocks. There is a maximum number of CIDR blocks that you can - specify. For more information, see Amazon EKS Service Quotas. If you restrict access to - your public endpoint using CIDR blocks, it is recommended that you also enable private - endpoint access so that worker nodes and Fargate pods (if you use them) can communicate - with the cluster. Without the private endpoint enabled, your public access endpoint CIDR - sources must include the egress sources from your VPC. For example, if you have a worker - node in a private subnet that communicates to the internet through a NAT Gateway, you - will need to add the outbound IP address of the NAT gateway as part of a whitelisted CIDR - block on your public endpoint. If you specify no CIDR blocks, then the public API server - endpoint receives requests from all (0.0.0.0/0) IP addresses. + Substitute your cluster name and desired endpoint access values. + If you set endpointPublicAccess=true, then you can (optionally) enter single CIDR block, or a comma-separated list of CIDR blocks for publicAccessCidrs. + The blocks cannot include reserved addresses. + If you specify CIDR blocks, then the public API server endpoint will only receive requests from the listed blocks. + There is a maximum number of CIDR blocks that you can specify. + For more information, see Amazon EKS Service Quotas. + If you restrict access to your public endpoint using CIDR blocks, it is recommended that you also enable private endpoint access so that worker nodes and Fargate pods (if you use them) can communicate with the cluster. + Without the private endpoint enabled, your public access endpoint CIDR sources must include the egress sources from your VPC. + For example, if you have a worker node in a private subnet that communicates to the internet through a NAT Gateway, you will need to add the outbound IP address of the NAT gateway as part of a whitelisted CIDR block on your public endpoint. + If you specify no CIDR blocks, then the public API server endpoint receives requests from all (0.0.0.0/0) IP addresses. Note - The following command enables private access and public access from a single IP address - for the API server endpoint. Replace 203.0.113.5/32 with a single CIDR block, or a comma- - separated list of CIDR blocks that you want to restrict network access to. + The following command enables private access and public access from a single IP address for the API server endpoint. + Replace 203.0.113.5/32 with a single CIDR block, or a comma-separated list of CIDR blocks that you want to restrict network access to. 
+ Example command: ``` aws eks update-cluster-config \ - --region region-code \ - --name dev \ - --resources-vpc-config \ - endpointPublicAccess=true, \ - publicAccessCidrs="203.0.113.5/32",\ - endpointPrivateAccess=true + --region region-code \ + --name dev \ + --resources-vpc-config \ + endpointPublicAccess=true, \ + publicAccessCidrs="203.0.113.5/32",\ + endpointPrivateAccess=true ``` + Output: + ``` { - "update": { - "id": "e6f0905f-a5d4-4a2a-8c49-EXAMPLE00000", - "status": "InProgress", - "type": "EndpointAccessUpdate", - "params": [ - { - "type": "EndpointPublicAccess", - "value": "true" - }, - { - "type": "EndpointPrivateAccess", - "value": "true" - }, - { - "type": "publicAccessCidrs", - "value": "[\203.0.113.5/32\"]" - } - ], - "createdAt": 1576874258.137, - "errors": [] - } + "update": { + "id": "e6f0905f-a5d4-4a2a-8c49-EXAMPLE00000", + "status": "InProgress", + "type": "EndpointAccessUpdate", + "params": [ + { + "type": "EndpointPublicAccess", + "value": "true" + }, + { + "type": "EndpointPrivateAccess", + "value": "true" + }, + { + "type": "publicAccessCidrs", + "value": "[\203.0.113.5/32\"]" + } + ], + "createdAt": 1576874258.137, + "errors": [] + } ``` - impact: > - When implementing Endpoint Private Access, be careful to ensure all desired - networks are - on the allowlist (whitelist) to prevent inadvertently blocking external access to your - cluster's control plane. + impact: |- + When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane. default_value: | By default, Endpoint Private Access is disabled. - references: | - 1. https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html + references: + - https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html section: Cluster Networking - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 5.4.1 - Cluster Networking benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_5_4_2/data.yaml b/bundle/compliance/cis_eks/rules/cis_5_4_2/data.yaml index afcdeb6668..126bd106f5 100644 --- a/bundle/compliance/cis_eks/rules/cis_5_4_2/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_5_4_2/data.yaml @@ -1,50 +1,41 @@ metadata: - id: 677697a6-309b-5a91-ba2b-1acd571817a6 - name: Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual) - profile_applicability: | - * Level 2 - description: > - Disable access to the Kubernetes API from outside the node network if it is - not required. - rationale: > - In a private cluster, the master node has two endpoints, a private and - public endpoint. The - private endpoint is the internal IP address of the master, behind an internal load balancer - in the master's VPC network. Nodes communicate with the master using the private - endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the - master's VPC network. - Although Kubernetes API requires an authorized token to perform sensitive actions, a - vulnerability could potentially expose the Kubernetes publically with unrestricted access. - Additionally, an attacker may be able to identify the current cluster and Kubernetes API - version and determine whether it is vulnerable to an attack. 
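The same endpoint restriction that the 5.4.1 remediation applies with `aws eks update-cluster-config` can, as a rough equivalent, be declared at creation time in an eksctl config file; the field names are my assumption about eksctl and the CIDR simply reuses the example value from above:

```
# eksctl cluster config: restrict public API access to an allowlisted CIDR block
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
  name: dev-cluster            # placeholder name
  region: us-east-1            # placeholder region
vpc:
  clusterEndpoints:
    publicAccess: true         # public endpoint stays reachable...
    privateAccess: true        # ...and in-VPC traffic uses the private endpoint
  publicAccessCIDRs:
    - "203.0.113.5/32"         # only this block may reach the public endpoint
```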
Unless required, disabling - public endpoint will help prevent such threats, and require the attacker to be on the - master's VPC network to perform any attack on the Kubernetes API. - audit: "" - remediation: "" - impact: | - Configure the EKS cluster endpoint to be private. See Modifying Cluster - Endpoint Access for - further information on this topic. - 1. Leave the cluster endpoint public and specify which CIDR blocks can communicate - with the cluster endpoint. The blocks are effectively a whitelisted set of public IP - addresses that are allowed to access the cluster endpoint. - 2. Configure public access with a set of whitelisted CIDR blocks and set private - endpoint access to enabled. This will allow public access from a specific range of - public IPs while forcing all network traffic between the kubelets (workers) and the - Kubernetes API through the cross-account ENIs that get provisioned into the cluster - VPC when the control plane is provisioned. + id: 4c198b47-5e4f-559d-be67-cbd5b8c3ebcd + name: Ensure clusters are created with Private Endpoint Enabled and Public Access + Disabled + rule_number: 5.4.2 + profile_applicability: '* Level 2' + description: Disable access to the Kubernetes API from outside the node network + if it is not required. + rationale: |- + In a private cluster, the master node has two endpoints, a private and public endpoint. + The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's VPC network. + Nodes communicate with the master using the private endpoint. + The public endpoint enables the Kubernetes API to be accessed from outside the master's VPC network. + + + Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes publically with unrestricted access. + Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. + Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's VPC network to perform any attack on the Kubernetes API. + audit: nan + remediation: nan + impact: |- + Configure the EKS cluster endpoint to be private. See [Modifying Cluster Endpoint Access](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) for further information on this topic. + + 1. Leave the cluster endpoint public and specify which CIDR blocks can communicate with the cluster endpoint. The blocks are effectively a whitelisted set of public IP addresses that are allowed to access the cluster endpoint. + + 1. Configure public access with a set of whitelisted CIDR blocks and set private endpoint access to enabled. This will allow public access from a specific range of public IPs while forcing all network traffic between the kubelets (workers) and the Kubernetes API through the cross-account ENIs that get provisioned into the cluster VPC when the control plane is provisioned. default_value: | By default, the Private Endpoint is disabled. - references: | - 1. 
https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html + references: + - https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html section: Cluster Networking - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 5.4.2 - Cluster Networking benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_5_4_3/data.yaml b/bundle/compliance/cis_eks/rules/cis_5_4_3/data.yaml index 7e664f5595..3bf1d8e3b2 100644 --- a/bundle/compliance/cis_eks/rules/cis_5_4_3/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_5_4_3/data.yaml @@ -1,37 +1,31 @@ metadata: - id: f026b2aa-c739-5512-8d7a-eaa21de507c6 - name: Ensure clusters are created with Private Nodes (Manual) - profile_applicability: | - * Level 1 - description: > - Disable public IP addresses for cluster nodes, so that they only have - private IP addresses. + id: 76565652-57f3-586c-b702-379e39b65fb5 + name: Ensure clusters are created with Private Nodes + rule_number: 5.4.3 + profile_applicability: '* Level 1' + description: |- + Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses. - rationale: > - Disabling public IP addresses on cluster nodes restricts access to only - internal networks, - forcing attackers to obtain local network access before attempting to compromise the - underlying Kubernetes hosts. - audit: "" - remediation: "" - impact: > - To enable Private Nodes, the cluster has to also be configured with a - private master IP - range and IP Aliasing enabled. - Private Nodes do not have outbound access to the public internet. If you want to provide - outbound Internet access for your private nodes, you can use Cloud NAT or you can - manage your own NAT gateway. + rationale: |- + Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts. + audit: nan + remediation: nan + impact: |- + To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled. + + Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway. default_value: | By default, Private Nodes are disabled. - references: "" + references: + - nan section: Cluster Networking - version: "1.0" + version: '1.0' tags: - CIS - - EKS + - CIS_EKS - CIS 5.4.3 - Cluster Networking benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark + name: CIS Amazon Elastic Kubernetes Service (EKS) version: v1.0.1 id: cis_eks diff --git a/bundle/compliance/cis_eks/rules/cis_5_4_5/data.yaml b/bundle/compliance/cis_eks/rules/cis_5_4_5/data.yaml index 5203ecc41b..b486e60ceb 100644 --- a/bundle/compliance/cis_eks/rules/cis_5_4_5/data.yaml +++ b/bundle/compliance/cis_eks/rules/cis_5_4_5/data.yaml @@ -1,27 +1,25 @@ - metadata: - id: 0b5e340f-9120-515a-b71a-72e43ed24941 - name: Encrypt traffic to HTTPS load balancers with TLS certificates (Manual) - profile_applicability: | - * Level 2 - description: > - Encrypt traffic to HTTPS load balancers using TLS certificates. 
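Rules 5.4.2 and 5.4.3 above (private endpoint only, and nodes without public IPs) can be sketched together in one eksctl config; as before, the names are placeholders and the layout is an assumption about eksctl, not benchmark text:

```
# eksctl cluster config: private control plane endpoint and worker nodes with private IPs only
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
  name: private-cluster        # placeholder name
  region: us-east-1            # placeholder region
vpc:
  clusterEndpoints:
    publicAccess: false        # rule 5.4.2: public access disabled
    privateAccess: true        # rule 5.4.2: private endpoint enabled
managedNodeGroups:
  - name: workers              # placeholder nodegroup name
    desiredCapacity: 3
    privateNetworking: true    # rule 5.4.3: nodes receive only private IP addresses
```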
- rationale: > - Encrypting traffic between users and your Kubernetes workload is fundamental to - protecting data sent over the web. - audit: "" - remediation: "" - impact: "" - default_value: "" - references: | - 1. https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/data-protection.html - section: Cluster Networking - version: "1.0" - tags: - - CIS - - EKS - - CIS 5.4.5 - - Cluster Networking - benchmark: - name: CIS Amazon Elastic Kubernetes Service (EKS) Benchmark - version: v1.0.1 - id: cis_eks +metadata: + id: 2b7f6d8c-48fc-58a8-9f38-0ea23b4477ca + name: Encrypt traffic to HTTPS load balancers with TLS certificates + rule_number: 5.4.5 + profile_applicability: '* Level 2' + description: Encrypt traffic to HTTPS load balancers using TLS certificates. + rationale: |- + Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web. + audit: nan + remediation: nan + impact: None + default_value: '' + references: + - https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/data-protection.html + section: Cluster Networking + version: '1.0' + tags: + - CIS + - CIS_EKS + - CIS 5.4.5 + - Cluster Networking + benchmark: + name: CIS Amazon Elastic Kubernetes Service (EKS) + version: v1.0.1 + id: cis_eks diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_1/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_1/data.yaml index d357398c42..75525644d1 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_1/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_1/data.yaml @@ -1,46 +1,44 @@ metadata: - id: 6664c1b8-05f2-5872-a516-4b2c3c36d2d7 - name: Ensure that the API server pod specification file permissions are set to - 644 or more restrictive (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Ensure that the API server pod specification file has permissions of `644` - or more restrictive. - rationale: > - The API server pod specification file controls various parameters that set - the behavior of the API server. You should restrict its file permissions to - maintain the integrity of the file. The file should be writable by only the - administrators on the system. - audit: | - Run the below command (based on the file location on your system) on the - control plane node. + id: 56f66a4d-0bec-5ec5-bc6d-2d0ae33a0635 + name: Ensure that the API server pod specification file permissions are set to 644 + or more restrictive + rule_number: 1.1.1 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the API server pod specification file has permissions of + `644` or more restrictive. + rationale: |- + The API server pod specification file controls various parameters that set the behavior of the API server. + You should restrict its file permissions to maintain the integrity of the file. + The file should be writable by only the administrators on the system. + audit: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` stat -c %a /etc/kubernetes/manifests/kube-apiserver.yaml ``` + Verify that the permissions are `644` or more restrictive. - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. 
For example, + ``` chmod 644 /etc/kubernetes/manifests/kube-apiserver.yaml ``` - impact: | - None + impact: None default_value: | By default, the `kube-apiserver.yaml` file has permissions of `640`. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.1 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.1 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_11/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_11/data.yaml index 190a814558..cfb336ac8b 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_11/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_11/data.yaml @@ -1,55 +1,53 @@ metadata: - id: 3e07507c-31ba-5d01-800f-cfb8c0a09787 - name: Ensure that the etcd data directory permissions are set to 700 or more - restrictive (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Ensure that the etcd data directory has permissions of `700` or more + id: a1bfd354-721a-5038-a7f3-0a38e9232304 + name: Ensure that the etcd data directory permissions are set to 700 or more restrictive + rule_number: 1.1.11 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the etcd data directory has permissions of `700` or more restrictive. - rationale: > - etcd is a highly-available key-value store used by Kubernetes deployments - for persistent storage of all of its REST API objects. This data directory - should be protected from any unauthorized reads or writes. It should not be - readable or writable by any group members or the world. - audit: | - On the etcd server node, get the etcd data directory, passed as an argument - `--data-dir`, - from the below command: + rationale: |- + etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. + This data directory should be protected from any unauthorized reads or writes. + It should not be readable or writable by any group members or the world. + audit: |- + On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: + ``` ps -ef | grep etcd ``` - Run the below command (based on the etcd data directory found above). For example, + + Run the below command (based on the etcd data directory found above). + For example, + ``` stat -c %a /var/lib/etcd ``` + Verify that the permissions are `700` or more restrictive. - remediation: | - On the etcd server node, get the etcd data directory, passed as an argument - `--data-dir`, - from the below command: + remediation: |- + On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: ``` ps -ef | grep etcd ``` - Run the below command (based on the etcd data directory found above). For example, + Run the below command (based on the etcd data directory found above). + For example, ``` chmod 700 /var/lib/etcd ``` - impact: | - None + impact: None default_value: | By default, etcd data directory has permissions of `755`. - references: | - 1. 
[https://coreos.com/etcd/docs/latest/op-guide/configuration.html#data-dir](https://coreos.com/etcd/docs/latest/op-guide/configuration.html#data-dir) - 2. [https://kubernetes.io/docs/admin/etcd/](https://kubernetes.io/docs/admin/etcd/) + references: + - https://coreos.com/etcd/docs/latest/op-guide/configuration.html#data-dir + - https://kubernetes.io/docs/admin/etcd/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.11 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.11 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_12/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_12/data.yaml index 1ba5252ae4..3a1008ed2a 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_12/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_12/data.yaml @@ -1,53 +1,52 @@ metadata: - id: 1a5c8f4f-fc22-5c1e-b7e1-affa0f03edf0 - name: Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Ensure that the etcd data directory ownership is set to `etcd:etcd`. - rationale: > - etcd is a highly-available key-value store used by Kubernetes deployments - for persistent storage of all of its REST API objects. This data directory - should be protected from any unauthorized reads or writes. It should be - owned by `etcd:etcd`. - audit: | - On the etcd server node, get the etcd data directory, passed as an argument - `--data-dir`, - from the below command: + id: 29b0f497-6bbf-5991-8173-279b97004fd6 + name: Ensure that the etcd data directory ownership is set to etcd:etcd + rule_number: 1.1.12 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the etcd data directory ownership is set to `etcd:etcd`. + rationale: |- + etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. + This data directory should be protected from any unauthorized reads or writes. + It should be owned by `etcd:etcd`. + audit: |- + On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: + ``` ps -ef | grep etcd ``` - Run the below command (based on the etcd data directory found above). For example, + + Run the below command (based on the etcd data directory found above). + For example, + ``` stat -c %U:%G /var/lib/etcd ``` + Verify that the ownership is set to `etcd:etcd`. - remediation: | - On the etcd server node, get the etcd data directory, passed as an argument - `--data-dir`, - from the below command: + remediation: |- + On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: ``` ps -ef | grep etcd ``` - Run the below command (based on the etcd data directory found above). For example, + Run the below command (based on the etcd data directory found above). + For example, ``` chown etcd:etcd /var/lib/etcd ``` - impact: | - None + impact: None default_value: | By default, etcd data directory ownership is set to `etcd:etcd`. - references: | - 1. [https://coreos.com/etcd/docs/latest/op-guide/configuration.html#data-dir](https://coreos.com/etcd/docs/latest/op-guide/configuration.html#data-dir) - 2. 
[https://kubernetes.io/docs/admin/etcd/](https://kubernetes.io/docs/admin/etcd/) + references: + - https://coreos.com/etcd/docs/latest/op-guide/configuration.html#data-dir + - https://kubernetes.io/docs/admin/etcd/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.12 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.12 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_13/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_13/data.yaml index d848a0e564..92e4fb8fcf 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_13/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_13/data.yaml @@ -1,44 +1,43 @@ metadata: - id: fa54a7f6-30ac-5c16-9f7c-a156c78cf96a - name: Ensure that the admin.conf file permissions are set to 600 (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Ensure that the `admin.conf` file has permissions of `600`. - restrictive. - rationale: > + id: 33d1b3cc-7d61-5940-9b67-bd3bdf1372c0 + name: Ensure that the admin.conf file permissions are set to 600 + rule_number: 1.1.13 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the `admin.conf` file has permissions of `600`. + rationale: |- The `admin.conf` is the administrator kubeconfig file defining various settings for the administration of the cluster. This file contains private key and respective certificate allowed to fully manage the cluster. You should restrict its file permissions to maintain the integrity and confidentiality of the file. The file should be readable and writable by only the administrators on the system. - audit: | - Run the following command (based on the file location on your system) on the control plane node. + audit: |- + Run the following command (based on the file location on your system) on the Control Plane node. For example, + ``` stat -c %a /etc/kubernetes/admin.conf ``` + Verify that the permissions are `600` or more restrictive. - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` chmod 600 /etc/kubernetes/admin.conf ``` - impact: | - None. + impact: None. default_value: | By default, `admin.conf` has permissions of `640`. - references: | - 1. 
[https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/) + references: + - https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.13 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.13 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_14/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_14/data.yaml index 5aa8ce5c4d..4f64765456 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_14/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_14/data.yaml @@ -1,43 +1,42 @@ metadata: - id: 272d95bf-8e18-5a8e-b8b0-76d220de9664 - name: Ensure that the admin.conf file ownership is set to root:root (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Ensure that the `admin.conf` file ownership is set to `root:root`. - rationale: > - The `admin.conf` file contains the admin credentials for the cluster. You - should set its file ownership to maintain the integrity of the file. The - file should be owned by root:root. - audit: | - Run the below command (based on the file location on your system) on the - control plane node. + id: e3b1c434-02ca-5c23-83c7-767e692545f9 + name: Ensure that the admin.conf file ownership is set to root:root + rule_number: 1.1.14 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the `admin.conf` file ownership is set to `root:root`. + rationale: |- + The `admin.conf` file contains the admin credentials for the cluster. + You should set its file ownership to maintain the integrity and confidentiality of the file. + The file should be owned by root:root. + audit: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` stat -c %U:%G /etc/kubernetes/admin.conf ``` + Verify that the ownership is set to `root:root`. - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` chown root:root /etc/kubernetes/admin.conf ``` - impact: | - None. + impact: None. default_value: | By default, `admin.conf` file ownership is set to `root:root`. - references: | - 1. 
[https://kubernetes.io/docs/admin/kubeadm/](https://kubernetes.io/docs/admin/kubeadm/) + references: + - https://kubernetes.io/docs/admin/kubeadm/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.14 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.14 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_15/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_15/data.yaml index 5e932711b9..ed576b4b0a 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_15/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_15/data.yaml @@ -1,44 +1,43 @@ metadata: - id: 140866d3-af45-58a5-9984-cfa9f9498809 - name: Ensure that the scheduler.conf file permissions are set to 644 or more - restrictive (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Ensure that the `scheduler.conf` file has permissions of `644` or more + id: 84c66732-9c11-5a66-9bf9-e65f06e08171 + name: Ensure that the scheduler.conf file permissions are set to 644 or more restrictive + rule_number: 1.1.15 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the `scheduler.conf` file has permissions of `644` or more restrictive. - rationale: > - The `scheduler.conf` file is the kubeconfig file for the Scheduler. You - should restrict its file permissions to maintain the integrity of the file. + rationale: |- + The `scheduler.conf` file is the kubeconfig file for the Scheduler. + You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system. - audit: | - Run the following command (based on the file location on your system) on the control plane node. + audit: |- + Run the following command (based on the file location on your system) on the Control Plane node. For example, + ``` stat -c %a /etc/kubernetes/scheduler.conf ``` + Verify that the permissions are `644` or more restrictive. - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` chmod 644 /etc/kubernetes/scheduler.conf ``` - impact: | - None + impact: None default_value: | By default, `scheduler.conf` has permissions of `640`. - references: | - 1. 
[https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/) + references: + - https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.15 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.15 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_16/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_16/data.yaml index 9b35553b32..3cc0c73968 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_16/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_16/data.yaml @@ -1,43 +1,42 @@ metadata: - id: 6cfcb087-18c7-5cb1-95fd-0dc074f48766 - name: Ensure that the scheduler.conf file ownership is set to root:root (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Ensure that the `scheduler.conf` file ownership is set to `root:root`. - rationale: > - The `scheduler.conf` file is the kubeconfig file for the Scheduler. You - should set its file ownership to maintain the integrity of the file. The - file should be owned by `root:root`. - audit: | - Run the below command (based on the file location on your system) on the - control plane node. + id: 6a676e26-ad2f-53a2-9968-1a075a70a84b + name: Ensure that the scheduler.conf file ownership is set to root:root + rule_number: 1.1.16 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the `scheduler.conf` file ownership is set to `root:root`. + rationale: |- + The `scheduler.conf` file is the kubeconfig file for the Scheduler. + You should set its file ownership to maintain the integrity of the file. + The file should be owned by `root:root`. + audit: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` stat -c %U:%G /etc/kubernetes/scheduler.conf ``` + Verify that the ownership is set to `root:root`. - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` chown root:root /etc/kubernetes/scheduler.conf ``` - impact: | - None + impact: None default_value: | By default, `scheduler.conf` file ownership is set to `root:root`. - references: | - 1. 
[https://kubernetes.io/docs/admin/kubeadm/](https://kubernetes.io/docs/admin/kubeadm/) + references: + - https://kubernetes.io/docs/admin/kubeadm/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.16 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.16 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_17/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_17/data.yaml index 92dd981570..4dad2b3061 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_17/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_17/data.yaml @@ -1,46 +1,44 @@ metadata: - id: 772a42c5-4652-5610-858d-01ef23bd6290 - name: - Ensure that the controller-manager.conf file permissions are set to 644 or - more restrictive (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Ensure that the `controller-manager.conf` file has permissions of `644` or - more restrictive. - rationale: > - The `controller-manager.conf` file is the kubeconfig file for the Controller - Manager. You should restrict its file permissions to maintain the integrity - of the file. The file should be writable by only the administrators on the - system. - audit: | - Run the following command (based on the file location on your system) on the control plane node. + id: 73926a71-4298-505a-a3d4-6131ab97a9f4 + name: Ensure that the controller-manager.conf file permissions are set to 644 or + more restrictive + rule_number: 1.1.17 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the `controller-manager.conf` file has permissions of 644 + or more restrictive. + rationale: |- + The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. + You should restrict its file permissions to maintain the integrity of the file. + The file should be writable by only the administrators on the system. + audit: |- + Run the following command (based on the file location on your system) on the Control Plane node. For example, + ``` stat -c %a /etc/kubernetes/controller-manager.conf ``` + Verify that the permissions are `644` or more restrictive. - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` chmod 644 /etc/kubernetes/controller-manager.conf ``` - impact: | - None + impact: None default_value: | By default, `controller-manager.conf` has permissions of `640`. - references: | - 1. 
[https://kubernetes.io/docs/admin/kube-controller-manager/](https://kubernetes.io/docs/admin/kube-controller-manager/) + references: + - https://kubernetes.io/docs/admin/kube-controller-manager/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.17 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.17 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_18/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_18/data.yaml index 4b88d8153a..866e2d71e5 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_18/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_18/data.yaml @@ -1,46 +1,43 @@ metadata: - id: 5867e0c0-71c5-5f21-8c9c-621a160bfbd9 - name: - Ensure that the controller-manager.conf file ownership is set to root:root - (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Ensure that the `controller-manager.conf` file ownership is set to + id: 76c4a120-d350-5958-a585-0f8b9d581652 + name: Ensure that the controller-manager.conf file ownership is set to root:root + rule_number: 1.1.18 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the `controller-manager.conf` file ownership is set to `root:root`. - rationale: > - The `controller-manager.conf` file is the kubeconfig file for the Controller - Manager. You should set its file ownership to maintain the integrity of the - file. The file should be owned by `root:root`. - audit: | - Run the below command (based on the file location on your system) on the - control plane node. + rationale: |- + The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. + You should set its file ownership to maintain the integrity of the file. + The file should be owned by `root:root`. + audit: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` stat -c %U:%G /etc/kubernetes/controller-manager.conf ``` + Verify that the ownership is set to `root:root`. - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` chown root:root /etc/kubernetes/controller-manager.conf ``` - impact: | - None + impact: None default_value: | By default, `controller-manager.conf` file ownership is set to `root:root`. - references: | - 1. 
[https://kubernetes.io/docs/admin/kube-controller-manager/](https://kubernetes.io/docs/admin/kube-controller-manager/) + references: + - https://kubernetes.io/docs/admin/kube-controller-manager/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.18 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.18 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_19/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_19/data.yaml index 4944101175..844b2e2e60 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_19/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_19/data.yaml @@ -1,47 +1,43 @@ metadata: - id: 780ac02f-e0f5-537c-98ba-354ae5873a81 - name: Ensure that the Kubernetes PKI directory and file ownership is set to - root:root (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Ensure that the Kubernetes PKI directory and file ownership is set to - `root:root`. - rationale: > + id: a6f7e4ca-4a24-58df-9807-2ed19b164cd2 + name: Ensure that the Kubernetes PKI directory and file ownership is set to root:root + rule_number: 1.1.19 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the Kubernetes PKI directory and file ownership is set + to `root:root`. + rationale: |- Kubernetes makes use of a number of certificates as part of its operation. - You should set the ownership of the directory containing the PKI information - and all files in that directory to maintain their integrity. The directory - and files should be owned by root:root. - audit: | - Run the below command (based on the file location on your system) on the - control plane node. + You should set the ownership of the directory containing the PKI information and all files in that directory to maintain their integrity. + The directory and files should be owned by `root:root`. + audit: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` ls -laR /etc/kubernetes/pki/ ``` + Verify that the ownership of all files and directories in this hierarchy is set to `root:root`. - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` chown -R root:root /etc/kubernetes/pki/ ``` - impact: | - None - default_value: > - By default, the `/etc/kubernetes/pki/` directory and all of the files and - directories contained within it, are set to be owned by the root user. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) + impact: None + default_value: | + By default, the `/etc/kubernetes/pki/` directory and all of the files and directories contained within it, are set to be owned by the root user. 
+ references: + - https://kubernetes.io/docs/admin/kube-apiserver/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.19 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.19 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_2/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_2/data.yaml index 3d063c88c0..9105d0ccaa 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_2/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_2/data.yaml @@ -1,45 +1,43 @@ metadata: - id: 3b0587fb-01a3-50c2-9d3d-6675f376d509 - name: Ensure that the API server pod specification file ownership is set - to root:root (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Ensure that the API server pod specification file ownership is set to - `root:root`. - rationale: > - The API server pod specification file controls various parameters that set - the behavior of the API server. You should set its file ownership to - maintain the integrity of the file. The file should be owned by `root:root`. - audit: | - Run the below command (based on the file location on your system) on the - control plane node. + id: ffcb9161-0ec8-5c29-9856-9626556e8f12 + name: Ensure that the API server pod specification file ownership is set to root:root + rule_number: 1.1.2 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the API server pod specification file ownership is set + to `root:root`. + rationale: |- + The API server pod specification file controls various parameters that set the behavior of the API server. + You should set its file ownership to maintain the integrity of the file. + The file should be owned by `root:root`. + audit: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` stat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml ``` + Verify that the ownership is set to `root:root`. - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` chown root:root /etc/kubernetes/manifests/kube-apiserver.yaml ``` - impact: | - None + impact: None default_value: | By default, the `kube-apiserver.yaml` file ownership is set to `root:root`. - references: | - 1. 
[https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.2 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.2 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_20/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_20/data.yaml index f7b36194ba..40cdeb972a 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_20/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_20/data.yaml @@ -1,44 +1,44 @@ metadata: - id: 428bd666-2825-532e-a484-0f31ea5db0f9 + id: 5a666423-ae65-502b-a256-938bfc55bc43 name: Ensure that the Kubernetes PKI certificate file permissions are set to 644 - or more restrictive (Manual) - profile_applicability: | - • Level 1 - Master Node - description: > - Ensure that Kubernetes PKI certificate files have permissions of 644 or more - restrictive. - rationale: > - Kubernetes makes use of a number of certificate files as part of the - operation of its components. - - The permissions on these files should be set to 644 or more restrictive to protect their integrity. - audit: > - Run the below command (based on the file location on your system) on the - Control Plane node. For example, + or more restrictive + rule_number: 1.1.20 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that Kubernetes PKI certificate files have permissions of `644` + or more restrictive. + rationale: |- + Kubernetes makes use of a number of certificate files as part of the operation of its components. + The permissions on these files should be set to `644` or more restrictive to protect their integrity. + audit: |- + Run the below command (based on the file location on your system) on the Control Plane node. + For example, + ``` ls -laR /etc/kubernetes/pki/*.crt ``` - Verify that the permissions are 644 or more restrictive. - remediation: > - Run the below command (based on the file location on your system) on the - Control Plane node. For example, + + Verify that the permissions are `644` or more restrictive. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. + For example, + ``` chmod -R 644 /etc/kubernetes/pki/*.crt ``` - impact: | - None + impact: None default_value: > - By default, the certificates used by Kubernetes are set to have permissions of 644 - references: | - 1. 
https://kubernetes.io/docs/admin/kube-apiserver/ - section: Master Node Configuration Files - version: "1.0" + By default, the certificates used by Kubernetes are set to have permissions of + 644 + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + section: Control Plane Node Configuration Files + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.20 - - Master Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.20 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_21/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_21/data.yaml index 6c46059199..b322a7169e 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_21/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_21/data.yaml @@ -1,42 +1,41 @@ metadata: - id: 5ee652ed-952d-57d3-8643-87f95d046f25 - name: Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual) - profile_applicability: | - • Level 1 - Master Node - description: | - Ensure that Kubernetes PKI key files have permissions of 600. - rationale: > - Kubernetes makes use of a number of key files as part of the operation of - its components. + id: 58a8587b-2c3a-5bd0-9097-42fdfa2801aa + name: Ensure that the Kubernetes PKI key file permissions are set to 600 + rule_number: 1.1.21 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that Kubernetes PKI key files have permissions of `600`. + rationale: |- + Kubernetes makes use of a number of key files as part of the operation of its components. + The permissions on these files should be set to `600` to protect their integrity and confidentiality. + audit: |- + Run the below command (based on the file location on your system) on the Control Plane node. + For example, - The permissions on these files should be set to 600 to protect their integrity and confidentiality. - audit: > - Run the below command (based on the file location on your system) on the - Control Plane node. For example, ``` ls -laR /etc/kubernetes/pki/*.key ``` - Verify that the permissions are 600. - remediation: > - Run the below command (based on the file location on your system) on the - Control Plane node. For example, + + Verify that the permissions are `600`. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. + For example, + ``` chmod -R 600 /etc/kubernetes/pki/*.key ``` - impact: | - None + impact: None default_value: | By default, the keys used by Kubernetes are set to have permissions of 600 - references: | - 1. 
https://kubernetes.io/docs/admin/kube-apiserver/ - section: Master Node Configuration Files - version: "1.0" + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + section: Control Plane Node Configuration Files + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.21 - - Master Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.21 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_3/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_3/data.yaml index c8c6a607cd..4e758920dd 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_3/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_3/data.yaml @@ -1,47 +1,44 @@ metadata: - id: 1d3a468f-78ca-54ff-a43c-0d205ad832b7 - name: - Ensure that the controller manager pod specification file permissions are - set to 644 or more restrictive (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Ensure that the controller manager pod specification file has permissions of - `644` or more restrictive. - rationale: > - The controller manager pod specification file controls various parameters - that set the behavior of the Controller Manager on the control plane node. You - should restrict its file permissions to maintain the integrity of the file. + id: b7a17fac-ac87-5dbe-8b1e-f2565cd2427d + name: |- + Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive + rule_number: 1.1.3 + profile_applicability: '* Level 1 - Master Node' + description: |- + Ensure that the controller manager pod specification file has permissions of `644` or more restrictive. + rationale: |- + The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. + You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system. - audit: | - Run the below command (based on the file location on your system) on the - control plane node. + audit: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` stat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml ``` + Verify that the permissions are `644` or more restrictive. - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` chmod 644 /etc/kubernetes/manifests/kube-controller-manager.yaml ``` - impact: | - None + impact: None default_value: > By default, the `kube-controller-manager.yaml` file has permissions of `640`. - references: | - 1. 
[https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.3 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.3 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_4/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_4/data.yaml index ca352b36c5..db6a94bc3d 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_4/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_4/data.yaml @@ -1,48 +1,45 @@ metadata: - id: f5d3c40c-d915-56d7-b4c4-33d863201c9f - name: - Ensure that the controller manager pod specification file ownership is set - to root:root (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Ensure that the controller manager pod specification file ownership is set - to `root:root`. - rationale: > - The controller manager pod specification file controls various parameters - that set the behavior of various components of the control plane node. You should - set its file ownership to maintain the integrity of the file. The file - should be owned by `root:root`. - audit: | - Run the below command (based on the file location on your system) on the - control plane node. + id: c4978061-8644-5d22-a5d5-16f63af61c09 + name: Ensure that the controller manager pod specification file ownership is set + to root:root + rule_number: 1.1.4 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the controller manager pod specification file ownership + is set to `root:root`. + rationale: |- + The controller manager pod specification file controls various parameters that set the behavior of various components of the master node. + You should set its file ownership to maintain the integrity of the file. + The file should be owned by `root:root`. + audit: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` stat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml ``` + Verify that the ownership is set to `root:root`. - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` chown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml ``` - impact: | - None + impact: None default_value: > By default, `kube-controller-manager.yaml` file ownership is set to `root:root`. - references: | - 1. 
[https://kubernetes.io/docs/admin/kube-controller-manager](https://kubernetes.io/docs/admin/kube-controller-manager) + references: + - https://kubernetes.io/docs/admin/kube-controller-manager section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.4 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.4 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_5/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_5/data.yaml index f2423c1592..21f230e7e1 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_5/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_5/data.yaml @@ -1,46 +1,44 @@ metadata: - id: 66cf51af-50b4-5570-be5b-afe549fafd62 - name: Ensure that the scheduler pod specification file permissions are set to - 644 or more restrictive (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Ensure that the scheduler pod specification file has permissions of `644` or - more restrictive. - rationale: > - The scheduler pod specification file controls various parameters that set - the behavior of the Scheduler service in the control plane node. You should - restrict its file permissions to maintain the integrity of the file. The - file should be writable by only the administrators on the system. - audit: | - Run the below command (based on the file location on your system) on the - control plane node. + id: c08aac61-22f2-5c36-ade5-7c41ccf67fea + name: Ensure that the scheduler pod specification file permissions are set to 644 + or more restrictive + rule_number: 1.1.5 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the scheduler pod specification file has permissions of + `644` or more restrictive. + rationale: |- + The scheduler pod specification file controls various parameters that set the behavior of the Scheduler service in the master node. + You should restrict its file permissions to maintain the integrity of the file. + The file should be writable by only the administrators on the system. + audit: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` stat -c %a /etc/kubernetes/manifests/kube-scheduler.yaml ``` + Verify that the permissions are `644` or more restrictive. - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` chmod 644 /etc/kubernetes/manifests/kube-scheduler.yaml ``` - impact: | - None + impact: None default_value: | By default, `kube-scheduler.yaml` file has permissions of `640`. - references: | - 1. 
[https://kubernetes.io/docs/admin/kube-scheduler/](https://kubernetes.io/docs/admin/kube-scheduler/) + references: + - https://kubernetes.io/docs/admin/kube-scheduler/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.5 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.5 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_6/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_6/data.yaml index 5119c052a8..243d3e23fe 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_6/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_6/data.yaml @@ -1,46 +1,43 @@ metadata: - id: 81a89547-86b3-5538-a6c4-c33aae82a8ae - name: Ensure that the scheduler pod specification file ownership is set - to root:root (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Ensure that the scheduler pod specification file ownership is set to + id: 7dcb6273-247f-5398-9732-1c3f2ce0b98b + name: Ensure that the scheduler pod specification file ownership is set to root:root + rule_number: 1.1.6 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the scheduler pod specification file ownership is set to `root:root`. - rationale: > - The scheduler pod specification file controls various parameters that set - the behavior of the `kube-scheduler` service in the control plane node. You should - set its file ownership to maintain the integrity of the file. The file - should be owned by `root:root`. - audit: | - Run the below command (based on the file location on your system) on the - control plane node. + rationale: |- + The scheduler pod specification file controls various parameters that set the behavior of the `kube-scheduler` service in the master node. + You should set its file ownership to maintain the integrity of the file. + The file should be owned by `root:root`. + audit: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` stat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml ``` + Verify that the ownership is set to `root:root`. - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` chown root:root /etc/kubernetes/manifests/kube-scheduler.yaml ``` - impact: | - None + impact: None default_value: | By default, `kube-scheduler.yaml` file ownership is set to `root:root`. - references: | - 1. 
[https://kubernetes.io/docs/admin/kube-scheduler/](https://kubernetes.io/docs/admin/kube-scheduler/) + references: + - https://kubernetes.io/docs/admin/kube-scheduler/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.6 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.6 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_7/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_7/data.yaml index 03fa678e0f..b290e835d3 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_7/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_7/data.yaml @@ -1,51 +1,47 @@ metadata: - id: f8239b9d-dbc4-51ee-a4c7-2896ad3b102e - name: - Ensure that the etcd pod specification file permissions are set to 644 or - more restrictive (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions - of `644` or more restrictive. - rationale: > - The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` - controls various parameters that set the behavior of the `etcd` service in - the control plane node. etcd is a highly- available key-value store which - Kubernetes uses for persistent storage of all of its REST API object. You - should restrict its file permissions to maintain the integrity of the file. + id: bec347e5-b056-5104-9199-eca9e264983a + name: Ensure that the etcd pod specification file permissions are set to 644 or + more restrictive + rule_number: 1.1.7 + profile_applicability: '* Level 1 - Master Node' + description: |- + Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `644` or more restrictive. + rationale: |- + The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. + etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. + You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system. - audit: | - Run the below command (based on the file location on your system) on the - control plane node. + audit: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` stat -c %a /etc/kubernetes/manifests/etcd.yaml ``` + Verify that the permissions are `644` or more restrictive. - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` chmod 644 /etc/kubernetes/manifests/etcd.yaml ``` - impact: | - None + impact: None default_value: > By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`. - references: | - 1. [https://coreos.com/etcd](https://coreos.com/etcd) - 2. 
[https://kubernetes.io/docs/admin/etcd/](https://kubernetes.io/docs/admin/etcd/) + references: + - https://coreos.com/etcd + - https://kubernetes.io/docs/admin/etcd/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.7 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.7 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_1_8/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_1_8/data.yaml index 6f032fede7..084b91c3ed 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_1_8/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_1_8/data.yaml @@ -1,52 +1,46 @@ metadata: - id: c3c56d62-e217-5a77-aba0-124b1dfb3d2c - name: - Ensure that the etcd pod specification file ownership is set to root:root - (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set - to - `root:root`. - rationale: > - The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` - controls various parameters that set the behavior of the `etcd` service in - the control plane node. etcd is a highly-available key-value store which - Kubernetes uses for persistent storage of all of its REST API object. You - should set its file ownership to maintain the integrity of the file. The - file should be owned by `root:root`. - audit: | - Run the below command (based on the file location on your system) on the - control plane node. + id: d023f3a1-548d-5fb8-a664-31c1d9113d47 + name: Ensure that the etcd pod specification file ownership is set to root:root + rule_number: 1.1.8 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership + is set to `root:root`. + rationale: |- + The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. + etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. + You should set its file ownership to maintain the integrity of the file. + The file should be owned by `root:root`. + audit: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` stat -c %U:%G /etc/kubernetes/manifests/etcd.yaml ``` + Verify that the ownership is set to `root:root`. - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. + remediation: |- + Run the below command (based on the file location on your system) on the Control Plane node. For example, + ``` chown root:root /etc/kubernetes/manifests/etcd.yaml ``` - impact: | - None + impact: None default_value: > By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`. - references: | - 1. [https://coreos.com/etcd](https://coreos.com/etcd) - 2. 
[https://kubernetes.io/docs/admin/etcd/](https://kubernetes.io/docs/admin/etcd/) + references: + - https://coreos.com/etcd + - https://kubernetes.io/docs/admin/etcd/ section: Control Plane Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.1.8 - - Control Plane Node Configuration Files + - CIS + - Kubernetes + - CIS 1.1.8 + - Control Plane Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_10/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_10/data.yaml index 84bb722728..39c054ebf3 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_10/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_10/data.yaml @@ -1,60 +1,48 @@ metadata: - id: d44fc5e7-7275-5d07-88a7-f1f8fc2f73c2 - name: Ensure that the admission control plugin EventRateLimit is set (Manual) - profile_applicability: | - • Level 1 - Master Node - description: | - Limit the rate at which the API server accepts requests. - rationale: > - Using `EventRateLimit` admission control enforces a limit on the number of events that the - - API Server will accept in a given time slice. A misbehaving workload could overwhelm and - - DoS the API Server, making it unavailable. This particularly applies to a multi-tenant - - cluster, where there might be a small percentage of misbehaving tenants which could have - - a significant impact on the performance of the cluster overall. Hence, it is recommended to - - limit the rate of events that the API server will accept. + id: ca5b005f-9922-5cdc-a664-7b8f33e20f00 + name: Ensure that the admission control plugin EventRateLimit is set + rule_number: 1.2.10 + profile_applicability: '* Level 1 - Master Node' + description: Limit the rate at which the API server accepts requests. + rationale: |- + Using `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time slice. + A misbehaving workload could overwhelm and DoS the API Server, making it unavailable. + This particularly applies to a multi-tenant cluster, where there might be a small percentage of misbehaving tenants which could have a significant impact on the performance of the cluster overall. + Hence, it is recommended to limit the rate of events that the API server will accept. Note: This is an Alpha feature in the Kubernetes 1.15 release. - audit: > + audit: |- Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that the `--enable-admission-plugins` argument is set to a value that includes `EventRateLimit`. - remediation: > - Follow the Kubernetes documentation and set the desired limits in a - configuration file. + remediation: |- + Follow the Kubernetes documentation and set the desired limits in a configuration file. + + Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters. - Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` - - and set the below parameters. ``` --enable-admission-plugins=...,EventRateLimit,... - --admission-control-config-file= ``` - impact: | - You need to carefully tune in limits as per your environment. + impact: You need to carefully tune in limits as per your environment. default_value: | By default, `EventRateLimit` is not set. - references: | - 1. https://kubernetes.io/docs/admin/kube-apiserver/ - 2. 
https://kubernetes.io/docs/admin/admission-controllers/#eventratelimit
-    3. https://github.com/staebler/community/blob/9873b632f4d99b5d99c38c9b15fe2f
-    8b93d0a746/contributors/design-
-    proposals/admission_control_event_rate_limit.md
+  references:
+    - https://kubernetes.io/docs/admin/kube-apiserver/
+    - https://kubernetes.io/docs/admin/admission-controllers/#eventratelimit
+    - https://github.com/staebler/community/blob/9873b632f4d99b5d99c38c9b15fe2f8b93d0a746/contributors/design-proposals/admission_control_event_rate_limit.md
   section: API Server
-  version: "1.0"
+  version: '1.0'
   tags:
-    - CIS
-    - Kubernetes
-    - CIS 1.2.10
-    - API Server
+  - CIS
+  - Kubernetes
+  - CIS 1.2.10
+  - API Server
   benchmark:
     name: CIS Kubernetes V1.23
-    version: v1.0.0
+    version: v1.0.1
     id: cis_k8s
diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_11/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_11/data.yaml
index 75fce7ba66..ba3e3814b2 100644
--- a/bundle/compliance/cis_k8s/rules/cis_1_2_11/data.yaml
+++ b/bundle/compliance/cis_k8s/rules/cis_1_2_11/data.yaml
@@ -1,43 +1,39 @@
 metadata:
-  id: d8682dfd-f8ab-5f9f-b10f-6225b9b46560
-  name: Ensure that the admission control plugin AlwaysAdmit is not set (Automated)
-  profile_applicability: |
-    * Level 1 - Master Node
-  description: |
-    Do not allow all requests.
-  rationale: >
-    Setting admission control plugin `AlwaysAdmit` allows all requests and do
-    not filter any requests. The `AlwaysAdmit` admission controller was
-    deprecated in Kubernetes v1.13. Its behavior was equivalent to turning off
-    all admission controllers.
-  audit: |
-    Run the following command on the control plane node:
+  id: 848c3124-9dd0-5590-9338-82de21c1df9f
+  name: Ensure that the admission control plugin AlwaysAdmit is not set
+  rule_number: 1.2.11
+  profile_applicability: '* Level 1 - Master Node'
+  description: Do not allow all requests.
+  rationale: |-
+    Setting admission control plugin `AlwaysAdmit` allows all requests and does not filter any requests.
+
+    The `AlwaysAdmit` admission controller was deprecated in Kubernetes v1.13.
+    Its behavior was equivalent to turning off all admission controllers.
+  audit: |-
+    Run the following command on the Control Plane node:
+
     ```
     ps -ef | grep kube-apiserver
     ```
-    Verify that if the `--enable-admission-plugins` argument is set, its value does not include
-    `AlwaysAdmit`.
-  remediation: |
-    Edit the API server pod specification file
-    `/etc/kubernetes/manifests/kube-apiserver.yaml`
-    on the control plane node and either remove the `--enable-admission-plugins`
-    parameter, or set it to a value that does not include `AlwaysAdmit`.
-  impact: >
-    Only requests explicitly allowed by the admissions control plugins would be
-    served.
+
+    Verify that if the `--enable-admission-plugins` argument is set, its value does not include `AlwaysAdmit`.
+  remediation: |-
+    Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.
+  impact: Only requests explicitly allowed by the admissions control plugins would
+    be served.
   default_value: |
    `AlwaysAdmit` is not in the list of default admission plugins.
-  references: |
-    1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/)
-    2. 
[https://kubernetes.io/docs/admin/admission-controllers/#alwaysadmit](https://kubernetes.io/docs/admin/admission-controllers/#alwaysadmit) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://kubernetes.io/docs/admin/admission-controllers/#alwaysadmit section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.11 - - API Server + - CIS + - Kubernetes + - CIS 1.2.11 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_12/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_12/data.yaml index 816c33bf09..5b015c5f6b 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_12/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_12/data.yaml @@ -1,63 +1,45 @@ metadata: - id: 95868b0f-4f1a-5af1-846e-93725abc9c18 - name: Ensure that the admission control plugin AlwaysPullImages is set(Manual) - profile_applicability: | - • Level 1 - Master Node - description: | - Always pull images. - rationale: > - Setting admission control policy to AlwaysPullImages forces every new pod to - - pull the required images every time. In a multi-tenant cluster users can be assured that their - - private images can only be used by those who have the credentials to pull them. - - Without this admission control policy, once an image has been pulled to a node, any pod from any - - user can use it simply by knowing the image’s name, without any authorization check - - against the image ownership. When this plug-in is enabled, images are always pulled prior - - to starting containers, which means valid credentials are required. - audit: > + id: 73fa944c-a62c-5371-a011-0e17bf85f79f + name: Ensure that the admission control plugin AlwaysPullImages is set + rule_number: 1.2.12 + profile_applicability: '* Level 1 - Master Node' + description: Always pull images. + rationale: |- + Setting admission control policy to `AlwaysPullImages` forces every new pod to pull the required images every time. + In a multi-tenant cluster users can be assured that their private images can only be used by those who have the credentials to pull them. + Without this admission control policy, once an image has been pulled to a node, any pod from any user can use it simply by knowing the image’s name, without any authorization check against the image ownership. + When this plug-in is enabled, images are always pulled prior to starting containers, which means valid credentials are required. + audit: |- Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` - Verify that the `--enable-admission-plugins` argument is set to a value that includes - `AlwaysPullImages`. - remediation: > - Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml - - on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`. + Verify that the `--enable-admission-plugins` argument is set to a value that includes `AlwaysPullImages`. + remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`. + ``` --enable-admission-plugins=...,AlwaysPullImages,... ``` - impact: > - Credentials would be required to pull the private images every time. Also, - in trusted - - environments, this might increases load on network, registry, and decreases speed. 
- - This setting could impact offline or isolated clusters, which have images pre-loaded and do - - not have access to a registry to pull in-use images. This setting is not appropriate for + impact: |- + Credentials would be required to pull the private images every time. Also, in trusted environments, this might increases load on network, registry, and decreases speed. - clusters which use this configuration. + This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration. default_value: | By default, AlwaysPullImages is not set. - references: | - 1. https://kubernetes.io/docs/admin/kube-apiserver/ - 2. https://kubernetes.io/docs/admin/admission-controllers/#alwayspullimages + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://kubernetes.io/docs/admin/admission-controllers/#alwayspullimages section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.12 - - API Server + - CIS + - Kubernetes + - CIS 1.2.12 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_13/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_13/data.yaml index cf2e54df46..1f94379f75 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_13/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_13/data.yaml @@ -1,57 +1,44 @@ metadata: - id: 9ad692c7-e8ee-5633-ac83-d7911467c2c0 - name: Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) - profile_applicability: | - • Level 1 - Master Node - description: > - The `SecurityContextDeny` admission controller can be used to deny pods which make use of - - some SecurityContext fields which could allow for privilege escalation in the cluster. - + id: ee7b40c4-abf3-53f1-b938-aa982a416ad4 + name: Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy + is not used + rule_number: 1.2.13 + profile_applicability: '* Level 1 - Master Node' + description: |- + The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. This should be used where PodSecurityPolicy is not in place within the cluster. - rationale: > - `SecurityContextDeny` can be used to provide a layer of security for clusters which do not - - have PodSecurityPolicies enabled. - audit: > + rationale: |- + SecurityContextDeny can be used to provide a layer of security for clusters which do not have PodSecurityPolicies enabled. + audit: |- Run the following command on the Control Plane node: - + ``` ps -ef | grep kube-apiserver ``` - - Verify that the `--enable-admission-plugins` argument is set to a value that includes - - `SecurityContextDeny`, if `PodSecurityPolicy` is not included. - remediation: > - Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` - - on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, - - unless `PodSecurityPolicy` is already in place. 
- `--enable-admission-plugins=...,SecurityContextDeny,...` - impact: > - This admission controller should only be used where Pod Security Policies - cannot be used + Verify that the `--enable-admission-plugins` argument is set to a value that includes `SecurityContextDeny`, if `PodSecurityPolicy` is not included. + remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place. - on the cluster, as it can interact poorly with certain Pod Security Policies + ``` + --enable-admission-plugins=...,SecurityContextDeny,... + ``` + impact: |- + This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies default_value: | By default, `SecurityContextDeny` is not set. - references: > - 1. https://kubernetes.io/docs/admin/kube-apiserver/ - - 2. https://kubernetes.io/docs/admin/admission-controllers/#securitycontextdeny - - 3. https://kubernetes.io/docs/user-guide/pod-security-policy/#working-with-rbac + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://kubernetes.io/docs/admin/admission-controllers/#securitycontextdeny + - https://kubernetes.io/docs/user-guide/pod-security-policy/#working-with-rbac section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.13 - - API Server + - CIS + - Kubernetes + - CIS 1.2.13 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_14/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_14/data.yaml index d36e62c73e..f1ef2ae9d7 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_14/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_14/data.yaml @@ -1,44 +1,38 @@ metadata: - id: 3f2a98b2-26a6-52bf-97ad-68ef81df953c - name: Ensure that the admission control plugin ServiceAccount is set (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Automate service accounts management. - rationale: > - When you create a pod, if you do not specify a service account, it is - automatically assigned the default service account in the same namespace. - You should create your own service account and let the API server manage its - security tokens. - audit: | - Run the following command on the control plane node: + id: 145d13e1-a792-5070-bf34-68d90fcd90dd + name: Ensure that the admission control plugin ServiceAccount is set + rule_number: 1.2.14 + profile_applicability: '* Level 1 - Master Node' + description: Automate service accounts management. + rationale: |- + When you create a pod, if you do not specify a service account, it is automatically assigned the `default` service account in the same namespace. + You should create your own service account and let the API server manage its security tokens. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` - Verify that the `--disable-admission-plugins` argument is set to a value that does not - includes `ServiceAccount`. - remediation: | - Follow the documentation and create `ServiceAccount` objects as per your - environment. 
- Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and ensure that the `--disable-admission-plugins` - parameter is set to a value that does not include `ServiceAccount`. - impact: | - None. + + Verify that the `--disable-admission-plugins` argument is set to a value that does not includes `ServiceAccount`. + remediation: |- + Follow the documentation and create `ServiceAccount` objects as per your environment. + Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`. + impact: None. default_value: | By default, `ServiceAccount` is set. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://kubernetes.io/docs/admin/admission-controllers/#serviceaccount](https://kubernetes.io/docs/admin/admission-controllers/#serviceaccount) - 3. [https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://kubernetes.io/docs/admin/admission-controllers/#serviceaccount + - https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.14 - - API Server + - CIS + - Kubernetes + - CIS 1.2.14 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_15/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_15/data.yaml index 803d28c18c..56625b1d17 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_15/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_15/data.yaml @@ -1,44 +1,36 @@ metadata: - id: 248339e7-4a2c-598b-954e-b676176f0e49 + id: c2cd82d0-2ded-593d-8231-9c643c40ed73 name: Ensure that the admission control plugin NamespaceLifecycle is set - (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Reject creating objects in a namespace that is undergoing termination. - rationale: > - Setting admission control policy to `NamespaceLifecycle` ensures that - objects cannot be created in non-existent namespaces, and that namespaces - undergoing termination are not used for creating the new objects. This is - recommended to enforce the integrity of the namespace termination process - and also for the availability of the newer objects. - audit: | - Run the following command on the control plane node: + rule_number: 1.2.15 + profile_applicability: '* Level 1 - Master Node' + description: Reject creating objects in a namespace that is undergoing termination. + rationale: |- + Setting admission control policy to `NamespaceLifecycle` ensures that objects cannot be created in non-existent namespaces, and that namespaces undergoing termination are not used for creating the new objects. + This is recommended to enforce the integrity of the namespace termination process and also for the availability of the newer objects. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` - Verify that the `--disable-admission-plugins` argument is set to a value that does not - include `NamespaceLifecycle`. 
- remediation: | - Edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and set the `--disable-admission-plugins` parameter - to ensure it does not include `NamespaceLifecycle`. - impact: | - None + + Verify that the `--disable-admission-plugins` argument is set to a value that does not include `NamespaceLifecycle`. + remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`. + impact: None default_value: | By default, `NamespaceLifecycle` is set. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://kubernetes.io/docs/admin/admission-controllers/#namespacelifecycle](https://kubernetes.io/docs/admin/admission-controllers/#namespacelifecycle) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://kubernetes.io/docs/admin/admission-controllers/#namespacelifecycle section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.15 - - API Server + - CIS + - Kubernetes + - CIS 1.2.15 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_16/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_16/data.yaml index c9f80689a8..38a84b9622 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_16/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_16/data.yaml @@ -1,49 +1,43 @@ metadata: - id: 0cd77c44-7420-5cac-a366-821b44bf819e - name: Ensure that the admission control plugin NodeRestriction is set (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Limit the `Node` and `Pod` objects that a kubelet could modify. - rationale: > - Using the `NodeRestriction` plug-in ensures that the kubelet is restricted - to the `Node` and Pod objects that it could modify as defined. Such kubelets - will only be allowed to modify their own `Node` API object, and only modify - `Pod` API objects that are bound to their node. - audit: | - Run the following command on the control plane node: + id: 0a9250ba-0634-567c-bd6d-63dbab03d83b + name: Ensure that the admission control plugin NodeRestriction is set + rule_number: 1.2.16 + profile_applicability: '* Level 1 - Master Node' + description: Limit the `Node` and `Pod` objects that a kubelet could modify. + rationale: |- + Using the `NodeRestriction` plug-in ensures that the kubelet is restricted to the `Node` and `Pod` objects that it could modify as defined. + Such kubelets will only be allowed to modify their own `Node` API object, and only modify `Pod` API objects that are bound to their node. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` - Verify that the `--enable-admission-plugins` argument is set to a value that includes - `NodeRestriction`. - remediation: | - Follow the Kubernetes documentation and configure `NodeRestriction` plug-in - on - kubelets. - Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and set the `--enable-admission-plugins` parameter - to a value that includes `NodeRestriction`. + + Verify that the `--enable-admission-plugins` argument is set to a value that includes `NodeRestriction`. 
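For reference on where the audited flag lives, a kube-apiserver static pod manifest might look roughly like the abridged sketch below; the image tag and the companion plugins listed alongside `NodeRestriction` are placeholders, not values prescribed by this rule.

```yaml
# Illustrative sketch only: kube-apiserver static pod with NodeRestriction
# included in --enable-admission-plugins (other flags omitted for brevity).
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
    - name: kube-apiserver
      image: registry.k8s.io/kube-apiserver:v1.23.0  # placeholder tag
      command:
        - kube-apiserver
        - --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount
```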
+ remediation: |- + Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. + Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`. + ``` --enable-admission-plugins=...,NodeRestriction,... ``` - impact: | - None + impact: None default_value: | By default, `NodeRestriction` is not set. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://kubernetes.io/docs/admin/admission-controllers/#noderestriction](https://kubernetes.io/docs/admin/admission-controllers/#noderestriction) - 3. [https://kubernetes.io/docs/admin/authorization/node/](https://kubernetes.io/docs/admin/authorization/node/) - 4. [https://acotten.com/post/kube17-security](https://acotten.com/post/kube17-security) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://kubernetes.io/docs/admin/admission-controllers/#noderestriction + - https://kubernetes.io/docs/admin/authorization/node/ + - https://acotten.com/post/kube17-security section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.16 - - API Server + - CIS + - Kubernetes + - CIS 1.2.16 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_17/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_17/data.yaml index 5edac25858..7312b7dbba 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_17/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_17/data.yaml @@ -1,40 +1,35 @@ metadata: - id: e14db7eb-4c21-5f97-826a-391266a92ced - name: Ensure that the --secure-port argument is not set to 0 (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Do not disable the secure port. - rationale: > - The secure port is used to serve https with authentication and - authorization. If you disable it, no https traffic is served and all traffic - is served unencrypted. - audit: | - Run the following command on the control plane node: + id: b38efe94-7174-5e1d-8f32-14ec97aa9d47 + name: Ensure that the --secure-port argument is not set to 0 + rule_number: 1.2.17 + profile_applicability: '* Level 1 - Master Node' + description: Do not disable the secure port. + rationale: |- + The secure port is used to serve https with authentication and authorization. + If you disable it, no https traffic is served and all traffic is served unencrypted. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` - Verify that the `--secure-port` argument is either not set or is set to an integer value - between 1 and 65535. - remediation: | - Edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and either remove the `--secure-port` parameter or - set it to a different (non-zero) desired port. - impact: | - You need to set the API Server up with the right TLS certificates. + + Verify that the `--secure-port` argument is either not set or is set to an integer value between 1 and 65535. + remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port. 
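To make the secure-port remediation concrete, a hedged fragment of the same manifest is sketched below; 6443 is the documented default, and any non-zero port that suits the environment would satisfy the check.

```yaml
# Illustrative sketch only: keep the secure port enabled (never 0).
spec:
  containers:
    - name: kube-apiserver
      command:
        - kube-apiserver
        - --secure-port=6443  # default secure port; any non-zero value passes
```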
+ impact: You need to set the API Server up with the right TLS certificates. default_value: | By default, port 6443 is used as the secure port. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.17 - - API Server + - CIS + - Kubernetes + - CIS 1.2.17 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_18/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_18/data.yaml index 968f0278b2..c2e5bf23d6 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_18/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_18/data.yaml @@ -1,44 +1,41 @@ metadata: - id: 0434fc0b-4b72-5a73-8bee-8c8c40345165 - name: Ensure that the --profiling argument is set to false (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Disable profiling, if not needed. - rationale: > + id: 75faeb95-6d19-5205-85b9-4bb1471292c4 + name: Ensure that the --profiling argument is set to false + rule_number: 1.2.18 + profile_applicability: '* Level 1 - Master Node' + description: Disable profiling, if not needed. + rationale: |- Profiling allows for the identification of specific performance bottlenecks. - It generates a significant amount of program data that could potentially be - exploited to uncover system and program details. If you are not experiencing - any bottlenecks and do not need the profiler for troubleshooting purposes, - it is recommended to turn it off to reduce the potential attack surface. - audit: | - Run the following command on the control plane node: + It generates a significant amount of program data that could potentially be exploited to uncover system and program details. + If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that the `--profiling` argument is set to `false`. - remediation: | - Edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and set the below parameter. + remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter. + ``` --profiling=false ``` - impact: | - Profiling information would not be available. + impact: Profiling information would not be available. default_value: | By default, profiling is enabled. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. 
[https://github.com/kubernetes/community/blob/master/contributors/devel/sig-scalability/profiling.md](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-scalability/profiling.md) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://github.com/kubernetes/community/blob/master/contributors/devel/profiling.md section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.18 - - API Server + - CIS + - Kubernetes + - CIS 1.2.18 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_19/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_19/data.yaml index ac264c1735..b48ded4798 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_19/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_19/data.yaml @@ -1,48 +1,43 @@ metadata: - id: 07361e5e-0142-57ce-8e42-d6ebd5110d2e - name: Ensure that the --audit-log-path argument is set (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Enable auditing on the Kubernetes API Server and set the desired audit log - path. - rationale: > - Auditing the Kubernetes API Server provides a security-relevant - chronological set of records documenting the sequence of activities that - have affected system by individual users, administrators or other components - of the system. Even though currently, Kubernetes provides only basic audit - capabilities, it should be enabled. You can enable it by setting an - appropriate audit log path. - audit: | - Run the following command on the control plane node: + id: 50ebf148-b0a8-5d12-a761-a4e79cb68b37 + name: Ensure that the --audit-log-path argument is set + rule_number: 1.2.19 + profile_applicability: '* Level 1 - Master Node' + description: Enable auditing on the Kubernetes API Server and set the desired audit + log path. + rationale: |- + Auditing the Kubernetes API Server provides a security-relevant chronological set of records documenting the sequence of activities that have affected system by individual users, administrators or other components of the system. + Even though currently, Kubernetes provides only basic audit capabilities, it should be enabled. + You can enable it by setting an appropriate audit log path. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that the `--audit-log-path` argument is set as appropriate. - remediation: | - Edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and set the `--audit-log-path` parameter to a suitable - path and file where you would like audit logs to be written, for example: + remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example: + ``` --audit-log-path=/var/log/apiserver/audit.log ``` - impact: | - None + impact: None default_value: | By default, auditing is not enabled. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://kubernetes.io/docs/concepts/cluster-administration/audit/](https://kubernetes.io/docs/concepts/cluster-administration/audit/) - 3. 
[https://github.com/kubernetes/features/issues/22](https://github.com/kubernetes/features/issues/22) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://kubernetes.io/docs/concepts/cluster-administration/audit/ + - https://github.com/kubernetes/features/issues/22 section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.19 - - API Server + - CIS + - Kubernetes + - CIS 1.2.19 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_2/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_2/data.yaml index ff901eb385..cfe8273730 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_2/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_2/data.yaml @@ -1,44 +1,39 @@ metadata: - id: 6c8f1b4a-9e41-5fdb-b4bf-57b850ea1d29 - name: Ensure that the --token-auth-file parameter is not set (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Do not use token based authentication. - rationale: > - The token-based authentication utilizes static tokens to authenticate - requests to the apiserver. The tokens are stored in clear-text in a file on - the apiserver, and cannot be revoked or rotated without restarting the - apiserver. Hence, do not use static token-based authentication. - audit: | - Run the following command on the control plane node: + id: 6d342ec7-132b-5764-92df-89d4018f632d + name: Ensure that the --token-auth-file parameter is not set + rule_number: 1.2.2 + profile_applicability: '* Level 1 - Master Node' + description: Do not use token based authentication. + rationale: |- + The token-based authentication utilizes static tokens to authenticate requests to the apiserver. + The tokens are stored in clear-text in a file on the apiserver, and cannot be revoked or rotated without restarting the apiserver. + Hence, do not use static token-based authentication. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that the `--token-auth-file` argument does not exist. - remediation: | - Follow the documentation and configure alternate mechanisms for - authentication. Then, - edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and remove the `--token-auth-file=` - parameter. - impact: > - You will have to configure and use alternate authentication mechanisms such - as - certificates. Static token based authentication could not be used. + remediation: |- + Follow the documentation and configure alternate mechanisms for authentication. + Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter. + impact: |- + You will have to configure and use alternate authentication mechanisms such as certificates. Static token based authentication could not be used. default_value: | By default, `--token-auth-file` argument is not set. - references: | - 1. [https://kubernetes.io/docs/admin/authentication/#static-token-file](https://kubernetes.io/docs/admin/authentication/#static-token-file) - 2. 
[https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) + references: + - https://kubernetes.io/docs/admin/authentication/#static-token-file + - https://kubernetes.io/docs/admin/kube-apiserver/ section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.2 - - API Server + - CIS + - Kubernetes + - CIS 1.2.2 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_20/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_20/data.yaml index 96b228a9ed..1fb5cb947d 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_20/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_20/data.yaml @@ -1,47 +1,41 @@ metadata: - id: 058dd742-d183-57c8-9115-16ab34615037 - name: - Ensure that the --audit-log-maxage argument is set to 30 or as appropriate - (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Retain the logs for at least 30 days or as appropriate. - rationale: > - Retaining logs for at least 30 days ensures that you can go back in time and - investigate or - correlate any events. Set your audit log retention period to 30 days or as per your business - requirements. - audit: | - Run the following command on the control plane node: + id: b7515b92-10ec-5218-94b4-b10de05fd90f + name: Ensure that the --audit-log-maxage argument is set to 30 or as appropriate + rule_number: 1.2.20 + profile_applicability: '* Level 1 - Master Node' + description: Retain the logs for at least 30 days or as appropriate. + rationale: |- + Retaining logs for at least 30 days ensures that you can go back in time and investigate or correlate any events. + Set your audit log retention period to 30 days or as per your business requirements. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that the `--audit-log-maxage` argument is set to `30` or as appropriate. - remediation: | - Edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and set the `--audit-log-maxage` parameter to 30 or - as an appropriate number of days: + remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days: + ``` --audit-log-maxage=30 ``` - impact: | - None + impact: None default_value: | By default, auditing is not enabled. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://kubernetes.io/docs/concepts/cluster-administration/audit/](https://kubernetes.io/docs/concepts/cluster-administration/audit/) - 3. 
[https://github.com/kubernetes/features/issues/22](https://github.com/kubernetes/features/issues/22) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://kubernetes.io/docs/concepts/cluster-administration/audit/ + - https://github.com/kubernetes/features/issues/22 section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.20 - - API Server + - CIS + - Kubernetes + - CIS 1.2.20 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_21/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_21/data.yaml index 6cd11bb80e..763146857f 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_21/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_21/data.yaml @@ -1,47 +1,42 @@ metadata: - id: 861cb7eb-4db7-58c2-b849-19c2437c97f2 - name: Ensure that the --audit-log-maxbackup argument is set to 10 or as - appropriate (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Retain 10 or an appropriate number of old log files. - rationale: > - Kubernetes automatically rotates the log files. Retaining old log files - ensures that you would have sufficient log data available for carrying out - any investigation or correlation. For example, if you have set file size of - 100 MB and the number of old log files to keep as 10, you would approximate - have 1 GB of log data that you could potentially use for your analysis. - audit: | - Run the following command on the control plane node: + id: 5bf4df2b-2758-5dfc-b670-74faf135e437 + name: Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate + rule_number: 1.2.21 + profile_applicability: '* Level 1 - Master Node' + description: Retain 10 or an appropriate number of old log files. + rationale: |- + Kubernetes automatically rotates the log files. + Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. + For example, if you have set file size of 100 MB and the number of old log files to keep as 10, you would approximate have 1 GB of log data that you could potentially use for your analysis. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that the `--audit-log-maxbackup` argument is set to `10` or as appropriate. - remediation: | - Edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and set the `--audit-log-maxbackup` parameter to 10 - or to an appropriate value. + remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value. + ``` --audit-log-maxbackup=10 ``` - impact: | - None + impact: None default_value: | By default, auditing is not enabled. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://kubernetes.io/docs/concepts/cluster-administration/audit/](https://kubernetes.io/docs/concepts/cluster-administration/audit/) - 3. 
[https://github.com/kubernetes/features/issues/22](https://github.com/kubernetes/features/issues/22) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://kubernetes.io/docs/concepts/cluster-administration/audit/ + - https://github.com/kubernetes/features/issues/22 section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.21 - - API Server + - CIS + - Kubernetes + - CIS 1.2.21 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_22/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_22/data.yaml index cdeca4e298..b94099392a 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_22/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_22/data.yaml @@ -1,47 +1,43 @@ metadata: - id: eb641843-bd10-5ec5-9a37-7b6a70feb6e0 - name: Ensure that the --audit-log-maxsize argument is set to 100 or as - appropriate (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Rotate log files on reaching 100 MB or as appropriate. - rationale: > - Kubernetes automatically rotates the log files. Retaining old log files - ensures that you would have sufficient log data available for carrying out - any investigation or correlation. If you have set file size of 100 MB and - the number of old log files to keep as 10, you would approximate have 1 GB - of log data that you could potentially use for your analysis. - audit: | - Run the following command on the control plane node: + id: 9629f363-8475-5939-8432-ae72e223a892 + name: Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate + rule_number: 1.2.22 + profile_applicability: '* Level 1 - Master Node' + description: Rotate log files on reaching 100 MB or as appropriate. + rationale: |- + Kubernetes automatically rotates the log files. + Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. + If you have set file size of 100 MB and the number of old log files to keep as 10, you would approximate have 1 GB of log data that you could potentially use for your analysis. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that the `--audit-log-maxsize` argument is set to `100` or as appropriate. - remediation: | - Edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and set the `--audit-log-maxsize` parameter to an - appropriate size in MB. For example, to set it as 100 MB: + remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. + For example, to set it as 100 MB: + ``` --audit-log-maxsize=100 ``` - impact: | - None + impact: None default_value: | By default, auditing is not enabled. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://kubernetes.io/docs/concepts/cluster-administration/audit/](https://kubernetes.io/docs/concepts/cluster-administration/audit/) - 3. 
[https://github.com/kubernetes/features/issues/22](https://github.com/kubernetes/features/issues/22) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://kubernetes.io/docs/concepts/cluster-administration/audit/ + - https://github.com/kubernetes/features/issues/22 section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.22 - - API Server + - CIS + - Kubernetes + - CIS 1.2.22 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_23/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_23/data.yaml index 5c7b61e16b..84cb98e04a 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_23/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_23/data.yaml @@ -1,49 +1,43 @@ metadata: - id: c03358d0-05d0-54a9-b079-7c6991b2bc41 - name: Ensure that the --request-timeout argument is set as appropriate (Manual) - profile_applicability: | - * Level 1 - Master Node - description: | - Set global request timeout for API server requests as appropriate. - rationale: > - Setting global request timeout allows extending the API server request - timeout limit to a duration appropriate to the user's connection speed. By - default, it is set to 60 seconds which might be problematic on slower - connections making cluster resources inaccessible once the data volume for - requests exceeds what can be transmitted in 60 seconds. But, setting this - timeout limit to be too large can exhaust the API server resources making it - prone to Denial-of-Service attack. Hence, it is recommended to set this - limit as appropriate and change the default limit of 60 seconds only if - needed. - audit: | - Run the following command on the control plane node: + id: 21ef8d8e-ac0d-57e4-805f-30817ec1500b + name: Ensure that the --request-timeout argument is set as appropriate + rule_number: 1.2.23 + profile_applicability: '* Level 1 - Master Node' + description: Set global request timeout for API server requests as appropriate. + rationale: |- + Setting global request timeout allows extending the API server request timeout limit to a duration appropriate to the user's connection speed. + By default, it is set to 60 seconds which might be problematic on slower connections making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. + But, setting this timeout limit to be too large can exhaust the API server resources making it prone to Denial-of-Service attack. + Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` - Verify that the `--request-timeout` argument is either not set or set to an appropriate - value. - remediation: | - Edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` - and set the below parameter as appropriate and if needed. For example, + + Verify that the `--request-timeout` argument is either not set or set to an appropriate value. + remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. + For example, + ``` --request-timeout=300s ``` - impact: | - None + impact: None default_value: | By default, `--request-timeout` is set to 60 seconds. - references: | - 1. 
[https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://github.com/kubernetes/kubernetes/pull/51415](https://github.com/kubernetes/kubernetes/pull/51415) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://github.com/kubernetes/kubernetes/pull/51415 section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.23 - - API Server + - CIS + - Kubernetes + - CIS 1.2.23 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_24/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_24/data.yaml index 0287256d80..cb854ec2dd 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_24/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_24/data.yaml @@ -1,48 +1,45 @@ metadata: - id: 3b9e7a41-95c8-5262-8643-c0e15d2eb8c7 - name: Ensure that the --service-account-lookup argument is set to true (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Validate service account before validating token. - rationale: > - If `--service-account-lookup` is not enabled, the apiserver only verifies - that the authentication token is valid, and does not validate that the - service account token mentioned in the request is actually present in etcd. - This allows using a service account token even after the corresponding - service account is deleted. This is an example of time of check to time of - use security issue. - audit: | - Run the following command on the control plane node: + id: 98547a10-464e-5b9f-9a80-6b254cb621a3 + name: Ensure that the --service-account-lookup argument is set to true + rule_number: 1.2.24 + profile_applicability: '* Level 1 - Master Node' + description: Validate service account before validating token. + rationale: |- + If `--service-account-lookup` is not enabled, the apiserver only verifies that the authentication token is valid, and does not validate that the service account token mentioned in the request is actually present in etcd. + This allows using a service account token even after the corresponding service account is deleted. + This is an example of time of check to time of use security issue. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that if the `--service-account-lookup` argument exists it is set to `true`. - remediation: | - Edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and set the below parameter. + remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter. + + ``` --service-account-lookup=true ``` - Alternatively, you can delete the `--service-account-lookup` parameter from this file so - that the default takes effect. - impact: | - None + + Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect. + impact: None default_value: | By default, `--service-account-lookup` argument is set to `true`. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://github.com/kubernetes/kubernetes/issues/24167](https://github.com/kubernetes/kubernetes/issues/24167) - 3. 
[https://en.wikipedia.org/wiki/Time_of_check_to_time_of_use](https://en.wikipedia.org/wiki/Time_of_check_to_time_of_use) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://github.com/kubernetes/kubernetes/issues/24167 + - https://en.wikipedia.org/wiki/Time_of_check_to_time_of_use section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.24 - - API Server + - CIS + - Kubernetes + - CIS 1.2.24 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_25/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_25/data.yaml index e0d5d38f99..be8e875b1c 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_25/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_25/data.yaml @@ -1,51 +1,43 @@ metadata: - id: 4caf31ec-2fe2-5337-b1e1-9cbf498de17f - name: - Ensure that the --service-account-key-file argument is set as appropriate - (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Explicitly set a service account public key file for service accounts on the - apiserver. - rationale: > - By default, if no `--service-account-key-file` is specified to the - apiserver, it uses the private key from the TLS serving certificate to - verify service account tokens. To ensure that the keys for service account - tokens could be rotated as needed, a separate public/private key pair should - be used for signing service account tokens. Hence, the public key should be - specified to the apiserver with `--service-account-key-file`. - audit: | - Run the following command on the control plane node: + id: 645144dd-f578-5e76-9251-1679bc027910 + name: Ensure that the --service-account-key-file argument is set as appropriate + rule_number: 1.2.25 + profile_applicability: '* Level 1 - Master Node' + description: Explicitly set a service account public key file for service accounts + on the apiserver. + rationale: |- + By default, if no `--service-account-key-file` is specified to the apiserver, it uses the private key from the TLS serving certificate to verify service account tokens. + To ensure that the keys for service account tokens could be rotated as needed, a separate public/private key pair should be used for signing service account tokens. + Hence, the public key should be specified to the apiserver with `--service-account-key-file`. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that the `--service-account-key-file` argument exists and is set as appropriate. - remediation: | - Edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and set the `--service-account-key-file` parameter - to the public key file for service accounts: + remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts: + ``` --service-account-key-file= ``` - impact: > - The corresponding private key must be provided to the controller manager. - You would need to securely maintain the key file and rotate the keys based - on your organization's key rotation policy. + impact: |- + The corresponding private key must be provided to the controller manager. 
You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy. default_value: | By default, `--service-account-key-file` argument is not set. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://github.com/kubernetes/kubernetes/issues/24167](https://github.com/kubernetes/kubernetes/issues/24167) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://github.com/kubernetes/kubernetes/issues/24167 section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.25 - - API Server + - CIS + - Kubernetes + - CIS 1.2.25 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_26/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_26/data.yaml index 7602512b39..1bf82ab1be 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_26/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_26/data.yaml @@ -1,50 +1,44 @@ metadata: - id: 9ed65e6a-550a-5960-b86d-ab3449c407c0 - name: Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as - appropriate (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - etcd should be configured to make use of TLS encryption for client + id: 1635eab6-ee81-5075-a0bc-8e56a9f16a88 + name: Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate + rule_number: 1.2.26 + profile_applicability: '* Level 1 - Master Node' + description: etcd should be configured to make use of TLS encryption for client connections. - rationale: > - etcd is a highly-available key value store used by Kubernetes deployments - for persistent storage of all of its REST API objects. These objects are - sensitive in nature and should be protected by client authentication. This - requires the API server to identify itself to the etcd server using a client - certificate and key. - audit: | - Run the following command on the control plane node: + rationale: |- + etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. + These objects are sensitive in nature and should be protected by client authentication. + This requires the API server to identify itself to the etcd server using a client certificate and key. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` - Verify that the `--etcd-certfile` and `--etcd-keyfile` arguments exist and they are set as - appropriate. - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between - the - apiserver and etcd. Then, edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` on the control plane node and set the etcd - certificate and key file parameters. + + Verify that the `--etcd-certfile` and `--etcd-keyfile` arguments exist and they are set as appropriate. + remediation: |- + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters. + ``` - --etcd-certfile= + --etcd-certfile= --etcd-keyfile= ``` - impact: | - TLS and client certificate authentication must be configured for etcd. 
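To illustrate the two etcd client-certificate flags referenced here, a hedged manifest fragment follows; the certificate and key paths are placeholders that depend on how the TLS material was provisioned.

```yaml
# Illustrative sketch only: API server authenticating to etcd with a client
# certificate and key (paths are placeholders).
spec:
  containers:
    - name: kube-apiserver
      command:
        - kube-apiserver
        - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
        - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
```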
+ impact: TLS and client certificate authentication must be configured for etcd. default_value: | By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://coreos.com/etcd/docs/latest/op-guide/security.html](https://coreos.com/etcd/docs/latest/op-guide/security.html) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://coreos.com/etcd/docs/latest/op-guide/security.html section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.26 - - API Server + - CIS + - Kubernetes + - CIS 1.2.26 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_27/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_27/data.yaml index 89425a6b5a..2c2f6adcc8 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_27/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_27/data.yaml @@ -1,49 +1,46 @@ metadata: - id: 576f7a4e-bfec-5e1e-9b41-92212823e83d - name: Ensure that the --tls-cert-file and --tls-private-key-file arguments are - set as appropriate (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Setup TLS connection on the API server. - rationale: > - API server communication contains sensitive parameters that should remain - encrypted in transit. Configure the API server to serve only HTTPS traffic. - audit: | - Run the following command on the control plane node: + id: 719ccb48-0106-5c2a-a563-b31620da736a + name: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set + as appropriate + rule_number: 1.2.27 + profile_applicability: '* Level 1 - Master Node' + description: Setup TLS connection on the API server. + rationale: |- + API server communication contains sensitive parameters that should remain encrypted in transit. + Configure the API server to serve only HTTPS traffic. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` - Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and they - are set as appropriate. - remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the - apiserver. - Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and set the TLS certificate and private key file - parameters. + + Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and they are set as appropriate. + remediation: |- + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters. + ``` - --tls-cert-file= + --tls-cert-file= --tls-private-key-file= ``` - impact: > - TLS and client certificate authentication must be configured for your - Kubernetes cluster deployment. + impact: TLS and client certificate authentication must be configured for your Kubernetes + cluster deployment. default_value: > By default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. 
[http://rootsquash.com/2016/05/10/securing-the-kubernetes-api/](http://rootsquash.com/2016/05/10/securing-the-kubernetes-api/) - 3. [https://github.com/kelseyhightower/docker-kubernetes-tls-guide](https://github.com/kelseyhightower/docker-kubernetes-tls-guide) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - http://rootsquash.com/2016/05/10/securing-the-kubernetes-api/ + - https://github.com/kelseyhightower/docker-kubernetes-tls-guide section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.27 - - API Server + - CIS + - Kubernetes + - CIS 1.2.27 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_28/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_28/data.yaml index 2f70c06b76..efa7f668b9 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_28/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_28/data.yaml @@ -1,48 +1,44 @@ metadata: - id: e60a7027-cdd7-5a35-afa2-f7a8be2dc845 - name: Ensure that the --client-ca-file argument is set as appropriate (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Setup TLS connection on the API server. - rationale: > - API server communication contains sensitive parameters that should remain - encrypted in transit. Configure the API server to serve only HTTPS traffic. - If `--client-ca-file` argument is set, any request presenting a client - certificate signed by one of the authorities in the `client-ca-file` is - authenticated with an identity corresponding to the CommonName of the client - certificate. - audit: | - Run the following command on the control plane node: + id: 5097a7f7-be0e-57f1-81d7-665334381426 + name: Ensure that the --client-ca-file argument is set as appropriate + rule_number: 1.2.28 + profile_applicability: '* Level 1 - Master Node' + description: Setup TLS connection on the API server. + rationale: |- + API server communication contains sensitive parameters that should remain encrypted in transit. + Configure the API server to serve only HTTPS traffic. + If `--client-ca-file` argument is set, any request presenting a client certificate signed by one of the authorities in the `client-ca-file` is authenticated with an identity corresponding to the CommonName of the client certificate. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that the `--client-ca-file` argument exists and it is set as appropriate. - remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the - apiserver. - Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and set the client certificate authority file. + remediation: |- + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file. + ``` --client-ca-file= ``` - impact: > - TLS and client certificate authentication must be configured for your - Kubernetes cluster deployment. + impact: TLS and client certificate authentication must be configured for your Kubernetes + cluster deployment. default_value: | By default, `--client-ca-file` argument is not set. - references: | - 1. 
[https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [http://rootsquash.com/2016/05/10/securing-the-kubernetes-api/](http://rootsquash.com/2016/05/10/securing-the-kubernetes-api/) - 3. [https://github.com/kelseyhightower/docker-kubernetes-tls-guide](https://github.com/kelseyhightower/docker-kubernetes-tls-guide) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - http://rootsquash.com/2016/05/10/securing-the-kubernetes-api/ + - https://github.com/kelseyhightower/docker-kubernetes-tls-guide section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.28 - - API Server + - CIS + - Kubernetes + - CIS 1.2.28 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_29/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_29/data.yaml index 3ecd2f35e4..4cdd2ce69b 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_29/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_29/data.yaml @@ -1,46 +1,43 @@ metadata: - id: 271a6cd7-9498-5bc1-bac4-8d58f7b46e96 - name: Ensure that the --etcd-cafile argument is set as appropriate (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - etcd should be configured to make use of TLS encryption for client + id: 174c9892-d459-584c-9407-b69e848bdd2c + name: Ensure that the --etcd-cafile argument is set as appropriate + rule_number: 1.2.29 + profile_applicability: '* Level 1 - Master Node' + description: etcd should be configured to make use of TLS encryption for client connections. - rationale: > - etcd is a highly-available key value store used by Kubernetes deployments - for persistent - storage of all of its REST API objects. These objects are sensitive in nature and should be - protected by client authentication. This requires the API server to identify itself to the etcd - server using a SSL Certificate Authority file. - audit: | - Run the following command on the control plane node: + rationale: |- + etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. + These objects are sensitive in nature and should be protected by client authentication. + This requires the API server to identify itself to the etcd server using a SSL Certificate Authority file. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that the `--etcd-cafile` argument exists and it is set as appropriate. - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between - the apiserver and etcd. Then, edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` on the control plane node and set the etcd - certificate authority file parameter. + remediation: |- + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter. + ``` --etcd-cafile= ``` - impact: | - TLS and client certificate authentication must be configured for etcd. + impact: TLS and client certificate authentication must be configured for etcd. default_value: | By default, `--etcd-cafile` is not set. - references: | - 1. 
[https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://coreos.com/etcd/docs/latest/op-guide/security.html](https://coreos.com/etcd/docs/latest/op-guide/security.html) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://coreos.com/etcd/docs/latest/op-guide/security.html section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.29 - - API Server + - CIS + - Kubernetes + - CIS 1.2.29 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_32/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_32/data.yaml index 99000924c6..b30f85474d 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_32/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_32/data.yaml @@ -1,76 +1,41 @@ metadata: - id: 0e3755af-d150-504f-80db-3a5bb1094c4e - name: Ensure that the API Server only makes use of StrongCryptographic Ciphers - (Manual) - profile_applicability: | - • Level 1 - Master Node - description: > - Ensure that the API server is configured to only use strong cryptographic ciphers. - rationale: > - TLS ciphers have had a number of known vulnerabilities and weaknesses, which - can - - reduce the protection provided by them. By default Kubernetes supports a number of TLS - - cipher suites including some that have security concerns, weakening the protection - - provided. - audit: > + id: 1b0fb4c0-9ee1-51df-9b7b-d261950ecc07 + name: Ensure that the API Server only makes use of Strong Cryptographic Ciphers + rule_number: 1.2.32 + profile_applicability: '* Level 1 - Master Node' + description: Ensure that the API server is configured to only use strong cryptographic + ciphers. + rationale: |- + TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. + By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided. + audit: |- Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that the `--tls-cipher-suites` argument is set as outlined in the remediation procedure below. - remediation: > - Edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` + remediation: |- + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter. - on the Control Plane node and set the below parameter. ``` - --tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, - - TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, - - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, - - TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, - - TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384. 
+ --tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384. ``` - impact: > - API server clients that cannot support modern cryptographic ciphers will not - be able to - - make connections to the API server. + impact: |- + API server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server. default_value: | By default the Kubernetes API server supports a wide range of TLS ciphers - references: | - 1. https://github.com/ssllabs/research/wiki/SSL-and-TLS-Deployment-Best- - Practices#23-use-secure-cipher-suites + references: + - https://github.com/ssllabs/research/wiki/SSL-and-TLS-Deployment-Best-Practices#23-use-secure-cipher-suites section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.32 - - API Server + - CIS + - Kubernetes + - CIS 1.2.32 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_4/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_4/data.yaml index e199844e9b..a672ff2606 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_4/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_4/data.yaml @@ -1,39 +1,36 @@ metadata: - id: 8832517d-74ba-5191-8b2b-3bbc81f6f970 - name: Ensure that the --kubelet-https argument is set to true (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Use https for kubelet connections. - rationale: > - Connections from apiserver to kubelets could potentially carry sensitive - data such as secrets and keys. It is thus important to use in-transit - encryption for any communication between the apiserver and kubelets. - audit: | - Run the following command on the control plane node: + id: 697cc9f6-28dd-51a5-934c-b6cc3922a6db + name: Ensure that the --kubelet-https argument is set to true + rule_number: 1.2.4 + profile_applicability: '* Level 1 - Master Node' + description: Use https for kubelet connections. + rationale: |- + Connections from apiserver to kubelets could potentially carry sensitive data such as secrets and keys. + It is thus important to use in-transit encryption for any communication between the apiserver and kubelets. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that the `--kubelet-https` argument either does not exist or is set to `true`. - remediation: | - Edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and remove the `--kubelet-https` parameter. - impact: | - You require TLS to be configured on apiserver as well as kubelets. 
+ remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and remove the `--kubelet-https` parameter. + impact: You require TLS to be configured on apiserver as well as kubelets. default_value: | By default, kubelet connections are over https. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://kubernetes.io/docs/admin/kubelet-authentication-authorization/](https://kubernetes.io/docs/admin/kubelet-authentication-authorization/) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://kubernetes.io/docs/admin/kubelet-authentication-authorization/ section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.4 - - API Server + - CIS + - Kubernetes + - CIS 1.2.4 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_5/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_5/data.yaml index 62d99332ac..5e9cd48a34 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_5/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_5/data.yaml @@ -1,49 +1,45 @@ metadata: - id: 5c060a65-bbc5-5af4-9a42-338717937ec4 - name: Ensure that the --kubelet-client-certificate and - --kubelet-client-keyarguments are set as appropriate (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Enable certificate based kubelet authentication. - rationale: > - The apiserver, by default, does not authenticate itself to the kubelet's - HTTPS endpoints. The requests from the apiserver are treated anonymously. - You should set up certificate- based kubelet authentication to ensure that - the apiserver authenticates itself to kubelets when submitting requests. - audit: | - Run the following command on the control plane node: + id: 6f90e227-03d9-5a1b-bf87-b13c45b23182 + name: |- + Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate + rule_number: 1.2.5 + profile_applicability: '* Level 1 - Master Node' + description: Enable certificate based kubelet authentication. + rationale: |- + The apiserver, by default, does not authenticate itself to the kubelet's HTTPS endpoints. + The requests from the apiserver are treated anonymously. + You should set up certificate-based kubelet authentication to ensure that the apiserver authenticates itself to kubelets when submitting requests. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` - Verify that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments - exist and they are set as appropriate. - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between - the - apiserver and kubelets. Then, edit API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` on the control plane node and set the - kubelet client certificate and key parameters as below. + + Verify that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments exist and they are set as appropriate. + remediation: |- + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. 
+ Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below. + ``` --kubelet-client-certificate= --kubelet-client-key= ``` - impact: | - You require TLS to be configured on apiserver as well as kubelets. + impact: You require TLS to be configured on apiserver as well as kubelets. default_value: | By default, certificate-based kubelet authentication is not set. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://kubernetes.io/docs/admin/kubelet-authentication-authorization/](https://kubernetes.io/docs/admin/kubelet-authentication-authorization/) - 3. [https://kubernetes.io/docs/concepts/cluster-administration/master-node-communication/#apiserver---kubelet](https://kubernetes.io/docs/concepts/cluster-administration/master-node-communication/#apiserver---kubelet) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://kubernetes.io/docs/admin/kubelet-authentication-authorization/ + - https://kubernetes.io/docs/concepts/cluster-administration/master-node-communication/#apiserver---kubelet section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.5 - - API Server + - CIS + - Kubernetes + - CIS 1.2.5 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_6/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_6/data.yaml index 50d65c68c6..70539a693c 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_6/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_6/data.yaml @@ -1,52 +1,43 @@ metadata: - id: 9a584e20-2a45-598e-b7ee-1148740f3085 - name: Ensure that the --kubelet-certificate-authority argument is set as - appropriate (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Verify kubelet's certificate before establishing connection. - rationale: > - The connections from the apiserver to the kubelet are used for fetching logs - for pods, attaching (through kubectl) to running pods, and using the - kubelet's port-forwarding functionality. These connections terminate at the - kubelet's HTTPS endpoint. By default, the apiserver does not verify the - kubelet's serving certificate, which makes the connection subject to - man-in-the-middle attacks, and unsafe to run over untrusted and/or public - networks. - audit: | - Run the following command on the control plane node: + id: 53095c31-20ab-5659-a8e6-3e1b505c6984 + name: Ensure that the --kubelet-certificate-authority argument is set as appropriate + rule_number: 1.2.6 + profile_applicability: '* Level 1 - Master Node' + description: Verify kubelet's certificate before establishing connection. + rationale: |- + The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet’s port-forwarding functionality. + These connections terminate at the kubelet’s HTTPS endpoint. + By default, the apiserver does not verify the kubelet’s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` - Verify that the `--kubelet-certificate-authority` argument exists and is set as - appropriate. 
- remediation: | - Follow the Kubernetes documentation and setup the TLS connection between the - apiserver - and kubelets. Then, edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` on the control plane node and set the - `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate - authority. + + Verify that the `--kubelet-certificate-authority` argument exists and is set as appropriate. + remediation: |- + Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. + Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. + ``` --kubelet-certificate-authority= ``` - impact: | - You require TLS to be configured on apiserver as well as kubelets. + impact: You require TLS to be configured on apiserver as well as kubelets. default_value: | By default, `--kubelet-certificate-authority` argument is not set. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://kubernetes.io/docs/admin/kubelet-authentication-authorization/](https://kubernetes.io/docs/admin/kubelet-authentication-authorization/) - 3. [https://kubernetes.io/docs/concepts/cluster-administration/master-node-communication/#apiserver---kubelet](https://kubernetes.io/docs/concepts/cluster-administration/master-node-communication/#apiserver---kubelet) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://kubernetes.io/docs/admin/kubelet-authentication-authorization/ + - https://kubernetes.io/docs/concepts/cluster-administration/master-node-communication/#apiserver---kubelet section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.6 - - API Server + - CIS + - Kubernetes + - CIS 1.2.6 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_7/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_7/data.yaml index ac07bfccad..9361f13aaf 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_7/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_7/data.yaml @@ -1,43 +1,41 @@ metadata: - id: 811f3dd3-7fbc-5141-83b7-724730ec158d + id: 3781a71f-54e5-5987-9da2-d72a7d139af7 name: Ensure that the --authorization-mode argument is not set to AlwaysAllow - (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Do not always authorize all requests. - rationale: > - The API Server, can be configured to allow all requests. This mode should - not be used on any production cluster. - audit: | - Run the following command on the control plane node: + rule_number: 1.2.7 + profile_applicability: '* Level 1 - Master Node' + description: Do not always authorize all requests. + rationale: |- + The API Server, can be configured to allow all requests. + This mode should not be used on any production cluster. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that the `--authorization-mode` argument exists and is not set to `AlwaysAllow`. 
- remediation: | - Edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and set the `--authorization-mode` parameter to - values other than `AlwaysAllow`. One such example could be as below. + remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. + One such example could be as below. + ``` --authorization-mode=RBAC ``` - impact: | - Only authorized requests will be served. + impact: Only authorized requests will be served. default_value: | By default, `AlwaysAllow` is not enabled. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. [https://kubernetes.io/docs/admin/authorization/](https://kubernetes.io/docs/admin/authorization/) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://kubernetes.io/docs/admin/authorization/ section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.7 - - API Server + - CIS + - Kubernetes + - CIS 1.2.7 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_8/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_8/data.yaml index dacd185c04..df08a3f946 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_8/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_8/data.yaml @@ -1,45 +1,41 @@ metadata: - id: d4942f42-b0b7-5fab-9d43-bfcf3373ac57 - name: Ensure that the --authorization-mode argument includes Node (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Restrict kubelet nodes to reading only objects associated with them. - rationale: > - The `Node` authorization mode only allows kubelets to read `Secret`, - `ConfigMap`, `PersistentVolume`, and `PersistentVolumeClaim` objects - associated with their nodes. - audit: | - Run the following command on the control plane node: + id: bc294f39-513a-528c-8207-72ddd221054d + name: Ensure that the --authorization-mode argument includes Node + rule_number: 1.2.8 + profile_applicability: '* Level 1 - Master Node' + description: Restrict kubelet nodes to reading only objects associated with them. + rationale: |- + The `Node` authorization mode only allows kubelets to read `Secret`, `ConfigMap`, `PersistentVolume`, and `PersistentVolumeClaim` objects associated with their nodes. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that the `--authorization-mode` argument exists and is set to a value to include `Node`. - remediation: | - Edit the API server pod specification file - `/etc/kubernetes/manifests/kube-apiserver.yaml` - on the control plane node and set the `--authorization-mode` parameter to a - value that includes `Node`. + remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`. + ``` --authorization-mode=Node,RBAC ``` - impact: | - None + impact: None default_value: | By default, `Node` authorization is not enabled. - references: | - 1. [https://kubernetes.io/docs/admin/kube-apiserver/](https://kubernetes.io/docs/admin/kube-apiserver/) - 2. 
[https://kubernetes.io/docs/admin/authorization/node/](https://kubernetes.io/docs/admin/authorization/node/) - 3. [https://github.com/kubernetes/kubernetes/pull/46076](https://github.com/kubernetes/kubernetes/pull/46076) - 4. [https://acotten.com/post/kube17-security](https://acotten.com/post/kube17-security) + references: + - https://kubernetes.io/docs/admin/kube-apiserver/ + - https://kubernetes.io/docs/admin/authorization/node/ + - https://github.com/kubernetes/kubernetes/pull/46076 + - https://acotten.com/post/kube17-security section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.8 - - API Server + - CIS + - Kubernetes + - CIS 1.2.8 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_2_9/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_2_9/data.yaml index 47292299fe..023c4426f0 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_2_9/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_2_9/data.yaml @@ -1,43 +1,40 @@ metadata: - id: ed33b6ba-d276-5b5c-9cdc-85b62de52be1 - name: Ensure that the --authorization-mode argument includes RBAC (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Turn on Role Based Access Control. - rationale: > - Role Based Access Control (RBAC) allows fine-grained control over the - operations that - different entities can perform on different objects in the cluster. It is recommended to use - the RBAC authorization mode. - audit: | - Run the following command on the control plane node: + id: b10e1f96-660c-501e-81e4-9ceac82a9907 + name: Ensure that the --authorization-mode argument includes RBAC + rule_number: 1.2.9 + profile_applicability: '* Level 1 - Master Node' + description: Turn on Role Based Access Control. + rationale: |- + Role Based Access Control (RBAC) allows fine-grained control over the operations that different entities can perform on different objects in the cluster. + It is recommended to use the RBAC authorization mode. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-apiserver ``` + Verify that the `--authorization-mode` argument exists and is set to a value to include `RBAC`. - remediation: | - Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml - on the control plane node and set the `--authorization-mode` parameter to a - value that includes `RBAC`, for example: + remediation: |- + Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example: + ``` --authorization-mode=Node,RBAC ``` - impact: > - When RBAC is enabled you will need to ensure that appropriate RBAC settings - (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access. + impact: |- + When RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access. default_value: | By default, `RBAC` authorization is not enabled. - references: | - 1. 
[https://kubernetes.io/docs/reference/access-authn-authz/rbac/](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) + references: + - https://kubernetes.io/docs/reference/access-authn-authz/rbac/ section: API Server - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.2.9 - - API Server + - CIS + - Kubernetes + - CIS 1.2.9 + - API Server benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_3_2/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_3_2/data.yaml index fc4ebe8aa8..644e9f13d4 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_3_2/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_3_2/data.yaml @@ -1,43 +1,41 @@ metadata: - id: ed431503-ac95-5312-a02b-cf9b51778ad4 - name: Ensure that the --profiling argument is set to false (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Disable profiling, if not needed. - rationale: > + id: 75faeb95-6d19-5205-85b9-4bb1471292c4 + name: Ensure that the --profiling argument is set to false + rule_number: 1.3.2 + profile_applicability: '* Level 1 - Master Node' + description: Disable profiling, if not needed. + rationale: |- Profiling allows for the identification of specific performance bottlenecks. - It generates a significant amount of program data that could potentially be - exploited to uncover system and program details. If you are not experiencing - any bottlenecks and do not need the profiler for troubleshooting purposes, - it is recommended to turn it off to reduce the potential attack surface. - audit: | - Run the following command on the control plane node: + It generates a significant amount of program data that could potentially be exploited to uncover system and program details. + If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-controller-manager ``` + Verify that the `--profiling` argument is set to `false`. - remediation: | - Edit the Controller Manager pod specification file - `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the control plane node and set the below parameter. + remediation: |- + Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter. + ``` --profiling=false ``` - impact: | - Profiling information would not be available. + impact: Profiling information would not be available. default_value: | By default, profiling is enabled. - references: | - 1. [https://kubernetes.io/docs/admin/kube-controller-manager/](https://kubernetes.io/docs/admin/kube-controller-manager/) - 2. 
[https://github.com/kubernetes/community/blob/master/contributors/devel/sig-scalability/profiling.md](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-scalability/profiling.md) + references: + - https://kubernetes.io/docs/admin/kube-controller-manager/ + - https://github.com/kubernetes/community/blob/master/contributors/devel/profiling.md section: Controller Manager - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.3.2 - - Controller Manager + - CIS + - Kubernetes + - CIS 1.3.2 + - Controller Manager benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_3_3/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_3_3/data.yaml index e7384b9e0a..1f20092e8d 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_3_3/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_3_3/data.yaml @@ -1,61 +1,47 @@ metadata: - id: f59951c7-ec2f-52b1-96d7-a7777badc2a7 - name: - Ensure that the --use-service-account-credentials argument is set to true - (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Use individual service account credentials for each controller. - rationale: > - The controller manager creates a service account per controller in the - `kube-system` namespace, generates a credential for it, and builds a - dedicated API client with that service account credential for each - controller loop to use. Setting the `--use-service-account-credentials` to - true runs each control loop within the controller manager using a separate - service account credential. When used in combination with RBAC, this ensures - that the control loops run with the minimum permissions required to perform - their intended tasks. - audit: | - Run the following command on the control plane node: + id: e103a398-f4ee-55cb-8b5c-88772e226b17 + name: Ensure that the --use-service-account-credentials argument is set to true + rule_number: 1.3.3 + profile_applicability: '* Level 1 - Master Node' + description: Use individual service account credentials for each controller. + rationale: |- + The controller manager creates a service account per controller in the `kube-system` namespace, generates a credential for it, and builds a dedicated API client with that service account credential for each controller loop to use. + Setting the `--use-service-account-credentials` to `true` runs each control loop within the controller manager using a separate service account credential. + When used in combination with RBAC, this ensures that the control loops run with the minimum permissions required to perform their intended tasks. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-controller-manager ``` + Verify that the `--use-service-account-credentials` argument is set to `true`. - remediation: | - Edit the Controller Manager pod specification file - `/etc/kubernetes/manifests/kube-controller-manager.yaml` - on the control plane node to set the below parameter. + remediation: |- + Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter. + ``` --use-service-account-credentials=true ``` - impact: > - Whatever authorizer is configured for the cluster, it must grant sufficient - permissions to the service accounts to perform their intended tasks. 
When - using the RBAC authorizer, those roles are created and bound to the - appropriate service accounts in the `kube-system` namespace automatically - with default roles and rolebindings that are auto-reconciled on startup. If - using other authorization methods (ABAC, Webhook, etc), the cluster deployer - is responsible for granting appropriate permissions to the service accounts - (the required permissions can be seen by inspecting the - `controller-roles.yaml` and `controller-role-bindings.yaml` files for the - RBAC roles. + impact: |- + Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup. + + If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles. default_value: | By default, `--use-service-account-credentials` is set to false. - references: | - 1. [https://kubernetes.io/docs/admin/kube-controller-manager/](https://kubernetes.io/docs/admin/kube-controller-manager/) - 2. [https://kubernetes.io/docs/admin/service-accounts-admin/](https://kubernetes.io/docs/admin/service-accounts-admin/) - 3. [https://github.com/kubernetes/kubernetes/blob/release-1.6/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml](https://github.com/kubernetes/kubernetes/blob/release-1.6/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml) - 4. [https://github.com/kubernetes/kubernetes/blob/release-1.6/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml](https://github.com/kubernetes/kubernetes/blob/release-1.6/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml) - 5. 
[https://kubernetes.io/docs/admin/authorization/rbac/#controller-roles](https://kubernetes.io/docs/admin/authorization/rbac/#controller-roles) + references: + - https://kubernetes.io/docs/admin/kube-controller-manager/ + - https://kubernetes.io/docs/admin/service-accounts-admin/ + - https://github.com/kubernetes/kubernetes/blob/release-1.6/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml + - https://github.com/kubernetes/kubernetes/blob/release-1.6/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml + - https://kubernetes.io/docs/admin/authorization/rbac/#controller-roles section: Controller Manager - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.3.3 - - Controller Manager + - CIS + - Kubernetes + - CIS 1.3.3 + - Controller Manager benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_3_4/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_3_4/data.yaml index 276b532a9b..017490bd1f 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_3_4/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_3_4/data.yaml @@ -1,46 +1,41 @@ metadata: - id: e48fe269-49e1-56da-85f8-b52c9a47d918 - name: Ensure that the --service-account-private-key-file argument is set as - appropriate (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Explicitly set a service account private key file for service accounts on - the controller manager. - rationale: > - To ensure that keys for service account tokens can be rotated as needed, a - separate public/private key pair should be used for signing service account - tokens. The private key should be specified to the controller manager with - `--service-account-private-key-file` as appropriate. - audit: | - Run the following command on the control plane node: + id: fb88f47c-1309-5fa3-af23-499c217cb70a + name: Ensure that the --service-account-private-key-file argument is set as appropriate + rule_number: 1.3.4 + profile_applicability: '* Level 1 - Master Node' + description: Explicitly set a service account private key file for service accounts + on the controller manager. + rationale: |- + To ensure that keys for service account tokens can be rotated as needed, a separate public/private key pair should be used for signing service account tokens. + The private key should be specified to the controller manager with `--service-account-private-key-file` as appropriate. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-controller-manager ``` + Verify that the `--service-account-private-key-file` argument is set as appropriate. - remediation: | - Edit the Controller Manager pod specification file - `/etc/kubernetes/manifests/kube-controller-manager.yaml` - on the control plane node and set the `--service-account-private-key-file` - parameter to the private key file for service accounts. + remediation: |- + Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts. + ``` --service-account-private-key-file= ``` - impact: > - You would need to securely maintain the key file and rotate the keys based - on your organization's key rotation policy. 
+ impact: |- + You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy. default_value: | By default, `--service-account-private-key-file` it not set. - references: | - 1. [https://kubernetes.io/docs/admin/kube-controller-manager/](https://kubernetes.io/docs/admin/kube-controller-manager/) + references: + - https://kubernetes.io/docs/admin/kube-controller-manager/ section: Controller Manager - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.3.4 - - Controller Manager + - CIS + - Kubernetes + - CIS 1.3.4 + - Controller Manager benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_3_5/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_3_5/data.yaml index 4a4ae6cf41..615df909cb 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_3_5/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_3_5/data.yaml @@ -1,49 +1,43 @@ metadata: - id: 81efecae-af9e-5630-9ef0-1947aa17e376 - name: Ensure that the --root-ca-file argument is set as appropriate (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Allow pods to verify the API server's serving certificate before - establishing connections. - rationale: > - Processes running within pods that need to contact the API server must - verify the API server's serving certificate. Failing to do so could be a - subject to man-in-the-middle attacks. Providing the root certificate for the - API server's serving certificate to the controller manager with the - `--root-ca-file` argument allows the controller manager to inject the - trusted bundle into pods so that they can verify TLS connections to the API - server. - audit: | - Run the following command on the control plane node: + id: 2cb45802-52cb-582a-a6f2-0d19e1903cf1 + name: Ensure that the --root-ca-file argument is set as appropriate + rule_number: 1.3.5 + profile_applicability: '* Level 1 - Master Node' + description: Allow pods to verify the API server's serving certificate before establishing + connections. + rationale: |- + Processes running within pods that need to contact the API server must verify the API server's serving certificate. + Failing to do so could be a subject to man-in-the-middle attacks. + + Providing the root certificate for the API server's serving certificate to the controller manager with the `--root-ca-file` argument allows the controller manager to inject the trusted bundle into pods so that they can verify TLS connections to the API server. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-controller-manager ``` - Verify that the `--root-ca-file` argument exists and is set to a certificate bundle file - containing the root certificate for the API server's serving certificate. - remediation: | - Edit the Controller Manager pod specification file - `/etc/kubernetes/manifests/kube-controller-manager.yaml` - on the control plane node and set the `--root-ca-file` parameter to - the certificate bundle file. + + Verify that the `--root-ca-file` argument exists and is set to a certificate bundle file containing the root certificate for the API server's serving certificate. + remediation: |- + Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`. 
+ ``` --root-ca-file= ``` - impact: | - You need to setup and maintain root certificate authority file. + impact: You need to setup and maintain root certificate authority file. default_value: | By default, `--root-ca-file` is not set. - references: | - 1. [https://kubernetes.io/docs/admin/kube-controller-manager/](https://kubernetes.io/docs/admin/kube-controller-manager/) - 2. [https://github.com/kubernetes/kubernetes/issues/11000](https://github.com/kubernetes/kubernetes/issues/11000) + references: + - https://kubernetes.io/docs/admin/kube-controller-manager/ + - https://github.com/kubernetes/kubernetes/issues/11000 section: Controller Manager - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.3.5 - - Controller Manager + - CIS + - Kubernetes + - CIS 1.3.5 + - Controller Manager benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_3_6/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_3_6/data.yaml index 2f2e500384..f22ad9da3f 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_3_6/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_3_6/data.yaml @@ -1,52 +1,46 @@ metadata: - id: a75f69ac-866e-5fbe-b9ce-b5a2a8c20834 + id: 2b5d962b-52a6-558f-beed-6da38c4af362 name: Ensure that the RotateKubeletServerCertificate argument is set to true - (Automated) - profile_applicability: | - * Level 2 - Master Node - description: | - Enable kubelet server certificate rotation on controller-manager. - rationale: > - `RotateKubeletServerCertificate` causes the kubelet to both request a - serving certificate after bootstrapping its client credentials and rotate - the certificate as its existing credentials expire. This automated periodic - rotation ensures that the there are no downtimes due to expired certificates - and thus addressing availability in the CIA security triad. Note: This - recommendation only applies if you let kubelets get their certificates from - the API server. In case your kubelet certificates come from an outside - authority/tool (e.g. Vault) then you need to take care of rotation yourself. - audit: | - Run the following command on the control plane node: + rule_number: 1.3.6 + profile_applicability: '* Level 2 - Master Node' + description: Enable kubelet server certificate rotation on controller-manager. + rationale: |- + `RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. + This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad. + + Note: This recommendation only applies if you let kubelets get their certificates from the API server. + In case your kubelet certificates come from an outside authority/tool (e.g. + Vault) then you need to take care of rotation yourself. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-controller-manager ``` + Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`. - remediation: | - Edit the Controller Manager pod specification file - `/etc/kubernetes/manifests/kube-controller-manager.yaml` - on the control plane node and set the `--feature-gates` parameter to - include `RotateKubeletServerCertificate=true`. 
+ remediation: |- + Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`. + ``` --feature-gates=RotateKubeletServerCertificate=true ``` - impact: | - None - default_value: > - By default, `RotateKubeletServerCertificate` is set to "true" this - recommendation verifies that it has not been disabled. - references: | - 1. [https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#approval-controller](https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#approval-controller) - 2. [https://github.com/kubernetes/features/issues/267](https://github.com/kubernetes/features/issues/267) - 3. [https://github.com/kubernetes/kubernetes/pull/45059](https://github.com/kubernetes/kubernetes/pull/45059) - 4. [https://kubernetes.io/docs/admin/kube-controller-manager/](https://kubernetes.io/docs/admin/kube-controller-manager/) + impact: None + default_value: | + By default, `RotateKubeletServerCertificate` is set to "true" this recommendation verifies that it has not been disabled. + references: + - https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#approval-controller + - https://github.com/kubernetes/features/issues/267 + - https://github.com/kubernetes/kubernetes/pull/45059 + - https://kubernetes.io/docs/admin/kube-controller-manager/ section: Controller Manager - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.3.6 - - Controller Manager + - CIS + - Kubernetes + - CIS 1.3.6 + - Controller Manager benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_3_7/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_3_7/data.yaml index d8f5eb0a1c..44902bfc26 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_3_7/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_3_7/data.yaml @@ -1,40 +1,36 @@ metadata: - id: f0d72b13-5130-5045-b94d-bc8d7e00b7fc - name: Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Do not bind the Controller Manager service to non-loopback insecure + id: 091b1308-4f72-59ef-9103-4b548386ae27 + name: Ensure that the --bind-address argument is set to 127.0.0.1 + rule_number: 1.3.7 + profile_applicability: '* Level 1 - Master Node' + description: Do not bind the Controller Manager service to non-loopback insecure addresses. - rationale: > - The Controller Manager API service which runs on port 10252/TCP by default - is used for health and metrics information and is available without - authentication or encryption. As such it should only be bound to a localhost - interface, to minimize the cluster's attack surface - audit: | - Run the following command on the control plane node: + rationale: |- + The Controller Manager API service which runs on port 10252/TCP by default is used for health and metrics information and is available without authentication or encryption. 
+ As such it should only be bound to a localhost interface, to minimize the cluster's attack surface + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-controller-manager ``` + Verify that the `--bind-address` argument is set to 127.0.0.1 - remediation: | - Edit the Controller Manager pod specification file - `/etc/kubernetes/manifests/kube-controller-manager.yaml` - on the control plane node and ensure the correct value for the `--bind-address parameter` - impact: | - None + remediation: |- + Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter + impact: None default_value: | By default, the `--bind-address` parameter is set to 0.0.0.0 - references: | - 1. [https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) + references: + - https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/ section: Controller Manager - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.3.7 - - Controller Manager + - CIS + - Kubernetes + - CIS 1.3.7 + - Controller Manager benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_4_1/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_4_1/data.yaml index 32407f45c5..614491997d 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_4_1/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_4_1/data.yaml @@ -1,44 +1,41 @@ metadata: - id: 63ea14ed-b605-5fe1-b35d-6b254f16d8ab - name: Ensure that the --profiling argument is set to false (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Disable profiling, if not needed. - rationale: > + id: 75faeb95-6d19-5205-85b9-4bb1471292c4 + name: Ensure that the --profiling argument is set to false + rule_number: 1.4.1 + profile_applicability: '* Level 1 - Master Node' + description: Disable profiling, if not needed. + rationale: |- Profiling allows for the identification of specific performance bottlenecks. - It generates a significant amount of program data that could potentially be - exploited to uncover system and program details. If you are not experiencing - any bottlenecks and do not need the profiler for troubleshooting purposes, - it is recommended to turn it off to reduce the potential attack surface. - audit: | - Run the following command on the control plane node: + It generates a significant amount of program data that could potentially be exploited to uncover system and program details. + If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface. + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-scheduler ``` + Verify that the `--profiling` argument is set to `false`. - remediation: | - Edit the Scheduler pod specification file - `/etc/kubernetes/manifests/kube-scheduler.yaml` - file on the control plane node and set the below parameter. + remediation: |- + Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter. + ``` --profiling=false ``` - impact: | - Profiling information would not be available. 
+ impact: Profiling information would not be available. default_value: | By default, profiling is enabled. - references: | - 1. [https://kubernetes.io/docs/admin/kube-scheduler/](https://kubernetes.io/docs/admin/kube-scheduler/) - 2. [https://github.com/kubernetes/community/blob/master/contributors/devel/sig-scalability/profiling.md](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-scalability/profiling.md) + references: + - https://kubernetes.io/docs/admin/kube-scheduler/ + - https://github.com/kubernetes/community/blob/master/contributors/devel/profiling.md section: Scheduler - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.4.1 - - Scheduler + - CIS + - Kubernetes + - CIS 1.4.1 + - Scheduler benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_1_4_2/data.yaml b/bundle/compliance/cis_k8s/rules/cis_1_4_2/data.yaml index 8800cacc85..e3dc005798 100644 --- a/bundle/compliance/cis_k8s/rules/cis_1_4_2/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_1_4_2/data.yaml @@ -1,40 +1,35 @@ metadata: - id: 54fabe1f-8eb5-5015-82b5-30db926064c0 - name: Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Do not bind the scheduler service to non-loopback insecure addresses. - rationale: > - The Scheduler API service which runs on port 10251/TCP by default is used - for health and metrics information and is available without authentication - or encryption. As such it should only be bound to a localhost interface, to - minimize the cluster's attack surface - audit: | - Run the following command on the control plane node: + id: 091b1308-4f72-59ef-9103-4b548386ae27 + name: Ensure that the --bind-address argument is set to 127.0.0.1 + rule_number: 1.4.2 + profile_applicability: '* Level 1 - Master Node' + description: Do not bind the scheduler service to non-loopback insecure addresses. + rationale: |- + The Scheduler API service which runs on port 10251/TCP by default is used for health and metrics information and is available without authentication or encryption. + As such it should only be bound to a localhost interface, to minimize the cluster's attack surface + audit: |- + Run the following command on the Control Plane node: + ``` ps -ef | grep kube-scheduler ``` + Verify that the `--bind-address` argument is set to 127.0.0.1 - remediation: | - Edit the Scheduler pod specification file - `/etc/kubernetes/manifests/kube-scheduler.yaml` - on the control plane node and ensure the correct value for the `--bind-address` - parameter - impact: | - None + remediation: |- + Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter + impact: None default_value: | By default, the `--bind-address` parameter is set to 0.0.0.0 - references: | - 1. 
[https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) + references: + - https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/ section: Scheduler - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 1.4.2 - - Scheduler + - CIS + - Kubernetes + - CIS 1.4.2 + - Scheduler benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_2_1/data.yaml b/bundle/compliance/cis_k8s/rules/cis_2_1/data.yaml index 16bcc0cb3f..7c6ef944e7 100644 --- a/bundle/compliance/cis_k8s/rules/cis_2_1/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_2_1/data.yaml @@ -1,44 +1,43 @@ metadata: - id: 670e64f4-c52c-5efa-b0dd-6dce8175e4c0 - name: Ensure that the --cert-file and --key-file arguments are set as - appropriate (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Configure TLS encryption for the etcd service. - rationale: > - etcd is a highly-available key value store used by Kubernetes deployments - for persistent storage of all of its REST API objects. These objects are - sensitive in nature and should be encrypted in transit. - audit: | + id: 15413809-b667-59d0-a54a-972ba4826eb4 + name: Ensure that the --cert-file and --key-file arguments are set as appropriate + rule_number: '2.1' + profile_applicability: '* Level 1 - Master Node' + description: Configure TLS encryption for the etcd service. + rationale: |- + etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. + These objects are sensitive in nature and should be encrypted in transit. + audit: |- Run the following command on the etcd server node + ``` ps -ef | grep etcd ``` + Verify that the `--cert-file` and the `--key-file` arguments are set as appropriate. - remediation: | + remediation: |- Follow the etcd service documentation and configure TLS encryption. - Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the - control plane node and set the below parameters. + + Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters. + ``` --cert-file= --key-file= ``` - impact: | - Client connections only over TLS would be served. + impact: Client connections only over TLS would be served. default_value: | By default, TLS encryption is not set. - references: | - 1. [https://coreos.com/etcd/docs/latest/op-guide/security.html](https://coreos.com/etcd/docs/latest/op-guide/security.html) - 2. 
[https://kubernetes.io/docs/admin/etcd/](https://kubernetes.io/docs/admin/etcd/) + references: + - https://coreos.com/etcd/docs/latest/op-guide/security.html + - https://kubernetes.io/docs/admin/etcd/ section: etcd - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 2.1 - - etcd + - CIS + - Kubernetes + - CIS 2.1 + - etcd benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_2_2/data.yaml b/bundle/compliance/cis_k8s/rules/cis_2_2/data.yaml index 375d1dbfe8..6b5d9ed7ab 100644 --- a/bundle/compliance/cis_k8s/rules/cis_2_2/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_2_2/data.yaml @@ -1,47 +1,43 @@ metadata: - id: a6194f98-6534-5308-9683-1adb4914cc46 - name: Ensure that the --client-cert-auth argument is set to true (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Enable client authentication on etcd service. - rationale: > - etcd is a highly-available key value store used by Kubernetes deployments - for persistent storage of all of its REST API objects. These objects are - sensitive in nature and should not be available to unauthenticated clients. - You should enable the client authentication via valid certificates to secure - the access to the etcd service. - audit: | + id: 8e4b745a-d2db-5fe5-9a60-7c5029599d27 + name: Ensure that the --client-cert-auth argument is set to true + rule_number: '2.2' + profile_applicability: '* Level 1 - Master Node' + description: Enable client authentication on etcd service. + rationale: |- + etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. + These objects are sensitive in nature and should not be available to unauthenticated clients. + You should enable the client authentication via valid certificates to secure the access to the etcd service. + audit: |- Run the following command on the etcd server node: + ``` ps -ef | grep etcd ``` + Verify that the `--client-cert-auth` argument is set to `true`. - remediation: | - Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` - on - the control plane - node and set the below parameter. + remediation: |- + Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. + ``` --client-cert-auth="true" ``` - impact: > - All clients attempting to access the etcd server will require a valid client + impact: All clients attempting to access the etcd server will require a valid client certificate. default_value: | By default, the etcd service can be queried by unauthenticated clients. - references: | - 1. [https://coreos.com/etcd/docs/latest/op-guide/security.html](https://coreos.com/etcd/docs/latest/op-guide/security.html) - 2. [https://kubernetes.io/docs/admin/etcd/](https://kubernetes.io/docs/admin/etcd/) - 3. 
[https://coreos.com/etcd/docs/latest/op-guide/configuration.html#client-cert-auth](https://coreos.com/etcd/docs/latest/op-guide/configuration.html#client-cert-auth) + references: + - https://coreos.com/etcd/docs/latest/op-guide/security.html + - https://kubernetes.io/docs/admin/etcd/ + - https://coreos.com/etcd/docs/latest/op-guide/configuration.html#client-cert-auth section: etcd - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 2.2 - - etcd + - CIS + - Kubernetes + - CIS 2.2 + - etcd benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_2_3/data.yaml b/bundle/compliance/cis_k8s/rules/cis_2_3/data.yaml index 1bb51919fb..026a16bff4 100644 --- a/bundle/compliance/cis_k8s/rules/cis_2_3/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_2_3/data.yaml @@ -1,46 +1,42 @@ metadata: - id: 43af3bd9-c0b8-5f06-b1c0-8a1295983524 - name: Ensure that the --auto-tls argument is not set to true (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - Do not use self-signed certificates for TLS. - rationale: > - etcd is a highly-available key value store used by Kubernetes deployments - for persistent storage of all of its REST API objects. These objects are - sensitive in nature and should not be available to unauthenticated clients. - You should enable the client authentication via valid certificates to secure - the access to the etcd service. - audit: | + id: 0c11cba6-a115-5485-bf50-f7f57a7deffa + name: Ensure that the --auto-tls argument is not set to true + rule_number: '2.3' + profile_applicability: '* Level 1 - Master Node' + description: Do not use self-signed certificates for TLS. + rationale: |- + etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. + These objects are sensitive in nature and should not be available to unauthenticated clients. + You should enable the client authentication via valid certificates to secure the access to the etcd service. + audit: |- Run the following command on the etcd server node: + ``` ps -ef | grep etcd ``` + Verify that if the `--auto-tls` argument exists, it is not set to `true`. - remediation: | - Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` - on - the control plane - node and either remove the `--auto-tls` parameter or set it to `false`. + remediation: |- + Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`. + ``` --auto-tls=false ``` - impact: | - Clients will not be able to use self-signed certificates for TLS. + impact: Clients will not be able to use self-signed certificates for TLS. default_value: | By default, `--auto-tls` is set to `false`. - references: | - 1. [https://coreos.com/etcd/docs/latest/op-guide/security.html](https://coreos.com/etcd/docs/latest/op-guide/security.html) - 2. [https://kubernetes.io/docs/admin/etcd/](https://kubernetes.io/docs/admin/etcd/) - 3. 
[https://coreos.com/etcd/docs/latest/op-guide/configuration.html#auto-tls](https://coreos.com/etcd/docs/latest/op-guide/configuration.html#auto-tls) + references: + - https://coreos.com/etcd/docs/latest/op-guide/security.html + - https://kubernetes.io/docs/admin/etcd/ + - https://coreos.com/etcd/docs/latest/op-guide/configuration.html#auto-tls section: etcd - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 2.3 - - etcd + - CIS + - Kubernetes + - CIS 2.3 + - etcd benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_2_4/data.yaml b/bundle/compliance/cis_k8s/rules/cis_2_4/data.yaml index d3501748a8..9af3c2ec94 100644 --- a/bundle/compliance/cis_k8s/rules/cis_2_4/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_2_4/data.yaml @@ -1,53 +1,50 @@ metadata: - id: 7295eff3-3d9d-5032-8552-bcce9cba8a26 - name: - Ensure that the --peer-cert-file and --peer-key-file arguments are set as - appropriate (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - etcd should be configured to make use of TLS encryption for peer connections. - rationale: > - etcd is a highly-available key value store used by Kubernetes deployments - for persistent storage of all of its REST API objects. These objects are - sensitive in nature and should be encrypted in transit and also amongst - peers in the etcd clusters. - audit: | + id: 16d8e9bd-f09c-5195-b474-7d0861150d44 + name: Ensure that the --peer-cert-file and --peer-key-file arguments are set as + appropriate + rule_number: '2.4' + profile_applicability: '* Level 1 - Master Node' + description: etcd should be configured to make use of TLS encryption for peer connections. + rationale: |- + etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. + These objects are sensitive in nature and should be encrypted in transit and also amongst peers in the etcd clusters. + audit: |- Run the following command on the etcd server node: + ``` ps -ef | grep etcd ``` + Verify that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate. - **Note:** This recommendation is applicable only for etcd clusters. If you are using only one - etcd server in your environment then this recommendation is not applicable. - remediation: | - Follow the etcd service documentation and configure peer TLS encryption as - appropriate - for your etcd cluster. - Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the - control plane node and set the below parameters. + + **Note:** This recommendation is applicable only for etcd clusters. + If you are using only one etcd server in your environment then this recommendation is not applicable. + remediation: |- + Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster. + + Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters. + ``` --peer-client-file= --peer-key-file= ``` - impact: | - etcd cluster peers would need to set up TLS for their communication. + impact: etcd cluster peers would need to set up TLS for their communication. default_value: | **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, peer communication over TLS is not configured. 
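# Illustrative example for the peer-TLS remediation above: a fragment of an
# etcd static pod manifest (/etc/kubernetes/manifests/etcd.yaml) with the peer
# certificate flags set. The certificate and key paths are assumed placeholder
# values, not values taken from this benchmark; adjust them to your PKI layout.
spec:
  containers:
    - name: etcd
      command:
        - etcd
        - --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
        - --peer-key-file=/etc/kubernetes/pki/etcd/peer.key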
- references: | - 1. [https://coreos.com/etcd/docs/latest/op-guide/security.html](https://coreos.com/etcd/docs/latest/op-guide/security.html) - 2. [https://kubernetes.io/docs/admin/etcd/](https://kubernetes.io/docs/admin/etcd/) + references: + - https://coreos.com/etcd/docs/latest/op-guide/security.html + - https://kubernetes.io/docs/admin/etcd/ section: etcd - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 2.4 - - etcd + - CIS + - Kubernetes + - CIS 2.4 + - etcd benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_2_5/data.yaml b/bundle/compliance/cis_k8s/rules/cis_2_5/data.yaml index 2cc721d839..4da673940e 100644 --- a/bundle/compliance/cis_k8s/rules/cis_2_5/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_2_5/data.yaml @@ -1,51 +1,48 @@ metadata: - id: b26d85f5-ac08-535f-8b5d-8e461a0cf679 - name: Ensure that the --peer-client-cert-auth argument is set to true (Automated) - profile_applicability: | - * Level 1 - Master Node - description: | - etcd should be configured for peer authentication. - rationale: > - etcd is a highly-available key value store used by Kubernetes deployments - for persistent storage of all of its REST API objects. These objects are - sensitive in nature and should be accessible only by authenticated etcd - peers in the etcd cluster. - audit: | + id: be86166d-d578-5af3-a36c-fabca73ff99c + name: Ensure that the --peer-client-cert-auth argument is set to true + rule_number: '2.5' + profile_applicability: '* Level 1 - Master Node' + description: etcd should be configured for peer authentication. + rationale: |- + etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. + These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. + audit: |- Run the following command on the etcd server node: + ``` ps -ef | grep etcd ``` + Verify that the `--peer-client-cert-auth` argument is set to `true`. - **Note:** This recommendation is applicable only for etcd clusters. If you are using only one - etcd server in your environment then this recommendation is not applicable. - remediation: | - Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` - on - the control plane - node and set the below parameter. + + **Note:** This recommendation is applicable only for etcd clusters. + If you are using only one etcd server in your environment then this recommendation is not applicable. + remediation: |- + Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. + ``` --peer-client-cert-auth=true ``` - impact: > - All peers attempting to communicate with the etcd server will require a - valid client certificate for authentication. + impact: |- + All peers attempting to communicate with the etcd server will require a valid client certificate for authentication. default_value: | **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-client-cert-auth` argument is set to `false`. - references: | - 1. [https://coreos.com/etcd/docs/latest/op-guide/security.html](https://coreos.com/etcd/docs/latest/op-guide/security.html) - 2. [https://kubernetes.io/docs/admin/etcd/](https://kubernetes.io/docs/admin/etcd/) - 3. 
[https://coreos.com/etcd/docs/latest/op-guide/configuration.html#peer-client-cert-auth](https://coreos.com/etcd/docs/latest/op-guide/configuration.html#peer-client-cert-auth) + references: + - https://coreos.com/etcd/docs/latest/op-guide/security.html + - https://kubernetes.io/docs/admin/etcd/ + - https://coreos.com/etcd/docs/latest/op-guide/configuration.html#peer-client-cert-auth section: etcd - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 2.5 - - etcd + - CIS + - Kubernetes + - CIS 2.5 + - etcd benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_2_6/data.yaml b/bundle/compliance/cis_k8s/rules/cis_2_6/data.yaml index 04dd3e8baf..19279949b9 100644 --- a/bundle/compliance/cis_k8s/rules/cis_2_6/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_2_6/data.yaml @@ -1,52 +1,48 @@ metadata: - id: 59f978f4-c825-578a-bc21-ab5f395c5cd9 - name: Ensure that the --peer-auto-tls argument is not set to true (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Do not use automatically generated self-signed certificates for TLS + id: 6fa22113-1614-5998-94c8-7fba2c6d151f + name: Ensure that the --peer-auto-tls argument is not set to true + rule_number: '2.6' + profile_applicability: '* Level 1 - Master Node' + description: Do not use automatically generated self-signed certificates for TLS connections between peers. - rationale: > - etcd is a highly-available key value store used by Kubernetes deployments - for persistent storage of all of its REST API objects. These objects are - sensitive in nature and should be accessible only by authenticated etcd - peers in the etcd cluster. Hence, do not use self- signed certificates for - authentication. - audit: | + rationale: |- + etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. + These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. + Hence, do not use self-signed certificates for authentication. + audit: |- Run the following command on the etcd server node: + ``` ps -ef | grep etcd ``` + Verify that if the `--peer-auto-tls` argument exists, it is not set to `true`. - **Note:** This recommendation is applicable only for etcd clusters. If you are using only one - etcd server in your environment then this recommendation is not applicable. - remediation: | - Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` - on - the control plane - node and either remove the `--peer-auto-tls` parameter or set it to `false`. + **Note:** This recommendation is applicable only for etcd clusters. + If you are using only one etcd server in your environment then this recommendation is not applicable. + remediation: |- + Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`. + ``` --peer-auto-tls=false ``` - impact: > - All peers attempting to communicate with the etcd server will require a - valid client certificate for authentication. + impact: |- + All peers attempting to communicate with the etcd server will require a valid client certificate for authentication. default_value: | **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. 
By default, `--peer-auto-tls` argument is set to `false`. - references: | - 1. [https://coreos.com/etcd/docs/latest/op-guide/security.html](https://coreos.com/etcd/docs/latest/op-guide/security.html) - 2. [https://kubernetes.io/docs/admin/etcd/](https://kubernetes.io/docs/admin/etcd/) - 3. [https://coreos.com/etcd/docs/latest/op-guide/configuration.html#peer-auto-tls](https://coreos.com/etcd/docs/latest/op-guide/configuration.html#peer-auto-tls) + references: + - https://coreos.com/etcd/docs/latest/op-guide/security.html + - https://kubernetes.io/docs/admin/etcd/ + - https://coreos.com/etcd/docs/latest/op-guide/configuration.html#peer-auto-tls section: etcd - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 2.6 - - etcd + - CIS + - Kubernetes + - CIS 2.6 + - etcd benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_1_1/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_1_1/data.yaml index 339ada2efa..02a47b7110 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_1_1/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_1_1/data.yaml @@ -1,58 +1,55 @@ metadata: - id: 9144ba59-0a3e-59fb-b96e-e3f73e7aaf66 - name: Ensure that the kubelet service file permissions are set to 644 or more - restrictive (Automated) - profile_applicability: | - * Level 1 - Worker Node - description: > - Ensure that the `kubelet` service file has permissions of `644` or more - restrictive. - rationale: > - The `kubelet` service file controls various parameters that set the behavior - of the `kubelet` service in the worker node. You should restrict its file - permissions to maintain the integrity of the file. The file should be - writable by only the administrators on the system. - audit: | - Automated AAC auditing has been modified to allow CIS-CAT to input a - variable - for the / of - the kubelet service config file. - Please set $kubelet_service_config= based on the file location on your system + id: 46c54b14-2a9f-5a4c-9c9e-4a71280422ee + name: Ensure that the kubelet service file permissions are set to 644 or more restrictive + rule_number: 4.1.1 + profile_applicability: '* Level 1 - Worker Node' + description: Ensure that the `kubelet` service file has permissions of `644` or + more restrictive. + rationale: |- + The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. + You should restrict its file permissions to maintain the integrity of the file. + The file should be writable by only the administrators on the system. + audit: |- + Automated AAC auditing has been modified to allow CIS-CAT to input a variable for the / of the kubelet service config file. + + Please set $kubelet_service_config= based on the file location on your system + for example: ``` export kubelet_service_config=/etc/systemd/system/kubelet.service.d/10-kubeadm.conf ``` + To perform the audit manually: - Run the below command (based on the file location on your system) on the each worker - node. For example, + Run the below command (based on the file location on your system) on the each worker node. + For example, + ``` stat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf ``` + Verify that the permissions are `644` or more restrictive. - remediation: | - Run the below command (based on the file location on your system) on the - each - worker - node. 
For example, + remediation: |- + Run the below command (based on the file location on your system) on the each worker node. + For example, + ``` - chmod 755 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + chmod 644 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf ``` - impact: | - None + impact: None default_value: | By default, the `kubelet` service file has permissions of `640`. - references: | - 1. [https://kubernetes.io/docs/admin/kubelet/](https://kubernetes.io/docs/admin/kubelet/) - 2. [https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#44-joining-your-nodes](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#44-joining-your-nodes) - 3. [https://kubernetes.io/docs/admin/kubeadm/#kubelet-drop-in](https://kubernetes.io/docs/admin/kubeadm/#kubelet-drop-in) + references: + - https://kubernetes.io/docs/admin/kubelet/ + - https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#44-joining-your-nodes + - https://kubernetes.io/docs/admin/kubeadm/#kubelet-drop-in section: Worker Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 4.1.1 - - Worker Node Configuration Files + - CIS + - Kubernetes + - CIS 4.1.1 + - Worker Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_1_10/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_1_10/data.yaml index d191536e97..ab83a914e9 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_1_10/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_1_10/data.yaml @@ -1,56 +1,53 @@ metadata: - id: 192af64f-4521-584c-ae84-5c7ad8a11597 - name: Ensure that the kubelet --config configuration file ownership is set to - root:root (Automated) - profile_applicability: | - * Level 1 - Worker Node - description: > - Ensure that if the kubelet refers to a configuration file with the - `--config` argument, that file is owned by root:root. - rationale: > - The kubelet reads various parameters, including security settings, from a - config file specified by the `--config` argument. If this file is specified - you should restrict its file permissions to maintain the integrity of the - file. The file should be owned by root:root. - audit: | - Automated AAC auditing has been modified to allow CIS-CAT to input a - variable - for the / of - the kubelet config yaml file. - Please set $kubelet_config_yaml= based on the file location on your system + id: 4ee863b2-fde3-583c-83e4-0b809bf4389e + name: Ensure that the kubelet --config configuration file ownership is set to root:root + rule_number: 4.1.10 + profile_applicability: '* Level 1 - Worker Node' + description: |- + Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root. + rationale: |- + The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. + If this file is specified you should restrict its file permissions to maintain the integrity of the file. + The file should be owned by root:root. + audit: |- + Automated AAC auditing has been modified to allow CIS-CAT to input a variable for the / of the kubelet config yaml file. 
+ + Please set $kubelet_config_yaml= based on the file location on your system + for example: ``` export kubelet_config_yaml=/var/lib/kubelet/config.yaml + ``` + To perform the audit manually: - Run the below command (based on the file location on your system) on the each worker - node. For example, + Run the below command (based on the file location on your system) on the each worker node. + For example, + ``` stat -c %a /var/lib/kubelet/config.yaml - ``` - Verify that the ownership is set to `root:root`. - remediation: | - Run the following command (using the config file location identied in the - Audit step) + ```Verify that the ownership is set to `root:root`. + remediation: |- + Run the following command (using the config file location identied in the Audit step) + ``` chown root:root /etc/kubernetes/kubelet.conf ``` - impact: | - None + impact: None default_value: > By default, `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`. - references: | - 1. [https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/](https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/) + references: + - https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ section: Worker Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 4.1.10 - - Worker Node Configuration Files + - CIS + - Kubernetes + - CIS 4.1.10 + - Worker Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_1_2/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_1_2/data.yaml index 5d663ace01..5865e07fda 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_1_2/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_1_2/data.yaml @@ -1,57 +1,53 @@ metadata: - id: 827e1022-2702-55c4-aa65-3315517bb6c0 + id: 5316a432-8098-500b-ab01-f13d37e2d66e name: Ensure that the kubelet service file ownership is set to root:root - (Automated) - profile_applicability: | - * Level 1 - Worker Node - description: | - Ensure that the `kubelet` service file ownership is set to `root:root`. - rationale: > - The `kubelet` service file controls various parameters that set the behavior - of the `kubelet` service in the worker node. You should set its file - ownership to maintain the integrity of the file. The file should be owned by - `root:root`. - audit: | - Automated AAC auditing has been modified to allow CIS-CAT to input a - variable - for the / of - the kubelet service config file. - Please set $kubelet_service_config= based on the file location on your system + rule_number: 4.1.2 + profile_applicability: '* Level 1 - Worker Node' + description: Ensure that the `kubelet` service file ownership is set to `root:root`. + rationale: |- + The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. + You should set its file ownership to maintain the integrity of the file. + The file should be owned by `root:root`. + audit: |- + Automated AAC auditing has been modified to allow CIS-CAT to input a variable for the / of the kubelet service config file. + + Please set $kubelet_service_config= based on the file location on your system + for example: ``` export kubelet_service_config=/etc/systemd/system/kubelet.service.d/10-kubeadm.conf ``` + To perform the audit manually: - Run the below command (based on the file location on your system) on the each worker - node. 
For example, + Run the below command (based on the file location on your system) on the each worker node. + For example, + ``` stat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf ``` Verify that the ownership is set to `root:root`. - remediation: | - Run the below command (based on the file location on your system) on the - each - worker - node. For example, + remediation: |- + Run the below command (based on the file location on your system) on the each worker node. + For example, + ``` chown root:root /etc/systemd/system/kubelet.service.d/10-kubeadm.conf ``` - impact: | - None + impact: None default_value: | By default, `kubelet` service file ownership is set to `root:root`. - references: | - 1. [https://kubernetes.io/docs/admin/kubelet/](https://kubernetes.io/docs/admin/kubelet/) - 2. [https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#44-joining-your-nodes](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#44-joining-your-nodes) - 3. [https://kubernetes.io/docs/admin/kubeadm/#kubelet-drop-in](https://kubernetes.io/docs/admin/kubeadm/#kubelet-drop-in) + references: + - https://kubernetes.io/docs/admin/kubelet/ + - https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#44-joining-your-nodes + - https://kubernetes.io/docs/admin/kubeadm/#kubelet-drop-in section: Worker Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 4.1.2 - - Worker Node Configuration Files + - CIS + - Kubernetes + - CIS 4.1.2 + - Worker Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_1_5/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_1_5/data.yaml index c060fdf8b7..939aba93eb 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_1_5/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_1_5/data.yaml @@ -1,58 +1,54 @@ metadata: - id: 3be1207a-0cfe-5dbd-abde-97e50421466d - name: - Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 - or more restrictive (Automated) - profile_applicability: | - * Level 1 - Worker Node - description: > - Ensure that the `kubelet.conf` file has permissions of `644` or more + id: 406fd115-6c53-5bed-b8d6-b5def651b284 + name: Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 + or more restrictive + rule_number: 4.1.5 + profile_applicability: '* Level 1 - Worker Node' + description: Ensure that the `kubelet.conf` file has permissions of `644` or more restrictive. - rationale: > - The `kubelet.conf` file is the kubeconfig file for the node, and controls - various parameters that set the behavior and identity of the worker node. - You should restrict its file permissions to maintain the integrity of the - file. The file should be writable by only the administrators on the system. - audit: | - Automated AAC auditing has been modified to allow CIS-CAT to input a - variable - for the / of - the kubelet config file. - Please set $kubelet_config= based on the file location on your system + rationale: |- + The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. + You should restrict its file permissions to maintain the integrity of the file. + The file should be writable by only the administrators on the system. + audit: |- + Automated AAC auditing has been modified to allow CIS-CAT to input a variable for the / of the kubelet config file. 
+ + Please set $kubelet_config= based on the file location on your system + for example: ``` export kubelet_config=/etc/kubernetes/kubelet.conf ``` + To perform the audit manually: - Run the below command (based on the file location on your system) on the each worker - node. For example, + Run the below command (based on the file location on your system) on the each worker node. + For example, + ``` - stat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + stat -c %a /etc/kubernetes/kubelet.conf ``` - Verify that the ownership is set to `root:root`. Verify that the permissions are `644` or more - restrictive. - remediation: | - Run the below command (based on the file location on your system) on the - each - worker - node. For example, + + Verify that the ownership is set to `root:root`.Verify that the permissions are `644` or more restrictive. + remediation: |- + Run the below command (based on the file location on your system) on the each worker node. + For example, + ``` chmod 644 /etc/kubernetes/kubelet.conf ``` - impact: | - None + impact: None default_value: | By default, `kubelet.conf` file has permissions of `640`. - references: | - 1. [https://kubernetes.io/docs/admin/kubelet/](https://kubernetes.io/docs/admin/kubelet/) + references: + - https://kubernetes.io/docs/admin/kubelet/ section: Worker Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 4.1.5 - - Worker Node Configuration Files + - CIS + - Kubernetes + - CIS 4.1.5 + - Worker Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_1_6/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_1_6/data.yaml index 674653ac43..ef454b97f1 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_1_6/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_1_6/data.yaml @@ -1,57 +1,52 @@ metadata: - id: 743de18a-f988-55e3-b6a9-0b692d1e25fb - name: Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) - profile_applicability: | - • Level 1 - Worker Node - description: | - Ensure that the kubelet.conf file ownership is set to root:root. - rationale: > - The kubelet.conf file is the kubeconfig file for the node, and controls various parameters - - that set the behavior and identity of the worker node. You should set its file ownership to - - maintain the integrity of the file. The file should be owned by root:root. - audit: > - Automated AAC auditing has been modified to allow CIS-CAT to input a variable for the / of - - the kubelet config file. - - Please set $kubelet_config= based on the file location on your system + id: 37ad604b-0db6-5227-bd00-9bfde1253a82 + name: Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root + rule_number: 4.1.6 + profile_applicability: '* Level 1 - Worker Node' + description: Ensure that the `kubelet.conf` file ownership is set to `root:root`. + rationale: |- + The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. + You should set its file ownership to maintain the integrity of the file. + The file should be owned by `root:root`. + audit: |- + Automated AAC auditing has been modified to allow CIS-CAT to input a variable for the / of the kubelet config file. 
+ + Please set $kubelet_config= based on the file location on your system for example: - - `export kubelet_config=/etc/kubernetes/kubelet.conf` + ``` + export kubelet_config=/etc/kubernetes/kubelet.conf + ``` To perform the audit manually: + Run the below command (based on the file location on your system) on the each worker node. + For example, - Run the below command (based on the file location on your system) on the each worker - - node. For example, ``` stat -c %U %G /etc/kubernetes/kubelet.conf ``` + Verify that the ownership is set to `root:root`. - remediation: > - Run the below command (based on the file location on your system) on the - - each worker node. For example, + remediation: |- + Run the below command (based on the file location on your system) on the each worker node. + For example, + ``` chown root:root /etc/kubernetes/kubelet.conf ``` - impact: | - None + impact: None default_value: | By default, `kubelet.conf` file ownership is set to `root:root`. - references: | - 1. https://kubernetes.io/docs/admin/kubelet/ + references: + - https://kubernetes.io/docs/admin/kubelet/ section: Worker Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 4.1.6 - - Worker Node Configuration Files + - CIS + - Kubernetes + - CIS 4.1.6 + - Worker Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_1_9/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_1_9/data.yaml index 18c0102425..f4113ddd38 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_1_9/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_1_9/data.yaml @@ -1,56 +1,55 @@ metadata: - id: 655e7d25-f5a9-547f-847c-70ee6c1ca801 - name: - Ensure that the kubelet --config configuration file has permissions set to - 644 or more restrictive (Automated) - profile_applicability: | - * Level 1 - Worker Node - description: > - Ensure that if the kubelet refers to a configuration file with the - `--config` argument, that file has permissions of 644 or more restrictive. - rationale: > - The kubelet reads various parameters, including security settings, from a - config file specified by the `--config` argument. If this file is specified - you should restrict its file permissions to maintain the integrity of the - file. The file should be writable by only the administrators on the system. - audit: | - Automated AAC auditing has been modified to allow CIS-CAT to input a - variable - for the / of - the kubelet config yaml file. - Please set $kubelet_config_yaml= based on the file location on your system + id: f13be1cb-5ce3-50d5-937e-2b64b1b769fd + name: Ensure that the kubelet --config configuration file has permissions set to + 644 or more restrictive + rule_number: 4.1.9 + profile_applicability: '* Level 1 - Worker Node' + description: |- + Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive. + rationale: |- + The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. + If this file is specified you should restrict its file permissions to maintain the integrity of the file. + The file should be writable by only the administrators on the system. + audit: |- + Automated AAC auditing has been modified to allow CIS-CAT to input a variable for the / of the kubelet config yaml file. 
+ + Please set $kubelet_config_yaml= based on the file location on your system + for example: ``` export kubelet_config_yaml=/var/lib/kubelet/config.yaml + ``` + To perform the audit manually: - Run the below command (based on the file location on your system) on the each worker - node. For example, + Run the below command (based on the file location on your system) on the each worker node. + For example, + ``` stat -c %a /var/lib/kubelet/config.yaml ``` + Verify that the permissions are `644` or more restrictive. - remediation: | - Run the following command (using the config file location identied in the - Audit step) + remediation: |- + Run the following command (using the config file location identied in the Audit step) + ``` chmod 644 /var/lib/kubelet/config.yaml ``` - impact: | - None + impact: None default_value: > By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 644. - references: | - 1. [https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/](https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/) + references: + - https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ section: Worker Node Configuration Files - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 4.1.9 - - Worker Node Configuration Files + - CIS + - Kubernetes + - CIS 4.1.9 + - Worker Node Configuration Files benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_2_1/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_2_1/data.yaml index 4026d8ab1e..b97a58f356 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_2_1/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_2_1/data.yaml @@ -1,56 +1,58 @@ metadata: - id: a082f4e6-67d7-56c4-9764-42db2030a552 - name: Ensure that the --anonymous-auth argument is set to false (Automated) - profile_applicability: | - * Level 1 - Worker Node - description: | - Disable anonymous requests to the Kubelet server. - rationale: > - When enabled, requests that are not rejected by other configured - authentication methods are treated as anonymous requests. These requests are - then served by the Kubelet server. You should rely on authentication to - authorize access and disallow anonymous requests. - audit: | - If using a Kubelet configuration file, check that there is an entry for - `authentication: anonymous: enabled` set to `false`. + id: 7ead9422-3286-5a8a-9b3e-9fa9af02607c + name: Ensure that the --anonymous-auth argument is set to false + rule_number: 4.2.1 + profile_applicability: '* Level 1 - Worker Node' + description: Disable anonymous requests to the Kubelet server. + rationale: |- + When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. + These requests are then served by the Kubelet server. + You should rely on authentication to authorize access and disallow anonymous requests. + audit: |- + If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`. + + Run the following command on each node: + ``` ps -ef | grep kubelet ``` + Verify that the `--anonymous-auth` argument is set to `false`. - This executable argument may be omitted, provided there is a corresponding entry set to - `false` in the Kubelet config file. - remediation: | - If using a Kubelet config file, edit the file to set `authentication: - anonymous: enabled` - to - `false`. 
- If using executable arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and - set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + + + This executable argument may be omitted, provided there is a corresponding entry set to `false` in the Kubelet config file. + remediation: |- + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`. + + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + ``` --anonymous-auth=false ``` - Based on your system, restart the `kubelet` service. For example: + + Based on your system, restart the `kubelet` service. + For example: + ``` systemctl daemon-reload systemctl restart kubelet.service ``` - impact: | - Anonymous requests will be rejected. + impact: Anonymous requests will be rejected. default_value: | By default, anonymous access is enabled. - references: | - 1. [https://kubernetes.io/docs/admin/kubelet/](https://kubernetes.io/docs/admin/kubelet/) - 2. [https://kubernetes.io/docs/admin/kubelet-authentication-authorization/#kubelet-authentication](https://kubernetes.io/docs/admin/kubelet-authentication-authorization/#kubelet-authentication) + references: + - https://kubernetes.io/docs/admin/kubelet/ + - https://kubernetes.io/docs/admin/kubelet-authentication-authorization/#kubelet-authentication section: Kubelet - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 4.2.1 - - Kubelet + - CIS + - Kubernetes + - CIS 4.2.1 + - Kubelet benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_2_10/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_2_10/data.yaml index 88e78b5448..f654ef679a 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_2_10/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_2_10/data.yaml @@ -1,44 +1,43 @@ metadata: - id: dc91f4c4-4f0e-59ba-a0e1-96e996736787 - name: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual) - profile_applicability: | - * Level 1 - Worker Node - description: | - Setup TLS connection on the Kubelets. - rationale: | - The connections from the apiserver to the kubelet are used for fetching logs for pods, - attaching (through kubectl) to running pods, and using the kubelet’s port-forwarding - functionality. These connections terminate at the kubelet’s HTTPS endpoint. By default, the - apiserver does not verify the kubelet’s serving certificate, which makes the connection - subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public - networks. - audit: | + id: 719ccb48-0106-5c2a-a563-b31620da736a + name: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set + as appropriate + rule_number: 4.2.10 + profile_applicability: '* Level 1 - Worker Node' + description: Setup TLS connection on the Kubelets. + rationale: |- + The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet’s port-forwarding functionality. + These connections terminate at the kubelet’s HTTPS endpoint. 
+ By default, the apiserver does not verify the kubelet’s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. + audit: |- Run the following command on each node: + ``` ps -ef | grep kubelet ``` - Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and they are set as - appropriate. - If these arguments are not present, check that there is a Kubelet config specified by `--config` - and that it contains appropriate settings for tlsCertFile and tlsPrivateKeyFile. - remediation: | - If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate - file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the - corresponding private key file. - If using command line arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set - the below parameters in KUBELET_CERTIFICATE_ARGS variable. + + Verify that the --tls-cert-file and --tls-private-key-file arguments exist and they are set as appropriate. + + If these arguments are not present, check that there is a Kubelet config specified by --config and that it contains appropriate settings for tlsCertFile and tlsPrivateKeyFile. + remediation: |- + If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file. + + If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= --tls-private-key-file= - Based on your system, restart the kubelet service. For example: + Based on your system, restart the kubelet service. + For example: + ``` systemctl daemon-reload systemctl restart kubelet.service ``` - impact: + impact: None default_value: references: + - nan section: Kubelet - version: "1.0" + version: '1.0' tags: - CIS - Kubernetes @@ -46,5 +45,5 @@ metadata: - Kubelet benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_2_11/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_2_11/data.yaml index 56abb9d509..2665799504 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_2_11/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_2_11/data.yaml @@ -1,54 +1,50 @@ metadata: - id: d94918af-be06-5d3c-b880-6d9d0f23a1e7 - name: Ensure that the --rotate-certificates argument is not set to false (Manual) - profile_applicability: | - * Level 1 - Worker Node - description: | - Enable kubelet client certificate rotation. - rationale: > - The `--rotate-certificates` setting causes the kubelet to rotate its client - certificates by creating new CSRs as its existing credentials expire. - This automated periodic rotation - ensures that the there is no downtime due to expired certificates and thus addressing - availability in the CIA security triad. - Note: This recommendation only applies if you let kubelets get their certificates from the - API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
+ id: bc4855e5-8921-5622-9784-5723c6d56091 + name: Ensure that the --rotate-certificates argument is not set to false + rule_number: 4.2.11 + profile_applicability: '* Level 1 - Worker Node' + description: Enable kubelet client certificate rotation. + rationale: |- + The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. + This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA security triad. + + **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. + In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself. - Note: This feature also require the `RotateKubeletClientCertificate` feature gate to be - enabled (which is the default since Kubernetes v1.7) - audit: | + + **Note:** This feature also require the `RotateKubeletClientCertificate` feature gate to be enabled (which is the default since Kubernetes v1.7) + audit: |- Run the following command on each node: + ``` ps -ef | grep kubelet ``` + Verify that the `--rotate-certificates` argument is not present, or is set to `true`. - If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet - config file specified by `--config`, that file does not contain `rotateCertificates: false`. - remediation: | - If using a Kubelet config file, edit the file to add the line - `rotateCertificates: true` or - remove it altogether to use the default value. - If using command line arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and - remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` - variable. - Based on your system, restart the kubelet service. For example: + + If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet config file specified by `--config`, that file does not contain `rotateCertificates: false`. + remediation: |- + If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value. + + If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable. + + Based on your system, restart the `kubelet` service. + For example: + ``` systemctl daemon-reload - systemctl restart kubelet.service ``` - impact: | - None + impact: None default_value: | By default, kubelet client certificate rotation is enabled. - references: | - 1. [https://github.com/kubernetes/kubernetes/pull/41912](https://github.com/kubernetes/kubernetes/pull/41912) - 2. [https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration) - 3. [https://kubernetes.io/docs/imported/release/notes/](https://kubernetes.io/docs/imported/release/notes/) - 4. 
[https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/) + references: + - https://github.com/kubernetes/kubernetes/pull/41912 + - https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration + - https://kubernetes.io/docs/imported/release/notes/ + - https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/ section: Kubelet - version: "1.0" + version: '1.0' tags: - CIS - Kubernetes @@ -56,5 +52,5 @@ metadata: - Kubelet benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 - id: cis_k8s \ No newline at end of file + version: v1.0.1 + id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_2_12/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_2_12/data.yaml index 8b794fcf60..c322439732 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_2_12/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_2_12/data.yaml @@ -1,49 +1,48 @@ metadata: - id: 46d69440-3946-50f6-83b3-a0987551afa2 - name: Verify that the RotateKubeletServerCertificate argument is set to true (Manual) - profile_applicability: | - * Level 1 - Worker Node - description: | - Enable kubelet server certificate rotation. - rationale: > - `RotateKubeletServerCertificate` causes the kubelet to both request a serving - certificate - after bootstrapping its client credentials and rotate the certificate as its existing credentials - expire. This automated periodic rotation ensures that the there are no downtimes due to - expired certificates and thus addressing availability in the CIA security triad. - Note: This recommendation only applies if you let kubelets get their certificates from the - API server. In case your kubelet certificates come from an outside authority/tool (e.g. + id: ba803c66-9d80-5c30-b951-35b54a3bde31 + name: Verify that the RotateKubeletServerCertificate argument is set to true + rule_number: 4.2.12 + profile_applicability: '* Level 1 - Worker Node' + description: Enable kubelet server certificate rotation. + rationale: |- + `RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. + This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad. + + Note: This recommendation only applies if you let kubelets get their certificates from the API server. + In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself. - audit: | - Ignore this check if `serverTLSBootstrap` is `true` in the kubelet config file - or if the `--rotate-server-certificates` parameter is set on kubelet + audit: |- + Ignore this check if serverTLSBootstrap is true in the kubelet config file or if the --rotate-server-certificates parameter is set on kubelet + Run the following command on each node: + ``` ps -ef | grep kubelet ``` + Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`. - remediation: | - Edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` - on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable. 
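# Illustrative sketch of the kubelet config-file equivalents mentioned in the
# audit above, assuming the standard KubeletConfiguration schema: either the
# serverTLSBootstrap field or the RotateKubeletServerCertificate feature gate
# enables rotation of the kubelet's serving certificate.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
serverTLSBootstrap: true
featureGates:
  RotateKubeletServerCertificate: true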
+ remediation: |- + Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable. + ``` --feature-gates=RotateKubeletServerCertificate=true ``` - Based on your system, restart the kubelet service. For example: + + Based on your system, restart the `kubelet` service. + For example: + ``` systemctl daemon-reload - systemctl restart kubelet.service ``` - impact: | - None + impact: None default_value: | By default, kubelet server certificate rotation is enabled. - references: | - 1. [https://github.com/kubernetes/kubernetes/pull/45059](https://github.com/kubernetes/kubernetes/pull/45059) - 2. [https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration](https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration) + references: + - https://github.com/kubernetes/kubernetes/pull/45059 + - https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration section: Kubelet - version: "1.0" + version: '1.0' tags: - CIS - Kubernetes @@ -51,5 +50,5 @@ metadata: - Kubelet benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_2_13/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_2_13/data.yaml index 5510de68d4..eba183a534 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_2_13/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_2_13/data.yaml @@ -1,66 +1,61 @@ metadata: - id: f9d84d7f-a741-5f2a-a454-704554380bca - name: Ensure that the Kubelet only makes use of Strong CryptographicCiphers (Manual) - profile_applicability: | - * Level 1 - Worker Node - description: > - Ensure that the Kubelet is configured to only use strong cryptographic + id: 05afb6ee-e198-5d85-97e4-05d86c73b1e4 + name: Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers + rule_number: 4.2.13 + profile_applicability: '* Level 1 - Worker Node' + description: Ensure that the Kubelet is configured to only use strong cryptographic ciphers. - rationale: > - TLS ciphers have had a number of known vulnerabilities and weaknesses, which - can - reduce the protection provided by them. By default Kubernetes supports a number of TLS - ciphersuites including some that have security concerns, weakening the protection - provided. - audit: | - The set of cryptographic ciphers currently considered secure is the - following: - * `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` - * `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` - * `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` - * `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384` - * `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` - * `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` - * `TLS_RSA_WITH_AES_256_GCM_SHA384` - * `TLS_RSA_WITH_AES_128_GCM_SHA256` + rationale: |- + TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. + By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided. 
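# Illustrative sketch: restricting the kubelet to the strong cipher set named
# in this rule via its config file, assuming the standard KubeletConfiguration
# schema; the list may also be trimmed to a subset of these values.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
tlsCipherSuites:
  - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
  - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
  - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
  - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
  - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
  - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
  - TLS_RSA_WITH_AES_256_GCM_SHA384
  - TLS_RSA_WITH_AES_128_GCM_SHA256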
+ audit: |- + The set of cryptographic ciphers currently considered secure is the following: + + - `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` + - `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` + - `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` + - `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384` + - `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` + - `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` + - `TLS_RSA_WITH_AES_256_GCM_SHA384` + - `TLS_RSA_WITH_AES_128_GCM_SHA256` Run the following command on each node: + ``` ps -ef | grep kubelet ``` - If the `--tls-cipher-suites` argument is present, ensure it only contains values included in - this set. - If it is not present check that there is a Kubelet config file specified by `--config`, and that - file sets `TLSCipherSuites:` to only include values from this set. - remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites`: to - `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` - ,`TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384` - ,`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` - ,`TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset - of these values. - If using executable arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and - set the `--tls-cipher-suites` parameter as follows, or to a subset of these values. + + If the `--tls-cipher-suites` argument is present, ensure it only contains values included in this set. + + + If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `TLSCipherSuites:` to only include values from this set. + remediation: |- + If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values. + + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values. + + ``` - --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM - _SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM - _SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM - _SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 ``` - Based on your system, restart the kubelet service. For example: + + Based on your system, restart the `kubelet` service. + For example: + ``` systemctl daemon-reload - systemctl restart kubelet.service + systemctl restart kubelet.service ``` - impact: | - Kubelet clients that cannot support modern cryptographic ciphers will not be - able to make connections to the Kubelet API. 
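As an aside, and not part of the benchmark text, the same restriction can be applied via the kubelet config file instead of the `--tls-cipher-suites` flag. This assumes the `KubeletConfiguration` v1beta1 schema, where the field is spelled `tlsCipherSuites`; the list below is a subset of the strong ciphers enumerated in the audit section above.

```
# Sketch: limiting the kubelet to strong cipher suites via the config file.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
tlsCipherSuites:
  - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
  - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
  - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
  - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
  - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
  - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
```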
+ impact: |- + Kubelet clients that cannot support modern cryptographic ciphers will not be able to make connections to the Kubelet API. default_value: | By default the Kubernetes API server supports a wide range of TLS ciphers references: + - nan section: Kubelet - version: "1.0" + version: '1.0' tags: - CIS - Kubernetes @@ -68,5 +63,5 @@ metadata: - Kubelet benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_2_2/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_2_2/data.yaml index 2b96f7fe57..51c79c29c2 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_2_2/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_2_2/data.yaml @@ -1,56 +1,55 @@ metadata: - id: 307e747c-5998-5bf8-a8e3-d24aab71e558 + id: 3781a71f-54e5-5987-9da2-d72a7d139af7 name: Ensure that the --authorization-mode argument is not set to AlwaysAllow - (Automated) - profile_applicability: | - * Level 1 - Worker Node - description: | - Do not allow all requests. Enable explicit authorization. - rationale: > - Kubelets, by default, allow all authenticated requests (even anonymous ones) - without needing explicit authorization checks from the apiserver. You should - restrict this behavior and only allow explicitly authorized requests. - audit: | + rule_number: 4.2.2 + profile_applicability: '* Level 1 - Worker Node' + description: "Do not allow all requests.\nEnable explicit authorization." + rationale: |- + Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. + You should restrict this behavior and only allow explicitly authorized requests. + audit: |- Run the following command on each node: + ``` ps -ef | grep kubelet ``` - If the `--authorization-mode` argument is present check that it is not set to `AlwaysAllow`. If - it is not present check that there is a Kubelet config file specified by `--config`, and that file - sets `authorization: mode` to something other than `AlwaysAllow`. - It is also possible to review the running configuration of a Kubelet via the `/configz` - endpoint on the Kubelet API port (typically `10250/TCP`). Accessing these with appropriate - credentials will provide details of the Kubelet's configuration. - remediation: | - If using a Kubelet config file, edit the file to set `authorization: mode` - to - `Webhook`. - If using executable arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and - set the below parameter in `KUBELET_AUTHZ_ARGS` variable. + + If the `--authorization-mode` argument is present check that it is not set to `AlwaysAllow`. + If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `authorization: mode` to something other than `AlwaysAllow`. + + It is also possible to review the running configuration of a Kubelet via the `/configz` endpoint on the Kubelet API port (typically `10250/TCP`). + Accessing these with appropriate credentials will provide details of the Kubelet's configuration. + remediation: |- + If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`. + + + If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable. + ``` --authorization-mode=Webhook ``` - Based on your system, restart the `kubelet` service. 
For example: + + Based on your system, restart the `kubelet` service. + For example: + ``` systemctl daemon-reload systemctl restart kubelet.service ``` - impact: | - Unauthorized requests will be denied. + impact: Unauthorized requests will be denied. default_value: | By default, `--authorization-mode` argument is set to `AlwaysAllow`. - references: | - 1. [https://kubernetes.io/docs/admin/kubelet/](https://kubernetes.io/docs/admin/kubelet/) - 2. [https://kubernetes.io/docs/admin/kubelet-authentication-authorization/#kubelet-authentication](https://kubernetes.io/docs/admin/kubelet-authentication-authorization/#kubelet-authentication) + references: + - https://kubernetes.io/docs/admin/kubelet/ + - https://kubernetes.io/docs/admin/kubelet-authentication-authorization/#kubelet-authentication section: Kubelet - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 4.2.2 - - Kubelet + - CIS + - Kubernetes + - CIS 4.2.2 + - Kubelet benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_2_3/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_2_3/data.yaml index 3551d6f980..0b2dad20bf 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_2_3/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_2_3/data.yaml @@ -1,60 +1,55 @@ metadata: - id: a406209b-2765-5d90-91ba-4e872802c450 - name: Ensure that the --client-ca-file argument is set as appropriate (Automated) - profile_applicability: | - * Level 1 - Worker Node - description: | - Enable Kubelet authentication using certificates. - rationale: > - The connections from the apiserver to the kubelet are used for fetching logs - for pods, attaching (through kubectl) to running pods, and using the - kubelet's port-forwarding functionality. These connections terminate at the - kubelet's HTTPS endpoint. By default, the apiserver does not verify the - kubelet's serving certificate, which makes the connection subject to - man-in-the-middle attacks, and unsafe to run over untrusted and/or public - networks. Enabling Kubelet certificate authentication ensures that the - apiserver could authenticate the Kubelet before submitting any requests. - audit: | + id: 5097a7f7-be0e-57f1-81d7-665334381426 + name: Ensure that the --client-ca-file argument is set as appropriate + rule_number: 4.2.3 + profile_applicability: '* Level 1 - Worker Node' + description: Enable Kubelet authentication using certificates. + rationale: |- + The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet’s port-forwarding functionality. + These connections terminate at the kubelet’s HTTPS endpoint. + By default, the apiserver does not verify the kubelet’s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. + Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests. + audit: |- Run the following command on each node: + ``` ps -ef | grep kubelet ``` - Verify that the `--client-ca-file` argument exists and is set to the location of the client - certificate authority file. - If the `--client-ca-file` argument is not present, check that there is a Kubelet config file - specified by `--config`, and that the file sets `authentication: x509: clientCAFile` to the - location of the client certificate authority file. 
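The kubelet authorization check (4.2.2) and the client CA check (4.2.3) discussed around this point both map to kubelet config file fields. An illustrative sketch, assuming the `KubeletConfiguration` v1beta1 schema; the CA path is an example only.

```
# Sketch: config-file equivalents of --authorization-mode=Webhook (4.2.2)
# and --client-ca-file=<path> (4.2.3).
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authorization:
  mode: Webhook
authentication:
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt   # example path
```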
- remediation: | - If using a Kubelet config file, edit the file to set `authentication: - x509:clientCAFile` - to - the location of the client CA file. - If using command line arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and - set the below parameter in `KUBELET_AUTHZ_ARGS` variable. + + Verify that the `--client-ca-file` argument exists and is set to the location of the client certificate authority file. + + If the `--client-ca-file` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `authentication: x509: clientCAFile` to the location of the client certificate authority file. + remediation: |- + If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file. + + + If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable. + ``` --client-ca-file= ``` - Based on your system, restart the `kubelet` service. For example: + + Based on your system, restart the `kubelet` service. + For example: + ``` systemctl daemon-reload systemctl restart kubelet.service ``` - impact: | - You require TLS to be configured on apiserver as well as kubelets. + impact: You require TLS to be configured on apiserver as well as kubelets. default_value: | By default, `--client-ca-file` argument is not set. - references: | - 1. [https://kubernetes.io/docs/admin/kubelet/](https://kubernetes.io/docs/admin/kubelet/) - 2. [https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/) + references: + - https://kubernetes.io/docs/admin/kubelet/ + - https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/ section: Kubelet - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 4.2.3 - - Kubelet + - CIS + - Kubernetes + - CIS 4.2.3 + - Kubelet benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_2_4/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_2_4/data.yaml index dcf1f7a00b..55b2439ed1 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_2_4/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_2_4/data.yaml @@ -1,46 +1,48 @@ metadata: - id: 55983039-6973-57a9-9ed2-fb577c0be1f6 - name: Verify that the --read-only-port argument is set to 0 (Manual) - profile_applicability: | - * Level 1 - Worker Node - description: | - Disable the read-only port. - rationale: > - The Kubelet process provides a read-only API in addition to the main Kubelet - API. - Unauthenticated access is provided to this read-only API which could possibly retrieve - potentially sensitive information about the cluster. - audit: | + id: c29d500e-8baf-5dad-b985-935be71d042f + name: Verify that the --read-only-port argument is set to 0 + rule_number: 4.2.4 + profile_applicability: '* Level 1 - Worker Node' + description: Disable the read-only port. + rationale: |- + The Kubelet process provides a read-only API in addition to the main Kubelet API. + Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster. 
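For illustration, the remediation described further below has a one-line config-file form. This is a sketch assuming the `KubeletConfiguration` v1beta1 schema referenced in the audit text.

```
# Sketch: disabling the kubelet read-only port, equivalent to --read-only-port=0.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
readOnlyPort: 0
```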
+ audit: |- Run the following command on each node: + ``` ps -ef | grep kubelet ``` + Verify that the `--read-only-port` argument exists and is set to `0`. - If the `--read-only-port` argument is not present, check that there is a Kubelet config file - specified by `--config`. Check that if there is a `readOnlyPort` entry in the file, it is set to `0`. - remediation: | + + If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. + Check that if there is a `readOnlyPort` entry in the file, it is set to `0`. + remediation: |- If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`. - If using command line arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and - set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - ``` + + If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + + ``` --read-only-port=0 ``` - Based on your system, restart the kubelet service. For example: + + Based on your system, restart the `kubelet` service. + For example: + ``` systemctl daemon-reload systemctl restart kubelet.service ``` - impact: | - Removal of the read-only port will require that any service which made use - of it will need to be re-configured to use the main Kubelet API. + impact: |- + Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API. default_value: | By default, `--read-only-port` is set to `10255/TCP`. However, if a config file is specified by --config the default value for `readOnlyPort` is `0`. - references: | - 1. [https://kubernetes.io/docs/admin/kubelet/](https://kubernetes.io/docs/admin/kubelet/) + references: + - https://kubernetes.io/docs/admin/kubelet/ section: Kubelet - version: "1.0" + version: '1.0' tags: - CIS - Kubernetes @@ -48,5 +50,5 @@ metadata: - Kubelet benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 - id: cis_k8s \ No newline at end of file + version: v1.0.1 + id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_2_5/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_2_5/data.yaml index 0a1c170d9f..984061c108 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_2_5/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_2_5/data.yaml @@ -1,50 +1,51 @@ metadata: - id: db3446ed-5542-59b0-a9e3-499814d5bde3 - name: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual) - profile_applicability: | - * Level 1 - Worker Node - description: | - Do not disable timeouts on streaming connections. - rationale: > - Setting idle timeouts ensures that you are protected against - Denial-of-Service attacks, - inactive connections and running out of ephemeral ports. - Note: By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be - too high for your environment. Setting this as appropriate would additionally ensure that - such streaming connections are timed out after serving legitimate use cases. - audit: | + id: 5f80fd5e-772e-5562-a310-be9b33d1f9d3 + name: Ensure that the --streaming-connection-idle-timeout argument is not set to + 0 + rule_number: 4.2.5 + profile_applicability: '* Level 1 - Worker Node' + description: Do not disable timeouts on streaming connections. 
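As with the previous checks, the remediation shown further below maps to a single kubelet config field. A sketch, assuming the `KubeletConfiguration` v1beta1 schema; the 5m value mirrors the example used in the remediation text.

```
# Sketch: config-file equivalent of --streaming-connection-idle-timeout=5m.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
streamingConnectionIdleTimeout: 5m
```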
+ rationale: |- + Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports. + + + **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. + Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases. + audit: |- Run the following command on each node: + ``` ps -ef | grep kubelet ``` + Verify that the `--streaming-connection-idle-timeout` argument is not set to `0`. - If the argument is not present, and there is a Kubelet config file specified by `--config`, - check that it does not set `streamingConnectionIdleTimeout` to `0`. - remediation: | - If using a Kubelet config file, edit the file to set - `streamingConnectionIdleTimeout` to a value other than `0`. - If using command line arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and + If the argument is not present, and there is a Kubelet config file specified by `--config`, check that it does not set `streamingConnectionIdleTimeout` to 0. + remediation: |- + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0. + + + If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. ``` --streaming-connection-idle-timeout=5m ``` - Based on your system, restart the kubelet service. For example: + + Based on your system, restart the `kubelet` service. + For example: + ``` systemctl daemon-reload systemctl restart kubelet.service ``` - impact: | - Long-lived connections could be interrupted. + impact: Long-lived connections could be interrupted. default_value: | By default, `--streaming-connection-idle-timeout` is set to 4 hours. - references: | - 1. [https://kubernetes.io/docs/admin/kubelet/](https://kubernetes.io/docs/admin/kubelet/) - 2. [https://github.com/kubernetes/kubernetes/pull/18552](https://github.com/kubernetes/kubernetes/pull/18552) + references: + - https://kubernetes.io/docs/admin/kubelet/ + - https://github.com/kubernetes/kubernetes/pull/18552 section: Kubelet - version: "1.0" + version: '1.0' tags: - CIS - Kubernetes @@ -52,5 +53,5 @@ metadata: - Kubelet benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 - id: cis_k8s \ No newline at end of file + version: v1.0.1 + id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_2_6/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_2_6/data.yaml index 9b992d221f..6c1462e2e7 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_2_6/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_2_6/data.yaml @@ -1,54 +1,55 @@ metadata: - id: 1727f238-cf56-52bb-86ed-ddf0c141eebc - name: Ensure that the --protect-kernel-defaults argument is set to true (Automated) - profile_applicability: | - * Level 1 - Worker Node - description: > - Protect tuned kernel parameters from overriding kubelet default kernel + id: 0a2bee94-296f-56da-b5a5-308e81517990 + name: Ensure that the --protect-kernel-defaults argument is set to true + rule_number: 4.2.6 + profile_applicability: '* Level 1 - Worker Node' + description: Protect tuned kernel parameters from overriding kubelet default kernel parameter values. 
- rationale: > - Kernel parameters are usually tuned and hardened by the system - administrators before putting the systems into production. These parameters - protect the kernel and the system. Your kubelet kernel defaults that rely on - such parameters should be appropriately set to match the desired secured - system state. Ignoring this could potentially lead to running pods with - undesired kernel behavior. - audit: | + rationale: |- + Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. + These parameters protect the kernel and the system. + Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. + Ignoring this could potentially lead to running pods with undesired kernel behavior. + audit: |- Run the following command on each node: + ``` ps -ef | grep kubelet ``` + Verify that the `--protect-kernel-defaults` argument is set to `true`. - If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet - config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`. - remediation: | - If using a Kubelet config file, edit the file to set `protectKernelDefaults: - true`. - If using command line arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and - set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + + If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`. + remediation: |- + If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`. + + + If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + ``` --protect-kernel-defaults=true ``` - Based on your system, restart the `kubelet` service. For example: + + Based on your system, restart the `kubelet` service. + For example: + ``` systemctl daemon-reload systemctl restart kubelet.service ``` - impact: | - You would have to re-tune kernel parameters to match kubelet parameters. + impact: You would have to re-tune kernel parameters to match kubelet parameters. default_value: | By default, `--protect-kernel-defaults` is not set. - references: | - 1. [https://kubernetes.io/docs/admin/kubelet/](https://kubernetes.io/docs/admin/kubelet/) + references: + - https://kubernetes.io/docs/admin/kubelet/ section: Kubelet - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 4.2.6 - - Kubelet + - CIS + - Kubernetes + - CIS 4.2.6 + - Kubelet benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_2_7/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_2_7/data.yaml index 1cef8ba166..ae66c7a9bc 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_2_7/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_2_7/data.yaml @@ -1,56 +1,51 @@ metadata: - id: ddf0a2b1-5b54-5960-a749-89f351dcc04b + id: d649c9bf-85a8-5247-9ac0-53635680af10 name: Ensure that the --make-iptables-util-chains argument is set to true - (Automated) - profile_applicability: | - * Level 1 - Worker Node - description: | - Allow Kubelet to manage iptables. 
- rationale: > - Kubelets can automatically manage the required changes to iptables based on - how you choose your networking options for the pods. It is recommended to - let kubelets manage the changes to iptables. This ensures that the iptables - configuration remains in sync with pods networking configuration. Manually - configuring iptables with dynamic pod network configuration changes might - hamper the communication between pods/containers and to the outside world. + rule_number: 4.2.7 + profile_applicability: '* Level 1 - Worker Node' + description: Allow Kubelet to manage iptables. + rationale: |- + Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. + It is recommended to let kubelets manage the changes to iptables. + This ensures that the iptables configuration remains in sync with pods networking configuration. + Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open. - audit: | + audit: |- Run the following command on each node: + ``` ps -ef | grep kubelet ``` + Verify that if the `--make-iptables-util-chains` argument exists then it is set to `true`. - If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config - file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to - `false`. - remediation: | - If using a Kubelet config file, edit the file to set - `makeIPTablesUtilChains: true`. - If using command line arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and - remove the `--make-iptables-util-chains` argument from the - `KUBELET_SYSTEM_PODS_ARGS` variable. - Based on your system, restart the `kubelet` service. For example: + + If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`. + remediation: |- + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`. + + If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable. + + Based on your system, restart the `kubelet` service. + For example: + ``` systemctl daemon-reload systemctl restart kubelet.service ``` - impact: > - Kubelet would manage the iptables on the system and keep it in sync. If you - are using any other iptables management solution, then there might be some - conflicts. + impact: |- + Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts. default_value: | By default, `--make-iptables-util-chains` argument is set to `true`. - references: | - 1. 
[https://kubernetes.io/docs/admin/kubelet/](https://kubernetes.io/docs/admin/kubelet/) + references: + - https://kubernetes.io/docs/admin/kubelet/ section: Kubelet - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 4.2.7 - - Kubelet + - CIS + - Kubernetes + - CIS 4.2.7 + - Kubelet benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_2_8/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_2_8/data.yaml index a73ba4d4aa..03ea5a0b43 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_2_8/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_2_8/data.yaml @@ -1,44 +1,42 @@ metadata: - id: 58b41542-b102-5c9a-81fb-89aa5bc0fcb8 - name: Ensure that the --hostname-override argument is not set (Manual) - profile_applicability: | - * Level 1 - Worker Node - description: | - Do not override node hostnames. - rationale: > - Overriding hostnames could potentially break TLS setup between the kubelet - and the apiserver. - Additionally, with overridden hostnames, it becomes increasingly difficult to - associate logs with a particular node and process them for security analytics. Hence, you - should setup your kubelet nodes with resolvable FQDNs and avoid overriding the - hostnames with IPs. - audit: | + id: 6f2e9eb5-d55f-5eaa-9f8d-472f246d1343 + name: Ensure that the --hostname-override argument is not set + rule_number: 4.2.8 + profile_applicability: '* Level 1 - Worker Node' + description: Do not override node hostnames. + rationale: |- + Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. + Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. + Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. + audit: |- Run the following command on each node: + ``` ps -ef | grep kubelet ``` + Verify that `--hostname-override` argument does not exist. - Note This setting is not configurable via the Kubelet config file. - remediation: | - Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` - on each worker node and remove the `--hostname-override` argument from the - `KUBELET_SYSTEM_PODS_ARGS` variable. - Based on your system, restart the `kubelet` service. For example: + + **Note** This setting is not configurable via the Kubelet config file. + remediation: |- + Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable. + + Based on your system, restart the `kubelet` service. + For example: + ``` systemctl daemon-reload systemctl restart kubelet.service ``` - impact: | - Some cloud providers may require this flag to ensure that hostname matches - names issued by the cloud provider. - In these environments, this recommendation should not apply. + impact: |- + Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. In these environments, this recommendation should not apply. default_value: | By default, `--hostname-override argument` is not set. - references: | - 1. [https://kubernetes.io/docs/admin/kubelet/](https://kubernetes.io/docs/admin/kubelet/) - 2. 
[https://github.com/kubernetes/kubernetes/issues/22063](https://github.com/kubernetes/kubernetes/issues/22063) + references: + - https://kubernetes.io/docs/admin/kubelet/ + - https://github.com/kubernetes/kubernetes/issues/22063 section: Kubelet - version: "1.0" + version: '1.0' tags: - CIS - Kubernetes @@ -46,5 +44,5 @@ metadata: - Kubelet benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_4_2_9/data.yaml b/bundle/compliance/cis_k8s/rules/cis_4_2_9/data.yaml index c0ea28d026..72069ba2c6 100644 --- a/bundle/compliance/cis_k8s/rules/cis_4_2_9/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_4_2_9/data.yaml @@ -1,49 +1,48 @@ metadata: - id: 8dad2026-5cfd-5398-ba90-9c329ae6b2ca - name: Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual) - profile_applicability: | - * Level 2 - Worker Node - description: | - Security relevant information should be captured. + id: ec063233-fc31-5f50-8ecd-ada8cba33192 + name: Ensure that the --event-qps argument is set to 0 or a level which ensures + appropriate event capture + rule_number: 4.2.9 + profile_applicability: '* Level 2 - Worker Node' + description: |- + Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. - Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial - of service on the kubelet. - rationale: | - It is important to capture all events and not restrict event creation. - Events are an important source of security information and analytics that ensure that your environment is - consistently monitored using the event data. - audit: | + Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet. + rationale: |- + It is important to capture all events and not restrict event creation. + Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data. + audit: |- Run the following command on each node: + ``` ps -ef | grep kubelet ``` - Review the value set for the `--event-qps` argument and determine whether this has been - set to an appropriate level for the cluster. The value of `0` can be used to ensure that all - events are captured. - If the `--event-qps` argument does not exist, check that there is a Kubelet config file - specified by `--config` and review the value in this location. - remediation: | + + Review the value set for the `--event-qps` argument and determine whether this has been set to an appropriate level for the cluster. + The value of `0` can be used to ensure that all events are captured. + + If the `--event-qps` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location. + remediation: |- If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level. - If using command line arguments, edit the kubelet service file - `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and - set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - Based on your system, restart the `kubelet` service. 
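The kubelet settings covered in 4.2.6, 4.2.7 and 4.2.9 also have config-file equivalents. The combined sketch below is illustrative and assumes the `KubeletConfiguration` v1beta1 schema; the `eventRecordQPS` value of 0 follows the benchmark's example and should be weighed against the denial-of-service note in the impact text.

```
# Sketch: config-file equivalents of --protect-kernel-defaults=true (4.2.6),
# --make-iptables-util-chains=true (4.2.7) and --event-qps=0 (4.2.9).
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
protectKernelDefaults: true
makeIPTablesUtilChains: true
eventRecordQPS: 0
```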
+ + If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + + Based on your system, restart the `kubelet` service. For example: + ``` systemctl daemon-reload systemctl restart kubelet.service ``` - impact: | - Setting this parameter to `0` could result in a denial of service condition due to excessive - events being created. The cluster's event processing and storage systems should be scaled - to handle expected event loads. + impact: |- + Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads. default_value: | By default, `--event-qps` argument is set to `5`. - references: | - 1. https://kubernetes.io/docs/admin/kubelet/ - 2. https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go + references: + - https://kubernetes.io/docs/admin/kubelet/ + - https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go section: Kubelet - version: "1.0" + version: '1.0' tags: - CIS - Kubernetes @@ -51,5 +50,5 @@ metadata: - Kubelet benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 - id: cis_k8s \ No newline at end of file + version: v1.0.1 + id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_5_1_3/data.yaml b/bundle/compliance/cis_k8s/rules/cis_5_1_3/data.yaml index 6f8f1965e3..405310a7f5 100644 --- a/bundle/compliance/cis_k8s/rules/cis_5_1_3/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_5_1_3/data.yaml @@ -1,47 +1,43 @@ metadata: - id: 8c46796a-9b8d-585b-8c01-48cde4eff2ec - name: Minimize wildcard use in Roles and ClusterRoles (Manual) - profile_applicability: | - * Level 1 - Worker Node - description: > - Kubernetes Roles and ClusterRoles provide access to resources based on sets - of objects and - actions that can be taken on those objects. It is possible to set either of these to be the - wildcard "*" which matches all items. - Use of wildcards is not optimal from a security perspective as it may allow for inadvertent - access to be granted when new resources are added to the Kubernetes API either as CRDs - or in later versions of the product. - rationale: > - The principle of least privilege recommends that users are provided only the - access - required for their role and nothing more. The use of wildcard rights grants is likely to - provide excessive rights to the Kubernetes API. - audit: | - Retrieve the roles defined across each namespaces in the cluster and review - for wildcards + id: c1467bb9-fc01-5264-81e9-8a6b4f85b03d + name: Minimize wildcard use in Roles and ClusterRoles + rule_number: 5.1.3 + profile_applicability: '* Level 1 - Worker Node' + description: |- + Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. + It is possible to set either of these to be the wildcard "*" which matches all items. + + + Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product. + rationale: |- + The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
+ The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API. + audit: |- + Retrieve the roles defined across each namespaces in the cluster and review for wildcards + ``` kubectl get roles --all-namespaces -o yaml ``` + Retrieve the cluster roles defined in the cluster and review for wildcards + ``` kubectl get clusterroles -o yaml ``` - remediation: > - Where possible replace any use of wildcards in clusterroles and roles with - specific objects - or actions. - impact: | - None. + remediation: |- + Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions. + impact: None default_value: references: + - nan section: RBAC and Service Accounts - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 5.1.3 - - RBAC and Service Accounts + - CIS + - Kubernetes + - CIS 5.1.3 + - RBAC and Service Accounts benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_5_1_5/data.yaml b/bundle/compliance/cis_k8s/rules/cis_5_1_5/data.yaml index 1c32c178f6..ff2d674bdb 100644 --- a/bundle/compliance/cis_k8s/rules/cis_5_1_5/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_5_1_5/data.yaml @@ -1,52 +1,44 @@ metadata: - id: 2b399496-f79d-5533-8a86-4ea00b95e3bd - name: Ensure that default service accounts are not actively used. (Manual) - profile_applicability: | - * Level 1 - Master Node - description: > - The `default` service account should not be used to ensure that rights granted - to - applications can be more easily audited and reviewed. - rationale: > - Kubernetes provides a `default` service account which is used by cluster - workloads where - no specific service account is assigned to the pod. - Where access to the Kubernetes API from a pod is required, a specific service account - should be created for that pod, and rights granted to that service account. - The default service account should be configured such that it does not provide a service - account token and does not have any explicit rights assignments. - audit: > - For each namespace in the cluster, review the rights assigned to the default - service account - and ensure that it has no roles or cluster roles bound to it apart from the defaults. - Additionally ensure that the `automountServiceAccountToken: false` setting is in place for - each default service account. - remediation: | - Create explicit service accounts wherever a Kubernetes workload requires - specific access - to the Kubernetes API server. - Modify the configuration of each default service account to include this value + id: 29ab476c-31fc-51a5-b24d-6842c1e27ba4 + name: Ensure that default service accounts are not actively used. + rule_number: 5.1.5 + profile_applicability: '* Level 1 - Master Node' + description: |- + The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed. + rationale: |- + Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod. + + Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. + + The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments. 
+ audit: |- + For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults. + + Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account. + remediation: |- + Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server. + + Modify the configuration of each default service account to include this value + ``` automountServiceAccountToken: false ``` - impact: > - All workloads which require access to the Kubernetes API will require an - explicit service - account to be created. + impact: |- + All workloads which require access to the Kubernetes API will require an explicit service account to be created. default_value: | By default the `default` service account allows for its service account token to be mounted in pods in its namespace. - references: | - 1. [https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) + references: + - https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ section: RBAC and Service Accounts - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 5.1.5 - - RBAC and Service Accounts + - CIS + - Kubernetes + - CIS 5.1.5 + - RBAC and Service Accounts benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_5_1_6/data.yaml b/bundle/compliance/cis_k8s/rules/cis_5_1_6/data.yaml index c4465d56f7..dcc8780a25 100644 --- a/bundle/compliance/cis_k8s/rules/cis_5_1_6/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_5_1_6/data.yaml @@ -1,44 +1,36 @@ metadata: - id: 7037cf42-1f9b-5064-afde-e74a69cf96eb - name: Ensure that Service Account Tokens are only mounted where necessary (Manual) - profile_applicability: | - * Level 1 - Master Node - description: > - Service accounts tokens should not be mounted in pods except where the - workload - running in the pod explicitly needs to communicate with the API server - rationale: > - Mounting service account tokens inside pods can provide an avenue for - privilege escalation - attacks where an attacker is able to compromise a single pod in the cluster. + id: 06f347db-c8b9-52bb-8e44-8f4af01d2a4f + name: Ensure that Service Account Tokens are only mounted where necessary + rule_number: 5.1.6 + profile_applicability: '* Level 1 - Master Node' + description: |- + Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server + rationale: |- + Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster. + Avoiding mounting these tokens removes this attack avenue. - audit: | - Review pod and service account objects in the cluster and ensure that the - option below is - set, unless the resource explicitly requires this access. + audit: |- + Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access. + ``` automountServiceAccountToken: false ``` - remediation: > - Modify the definition of pods and service accounts which do not need to - mount service - account tokens to disable it. 
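To illustrate the two service-account recommendations above (5.1.5 and 5.1.6), here is a sketch of the `automountServiceAccountToken: false` setting applied at both the ServiceAccount and the Pod level. The namespace, pod and image names are hypothetical.

```
# Sketch: opt the default ServiceAccount out of token automounting (5.1.5)
# and disable the mount on a workload that does not need API access (5.1.6).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: default
  namespace: example-ns            # illustrative namespace
automountServiceAccountToken: false
---
apiVersion: v1
kind: Pod
metadata:
  name: example-app                # illustrative pod
  namespace: example-ns
spec:
  automountServiceAccountToken: false
  containers:
    - name: app
      image: registry.example.com/app:1.0   # illustrative image
```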
- impact: > - Pods mounted without service account tokens will not be able to communicate - with the API - server, except where the resource is available to unauthenticated principals. + remediation: |- + Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it. + impact: |- + Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals. default_value: | By default, all pods get a service account token mounted in them. - references: | - 1. [https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) + references: + - https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ section: RBAC and Service Accounts - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 5.1.6 - - RBAC and Service Accounts + - CIS + - Kubernetes + - CIS 5.1.6 + - RBAC and Service Accounts benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_5_2_10/data.yaml b/bundle/compliance/cis_k8s/rules/cis_5_2_10/data.yaml index 70705761cb..6afb9b4fda 100644 --- a/bundle/compliance/cis_k8s/rules/cis_5_2_10/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_5_2_10/data.yaml @@ -1,42 +1,33 @@ metadata: - id: cfcc0315-7d89-5b9c-ab1a-eaf860e8942f - name: Minimize the admission of containers with capabilities assigned (Manual) - profile_applicability: | - * Level 2 - Master Node - description: | - Do not generally permit containers with capabilities - rationale: > - Containers run with a default set of capabilities as assigned by the - Container Runtime. Capabilities are parts of the rights generally granted on - a Linux system to the root user. In many cases applications running in - containers do not require any capabilities to operate, so from the - perspective of the principal of least privilege use of capabilities should - be minimized. - audit: | - List the policies in use for each namespace in the cluster, ensure that at least one policy - requires that capabilities are dropped by all containers. - remediation: | + id: b193abe9-297a-5650-8740-939e9fd3bd8f + name: Minimize the admission of containers with capabilities assigned + rule_number: 5.2.10 + profile_applicability: '* Level 2 - Master Node' + description: Do not generally permit containers with capabilities + rationale: |- + Containers run with a default set of capabilities as assigned by the Container Runtime. + Capabilities are parts of the rights generally granted on a Linux system to the root user. + + In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principal of least privilege use of capabilities should be minimized. + audit: |- + List the policies in use for each namespace in the cluster, ensure that at least one policy requires that capabilities are dropped by all containers. + remediation: |- Review the use of capabilites in applications runnning on your cluster. - Where - a namespace - contains applicaions which do not require any Linux capabities to operate consider adding - a policy which forbids the admission of containers which do not drop all capabilities. - impact: | - Pods with containers require capabilities to operate will not be permitted. 
+ Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a policy which forbids the admission of containers which do not drop all capabilities. + impact: Pods with containers which require capabilities to operate will not be permitted. default_value: | By default, there are no restrictions on the creation of containers with additional capabilities - references: | - 1. https://kubernetes.io/docs/concepts/security/pod-security-standards/ - 2. https://www.nccgroup.trust/uk/our-research/abusing-privileged-and- - unprivileged-linux-containers/ + references: + - https://kubernetes.io/docs/concepts/security/pod-security-standards/ + - https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/ section: Pod Security Standards - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 5.2.10 - - Pod Security Standards + - CIS + - Kubernetes + - CIS 5.2.10 + - Pod Security Standards benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_5_2_2/data.yaml b/bundle/compliance/cis_k8s/rules/cis_5_2_2/data.yaml index ab194febe0..c5ced4268d 100644 --- a/bundle/compliance/cis_k8s/rules/cis_5_2_2/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_5_2_2/data.yaml @@ -1,44 +1,38 @@ metadata: - id: c978fb31-5706-5bdf-93fb-7cb84a35c63c - name: Minimize the admission of privileged containers (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Do not generally permit containers to be run with the - `securityContext.privileged` flag set to `true`. - rationale: > - Privileged containers have access to all Linux Kernel capabilities and - devices. A container running with full privileges can do almost everything - that the host can do. This flag exists to allow special use-cases, like - manipulating the network stack and accessing devices. There should be at - least one admission control policy defined which does not permit privileged - containers. If you need to run privileged containers, this should be defined - in a separate policy and you should carefully check ensure - that only limited service accounts and users are given permission to use - that policy. - audit: | - List the policies in use for each namespace in the cluster, ensure that each policy disallows - the admission of privileged containers. - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of privileged containers. - impact: > - Pods defined with `spec.containers[].securityContext.privileged: true`, - `spec.initContainers[].securityContext.privileged: true` and - `spec.ephemeralContainers[].securityContext.privileged: true` - will not be permitted. + id: 2ebc00e8-2e43-5828-a5bb-b56c81733ec6 + name: Minimize the admission of privileged containers + rule_number: 5.2.2 + profile_applicability: '* Level 1 - Master Node' + description: |- + Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`. + rationale: |- + Privileged containers have access to all Linux Kernel capabilities and devices. + A container running with full privileges can do almost everything that the host can do. + This flag exists to allow special use-cases, like manipulating the network stack and accessing devices. + + + There should be at least one admission control policy defined which does not permit privileged containers.
+ + + If you need to run privileged containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy. + audit: |- + List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of privileged containers. + remediation: |- + Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers. + impact: |- + Pods defined with `spec.containers[].securityContext.privileged: true`, `spec.initContainers[].securityContext.privileged: true` and `spec.ephemeralContainers[].securityContext.privileged: true` will not be permitted. default_value: | By default, there are no restrictions on the creation of privileged containers. - references: | - 1. [https://kubernetes.io/docs/concepts/security/pod-security-standards/](https://kubernetes.io/docs/concepts/security/pod-security-standards/) + references: + - https://kubernetes.io/docs/concepts/security/pod-security-standards/ section: Pod Security Standards - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 5.2.2 - - Pod Security Standards + - CIS + - Kubernetes + - CIS 5.2.2 + - Pod Security Standards benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_5_2_3/data.yaml b/bundle/compliance/cis_k8s/rules/cis_5_2_3/data.yaml index d5e4571162..4d2287d9f0 100644 --- a/bundle/compliance/cis_k8s/rules/cis_5_2_3/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_5_2_3/data.yaml @@ -1,44 +1,36 @@ metadata: - id: 8371e3cb-c8a3-5b4c-876a-3c68b995d30b - name: - Minimize the admission of containers wishing to share the host process ID - namespace (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Do not generally permit containers to be run with the `hostPID` flag set to - true. - rationale: > - A container running in the host's PID namespace can inspect processes - running outside the container. If the container also has access to ptrace - capabilities this can be used to escalate privileges outside of the - container. There should be at least one admission control policy defined - which does not permit containers to share the host PID namespace. If you - need to run containers which require hostPID, this should be defined in a - separate policy and you should carefully check to ensure that - only limited service accounts and users are given permission to use that - policy. - audit: | - List the policies in use for each namespace in the cluster, ensure that each policy disallows - the admission of `hostPID` containers - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of `hostPID` containers. - impact: > - Pods defined with `spec.hostPID: true` will not be permitted unless they are - run under a specific policy. + id: 9e2858f0-3e6b-571e-b675-3de189ef13e9 + name: Minimize the admission of containers wishing to share the host process ID + namespace + rule_number: 5.2.3 + profile_applicability: '* Level 1 - Master Node' + description: Do not generally permit containers to be run with the `hostPID` flag + set to true. + rationale: |- + A container running in the host's PID namespace can inspect processes running outside the container. + If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container. 
+ + There should be at least one admission control policy defined which does not permit containers to share the host PID namespace. + + If you need to run containers which require hostPID, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy. + audit: |- + List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostPID` containers + remediation: |- + Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers. + impact: |- + Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific policy. default_value: | By default, PodSecurityPolicies are not defined. - references: | - 1. [https://kubernetes.io/docs/concepts/security/pod-security-standards/](https://kubernetes.io/docs/concepts/security/pod-security-standards/) + references: + - https://kubernetes.io/docs/concepts/security/pod-security-standards/ section: Pod Security Standards - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 5.2.3 - - Pod Security Standards + - CIS + - Kubernetes + - CIS 5.2.3 + - Pod Security Standards benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_5_2_4/data.yaml b/bundle/compliance/cis_k8s/rules/cis_5_2_4/data.yaml index 259e49f4ee..55839caf03 100644 --- a/bundle/compliance/cis_k8s/rules/cis_5_2_4/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_5_2_4/data.yaml @@ -1,42 +1,34 @@ metadata: - id: 41e05dea-5fdd-50a7-9149-5be11cd8a63e - name: Minimize the admission of containers wishing to share the hostIPC - namespace (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Do not generally permit containers to be run with the `hostIPC` flag set to - true. - rationale: > - A container running in the host's IPC namespace can use IPC to interact with - processes outside the container. There should be at least one - admission control policy defined which does not permit containers to share - the host IPC namespace. If you need to run containers which - require hostIPC, this should be defined in a separate policy and you should - carefully check RBAC controls to ensure that only limited service accounts - and users are given permission to use that policy. - audit: | - List the policies in use for each namespace in the cluster, ensure that each policy disallows - the admission of `hostIPC` containers - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of `hostIPC` containers. - impact: > - Pods defined with `spec.hostIPC: true` will not be permitted unless they are - run under a - specific policy. + id: 8bc174f7-9a5b-5599-bc64-22f8ea0b28f5 + name: Minimize the admission of containers wishing to share the host IPC namespace + rule_number: 5.2.4 + profile_applicability: '* Level 1 - Master Node' + description: Do not generally permit containers to be run with the `hostIPC` flag + set to true. + rationale: |- + A container running in the host's IPC namespace can use IPC to interact with processes outside the container. + + There should be at least one admission control policy defined which does not permit containers to share the host IPC namespace. 
+
+ If you need to run containers which require hostIPC, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.
+ audit: |-
+ List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostIPC` containers
+ remediation: |-
+ Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.
+ impact: |-
+ Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific policy.
 default_value: |
 By default, there are no restrictions on the creation of hostIPC containers.
- references: |
- 1. [https://kubernetes.io/docs/concepts/security/pod-security-standards/](https://kubernetes.io/docs/concepts/security/pod-security-standards/)
+ references:
+ - https://kubernetes.io/docs/concepts/security/pod-security-standards/
 section: Pod Security Standards
- version: "1.0"
+ version: '1.0'
 tags:
- - CIS
- - Kubernetes
- - CIS 5.2.4
- - Pod Security Standards
+ - CIS
+ - Kubernetes
+ - CIS 5.2.4
+ - Pod Security Standards
 benchmark:
 name: CIS Kubernetes V1.23
- version: v1.0.0
+ version: v1.0.1
 id: cis_k8s
diff --git a/bundle/compliance/cis_k8s/rules/cis_5_2_5/data.yaml b/bundle/compliance/cis_k8s/rules/cis_5_2_5/data.yaml
index a5f45ea627..b3f25be52a 100644
--- a/bundle/compliance/cis_k8s/rules/cis_5_2_5/data.yaml
+++ b/bundle/compliance/cis_k8s/rules/cis_5_2_5/data.yaml
@@ -1,42 +1,34 @@
 metadata:
- id: 1f4cc187-dedb-553d-b41f-8e26682415b5
- name: Minimize the admission of containers wishing to share the host network
- namespace (Automated)
- profile_applicability: |
- * Level 1 - Master Node
- description: >
- Do not generally permit containers to be run with the `hostNetwork` flag set
- to true.
- rationale: >
- A container running in the host's network namespace could access the local
- loopback device, and could access network traffic to and from other pods.
- There should be at least one admission control policy defined which does not
- permit containers to share the host network namespace. If you need to
- run containers which require access to the host's network namespaces, this should be defined in a
- separate policy and you should carefully check to ensure that
- only limited service accounts and users are given permission to use that
- policy.
- audit: |
- List the policies in use for each namespace in the cluster, ensure that each policy disallows
- the admission of `hostNetwork` containers
- remediation: |
- Add policies to each namespace in the cluster which has user workloads to restrict the
- admission of `hostNetwork` containers.
- impact: >
- Pods defined with `spec.hostNetwork: true` will not be permitted unless they
- are run under a specific policy.
+ id: 03b75f04-40a0-5b43-becb-8227b24eea5d
+ name: Minimize the admission of containers wishing to share the host network namespace
+ rule_number: 5.2.5
+ profile_applicability: '* Level 1 - Master Node'
+ description: Do not generally permit containers to be run with the `hostNetwork`
+ flag set to true.
+ rationale: |-
+ A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.
+
+ There should be at least one admission control policy defined which does not permit containers to share the host network namespace.
+
+ If you need to run containers which require access to the host's network namespaces, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.
+ audit: |-
+ List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostNetwork` containers
+ remediation: |-
+ Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.
+ impact: |-
+ Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific policy.
 default_value: |
 By default, there are no restrictions on the creation of `hostNetwork` containers.
- references: |
- 1. [https://kubernetes.io/docs/concepts/security/pod-security-standards/](https://kubernetes.io/docs/concepts/security/pod-security-standards/)
+ references:
+ - https://kubernetes.io/docs/concepts/security/pod-security-standards/
 section: Pod Security Standards
- version: "1.0"
+ version: '1.0'
 tags:
- - CIS
- - Kubernetes
- - CIS 5.2.5
- - Pod Security Standards
+ - CIS
+ - Kubernetes
+ - CIS 5.2.5
+ - Pod Security Standards
 benchmark:
 name: CIS Kubernetes V1.23
- version: v1.0.0
+ version: v1.0.1
 id: cis_k8s
diff --git a/bundle/compliance/cis_k8s/rules/cis_5_2_6/data.yaml b/bundle/compliance/cis_k8s/rules/cis_5_2_6/data.yaml
index 0c4acc9401..3695a68d79 100644
--- a/bundle/compliance/cis_k8s/rules/cis_5_2_6/data.yaml
+++ b/bundle/compliance/cis_k8s/rules/cis_5_2_6/data.yaml
@@ -1,47 +1,40 @@
 metadata:
- id: bbe88e70-e917-5e9d-8993-3b5dd61a56c9
- name: Minimize the admission of containers with allowPrivilegeEscalation (Automated)
- profile_applicability: |
- * Level 1 - Master Node
- description: >
- Do not generally permit containers to be run with the
- `allowPrivilegeEscalation` flag set to true.
- Allowing this right can lead to a process running a container getting more rights
- than it started with.
- It's important to note that these rights are still constrained by the overall container
- sandbox, and this setting does not relate to the use of privileged containers.
- rationale: >
- A container running with the `allowPrivilegeEscalation` flag set to `true`
- may have processes that can gain more privileges than their parent. There
- should be at least one admission control policy defined which does not permit
- containers to allow privilege escalation. The option exists (and is
- defaulted to true) to permit setuid binaries to run. If you have need to run
- containers which use setuid binaries or require privilege escalation, this
- should be defined in a separate policy and you should carefully check
- to ensure that only limited service accounts and users are given
- permission to use that policy.
- audit: |
- List the policies in use for each namespace in the cluster, ensure that each policy disallows
- the admission of containers which allow privilege escalation.
- remediation: |
- Add policies to each namespace in the cluster which has user workloads to restrict the
- admission of conatiners with `.spec.allowPrivilegeEscalation` set to `true`.
- impact: >
- Pods defined with `spec.allowPrivilegeEscalation: true` will not be
- permitted unless they are run under a specific policy.
+ id: 48c203ce-2a1f-5b64-8b46-ef8d0de4f801
+ name: Minimize the admission of containers with allowPrivilegeEscalation
+ rule_number: 5.2.6
+ profile_applicability: '* Level 1 - Master Node'
+ description: |-
+ Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.
+ Allowing this right can lead to a process running a container getting more rights than it started with.
+
+ It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.
+ rationale: |-
+ A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.
+
+ There should be at least one admission control policy defined which does not permit containers to allow privilege escalation.
+ The option exists (and is defaulted to true) to permit setuid binaries to run.
+
+
+ If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.
+ audit: |-
+ List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which allow privilege escalation.
+ remediation: |-
+ Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+ impact: |-
+ Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific policy.
 default_value: |
 By default, there are no restrictions on contained process ability to escalate privileges, within the context of the container.
- references: |
- 1. [https://kubernetes.io/docs/concepts/security/pod-security-standards/](https://kubernetes.io/docs/concepts/security/pod-security-standards/)
+ references:
+ - https://kubernetes.io/docs/concepts/security/pod-security-standards/
 section: Pod Security Standards
- version: "1.0"
+ version: '1.0'
 tags:
- - CIS
- - Kubernetes
- - CIS 5.2.6
- - Pod Security Standards
+ - CIS
+ - Kubernetes
+ - CIS 5.2.6
+ - Pod Security Standards
 benchmark:
 name: CIS Kubernetes V1.23
- version: v1.0.0
+ version: v1.0.1
 id: cis_k8s
diff --git a/bundle/compliance/cis_k8s/rules/cis_5_2_7/data.yaml b/bundle/compliance/cis_k8s/rules/cis_5_2_7/data.yaml
index e0815f3c30..e40cb097ab 100644
--- a/bundle/compliance/cis_k8s/rules/cis_5_2_7/data.yaml
+++ b/bundle/compliance/cis_k8s/rules/cis_5_2_7/data.yaml
@@ -1,42 +1,36 @@
 metadata:
- id: b2d85ce5-e820-580d-98ba-a7e3ac83eeb3
- name: Minimize the admission of root containers (Automated)
- profile_applicability: |
- * Level 2 - Master Node
- description: |
- Do not generally permit containers to be run as the root user.
- rationale: >
- Containers may run as any Linux user. Containers which run as the root user,
- whilst constrained by Container Runtime security features still have a
- escalated likelihood of container breakout. Ideally, all containers should
- run as a defined non-UID 0 user. There should be at least one
- admission control policy defined which does not permit root
- containers. If you need to run root containers, this should be defined in a
- separate policy and you should carefully check to ensure that
- only limited service accounts and users are given permission to use that
- policy.
- audit: | - List the policies in use for each namespace in the cluster, ensure that each policy restricts - the use of root containers by setting `MustRunAsNonRoot` or `MustRunAs` with the range of - UIDs not including 0. - remediation: | - Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` - or `MustRunAs` with the range of UIDs not including 0, is set. - impact: | - Pods with containers which run as the root user will not be permitted. + id: 847fd97a-ec24-556d-b7d6-ceaa3dd27106 + name: Minimize the admission of root containers + rule_number: 5.2.7 + profile_applicability: '* Level 2 - Master Node' + description: Do not generally permit containers to be run as the root user. + rationale: |- + Containers may run as any Linux user. + Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout. + + Ideally, all containers should run as a defined non-UID 0 user. + + There should be at least one admission control policy defined which does not permit root containers. + + If you need to run root containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy. + audit: |- + List the policies in use for each namespace in the cluster, ensure that each policy restricts the use of root containers by setting `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0. + remediation: |- + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set. + impact: Pods with containers which run as the root user will not be permitted. default_value: | By default, there are no restrictions on the use of root containers and if a User is not specified in the image, the container will run as root. - references: | - 1. [https://kubernetes.io/docs/concepts/security/pod-security-standards/](https://kubernetes.io/docs/concepts/security/pod-security-standards/) + references: + - https://kubernetes.io/docs/concepts/security/pod-security-standards/ section: Pod Security Standards - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 5.2.7 - - Pod Security Standards + - CIS + - Kubernetes + - CIS 5.2.7 + - Pod Security Standards benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_5_2_8/data.yaml b/bundle/compliance/cis_k8s/rules/cis_5_2_8/data.yaml index cef17fa23f..f21c089c45 100644 --- a/bundle/compliance/cis_k8s/rules/cis_5_2_8/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_5_2_8/data.yaml @@ -1,44 +1,39 @@ metadata: - id: 1d4fadeb-808b-55cb-80db-fe01409e4ebc - name: Minimize the admission of containers with the NET_RAW capability (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Do not generally permit containers with the potentially dangerous NET_RAW + id: cef6324e-75cf-522f-b527-92c29bdc8986 + name: Minimize the admission of containers with the NET_RAW capability + rule_number: 5.2.8 + profile_applicability: '* Level 1 - Master Node' + description: Do not generally permit containers with the potentially dangerous NET_RAW capability. - rationale: > - Containers run with a default set of capabilities as assigned by the - Container Runtime. By default this can include potentially dangerous - capabilities. 
With Docker as the container runtime the NET_RAW capability is - enabled which may be misused by malicious containers. Ideally, all - containers should drop this capability. There should be at least one admission control policy defined which does not permit - containers with the NET_RAW capability. - If you need to run containers with this capability, this should be defined in a separate - policy and you should carefully check to ensure that only limited service accounts and - users are given permission to use that policy. - audit: | - List the policies in use for each namespace in the cluster, ensure that at least one policy - disallows the admission of containers with the `NET_RAW` capability. - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers with the `NET_RAW` capability. - impact: > - Pods with containers which run with the NET_RAW capability will not be - permitted. + rationale: |- + Containers run with a default set of capabilities as assigned by the Container Runtime. + By default this can include potentially dangerous capabilities. + With Docker as the container runtime the NET_RAW capability is enabled which may be misused by malicious containers. + + Ideally, all containers should drop this capability. + + There should be at least one admission control policy defined which does not permit containers with the NET_RAW capability. + + If you need to run containers with this capability, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy. + audit: |- + List the policies in use for each namespace in the cluster, ensure that at least one policy disallows the admission of containers with the `NET_RAW` capability. + remediation: |- + Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability. + impact: Pods with containers which run with the NET_RAW capability will not be permitted. default_value: | By default, there are no restrictions on the creation of containers with the `NET_RAW` capability. - references: | - 1. [https://kubernetes.io/docs/concepts/security/pod-security-standards/](https://kubernetes.io/docs/concepts/security/pod-security-standards/) - 2. 
[https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/](https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/) + references: + - https://kubernetes.io/docs/concepts/security/pod-security-standards/ + - https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/ section: Pod Security Standards - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 5.2.8 - - Pod Security Standards + - CIS + - Kubernetes + - CIS 5.2.8 + - Pod Security Standards benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/cis_k8s/rules/cis_5_2_9/data.yaml b/bundle/compliance/cis_k8s/rules/cis_5_2_9/data.yaml index 0714640855..84293dfd9b 100644 --- a/bundle/compliance/cis_k8s/rules/cis_5_2_9/data.yaml +++ b/bundle/compliance/cis_k8s/rules/cis_5_2_9/data.yaml @@ -1,45 +1,36 @@ metadata: - id: 74a82a38-4266-59f0-9ed0-39ef03bc72d1 - name: Minimize the admission of containers with added capabilities (Automated) - profile_applicability: | - * Level 1 - Master Node - description: > - Do not generally permit containers with capabilities assigned beyond the - default set. - rationale: > - Containers run with a default set of capabilities as assigned by the - Container Runtime. Capabilities outside this set can be added to containers - which could expose them to risks of container breakout attacks. There should - be at least one policy defined which prevents containers - with capabilities beyond the default set from launching. If you need to run - containers with additional capabilities, this should be defined in a - separate policy and you should carefully check to ensure that - only limited service accounts and users are given permission to use that - policy. - audit: | - List the policies in use for each namespace in the cluster, ensure that policies are present - which prevent `allowedCapabilities` to be set to anything other than an empty array. - remediation: | - Ensure that `allowedCapabilities` is not present in policies for the cluster - unless - it is set to an - empty array. - impact: > - Pods with containers which require capabilities outwith the default set will - not be permitted. + id: a7192e59-24c4-552f-a93e-12dff2f881c1 + name: Minimize the admission of containers with added capabilities + rule_number: 5.2.9 + profile_applicability: '* Level 1 - Master Node' + description: Do not generally permit containers with capabilities assigned beyond + the default set. + rationale: |- + Containers run with a default set of capabilities as assigned by the Container Runtime. + Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks. + + There should be at least one policy defined which prevents containers with capabilities beyond the default set from launching. + + If you need to run containers with additional capabilities, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy. + audit: |- + List the policies in use for each namespace in the cluster, ensure that policies are present which prevent `allowedCapabilities` to be set to anything other than an empty array. + remediation: |- + Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array. 
+ impact: Pods with containers which require capabilities outwith the default set + will not be permitted. default_value: | By default, there are no restrictions on adding capabilities to containers. - references: | - 1. [https://kubernetes.io/docs/concepts/security/pod-security-standards/](https://kubernetes.io/docs/concepts/security/pod-security-standards/) - 2. [https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/](https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/) + references: + - https://kubernetes.io/docs/concepts/security/pod-security-standards/ + - https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/ section: Pod Security Standards - version: "1.0" + version: '1.0' tags: - - CIS - - Kubernetes - - CIS 5.2.9 - - Pod Security Standards + - CIS + - Kubernetes + - CIS 5.2.9 + - Pod Security Standards benchmark: name: CIS Kubernetes V1.23 - version: v1.0.0 + version: v1.0.1 id: cis_k8s diff --git a/bundle/compliance/lib/output_validations/output_validations.rego b/bundle/compliance/lib/output_validations/output_validations.rego index bce180caf7..70eb6e8994 100644 --- a/bundle/compliance/lib/output_validations/output_validations.rego +++ b/bundle/compliance/lib/output_validations/output_validations.rego @@ -3,7 +3,7 @@ package compliance.lib.output_validations import data.compliance import future.keywords.every -validate_common_kuberentes_provider_metadata(metadata) { +validate_common_provider_metadata(metadata) { metadata.id metadata.name metadata.profile_applicability @@ -21,22 +21,11 @@ validate_common_kuberentes_provider_metadata(metadata) { metadata.benchmark.name metadata.benchmark.version metadata.benchmark.id + metadata.rule_number } -validate_k8s_metadata(metadata) { - validate_common_kuberentes_provider_metadata(metadata) -} else = false { - true -} - -validate_eks_metadata(metadata) { - validate_common_kuberentes_provider_metadata(metadata) -} else = false { - true -} - -validate_aws_metadata(metadata) { - validate_common_kuberentes_provider_metadata(metadata) +validate_metadata(metadata) { + validate_common_provider_metadata(metadata) } else = false { true } @@ -47,15 +36,18 @@ test_validate_rule_metadata { all_eks_rules := [rule | rule := compliance.cis_eks.rules[rule_id]] all_aws_rules := [rule | rule := compliance.cis_aws.rules[rule_id]] + print("Validating K8s rules") every k8s_rule in all_k8s_rules { - validate_k8s_metadata(k8s_rule.metadata) + validate_metadata(k8s_rule.metadata) } + print("Validating EKS rules") every eks_rule in all_eks_rules { - validate_eks_metadata(eks_rule.metadata) + validate_metadata(eks_rule.metadata) } + print("Validating AWS rules") every aws_rule in all_aws_rules { - validate_aws_metadata(aws_rule.metadata) + validate_metadata(aws_rule.metadata) } } diff --git a/bundle/compliance/lib/output_validations/test.rego b/bundle/compliance/lib/output_validations/test.rego index d5891c5fab..0382e21a57 100644 --- a/bundle/compliance/lib/output_validations/test.rego +++ b/bundle/compliance/lib/output_validations/test.rego @@ -16,25 +16,16 @@ valid_metadata := { "section": "rule section", "version": "rule version", "tags": ["tag 1", "tag 2"], - "benchmark": {"name": "benchmark", "version": "v1.0.0", "id": "cis_k8s"}, -} - -eks_valid_metadata := { - "name": "rule name", - "description": "rule description", - "impact": "rule impact", - "tags": ["tag 1", "tag 2"], - "benchmark": {"name": "benchmark", "version": "v1.0.0", 
"id": "cis_eks"}, - "remediation": "rule remidiation", + "benchmark": { + "name": "benchmark", + "version": "v1.0.0", + "id": "cis_k8s", + }, + "rule_number": "1.2.3", } test_required_metadata_fields { every key, _ in valid_metadata { - not validate_k8s_metadata(object.remove(valid_metadata, [key])) - not validate_aws_metadata(object.remove(valid_metadata, [key])) - } - - every key, _ in eks_valid_metadata { - not validate_eks_metadata(object.remove(eks_valid_metadata, [key])) + not validate_metadata(object.remove(valid_metadata, [key])) } } diff --git a/cis_policies_generator/input/CIS_Amazon_Elastic_Kubernetes_Service_(EKS)_Benchmark_v1.0.1.xlsx b/cis_policies_generator/input/CIS_Amazon_Elastic_Kubernetes_Service_(EKS)_Benchmark_v1.0.1.xlsx new file mode 100644 index 0000000000..021f27c3bf Binary files /dev/null and b/cis_policies_generator/input/CIS_Amazon_Elastic_Kubernetes_Service_(EKS)_Benchmark_v1.0.1.xlsx differ diff --git a/cis_policies_generator/input/CIS_Amazon_Elastic_Kubernetes_Service_(EKS)_Benchmark_v1.1.0.xlsx b/cis_policies_generator/input/CIS_Amazon_Elastic_Kubernetes_Service_(EKS)_Benchmark_v1.1.0.xlsx deleted file mode 100644 index 4417762207..0000000000 Binary files a/cis_policies_generator/input/CIS_Amazon_Elastic_Kubernetes_Service_(EKS)_Benchmark_v1.1.0.xlsx and /dev/null differ diff --git a/cis_policies_generator/package-lock.json b/cis_policies_generator/package-lock.json index 736fdf8337..24794f02d9 100644 --- a/cis_policies_generator/package-lock.json +++ b/cis_policies_generator/package-lock.json @@ -5,7 +5,6 @@ "requires": true, "packages": { "": { - "name": "cis_policies_generator", "version": "1.0.0", "dependencies": { "@types/node": "^18.6.3", diff --git a/cis_policies_generator/src/index.ts b/cis_policies_generator/src/index.ts index e99b7f2da6..ed362b7971 100644 --- a/cis_policies_generator/src/index.ts +++ b/cis_policies_generator/src/index.ts @@ -163,7 +163,6 @@ function generateRulesMetadataFiles(benchmarks: BenchmarkSchema[]): void { const ruleNumber = rule.rule_number!.replaceAll(".", "_"); const rule_folder = `../bundle/compliance/${benchmark_id}/rules/cis_${ruleNumber}` const metadata_file = rule_folder + "/data.yaml"; - delete rule.rule_number; if (fs.existsSync(rule_folder)) { _.assign(rule, getExistingValues(metadata_file)); diff --git a/dev/README.md b/dev/README.md new file mode 100644 index 0000000000..df6777b908 --- /dev/null +++ b/dev/README.md @@ -0,0 +1,33 @@ +# Dev tools + +## Rules Assets Generators + +### `generate_rule_metadata.py` + +This script generates the metadata for a rule. It is used to generate the metadata for the rules in the `rules` directory. + +**Usage:** + +```shell +poetry run python dev/generate_rule_metadata.py --benchmark --rules +``` + +**Example 1** - Generate all rules metadata from all benchmarks: + +```shell +poetry run python dev/generate_rule_metadata.py +``` + +**Example 2** - Generate two specific rules metadata from CIS AWS: + +```shell +poetry run python dev/generate_rule_metadata.py --benchmark cis_aws --rules "1.8" "1.9" +``` + +### Limitations + +The script currently has the following limitations: + +- It only works with Excel spreadsheets as input. +- It does not generate default values for rules. Default values must be added manually if they are not present in the input spreadsheet. +- Rules rego implementation is required before running the script. The script will fail if the rego implementation is not present. 
diff --git a/dev/common.py b/dev/common.py new file mode 100644 index 0000000000..f2dfb8c48c --- /dev/null +++ b/dev/common.py @@ -0,0 +1,149 @@ +import os + +import git +import pandas as pd +import regex as re +from ruamel.yaml.scalarstring import PreservedScalarString as pss + +repo_root = git.Repo('.', search_parent_directories=True) +rules_dir = os.path.join(repo_root.working_dir, "bundle/compliance") + +CODE_BLOCK_SIZE = 100 + +benchmark = { + "cis_k8s": "CIS_Kubernetes_V1.23_Benchmark_v1.0.1.xlsx", + "cis_eks": "CIS_Amazon_Elastic_Kubernetes_Service_(EKS)_Benchmark_v1.0.1.xlsx", + "cis_aws": "CIS_Amazon_Web_Services_Foundations_Benchmark_v1.5.0.xlsx", +} + +relevant_sheets = { + "cis_k8s": ["Level 1 - Master Node", "Level 2 - Master Node", "Level 1 - Worker Node", "Level 2 - Worker Node"], + "cis_eks": ["Level 1", "Level 2"], + "cis_aws": ["Level 1", "Level 2"], +} + +default_selected_columns_map = { + "cis_k8s": { + "Section #": "Section", + "Recommendation #": "Rule Number", + "Title": "Title", + "Assessment Status": "Type", + + }, + "cis_eks": { + "section #": "Section", + "recommendation #": "Rule Number", + "title": "Title", + "scoring status": "Type", + + }, + "cis_aws": { + "Section #": "Section", + "Recommendation #": "Rule Number", + "Title": "Title", + "Assessment Status": "Type", + }, +} + + +def parse_rules_data_from_excel(benchmark_id, selected_columns=None, selected_rules=None): + """ + Parse rules data from Excel file for current service. + :param selected_rules: List of rules to parse + :param selected_columns: Dictionary with columns to select from the sheet + :param benchmark_id: Benchmark ID + :return: Pandas DataFrame with rules data for current service and sections + """ + if selected_columns is None: + selected_columns = default_selected_columns_map + + benchmark_name = benchmark[benchmark_id] + data_path = f"../cis_policies_generator/input/{benchmark_name}" + + sheets = relevant_sheets[benchmark_id] + rules_data = pd.DataFrame() + sections_df = pd.DataFrame() + for sheet_name in sheets: + print(f"Processing sheet '{sheet_name}'") + excel_file = pd.read_excel(data_path, sheet_name=sheet_name) + + # Select only the columns you want to include in the Markdown table + data = excel_file[selected_columns[benchmark_id].keys()] + + # Update Table headers + data.columns = selected_columns[benchmark_id].values() + + # Remove rows with empty values in the "Rule Number" column and convert to string + sections_curr_sheet = data.loc[data["Rule Number"].isna(), ["Section", "Title"]].astype(str) + + # Filter out section information + data = data[data["Rule Number"].notna()].astype(str) + + # Only keep the rules that are selected + if selected_rules is not None: + data = data[data["Rule Number"].isin(selected_rules)] + + # Add a new column with the sheet name + data = data.assign(profile_applicability=sheet_name) + + rules_data = pd.concat([rules_data, data]).drop_duplicates(subset="Rule Number").reset_index(drop=True) + sections_df = pd.concat([sections_df, sections_curr_sheet]).drop_duplicates(subset="Section").reset_index( + drop=True) + + sections = {section: title for section, title in sections_df.values} + + return rules_data, sections + + +def check_and_fix_numbered_list(text): + # Split the text into lines + lines = text.split('\n') + + # Find the lines that start with a number and a period, and store their indices + numbered_lines = [(i, line) for i, line in enumerate(lines) if re.match(r'^\d+\.', line)] + + # Check if the numbered lines are consecutively numbered + 
for i, (index, line) in enumerate(numbered_lines): + # Extract the number from the line + line_number = int(line.split('.')[0]) + + # Check if the line number is correct + if line_number != i + 1: + # The line number is not correct, fix it by replacing the line with the correct line number + corrected_line = f"{i + 1}. {line.removeprefix(str(line_number) + '. ')}" + lines[index] = corrected_line + + # Join the lines back into a single string and return the result + return '\n'.join(lines) + + +def add_new_line_after_period(text): + # Split the text into lines + lines = text.split('\n') + + # Find the lines that start with a number and a period + numbered_lines = [line for line in lines if re.match(r'^\d+\.', line)] + + # Iterate through the lines and add a new line after a period, unless the line is a numbered line + for i, line in enumerate(lines): + if line not in numbered_lines: + lines[i] = line.replace(". ", ".\n") + + # Join the lines back into a single string and return the result + return '\n'.join(lines) + + +def fix_code_blocks(text: str): + text = add_new_line_after_period(text) + return check_and_fix_numbered_list(text) + + +def apply_pss_recursively(data): + if isinstance(data, dict): + return {key: apply_pss_recursively(value) for key, value in data.items()} + elif isinstance(data, list): + return [value for value in data] + elif isinstance(data, str): + return pss(data) if len(data) > CODE_BLOCK_SIZE else data + else: + return data diff --git a/dev/generate_rule_metadata.py b/dev/generate_rule_metadata.py new file mode 100644 index 0000000000..90437e2547 --- /dev/null +++ b/dev/generate_rule_metadata.py @@ -0,0 +1,203 @@ +import argparse +import os +import uuid + +import pandas as pd + +import common +from dataclasses import dataclass, asdict + +from ruamel.yaml import YAML + +yml = YAML() + + +@dataclass +class Benchmark: + name: str + version: str + id: str + + +@dataclass +class Rule: + id: str + name: str + rule_number: str + profile_applicability: str + description: str + rationale: str + audit: str + remediation: str + impact: str + default_value: str + references: list[str] + section: str + version: str + tags: list[str] + benchmark: Benchmark + + +selected_columns_map = { + "cis_k8s": { + "Section #": "Section", + "Recommendation #": "Rule Number", + "Title": "Title", + "Description": "description", + "Rational Statement": "rationale", + "Audit Procedure": "audit", + "Remediation Procedure": "remediation", + "Impact Statement": "impact", + # "": "default_value", # todo: talk with CIS team to add this column to the excel + "references": "references", + "Assessment Status": "type", + }, + "cis_eks": { + "section #": "Section", + "recommendation #": "Rule Number", + "title": "Title", + "description": "description", + "rationale statement": "rationale", + "audit procedure": "audit", + "remediation procedure": "remediation", + "impact statement": "impact", + # "": "default_value", # todo: talk with CIS team to add this column to the excel + "references": "references", + "scoring status": "type", + }, + "cis_aws": { + "Section #": "Section", + "Recommendation #": "Rule Number", + "Title": "Title", + "Description": "description", + "Rational Statement": "rationale", + "Audit Procedure": "audit", + "Remediation Procedure": "remediation", + "Impact Statement": "impact", + # "": "default_value", # todo: talk with CIS team to add this column to the excel + "References": "references", + "Assessment Status": "type", + } +} + + +def parse_refs(refs: str): + """ + Parse references 
- they are split by `:` which is the worst token possible for urls... + """ + ref = [f"http{ref}" for ref in refs.split(":http") if ref] + ref[0] = ref[0].removeprefix("http") + return ref + + +def read_existing_default_value(rule_number, benchmark_id): + """ + Read default value from existing rule (The excel file doesn't contain default values) + :param rule_number: Rule number + :param benchmark_id: Benchmark ID + :return: Default value + """ + rule_dir = os.path.join(common.rules_dir, f"{benchmark_id}/rules", f"cis_{rule_number.replace('.', '_')}") + try: + with open(os.path.join(rule_dir, "data.yaml"), "r") as f: + default_value = yml.load(f)["metadata"]["default_value"] + return default_value + except FileNotFoundError: + print(f"{benchmark_id}/{rule_number} is missing default value - please make sure to add it manually") + return "" + + +def generate_metadata(benchmark_id: str, raw_data: pd.DataFrame, benchmark_metadata: Benchmark, sections: dict): + """ + Generate metadata for rules + :param benchmark_id: Benchmark ID + :param raw_data: ‘Raw’ data from the spreadsheet + :param benchmark_metadata: Benchmark metadata + :param sections: Section metadata + :return: List of Rule objects + """ + metadata = [] + benchmark_tag = benchmark_id.removesuffix("cis_").upper() if benchmark_id != "cis_k8s" else f"Kubernetes" + for rule in raw_data.to_dict(orient="records"): + r = Rule( + id=str(uuid.uuid5(uuid.NAMESPACE_DNS, f"{benchmark_metadata.name} {rule['Title']}")), + name=rule["Title"], + rule_number=rule["Rule Number"], + profile_applicability=f"* {rule['profile_applicability']}", + description=common.fix_code_blocks(rule["description"]), + rationale=common.fix_code_blocks(rule.get("rationale", "")), + audit=common.fix_code_blocks(rule.get("audit", "")), + remediation=common.fix_code_blocks(rule.get("remediation", "")), + impact=rule.get("impact", "") if rule.get("impact", "") != "nan" else "None", + default_value=rule.get("default_value", read_existing_default_value(rule["Rule Number"], benchmark_id)), + references=parse_refs(rule.get("references", "")), + section=sections[rule["Section"]], + tags=["CIS", benchmark_tag, f"CIS {rule['Rule Number']}", sections[rule["Section"]]], + version="1.0", + benchmark=benchmark_metadata, + ) + metadata.append(r) + + return metadata + + +def save_metadata(metadata: list[Rule], benchmark_id): + """ + Save metadata to file + :param metadata: List of Rule objects + :param benchmark_id: Benchmark ID + :return: None + """ + for rule in metadata: + rule_dir = os.path.join(common.rules_dir, f"{benchmark_id}/rules", f"cis_{rule.rule_number.replace('.', '_')}") + try: + with open(os.path.join(rule_dir, "data.yaml"), "w+") as f: + yml.dump({"metadata": common.apply_pss_recursively(asdict(rule))}, f) + + except FileNotFoundError: + continue # ignore rules that are not implemented + + +if __name__ == "__main__": + os.chdir(common.repo_root.working_dir + "/dev") + + parser = argparse.ArgumentParser( + description="CIS Benchmark parser CLI", + ) + parser.add_argument( + "-b", + "--benchmark", + default=common.benchmark.keys(), + choices=common.benchmark.keys(), + help="benchmark to be used for the rules template generation (default: all benchmarks). 
" + "for example: `--benchmark cis_eks` or `--benchmark cis_eks cis_aws`", + nargs="+", + ) + parser.add_argument( + "-r", + "--rules", + help="set of specific rules to be parsed (default: all rules).", + nargs="+", + ) + args = parser.parse_args() + + if type(args.benchmark) is str: + args.benchmark = [args.benchmark] + + for benchmark_id in args.benchmark: + print(f"### Processing {benchmark_id.replace('_', ' ').upper()}") + + # Parse Excel data + raw_data, sections = common.parse_rules_data_from_excel( + selected_columns=selected_columns_map, + benchmark_id=benchmark_id, + selected_rules=args.rules, + ) + + benchmark_metadata = Benchmark( + name=common.benchmark[benchmark_id].split("Benchmark")[0].replace("_", " ").removesuffix(" "), + version=common.benchmark[benchmark_id].split("Benchmark")[1].removeprefix("_").removesuffix(".xlsx"), + id=f"{benchmark_id}", + ) + metadata = generate_metadata(benchmark_id, raw_data, benchmark_metadata, sections) + save_metadata(metadata, benchmark_id) diff --git a/dev/update_rule_status.py b/dev/update_rule_status.py index 521be05808..6298f7b7b6 100644 --- a/dev/update_rule_status.py +++ b/dev/update_rule_status.py @@ -1,60 +1,23 @@ import os -import pandas as pd -import git +import common """ Generates Markdown tables with implemented rules status for all services. """ -repo = git.Repo('.', search_parent_directories=True) -os.chdir(repo.working_dir + "/dev") - -benchmark = { - "k8s": "CIS_Kubernetes_V1.23_Benchmark_v1.0.1.xlsx", - "eks": "CIS_Amazon_Elastic_Kubernetes_Service_(EKS)_Benchmark_v1.1.0.xlsx", - "aws": "CIS_Amazon_Web_Services_Foundations_Benchmark_v1.5.0.xlsx", -} - -relevant_sheets = { - "k8s": ["Level 1 - Master Node", "Level 2 - Master Node", "Level 1 - Worker Node", "Level 2 - Worker Node"], - "eks": ["Level 1", "Level 2"], - "aws": ["Level 1", "Level 2"], -} - -selected_columns_map = { - "k8s": { - "Section #": "Section", - "Recommendation #": "Rule Number", - "Title": "Description", - "Assessment Status": "Type", - }, - "eks": { - "section #": "Section", - "recommendation #": "Rule Number", - "title": "Description", - "assessment status": "Type", - }, - "aws": { - "Section #": "Section", - "Recommendation #": "Rule Number", - "Title": "Description", - "Assessment Status": "Type", - }, -} - - -def get_implemented_rules(all_rules, service): + +def get_implemented_rules(all_rules, benchmark_id): """ Get list of implemented rules in the repository for current service. :param all_rules: List of all rules for specified benchmark - :param service: Service name (k8s, eks, aws) + :param benchmark_id: Benchmark ID :return: List of implemented rules """ # Set all rules as not implemented by default implemented_rules = {str(rule): ":x:" for rule in all_rules} # ❌ # Construct path to rules directory for current service - rules_dir = os.path.join("../bundle", "compliance", f"cis_{service}", "rules") + rules_dir = os.path.join("../bundle", "compliance", f"{benchmark_id}", "rules") # Get list of all rule files in the rules directory rule_files = os.listdir(rules_dir) @@ -70,50 +33,39 @@ def get_implemented_rules(all_rules, service): return implemented_rules -def generate_md_table(service): +def generate_md_table(benchmark_id): """ Generate Markdown table with implemented rules status for current service. 
- :param service: Service name (k8s, eks, aws) + :param benchmark_id: Benchmark ID :return: Markdown table """ - benchmark_name = benchmark[service] - data_path = f"../cis_policies_generator/input/{benchmark_name}" - - sheets = relevant_sheets[service] - full_data = pd.DataFrame() - for sheet_name in sheets: - print(f"Processing sheet '{sheet_name}'") - excel_file = pd.read_excel(data_path, sheet_name=sheet_name) + rules_data, sections = common.parse_rules_data_from_excel(benchmark_id) - # Select only the columns you want to include in the Markdown table - data = excel_file[selected_columns_map[service].keys()] - - # Update Table headers - data.columns = selected_columns_map[service].values() - - # Remove rows with empty values in the "Rule Number" column and convert to string - data = data[data["Rule Number"].notna()].astype(str) - - full_data = pd.concat([full_data, data]).drop_duplicates(subset="Rule Number").reset_index(drop=True) + # Rename "Title" column to "Description" + rules_data.rename(columns={'Title': 'Description'}, inplace=True) # Get list of all rules in sheet - all_rules = full_data["Rule Number"].to_list() + all_rules = rules_data["Rule Number"].to_list() # Get list of implemented rules - implemented_rules = get_implemented_rules(all_rules, service) + implemented_rules = get_implemented_rules(all_rules, benchmark_id) # Add implemented rules' column to the data for rule, status in implemented_rules.items(): - full_data.loc[full_data["Rule Number"] == rule, "Implemented"] = status + rules_data.loc[rules_data["Rule Number"] == rule, "Implemented"] = status new_order = ["Rule Number", "Section", "Description", "Implemented", "Type"] - full_data = full_data.reindex(columns=new_order) - full_data = full_data.sort_values("Rule Number") + rules_data = rules_data.reindex(columns=new_order) + rules_data = rules_data.sort_values("Rule Number") - full_data["Rule Number"] = full_data["Rule Number"].apply(get_rule_path, service=service, implemented_rules=implemented_rules) + rules_data["Rule Number"] = rules_data["Rule Number"].apply( + get_rule_path, + benchmark_id=benchmark_id, + implemented_rules=implemented_rules + ) # Convert DataFrame to Markdown table - table = full_data.to_markdown(index=False, tablefmt="github") + table = rules_data.to_markdown(index=False, tablefmt="github") # Add table title total_implemented = len([rule for rule, status in implemented_rules.items() if status == ":white_check_mark:"]) @@ -123,16 +75,16 @@ def generate_md_table(service): return table, description, status -def get_rule_path(rule, service, implemented_rules): +def get_rule_path(rule, benchmark_id, implemented_rules): """ Get rule path for specified rule and service. :param implemented_rules: ‘Implemented’ column values :param rule: Rule number - :param service: Service name (k8s, eks, aws) + :param benchmark_id: Benchmark ID :return: Rule path in the repository """ if implemented_rules[rule] == ":white_check_mark:": - return f"[{rule}](bundle/compliance/cis_{service}/rules/cis_{rule.replace('.', '_')})" + return f"[{rule}](bundle/compliance/{benchmark_id}/rules/cis_{rule.replace('.', '_')})" else: return rule @@ -152,13 +104,13 @@ def update_main_readme_status_badge(percentage, service): readme = f.readlines() if service == "k8s": - badge = f"[![CIS {service.upper()}]({badge_api}/CIS-Kubernetes%20({percentage:.1f}%25)-326CE5?" \ + badge = f"[![CIS {service.upper()}]({badge_api}/CIS-Kubernetes%20({percentage:.0f}%25)-326CE5?" 
\ f"logo=Kubernetes)](RULES.md#k8s-cis-benchmark)\n" elif service == "eks": - badge = f"[![CIS {service.upper()}]({badge_api}/CIS-Amazon%20EKS%20({percentage:.1f}%25)-FF9900?" \ + badge = f"[![CIS {service.upper()}]({badge_api}/CIS-Amazon%20EKS%20({percentage:.0f}%25)-FF9900?" \ f"logo=Amazon+EKS)](RULES.md#eks-cis-benchmark)\n" elif service == "aws": - badge = f"[![CIS {service.upper()}]({badge_api}/CIS-AWS%20({percentage:.1f}%25)-232F3E?l" \ + badge = f"[![CIS {service.upper()}]({badge_api}/CIS-AWS%20({percentage:.0f}%25)-232F3E?l" \ f"ogo=Amazon+AWS)](RULES.md#aws-cis-benchmark)\n" badge_line = get_badge_line_number(readme, service) @@ -180,13 +132,18 @@ def get_badge_line_number(readme, service): return i -with open("../RULES.md", "w") as f: - f.write(f"# Rules Status") - for service in benchmark.keys(): - print(f"Generating Markdown table for '{service}' service") - f.write(f"\n\n## {service.upper()} CIS Benchmark\n\n") - table, description, percentage = generate_md_table(service) - f.write(description) - f.write(table) - update_main_readme_status_badge(percentage * 100, service) - f.write("\n") +if __name__ == "__main__": + # Set working directory to the dev directory + os.chdir(common.repo_root.working_dir + "/dev") + + # Write Markdown table to file + with open("../RULES.md", "w") as f: + f.write(f"# Rules Status") + for benchmark_id in common.benchmark.keys(): + print(f"Generating Markdown table for '{benchmark_id}' service") + f.write(f"\n\n## {benchmark_id.removeprefix('cis_').upper()} CIS Benchmark\n\n") + table, description, percentage = generate_md_table(benchmark_id) + f.write(description) + f.write(table) + update_main_readme_status_badge(percentage * 100, benchmark_id.removeprefix('cis_')) + f.write("\n") diff --git a/poetry.lock b/poetry.lock index f2608811d8..253acc06f8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -107,6 +107,37 @@ category = "main" optional = false python-versions = "*" +[[package]] +name = "regex" +version = "2022.10.31" +description = "Alternative regular expression module, to replace re." 
+category = "main" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "ruamel.yaml" +version = "0.17.21" +description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" +category = "main" +optional = false +python-versions = ">=3" + +[package.dependencies] +"ruamel.yaml.clib" = {version = ">=0.2.6", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.11\""} + +[package.extras] +docs = ["ryd"] +jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"] + +[[package]] +name = "ruamel.yaml.clib" +version = "0.2.7" +description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" +category = "main" +optional = false +python-versions = ">=3.5" + [[package]] name = "six" version = "1.16.0" @@ -137,7 +168,7 @@ widechars = ["wcwidth"] [metadata] lock-version = "1.1" python-versions = "^3.10" -content-hash = "99d9871b9807d48b19cd4e112bc04d3ebca8c50e898e199f0b531636ed095cbd" +content-hash = "1fe272b4bc3e2a2daeef73561152645c834273dd400302c3fb5061882c125d7c" [metadata.files] et-xmlfile = [] @@ -153,6 +184,9 @@ python-dateutil = [ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, ] pytz = [] +regex = [] +"ruamel.yaml" = [] +"ruamel.yaml.clib" = [] six = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, diff --git a/pyproject.toml b/pyproject.toml index a8544bafcc..87f280aed1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,6 +12,8 @@ openpyxl = "^3.0.10" tabulate = "^0.9.0" numpy = "^1.23.5" Jinja2 = "^3.1.2" +regex = "^2022.10.31" +"ruamel.yaml" = "^0.17.21" [tool.poetry.dev-dependencies]
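The two dependencies added here back the generator's YAML handling: `regex` powers the numbered-list and paragraph fix-ups in `dev/common.py`, and `ruamel.yaml` is what lets long strings be emitted as the `|-` literal block scalars visible in the regenerated `data.yaml` files (via `PreservedScalarString`, see `apply_pss_recursively`). A small round-trip sketch of that behaviour; the sample metadata is made up, and the threshold simply mirrors `CODE_BLOCK_SIZE`.

```python
import sys

from ruamel.yaml import YAML
from ruamel.yaml.scalarstring import PreservedScalarString as pss

THRESHOLD = 100  # mirrors CODE_BLOCK_SIZE in dev/common.py

metadata = {
    "name": "Minimize the admission of root containers",
    "rationale": ("Containers may run as any Linux user. Ideally, all containers "
                  "should run as a defined non-UID 0 user in every namespace."),
}

# Wrap long strings so they are dumped as literal block scalars rather than
# folded or quoted scalars, matching the style of the regenerated data.yaml files.
wrapped = {
    key: pss(value) if isinstance(value, str) and len(value) > THRESHOLD else value
    for key, value in metadata.items()
}

YAML().dump({"metadata": wrapped}, sys.stdout)
```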